/* hecmw_partition.c */
/***************************************************************************** * Copyright (c) 2019 FrontISTR Commons * This software is released under the MIT License, see LICENSE.txt *****************************************************************************/ #define INAGAKI_PARTITIONER #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <errno.h> #include <math.h> #include "hecmw_util.h" #include "hecmw_common.h" #include "hecmw_io.h" #include "hecmw_part_define.h" #include "hecmw_part_struct.h" #include "hecmw_part_log.h" #include "hecmw_mesh_hash_sort.h" #include "hecmw_mesh_edge_info.h" #include "hecmw_part_get_control.h" #include "hecmw_partition.h" #include "hecmw_ucd_print.h" #include "hecmw_graph.h" #include "hecmw_common_define.h" #ifdef HECMW_PART_WITH_METIS #include "metis.h" #endif #ifdef _OPENMP #include <omp.h> #endif #define INTERNAL 1 #define EXTERNAL 2 #define BOUNDARY 4 #define OVERLAP 8 #define MASK 16 #define MARK 32 #define MY_DOMAIN 1 #define NEIGHBOR_DOMAIN 2 #define MPC_BLOCK 4 #define CANDIDATE 8 #define EPS (1.0E-12) #define F_1_2 (0.5) #define F_6_10 (0.6) #define QSORT_LOWER 50 #define MASK_BIT(map, bit) ((map) |= (bit)) #define EVAL_BIT(map, bit) ((map) & (bit)) #define INV_BIT(map, bit) ((map) ^= (bit)) #define CLEAR_BIT(map, bit) \ ((map) |= (bit)); \ ((map) ^= (bit)) #define CLEAR_IEB(map) \ ((map) |= (7)); \ ((map) ^= (7)) #define CLEAR_MM(map) \ ((map) |= (48)); \ ((map) ^= (48)) #define DSWAP(a, aa) \ atemp = (a); \ (a) = (aa); \ (aa) = atemp; #define ISWAP(b, bb) \ btemp = (b); \ (b) = (bb); \ (bb) = btemp; #define RTC_NORMAL 0 #define RTC_ERROR (-1) #define RTC_WARN 1 #define MAX_NODE_SIZE 20 struct link_unit { int id; struct link_unit *next; }; struct link_list { int n; struct link_unit *list; struct link_unit *last; }; /*===== internal/boundary node/element list of each domain =======*/ static int *n_int_nlist = NULL; static int *n_bnd_nlist = NULL; static int *n_int_elist = NULL; static 
int *n_bnd_elist = NULL; /* [2*d]: bnd elems internal to d, [2*d+1]: all bnd elems of d */
static int **int_nlist = NULL;  /* per-domain list of internal node IDs (1-based) */
static int **bnd_nlist = NULL;  /* per-domain list of boundary node IDs (1-based) */
static int **int_elist = NULL;  /* per-domain list of internal element IDs (1-based) */
static int **bnd_elist = NULL;  /* per-domain list of boundary element IDs (1-based) */
static int **ngrp_idx  = NULL;  /* per-domain node-group index (CSR-style offsets) */
static int **ngrp_item = NULL;  /* per-domain node-group items */
static int **egrp_idx  = NULL;  /* per-domain element-group index (CSR-style offsets) */
static int **egrp_item = NULL;  /* per-domain element-group items */

/*===== speed up (K. Inagaki )=======*/

/* Clear the MASK/MARK bits (CLEAR_MM) on the flags of every boundary node
 * and boundary element of <current_domain>, using the precomputed
 * bnd_nlist/bnd_elist instead of scanning the whole mesh.
 * node_flag/elem_flag are indexed by 0-based IDs; the lists hold 1-based IDs,
 * hence the "- 1". Always returns RTC_NORMAL. */
static int spdup_clear_MMbnd(char *node_flag, char *elem_flag,
                             int current_domain) {
  int i, node, elem;

  for (i = 0; i < n_bnd_nlist[2 * current_domain + 1]; i++) {
    node = bnd_nlist[current_domain][i];
    CLEAR_MM(node_flag[node - 1]);
  }
  for (i = 0; i < n_bnd_elist[2 * current_domain + 1]; i++) {
    elem = bnd_elist[current_domain][i];
    CLEAR_MM(elem_flag[elem - 1]);
  }

  return RTC_NORMAL;
}

/* Clear the INTERNAL/EXTERNAL/BOUNDARY bits (CLEAR_IEB) on the flags of every
 * internal and boundary node/element of <current_domain>, again via the
 * precomputed per-domain lists. Always returns RTC_NORMAL. */
static int spdup_clear_IEB(char *node_flag, char *elem_flag,
                           int current_domain) {
  int i, node, elem;

  for (i = 0; i < n_int_nlist[current_domain]; i++) {
    node = int_nlist[current_domain][i];
    CLEAR_IEB(node_flag[node - 1]);
  }
  for (i = 0; i < n_bnd_nlist[2 * current_domain + 1]; i++) {
    node = bnd_nlist[current_domain][i];
    CLEAR_IEB(node_flag[node - 1]);
  }
  for (i = 0; i < n_int_elist[current_domain]; i++) {
    elem = int_elist[current_domain][i];
    CLEAR_IEB(elem_flag[elem - 1]);
  }
  for (i = 0; i < n_bnd_elist[2 * current_domain + 1]; i++) {
    elem = bnd_elist[current_domain][i];
    CLEAR_IEB(elem_flag[elem - 1]);
  }

  return RTC_NORMAL;
}

/* Allocate and size the file-scope work lists (n_int_nlist .. bnd_elist) by
 * counting, per subdomain: internal nodes, internal elements, and an upper
 * bound on boundary nodes/elements. An element is "boundary" when its nodes
 * belong to more than one domain. Returns RTC_NORMAL / RTC_ERROR.
 * NOTE(review): during this counting phase n_bnd_nlist/n_bnd_elist are
 * indexed by plain domain number; later phases re-use them as [2*d]/[2*d+1]
 * pairs — the arrays are sized 2*n_subdomain for that reason. */
static int spdup_init_list(const struct hecmwST_local_mesh *global_mesh) {
  int i, j, k;
  int js, je;
  /* domain[] collects the distinct owner domains of one element's nodes;
   * 20 matches MAX_NODE_SIZE (max nodes per element) — TODO confirm bound */
  int node, n_domain, domain[20], flag;

  /*init lists for count (calloc) */
  n_int_nlist = (int *)HECMW_calloc(global_mesh->n_subdomain, sizeof(int));
  if (n_int_nlist == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }
  n_bnd_nlist = (int *)HECMW_calloc(2 * global_mesh->n_subdomain, sizeof(int));
  if (n_bnd_nlist == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }
  n_int_elist = (int *)HECMW_calloc(global_mesh->n_subdomain, sizeof(int));
  if (n_int_elist == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }
  n_bnd_elist = (int *)HECMW_calloc(2 * global_mesh->n_subdomain, sizeof(int));
  if (n_bnd_elist == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }
  int_nlist = (int **)HECMW_malloc(global_mesh->n_subdomain * sizeof(int *));
  if (int_nlist == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }
  bnd_nlist = (int **)HECMW_malloc(global_mesh->n_subdomain * sizeof(int *));
  if (bnd_nlist == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }
  int_elist = (int **)HECMW_malloc(global_mesh->n_subdomain * sizeof(int *));
  if (int_elist == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }
  bnd_elist = (int **)HECMW_malloc(global_mesh->n_subdomain * sizeof(int *));
  if (bnd_elist == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  /* count internal node */
  /* node_ID[2*i+1] is the owner domain of 0-based node i */
  for (i = 0; i < global_mesh->n_node; i++) {
    n_int_nlist[global_mesh->node_ID[2 * i + 1]]++;
  }

  /*count internal elem */
  for (i = 0; i < global_mesh->n_elem; i++) {
    n_int_elist[global_mesh->elem_ID[2 * i + 1]]++;
  }

  /*count boundary node and elem */
  for (i = 0; i < global_mesh->n_elem; i++) {
    js = global_mesh->elem_node_index[i];
    je = global_mesh->elem_node_index[i + 1];
    /* elem_node_item holds 1-based node IDs, so the owner domain of node
     * <node> is node_ID[2*(node-1)+1] == node_ID[2*node - 1] */
    node      = global_mesh->elem_node_item[js];
    n_domain  = 1;
    domain[0] = global_mesh->node_ID[2 * node - 1];
    for (j = js + 1; j < je; j++) {
      node = global_mesh->elem_node_item[j];
      for (flag = 0, k = 0; k < n_domain; k++) {
        if (global_mesh->node_ID[2 * node - 1] == domain[k]) {
          flag++;
          break;
        }
      }
      if (flag == 0) {
        domain[n_domain] = global_mesh->node_ID[2 * node - 1];
        n_domain++;
      }
    }
    /* element spanning >1 domain: count it (and all its nodes, duplicates
     * included — deduplicated later by get_boundary_nodelist) as boundary
     * for every domain it touches */
    if (n_domain > 1) {
      for (j = 0; j < n_domain; j++) {
        n_bnd_elist[domain[j]]++;
        n_bnd_nlist[domain[j]] += je - js;
      }
    }
  }

  /*allocate node/element list of each domain */
  for (i = 0; i < global_mesh->n_subdomain; i++) {
    int_nlist[i] = (int *)HECMW_calloc(n_int_nlist[i], sizeof(int));
    if (int_nlist[i] == NULL) {
      HECMW_set_error(errno, "");
      goto error;
    }
    bnd_nlist[i] = (int *)HECMW_calloc(n_bnd_nlist[i], sizeof(int));
    if (bnd_nlist[i] == NULL) {
      HECMW_set_error(errno, "");
      goto error;
    }
    int_elist[i] = (int *)HECMW_calloc(n_int_elist[i], sizeof(int));
    if (int_elist[i]
== NULL) { HECMW_set_error(errno, ""); goto error; } bnd_elist[i] = (int *)HECMW_calloc(n_bnd_elist[i], sizeof(int)); if (bnd_elist[i] == NULL) { HECMW_set_error(errno, ""); goto error; } } return RTC_NORMAL; error: return RTC_ERROR; } static int int_cmp(const void *v1, const void *v2) { const int *i1, *i2; i1 = (const int *)v1; i2 = (const int *)v2; if (*i1 < *i2) return -1; if (*i1 > *i2) return 1; return 0; } static int get_boundary_nodelist(const struct hecmwST_local_mesh *global_mesh, int domain) { int i, j, k; int ks, ke, node, elem, counter; for (counter = 0, j = 0; j < n_bnd_elist[2 * domain + 1]; j++) { elem = bnd_elist[domain][j]; ks = global_mesh->elem_node_index[elem - 1]; ke = global_mesh->elem_node_index[elem]; for (k = ks; k < ke; k++) { node = global_mesh->elem_node_item[k]; bnd_nlist[domain][counter] = node; counter++; } } qsort(bnd_nlist[domain], counter, sizeof(int), int_cmp); i = 1; for (j = 1; j < counter; j++) { if (bnd_nlist[domain][j - 1] != bnd_nlist[domain][j]) { bnd_nlist[domain][i] = bnd_nlist[domain][j]; i++; } } n_bnd_nlist[2 * domain + 1] = i; return RTC_NORMAL; } static int sort_and_resize_bndlist(const struct hecmwST_local_mesh *global_mesh, int domain) { int i, node, elem; int *work = NULL; int bnd_and_int, bnd_not_int; int n_nlist, n_elist; /*boundary node list */ n_nlist = n_bnd_nlist[2 * domain + 1]; work = (int *)HECMW_malloc(n_nlist * sizeof(int)); if (work == NULL) { HECMW_set_error(errno, ""); goto error; } /*sort */ bnd_and_int = 0; bnd_not_int = 0; for (i = 0; i < n_nlist; i++) { node = bnd_nlist[domain][i]; if (global_mesh->node_ID[2 * node - 1] == domain) { work[bnd_and_int] = node; bnd_and_int++; } } for (i = 0; i < n_nlist; i++) { node = bnd_nlist[domain][i]; if (global_mesh->node_ID[2 * node - 1] != domain) { work[bnd_and_int + bnd_not_int] = node; bnd_not_int++; } } n_bnd_nlist[2 * domain] = bnd_and_int; n_bnd_nlist[2 * domain + 1] = bnd_and_int + bnd_not_int; HECMW_assert(n_nlist == n_bnd_nlist[2 * domain + 1]); 
/*resize */ HECMW_free(bnd_nlist[domain]); bnd_nlist[domain] = (int *)HECMW_calloc(n_nlist, sizeof(int)); if (bnd_nlist[domain] == NULL) { HECMW_set_error(errno, ""); goto error; } for (i = 0; i < n_nlist; i++) { bnd_nlist[domain][i] = work[i]; } HECMW_free(work); /*boundary element list */ n_elist = n_bnd_elist[2 * domain + 1]; work = (int *)HECMW_malloc(n_elist * sizeof(int)); if (work == NULL) { HECMW_set_error(errno, ""); goto error; } /*sort */ bnd_and_int = 0; bnd_not_int = 0; for (i = 0; i < n_elist; i++) { elem = bnd_elist[domain][i]; if (global_mesh->elem_ID[2 * elem - 1] == domain) { work[bnd_and_int] = elem; bnd_and_int++; } } for (i = 0; i < n_elist; i++) { elem = bnd_elist[domain][i]; if (global_mesh->elem_ID[2 * elem - 1] != domain) { work[bnd_and_int + bnd_not_int] = elem; bnd_not_int++; } } n_bnd_elist[2 * domain] = bnd_and_int; n_bnd_elist[2 * domain + 1] = bnd_and_int + bnd_not_int; for (i = 0; i < n_elist; i++) { bnd_elist[domain][i] = work[i]; } HECMW_free(work); HECMW_assert(n_elist == n_bnd_elist[2 * domain + 1]); return RTC_NORMAL; error: return RTC_ERROR; } static int spdup_make_list(const struct hecmwST_local_mesh *global_mesh) { int i, j, k; int js, je, ks, ke; int node, elem, n_domain, domain[20], flag; int current_domain; int rtc; /*clear counters */ for (i = 0; i < global_mesh->n_subdomain; i++) { n_int_nlist[i] = 0; n_bnd_nlist[2 * i] = 0; n_bnd_nlist[2 * i + 1] = 0; n_int_elist[i] = 0; n_bnd_elist[2 * i] = 0; n_bnd_elist[2 * i + 1] = 0; } /* internal nodelist for each domain */ for (i = 0; i < global_mesh->n_node; i++) { current_domain = global_mesh->node_ID[2 * i + 1]; int_nlist[current_domain][n_int_nlist[current_domain]] = i + 1; n_int_nlist[current_domain]++; } /* internal elemlist for each domain */ for (i = 0; i < global_mesh->n_elem; i++) { current_domain = global_mesh->elem_ID[2 * i + 1]; int_elist[current_domain][n_int_elist[current_domain]] = i + 1; n_int_elist[current_domain]++; } /* boundary elemlist for each domain */ for 
(i = 0; i < global_mesh->n_elem; i++) { js = global_mesh->elem_node_index[i]; je = global_mesh->elem_node_index[i + 1]; node = global_mesh->elem_node_item[js]; n_domain = 1; domain[0] = global_mesh->node_ID[2 * node - 1]; for (j = js + 1; j < je; j++) { node = global_mesh->elem_node_item[j]; for (flag = 0, k = 0; k < n_domain; k++) { if (global_mesh->node_ID[2 * node - 1] == domain[k]) { flag++; break; } } if (flag == 0) { domain[n_domain] = global_mesh->node_ID[2 * node - 1]; n_domain++; } } if (n_domain > 1) { for (j = 0; j < n_domain; j++) { bnd_elist[domain[j]][n_bnd_elist[2 * domain[j] + 1]] = i + 1; n_bnd_elist[2 * domain[j] + 1]++; } } } /* boundary nodelist for each domain */ for (i = 0; i < global_mesh->n_subdomain; i++) { rtc = get_boundary_nodelist(global_mesh, i); if (rtc != RTC_NORMAL) goto error; } for (i = 0; i < global_mesh->n_subdomain; i++) { rtc = sort_and_resize_bndlist(global_mesh, i); if (rtc != RTC_NORMAL) goto error; } return RTC_NORMAL; error: return RTC_ERROR; } static int spdup_make_node_grouplist( const struct hecmwST_local_mesh *global_mesh) { struct hecmwST_node_grp *node_group_global = global_mesh->node_group; int i, j, k, node, n_bnd, n_out; int *n_domain = NULL; int **domain = NULL; int current_domain; int counter[global_mesh->n_subdomain]; /*make list of node to domain(both internal and boundary) */ n_domain = (int *)HECMW_calloc(global_mesh->n_node, sizeof(int)); if (n_domain == NULL) { HECMW_set_error(errno, ""); goto error; } /*count outer node(boundary and not internal) */ for (i = 0; i < global_mesh->n_subdomain; i++) { n_bnd = n_bnd_nlist[2 * i]; n_out = n_bnd_nlist[2 * i + 1] - n_bnd_nlist[2 * i]; if (n_out == 0) continue; for (j = 0; j < n_out; j++) { node = bnd_nlist[i][n_bnd + j]; n_domain[node - 1]++; } } /*make list */ domain = (int **)HECMW_malloc(global_mesh->n_node * sizeof(int *)); if (domain == NULL) { HECMW_set_error(errno, ""); goto error; } for (i = 0; i < global_mesh->n_node; i++) { domain[i] = (int 
*)HECMW_malloc((n_domain[i] + 1) * sizeof(int)); /*+1 means internal node */ if (domain[i] == NULL) { HECMW_set_error(errno, ""); goto error; } domain[i][0] = global_mesh->node_ID[2 * i + 1]; n_domain[i] = 1; } for (i = 0; i < global_mesh->n_subdomain; i++) { n_bnd = n_bnd_nlist[2 * i]; n_out = n_bnd_nlist[2 * i + 1] - n_bnd_nlist[2 * i]; if (n_out == 0) continue; for (j = 0; j < n_out; j++) { node = bnd_nlist[i][n_bnd + j]; domain[node - 1][n_domain[node - 1]] = i; n_domain[node - 1]++; } } /*make ngroup index list */ ngrp_idx = (int **)HECMW_malloc(global_mesh->n_subdomain * sizeof(int *)); if (ngrp_idx == NULL) { HECMW_set_error(errno, ""); goto error; } for (i = 0; i < global_mesh->n_subdomain; i++) { ngrp_idx[i] = (int *)HECMW_calloc((node_group_global->n_grp + 1), sizeof(int)); if (ngrp_idx[i] == NULL) { HECMW_set_error(errno, ""); goto error; } } for (i = 0; i < node_group_global->n_grp; i++) { /*skip group "ALL" */ for (j = 0; j < global_mesh->n_subdomain; j++) { ngrp_idx[j][i + 1] = ngrp_idx[j][i]; } if (node_group_global->grp_index[i + 1] - node_group_global->grp_index[i] == global_mesh->n_node) { continue; } for (j = node_group_global->grp_index[i]; j < node_group_global->grp_index[i + 1]; j++) { node = node_group_global->grp_item[j]; for (k = 0; k < n_domain[node - 1]; k++) { current_domain = domain[node - 1][k]; ngrp_idx[current_domain][i + 1]++; } } } /*make ngroup item list */ ngrp_item = (int **)HECMW_malloc(global_mesh->n_subdomain * sizeof(int *)); if (ngrp_item == NULL) { HECMW_set_error(errno, ""); goto error; } for (i = 0; i < global_mesh->n_subdomain; i++) { ngrp_item[i] = (int *)HECMW_malloc(ngrp_idx[i][node_group_global->n_grp] * sizeof(int)); if (ngrp_item[i] == NULL) { HECMW_set_error(errno, ""); goto error; } counter[i] = 0; } for (i = 0; i < node_group_global->n_grp; i++) { /*skip group "ALL" */ if (node_group_global->grp_index[i + 1] - node_group_global->grp_index[i] == global_mesh->n_node) { continue; } for (j = 
node_group_global->grp_index[i]; j < node_group_global->grp_index[i + 1]; j++) { node = node_group_global->grp_item[j]; for (k = 0; k < n_domain[node - 1]; k++) { current_domain = domain[node - 1][k]; ngrp_item[current_domain][counter[current_domain]] = node; counter[current_domain]++; } } } for (i = 0; i < global_mesh->n_node; i++) { HECMW_free(domain[i]); } HECMW_free(n_domain); HECMW_free(domain); return RTC_NORMAL; error: return RTC_ERROR; } static int spdup_make_element_grouplist( const struct hecmwST_local_mesh *global_mesh) { struct hecmwST_elem_grp *elem_group_global = global_mesh->elem_group; int i, j, k, elem, n_bnd, n_out; int *n_domain = NULL; int **domain = NULL; int current_domain; int counter[global_mesh->n_subdomain]; /*make list of elem to domain(both internal and boundary) */ n_domain = (int *)HECMW_calloc(global_mesh->n_elem, sizeof(int)); if (n_domain == NULL) { HECMW_set_error(errno, ""); goto error; } /*count outer elem(boundary and not internal) */ for (i = 0; i < global_mesh->n_subdomain; i++) { n_bnd = n_bnd_elist[2 * i]; n_out = n_bnd_elist[2 * i + 1] - n_bnd_elist[2 * i]; if (n_out == 0) continue; for (j = 0; j < n_out; j++) { elem = bnd_elist[i][n_bnd + j]; n_domain[elem - 1]++; } } /*make list */ domain = (int **)HECMW_malloc(global_mesh->n_elem * sizeof(int *)); if (domain == NULL) { HECMW_set_error(errno, ""); goto error; } for (i = 0; i < global_mesh->n_elem; i++) { domain[i] = (int *)HECMW_malloc((n_domain[i] + 1) * sizeof(int)); /*+1 means internal elem */ if (domain[i] == NULL) { HECMW_set_error(errno, ""); goto error; } domain[i][0] = global_mesh->elem_ID[2 * i + 1]; n_domain[i] = 1; } for (i = 0; i < global_mesh->n_subdomain; i++) { n_bnd = n_bnd_elist[2 * i]; n_out = n_bnd_elist[2 * i + 1] - n_bnd_elist[2 * i]; if (n_out == 0) continue; for (j = 0; j < n_out; j++) { elem = bnd_elist[i][n_bnd + j]; domain[elem - 1][n_domain[elem - 1]] = i; n_domain[elem - 1]++; } } /*make egroup index list */ egrp_idx = (int 
**)HECMW_malloc(global_mesh->n_subdomain * sizeof(int *)); if (egrp_idx == NULL) { HECMW_set_error(errno, ""); goto error; } for (i = 0; i < global_mesh->n_subdomain; i++) { egrp_idx[i] = (int *)HECMW_calloc((elem_group_global->n_grp + 1), sizeof(int)); if (egrp_idx[i] == NULL) { HECMW_set_error(errno, ""); goto error; } } for (i = 0; i < elem_group_global->n_grp; i++) { /*skip group "ALL" */ for (j = 0; j < global_mesh->n_subdomain; j++) { egrp_idx[j][i + 1] = egrp_idx[j][i]; } if (elem_group_global->grp_index[i + 1] - elem_group_global->grp_index[i] == global_mesh->n_elem) { continue; } for (j = elem_group_global->grp_index[i]; j < elem_group_global->grp_index[i + 1]; j++) { elem = elem_group_global->grp_item[j]; for (k = 0; k < n_domain[elem - 1]; k++) { current_domain = domain[elem - 1][k]; egrp_idx[current_domain][i + 1]++; } } } /*make egroup item list */ egrp_item = (int **)HECMW_malloc(global_mesh->n_subdomain * sizeof(int *)); if (egrp_item == NULL) { HECMW_set_error(errno, ""); goto error; } for (i = 0; i < global_mesh->n_subdomain; i++) { egrp_item[i] = (int *)HECMW_malloc(egrp_idx[i][elem_group_global->n_grp] * sizeof(int)); if (egrp_item[i] == NULL) { HECMW_set_error(errno, ""); goto error; } counter[i] = 0; } for (i = 0; i < elem_group_global->n_grp; i++) { /*skip group "ALL" */ if (elem_group_global->grp_index[i + 1] - elem_group_global->grp_index[i] == global_mesh->n_elem) { continue; } for (j = elem_group_global->grp_index[i]; j < elem_group_global->grp_index[i + 1]; j++) { elem = elem_group_global->grp_item[j]; for (k = 0; k < n_domain[elem - 1]; k++) { current_domain = domain[elem - 1][k]; egrp_item[current_domain][counter[current_domain]] = elem; counter[current_domain]++; } } } for (i = 0; i < global_mesh->n_elem; i++) { HECMW_free(domain[i]); } HECMW_free(n_domain); HECMW_free(domain); return RTC_NORMAL; error: return RTC_ERROR; } static int spdup_makelist_main(const struct hecmwST_local_mesh *global_mesh) { int rtc; rtc = 
spdup_init_list(global_mesh); if (rtc != RTC_NORMAL) goto error; rtc = spdup_make_list(global_mesh); if (rtc != RTC_NORMAL) goto error; rtc = spdup_make_node_grouplist(global_mesh); if (rtc != RTC_NORMAL) goto error; rtc = spdup_make_element_grouplist(global_mesh); if (rtc != RTC_NORMAL) goto error; return RTC_NORMAL; error: return RTC_ERROR; } static void spdup_freelist(const struct hecmwST_local_mesh *global_mesh) { int i; HECMW_free(n_int_nlist); HECMW_free(n_bnd_nlist); HECMW_free(n_int_elist); HECMW_free(n_bnd_elist); for (i = 0; i < global_mesh->n_subdomain; i++) { HECMW_free(int_nlist[i]); HECMW_free(bnd_nlist[i]); HECMW_free(int_elist[i]); HECMW_free(bnd_elist[i]); HECMW_free(ngrp_idx[i]); HECMW_free(ngrp_item[i]); HECMW_free(egrp_idx[i]); HECMW_free(egrp_item[i]); } HECMW_free(int_nlist); HECMW_free(bnd_nlist); HECMW_free(int_elist); HECMW_free(bnd_elist); HECMW_free(ngrp_idx); HECMW_free(ngrp_item); HECMW_free(egrp_idx); HECMW_free(egrp_item); } static int is_spdup_available(const struct hecmwST_local_mesh *global_mesh) { return global_mesh->hecmw_flag_parttype == HECMW_FLAG_PARTTYPE_NODEBASED && global_mesh->hecmw_flag_partdepth == 1 && global_mesh->mpc->n_mpc == 0 && global_mesh->contact_pair->n_pair == 0; } /*================================================================================================*/ static char *get_dist_file_name(char *header, int domain, char *fname) { char s_domain[HECMW_NAME_LEN + 1]; sprintf(s_domain, "%d", domain); strcpy(fname, header); strcat(fname, "."); strcat(fname, s_domain); return fname; } static void free_link_list(struct link_unit *llist) { struct link_unit *p, *q; for (p = llist; p; p = q) { q = p->next; HECMW_free(p); } llist = NULL; } /*================================================================================================*/ static int init_struct_global(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh\' is NULL"); goto error; } 
memset(local_mesh->gridfile, 0, HECMW_NAME_LEN + 1); local_mesh->hecmw_n_file = 0; local_mesh->files = NULL; memset(local_mesh->header, 0, HECMW_HEADER_LEN + 1); local_mesh->hecmw_flag_adapt = 0; local_mesh->hecmw_flag_initcon = 0; local_mesh->hecmw_flag_parttype = 0; local_mesh->hecmw_flag_partdepth = 0; local_mesh->hecmw_flag_version = 0; local_mesh->hecmw_flag_partcontact = 0; local_mesh->zero_temp = 0.0; return RTC_NORMAL; error: return RTC_ERROR; } static int init_struct_node(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh\' is NULL"); goto error; } local_mesh->n_node = 0; local_mesh->n_node_gross = 0; local_mesh->nn_internal = 0; local_mesh->node_internal_list = NULL; local_mesh->node = NULL; local_mesh->node_ID = NULL; local_mesh->global_node_ID = NULL; local_mesh->n_dof = 0; local_mesh->n_dof_grp = 0; local_mesh->node_dof_index = NULL; local_mesh->node_dof_item = NULL; local_mesh->node_val_index = NULL; local_mesh->node_val_item = NULL; local_mesh->node_init_val_index = NULL; local_mesh->node_init_val_item = NULL; return RTC_NORMAL; error: return RTC_ERROR; } static int init_struct_elem(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh\' is NULL"); goto error; } local_mesh->n_elem = 0; local_mesh->n_elem_gross = 0; local_mesh->ne_internal = 0; local_mesh->elem_internal_list = NULL; local_mesh->elem_ID = NULL; local_mesh->global_elem_ID = NULL; local_mesh->n_elem_type = 0; local_mesh->elem_type = NULL; local_mesh->elem_type_index = NULL; local_mesh->elem_type_item = NULL; local_mesh->elem_node_index = NULL; local_mesh->elem_node_item = NULL; local_mesh->section_ID = NULL; local_mesh->n_elem_mat_ID = 0; local_mesh->elem_mat_ID_index = NULL; local_mesh->elem_mat_ID_item = NULL; local_mesh->elem_mat_int_index = NULL; local_mesh->elem_mat_int_val = NULL; local_mesh->elem_val_index = NULL; local_mesh->elem_val_item = NULL; 
return RTC_NORMAL; error: return RTC_ERROR; } static int init_struct_comm(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh\' is NULL"); goto error; } local_mesh->zero = 0; local_mesh->PETOT = 0; local_mesh->PEsmpTOT = 0; local_mesh->my_rank = 0; local_mesh->errnof = 0; local_mesh->n_subdomain = 0; local_mesh->n_neighbor_pe = 0; local_mesh->neighbor_pe = NULL; local_mesh->import_index = NULL; local_mesh->import_item = NULL; local_mesh->export_index = NULL; local_mesh->export_item = NULL; local_mesh->shared_index = NULL; local_mesh->shared_item = NULL; return RTC_NORMAL; error: return RTC_ERROR; } static int init_struct_adapt(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh\' is NULL"); goto error; } local_mesh->coarse_grid_level = 0; local_mesh->n_adapt = 0; local_mesh->when_i_was_refined_node = NULL; local_mesh->when_i_was_refined_elem = NULL; local_mesh->adapt_parent_type = NULL; local_mesh->adapt_type = NULL; local_mesh->adapt_level = NULL; local_mesh->adapt_parent = NULL; local_mesh->adapt_children_index = NULL; local_mesh->adapt_children_item = NULL; return RTC_NORMAL; error: return RTC_ERROR; } static int init_struct_sect(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh\' is NULL"); goto error; } if (local_mesh->section == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh->section\' is NULL"); goto error; } local_mesh->section->n_sect = 0; local_mesh->section->sect_type = NULL; local_mesh->section->sect_opt = NULL; local_mesh->section->sect_mat_ID_index = NULL; local_mesh->section->sect_mat_ID_item = NULL; local_mesh->section->sect_I_index = NULL; local_mesh->section->sect_I_item = NULL; local_mesh->section->sect_R_index = NULL; local_mesh->section->sect_R_item = NULL; return RTC_NORMAL; error: return RTC_ERROR; } static int 
init_struct_mat(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh\' is NULL"); goto error; } if (local_mesh->material == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh->material\' is NULL"); goto error; } local_mesh->material->n_mat = 0; local_mesh->material->n_mat_item = 0; local_mesh->material->n_mat_subitem = 0; local_mesh->material->n_mat_table = 0; local_mesh->material->mat_name = NULL; local_mesh->material->mat_item_index = NULL; local_mesh->material->mat_subitem_index = NULL; local_mesh->material->mat_table_index = NULL; local_mesh->material->mat_val = NULL; local_mesh->material->mat_temp = NULL; return RTC_NORMAL; error: return RTC_ERROR; } static int init_struct_mpc(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh\' is NULL"); return -1; } if (local_mesh->mpc == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh->mpc\' is NULL"); goto error; } local_mesh->mpc->n_mpc = 0; local_mesh->mpc->mpc_index = NULL; local_mesh->mpc->mpc_item = NULL; local_mesh->mpc->mpc_dof = NULL; local_mesh->mpc->mpc_val = NULL; local_mesh->mpc->mpc_const = NULL; return RTC_NORMAL; error: return RTC_ERROR; } static int init_struct_amp(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh\' is NULL"); goto error; } if (local_mesh->amp == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh->amp\' is NULL"); goto error; } local_mesh->amp->n_amp = 0; local_mesh->amp->amp_name = NULL; local_mesh->amp->amp_type_definition = NULL; local_mesh->amp->amp_type_time = NULL; local_mesh->amp->amp_type_value = NULL; local_mesh->amp->amp_index = NULL; local_mesh->amp->amp_val = NULL; local_mesh->amp->amp_table = NULL; return RTC_NORMAL; error: return RTC_ERROR; } static int init_struct_node_grp(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) { 
HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh\' is NULL"); goto error; } if (local_mesh->node_group == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh->node_group\' is NULL"); goto error; } local_mesh->node_group->n_grp = 0; local_mesh->node_group->grp_name = NULL; local_mesh->node_group->grp_index = NULL; local_mesh->node_group->grp_item = NULL; local_mesh->node_group->n_bc = 0; local_mesh->node_group->bc_grp_ID = 0; local_mesh->node_group->bc_grp_type = 0; local_mesh->node_group->bc_grp_index = 0; local_mesh->node_group->bc_grp_dof = 0; local_mesh->node_group->bc_grp_val = 0; return RTC_NORMAL; error: return RTC_ERROR; } static int init_struct_elem_grp(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh\' is NULL"); goto error; } if (local_mesh->elem_group == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh->elem_group\' is NULL"); goto error; } local_mesh->elem_group->n_grp = 0; local_mesh->elem_group->grp_name = NULL; local_mesh->elem_group->grp_index = NULL; local_mesh->elem_group->grp_item = NULL; local_mesh->elem_group->n_bc = 0; local_mesh->elem_group->bc_grp_ID = NULL; local_mesh->elem_group->bc_grp_type = NULL; local_mesh->elem_group->bc_grp_index = NULL; local_mesh->elem_group->bc_grp_val = NULL; return RTC_NORMAL; error: return RTC_ERROR; } static int init_struct_surf_grp(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh\' is NULL"); goto error; } if (local_mesh->surf_group == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh->surf_group\' is NULL"); goto error; } local_mesh->surf_group->n_grp = 0; local_mesh->surf_group->grp_name = NULL; local_mesh->surf_group->grp_index = NULL; local_mesh->surf_group->grp_item = NULL; local_mesh->surf_group->n_bc = 0; local_mesh->surf_group->bc_grp_ID = NULL; local_mesh->surf_group->bc_grp_type = NULL; local_mesh->surf_group->bc_grp_index = NULL; 
local_mesh->surf_group->bc_grp_val = NULL; return RTC_NORMAL; error: return RTC_ERROR; } static int init_struct_contact_pair(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh\' is NULL"); goto error; } if (local_mesh->contact_pair == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'local_mesh->contact_pair\' is NULL"); goto error; } local_mesh->contact_pair->n_pair = 0; local_mesh->contact_pair->name = NULL; local_mesh->contact_pair->type = NULL; local_mesh->contact_pair->slave_grp_id = NULL; local_mesh->contact_pair->master_grp_id = NULL; return RTC_NORMAL; error: return RTC_ERROR; } /*================================================================================================*/ static void clean_struct_global(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) return; init_struct_global(local_mesh); } static void clean_struct_node(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) return; if (local_mesh->node_internal_list) { HECMW_free(local_mesh->node_internal_list); } if (local_mesh->node) { HECMW_free(local_mesh->node); } if (local_mesh->node_ID) { HECMW_free(local_mesh->node_ID); } if (local_mesh->global_node_ID) { HECMW_free(local_mesh->global_node_ID); } if (local_mesh->node_dof_index) { HECMW_free(local_mesh->node_dof_index); } if (local_mesh->node_init_val_index) { HECMW_free(local_mesh->node_init_val_index); } if (local_mesh->node_init_val_item) { HECMW_free(local_mesh->node_init_val_item); } init_struct_node(local_mesh); } static void clean_struct_elem(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) return; if (local_mesh->elem_internal_list) { HECMW_free(local_mesh->elem_internal_list); } if (local_mesh->elem_ID) { HECMW_free(local_mesh->elem_ID); } if (local_mesh->global_elem_ID) { HECMW_free(local_mesh->global_elem_ID); } if (local_mesh->elem_type) { HECMW_free(local_mesh->elem_type); } if (local_mesh->elem_type_index) { 
HECMW_free(local_mesh->elem_type_index); } if (local_mesh->elem_node_index) { HECMW_free(local_mesh->elem_node_index); } if (local_mesh->elem_node_item) { HECMW_free(local_mesh->elem_node_item); } if (local_mesh->section_ID) { HECMW_free(local_mesh->section_ID); } if (local_mesh->elem_mat_ID_index) { HECMW_free(local_mesh->elem_mat_ID_index); } if (local_mesh->elem_mat_ID_item) { HECMW_free(local_mesh->elem_mat_ID_item); } init_struct_elem(local_mesh); } static void clean_struct_comm(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) return; if (local_mesh->neighbor_pe) { HECMW_free(local_mesh->neighbor_pe); } if (local_mesh->import_index) { HECMW_free(local_mesh->import_index); } if (local_mesh->import_item) { HECMW_free(local_mesh->import_item); } if (local_mesh->export_index) { HECMW_free(local_mesh->export_index); } if (local_mesh->export_item) { HECMW_free(local_mesh->export_item); } if (local_mesh->shared_index) { HECMW_free(local_mesh->shared_index); } if (local_mesh->shared_item) { HECMW_free(local_mesh->shared_item); } init_struct_comm(local_mesh); } static void clean_struct_adapt(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) return; init_struct_adapt(local_mesh); } static void clean_struct_sect(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) return; if (local_mesh->section == NULL) return; init_struct_sect(local_mesh); } static void clean_struct_mat(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) return; if (local_mesh->material == NULL) return; init_struct_mat(local_mesh); } static void clean_struct_mpc(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) return; if (local_mesh->mpc == NULL) return; HECMW_free(local_mesh->mpc->mpc_index); HECMW_free(local_mesh->mpc->mpc_item); HECMW_free(local_mesh->mpc->mpc_dof); HECMW_free(local_mesh->mpc->mpc_val); HECMW_free(local_mesh->mpc->mpc_const); init_struct_mpc(local_mesh); } static void clean_struct_amp(struct hecmwST_local_mesh 
*local_mesh) { if (local_mesh == NULL) return; if (local_mesh->amp == NULL) return; init_struct_amp(local_mesh); } static void clean_struct_node_grp(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) return; if (local_mesh->node_group == NULL) return; if (local_mesh->node_group->grp_index) { HECMW_free(local_mesh->node_group->grp_index); } if (local_mesh->node_group->grp_item) { HECMW_free(local_mesh->node_group->grp_item); } init_struct_node_grp(local_mesh); } static void clean_struct_elem_grp(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) return; if (local_mesh->elem_group == NULL) return; if (local_mesh->elem_group->grp_index) { HECMW_free(local_mesh->elem_group->grp_index); } if (local_mesh->elem_group->grp_item) { HECMW_free(local_mesh->elem_group->grp_item); } init_struct_elem_grp(local_mesh); } static void clean_struct_surf_grp(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) return; if (local_mesh->surf_group == NULL) return; if (local_mesh->surf_group->grp_index) { HECMW_free(local_mesh->surf_group->grp_index); } if (local_mesh->surf_group->grp_item) { HECMW_free(local_mesh->surf_group->grp_item); } init_struct_surf_grp(local_mesh); } static void clean_struct_contact_pair(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) return; if (local_mesh->contact_pair == NULL) return; if (local_mesh->contact_pair->type) { HECMW_free(local_mesh->contact_pair->type); } if (local_mesh->contact_pair->slave_grp_id) { HECMW_free(local_mesh->contact_pair->slave_grp_id); } if (local_mesh->contact_pair->master_grp_id) { HECMW_free(local_mesh->contact_pair->master_grp_id); } init_struct_contact_pair(local_mesh); } static void clean_struct_local_mesh(struct hecmwST_local_mesh *local_mesh) { if (local_mesh == NULL) return; clean_struct_global(local_mesh); clean_struct_node(local_mesh); clean_struct_elem(local_mesh); clean_struct_comm(local_mesh); clean_struct_adapt(local_mesh); clean_struct_sect(local_mesh); 
clean_struct_mat(local_mesh); clean_struct_mpc(local_mesh); clean_struct_amp(local_mesh); clean_struct_node_grp(local_mesh); clean_struct_elem_grp(local_mesh); clean_struct_surf_grp(local_mesh); clean_struct_contact_pair(local_mesh); } /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - * - - - - - - - - - */ static int init_struct_result_data(struct hecmwST_result_data *result_data) { if (result_data == NULL) { HECMW_set_error(errno, "\'result_data\' is NULL"); goto error; } result_data->nn_dof = NULL; result_data->node_label = NULL; result_data->node_val_item = NULL; result_data->ne_dof = NULL; result_data->elem_label = NULL; result_data->elem_val_item = NULL; return RTC_NORMAL; error: return RTC_ERROR; } static void free_struct_result_data(struct hecmwST_result_data *result_data) { int i; if (result_data == NULL) return; HECMW_free(result_data->nn_dof); HECMW_free(result_data->ne_dof); if (result_data->node_label) { for (i = 0; i < result_data->nn_component; i++) { HECMW_free(result_data->node_label[i]); } HECMW_free(result_data->node_label); } if (result_data->elem_label) { for (i = 0; i < result_data->ne_component; i++) { HECMW_free(result_data->elem_label[i]); } HECMW_free(result_data->elem_label); } HECMW_free(result_data->node_val_item); HECMW_free(result_data->elem_val_item); HECMW_free(result_data); result_data = NULL; } /*================================================================================================*/ static int search_eqn_block_idx(const struct hecmwST_local_mesh *mesh) { int i; for (i = 0; i < mesh->node_group->n_grp; i++) { if (!strcmp(mesh->node_group->grp_name[i], HECMW_PART_EQUATION_BLOCK_NAME)) return i; } return -1; } /*================================================================================================*/ static int quick_sort(int no, int n, double *arr, int *brr, int *istack) { double a, atemp; int b, btemp; int i, ir, j, k, l; int jstack = 0; int nstack; nstack = no; l = 0; ir = n - 1; 
for (;;) { if (ir - l < QSORT_LOWER) { for (j = l + 1; j <= ir; j++) { a = arr[j]; b = brr[j]; for (i = j - 1; i >= l; i--) { if (arr[i] <= a) break; arr[i + 1] = arr[i]; brr[i + 1] = brr[i]; } arr[i + 1] = a; brr[i + 1] = b; } if (!jstack) return 0; ir = istack[jstack]; l = istack[jstack - 1]; jstack -= 2; } else { k = (l + ir) >> 1; DSWAP(arr[k], arr[l + 1]) ISWAP(brr[k], brr[l + 1]) if (arr[l] > arr[ir]) { DSWAP(arr[l], arr[ir]) ISWAP(brr[l], brr[ir]) } if (arr[l + 1] > arr[ir]) { DSWAP(arr[l + 1], arr[ir]) ISWAP(brr[l + 1], brr[ir]) } if (arr[l] > arr[l + 1]) { DSWAP(arr[l], arr[l + 1]) ISWAP(brr[l], brr[l + 1]) } i = l + 1; j = ir; a = arr[l + 1]; b = brr[l + 1]; for (;;) { do i++; while (arr[i] < a); do j--; while (arr[j] > a); if (j < i) break; DSWAP(arr[i], arr[j]) ISWAP(brr[i], brr[j]) } arr[l + 1] = arr[j]; arr[j] = a; brr[l + 1] = brr[j]; brr[j] = b; jstack += 2; if (jstack > nstack) { HECMW_set_error(HECMW_PART_E_STACK_OVERFLOW, ""); return -1; } if (ir - i + 1 >= j - l) { istack[jstack] = ir; istack[jstack - 1] = i; ir = j - 1; } else { istack[jstack] = j - 1; istack[jstack - 1] = l; l = i; } } } } /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - * - - - - - - - - - */ static int rcb_partition(int n, const double *coord, int *wnum, const struct hecmw_part_cont_data *cont_data) { double *value; int *id, *stack; int rtc; int counter; int i, j, k; id = (int *)HECMW_malloc(sizeof(int) * n); if (id == NULL) { HECMW_set_error(errno, ""); goto error; } stack = (int *)HECMW_malloc(sizeof(int) * n); if (stack == NULL) { HECMW_set_error(errno, ""); goto error; } value = (double *)HECMW_malloc(sizeof(double) * n); if (value == NULL) { HECMW_set_error(errno, ""); goto error; } for (i = 0; i < cont_data->n_rcb_div; i++) { for (j = 0; j < pow(2, i); j++) { counter = 0; switch (cont_data->rcb_axis[i]) { case HECMW_PART_RCB_X_AXIS: /* X-axis */ for (k = 0; k < n; k++) { if (wnum[2 * k + 1] == j) { id[counter] = k; value[counter] = 
coord[3 * k]; counter++; } } break; case HECMW_PART_RCB_Y_AXIS: /* Y-axis */ for (k = 0; k < n; k++) { if (wnum[2 * k + 1] == j) { id[counter] = k; value[counter] = coord[3 * k + 1]; counter++; } } break; case HECMW_PART_RCB_Z_AXIS: /* Z-axis */ for (k = 0; k < n; k++) { if (wnum[2 * k + 1] == j) { id[counter] = k; value[counter] = coord[3 * k + 2]; counter++; } } break; default: HECMW_set_error(HECMW_PART_E_INVALID_RCB_DIR, ""); goto error; } /* quick sort */ rtc = quick_sort(n, counter, value, id, stack); if (rtc != RTC_NORMAL) goto error; /* belonging domain of node */ for (k = 0; k < counter * F_1_2; k++) { wnum[2 * id[k] + 1] = j + (int)pow(2, i); } } } HECMW_free(id); HECMW_free(stack); HECMW_free(value); return RTC_NORMAL; error: HECMW_free(id); HECMW_free(stack); HECMW_free(value); return RTC_ERROR; } /*------------------------------------------------------------------------------------------------*/ static int calc_gravity(const struct hecmwST_local_mesh *global_mesh, double *coord) { double coord_x, coord_y, coord_z; int node; int js, je; int i, j; for (i = 0; i < global_mesh->n_elem; i++) { js = global_mesh->elem_node_index[i]; je = global_mesh->elem_node_index[i + 1]; for (coord_x = 0.0, coord_y = 0.0, coord_z = 0.0, j = js; j < je; j++) { node = global_mesh->elem_node_item[j]; coord_x += global_mesh->node[3 * (node - 1)]; coord_y += global_mesh->node[3 * (node - 1) + 1]; coord_z += global_mesh->node[3 * (node - 1) + 2]; } coord[3 * i] = coord_x / (je - js); coord[3 * i + 1] = coord_y / (je - js); coord[3 * i + 2] = coord_z / (je - js); } return RTC_NORMAL; } static int rcb_partition_eb(struct hecmwST_local_mesh *global_mesh, const struct hecmw_part_cont_data *cont_data) { double *coord = NULL; int rtc; coord = (double *)HECMW_malloc(sizeof(double) * global_mesh->n_elem * 3); if (coord == NULL) { HECMW_set_error(errno, ""); goto error; } rtc = calc_gravity(global_mesh, coord); if (rtc != RTC_NORMAL) goto error; rtc = rcb_partition(global_mesh->n_elem, 
coord, global_mesh->elem_ID, cont_data); if (rtc != RTC_NORMAL) goto error; HECMW_free(coord); return RTC_NORMAL; error: HECMW_free(coord); return RTC_ERROR; } /*================================================================================================*/ static int create_node_graph_link_list( const struct hecmwST_local_mesh *global_mesh, const struct hecmw_part_edge_data *edge_data, struct link_list **graph) { int node1, node2; long long int i; for (i = 0; i < edge_data->n_edge; i++) { node1 = edge_data->edge_node_item[2 * i]; node2 = edge_data->edge_node_item[2 * i + 1]; /* node 1 */ graph[node1 - 1]->last->next = (struct link_unit *)HECMW_malloc(sizeof(struct link_unit)); if (graph[node1 - 1]->last->next == NULL) { HECMW_set_error(errno, ""); goto error; } graph[node1 - 1]->n += 1; graph[node1 - 1]->last->next->id = node2; graph[node1 - 1]->last->next->next = NULL; graph[node1 - 1]->last = graph[node1 - 1]->last->next; /* node 2 */ graph[node2 - 1]->last->next = (struct link_unit *)HECMW_malloc(sizeof(struct link_unit)); if (graph[node2 - 1]->last->next == NULL) { HECMW_set_error(errno, ""); goto error; } graph[node2 - 1]->n += 1; graph[node2 - 1]->last->next->id = node1; graph[node2 - 1]->last->next->next = NULL; graph[node2 - 1]->last = graph[node2 - 1]->last->next; } return RTC_NORMAL; error: return RTC_ERROR; } static int create_node_graph_compress( const struct hecmwST_local_mesh *global_mesh, struct link_list **graph, int *node_graph_index, int *node_graph_item) { int counter; int i, j; struct link_unit *p; for (counter = 0, i = 0; i < global_mesh->n_node; i++) { node_graph_index[i + 1] = node_graph_index[i] + graph[i]->n; for (p = graph[i]->list, j = 0; j < graph[i]->n; j++) { p = p->next; node_graph_item[counter++] = p->id - 1; } } return RTC_NORMAL; } static int create_node_graph(const struct hecmwST_local_mesh *global_mesh, const struct hecmw_part_edge_data *edge_data, int *node_graph_index, int *node_graph_item) { struct link_list **graph = 
NULL; int rtc; int i; graph = (struct link_list **)HECMW_malloc(sizeof(struct link_list *) * global_mesh->n_node); if (graph == NULL) { HECMW_set_error(errno, ""); goto error; } else { for (i = 0; i < global_mesh->n_node; i++) { graph[i] = NULL; } } for (i = 0; i < global_mesh->n_node; i++) { graph[i] = (struct link_list *)HECMW_malloc(sizeof(struct link_list)); if (graph[i] == NULL) { HECMW_set_error(errno, ""); goto error; } else { graph[i]->list = NULL; } } for (i = 0; i < global_mesh->n_node; i++) { graph[i]->list = (struct link_unit *)HECMW_malloc(sizeof(struct link_unit)); if (graph[i]->list == NULL) { HECMW_set_error(errno, ""); goto error; } else { graph[i]->n = 0; graph[i]->list->next = NULL; graph[i]->last = graph[i]->list; } } rtc = create_node_graph_link_list(global_mesh, edge_data, graph); if (rtc != RTC_NORMAL) goto error; rtc = create_node_graph_compress(global_mesh, graph, node_graph_index, node_graph_item); if (rtc != RTC_NORMAL) goto error; for (i = 0; i < global_mesh->n_node; i++) { free_link_list(graph[i]->list); HECMW_free(graph[i]); } HECMW_free(graph); return RTC_NORMAL; error: if (graph) { for (i = 0; i < global_mesh->n_node; i++) { if (graph[i]) { free_link_list(graph[i]->list); HECMW_free(graph[i]); } } HECMW_free(graph); } return RTC_ERROR; } /*------------------------------------------------------------------------------------------------*/ static int set_node_belong_elem(const struct hecmwST_local_mesh *global_mesh, struct hecmw_part_node_data *node_data) { int node, counter; struct link_list **node_list = NULL; struct link_unit *p; int size; int i, j; node_data->node_elem_index = NULL; node_data->node_elem_item = NULL; node_list = (struct link_list **)HECMW_malloc(sizeof(struct link_list *) * global_mesh->n_node); if (node_list == NULL) { HECMW_set_error(errno, ""); goto error; } else { for (i = 0; i < global_mesh->n_node; i++) { node_list[i] = NULL; } } for (i = 0; i < global_mesh->n_node; i++) { node_list[i] = (struct link_list 
*)HECMW_malloc(sizeof(struct link_list)); if (node_list[i] == NULL) { HECMW_set_error(errno, ""); goto error; } else { node_list[i]->list = NULL; } } for (i = 0; i < global_mesh->n_node; i++) { node_list[i]->list = (struct link_unit *)HECMW_malloc(sizeof(struct link_unit)); if (node_list[i]->list == NULL) { HECMW_set_error(errno, ""); goto error; } else { node_list[i]->n = 0; node_list[i]->list->next = NULL; node_list[i]->last = node_list[i]->list; } } for (i = 0; i < global_mesh->n_elem; i++) { for (j = global_mesh->elem_node_index[i]; j < global_mesh->elem_node_index[i + 1]; j++) { node = global_mesh->elem_node_item[j]; size = sizeof(struct link_list); node_list[node - 1]->last->next = (struct link_unit *)HECMW_malloc(size); if (node_list[node - 1]->last->next == NULL) { HECMW_set_error(errno, ""); goto error; } node_list[node - 1]->last = node_list[node - 1]->last->next; node_list[node - 1]->last->id = i + 1; node_list[node - 1]->last->next = NULL; node_list[node - 1]->n += 1; } } node_data->node_elem_index = (int *)HECMW_calloc(global_mesh->n_node + 1, sizeof(int)); if (node_data->node_elem_index == NULL) { HECMW_set_error(errno, ""); goto error; } for (i = 0; i < global_mesh->n_node; i++) { node_data->node_elem_index[i + 1] = node_data->node_elem_index[i] + node_list[i]->n; } size = sizeof(int) * node_data->node_elem_index[global_mesh->n_node]; node_data->node_elem_item = (int *)HECMW_malloc(size); if (node_data->node_elem_item == NULL) { HECMW_set_error(errno, ""); goto error; } for (counter = 0, i = 0; i < global_mesh->n_node; i++) { for (p = node_list[i]->list, j = 0; j < node_list[i]->n; j++) { p = p->next; node_data->node_elem_item[counter++] = p->id; } HECMW_assert(counter == node_data->node_elem_index[i + 1]); } for (i = 0; i < global_mesh->n_node; i++) { free_link_list(node_list[i]->list); HECMW_free(node_list[i]); } HECMW_free(node_list); return RTC_NORMAL; error: if (node_list) { for (i = 0; i < global_mesh->n_node; i++) { if (node_list[i]) { 
free_link_list(node_list[i]->list); HECMW_free(node_list[i]); } } HECMW_free(node_list); } HECMW_free(node_data->node_elem_index); HECMW_free(node_data->node_elem_item); node_data->node_elem_index = NULL; node_data->node_elem_item = NULL; return RTC_ERROR; } static int create_elem_graph_link_list( const struct hecmwST_local_mesh *global_mesh, const struct hecmw_part_node_data *node_data, struct link_list **graph) { char *elem_flag = NULL; int elem, node; int size; int counter; int i, j, k; elem_flag = (char *)HECMW_malloc(sizeof(char) * global_mesh->n_elem); if (elem_flag == NULL) { HECMW_set_error(errno, ""); goto error; } for (counter = 0, i = 0; i < global_mesh->n_elem; i++) { memset(elem_flag, 0, sizeof(char) * global_mesh->n_elem); MASK_BIT(elem_flag[i], MASK); for (j = global_mesh->elem_node_index[i]; j < global_mesh->elem_node_index[i + 1]; j++) { node = global_mesh->elem_node_item[j]; for (k = node_data->node_elem_index[node - 1]; k < node_data->node_elem_index[node]; k++) { elem = node_data->node_elem_item[k]; if (!EVAL_BIT(elem_flag[elem - 1], MASK)) { MASK_BIT(elem_flag[elem - 1], MASK); size = sizeof(struct link_unit); graph[i]->last->next = (struct link_unit *)HECMW_malloc(size); if (graph[i]->last->next == NULL) { HECMW_set_error(errno, ""); goto error; } graph[i]->n += 1; graph[i]->last->next->id = elem; graph[i]->last->next->next = NULL; graph[i]->last = graph[i]->last->next; counter++; } } } } HECMW_free(elem_flag); return counter; error: HECMW_free(elem_flag); return -1; } static int create_elem_graph_compress( const struct hecmwST_local_mesh *global_mesh, struct link_list **graph, int *elem_graph_index, int *elem_graph_item) { struct link_unit *p; int counter; int i, j; for (counter = 0, i = 0; i < global_mesh->n_elem; i++) { elem_graph_index[i + 1] = elem_graph_index[i] + graph[i]->n; for (p = graph[i]->list, j = 0; j < graph[i]->n; j++) { p = p->next; elem_graph_item[counter++] = p->id - 1; } } HECMW_assert(elem_graph_index[global_mesh->n_elem] 
== counter); return RTC_NORMAL; } static int *create_elem_graph(const struct hecmwST_local_mesh *global_mesh, int *elem_graph_index) { struct hecmw_part_node_data *node_data = NULL; struct link_list **graph = NULL; int *elem_graph_item = NULL; int n_graph; int rtc; int i; node_data = (struct hecmw_part_node_data *)HECMW_malloc( sizeof(struct hecmw_part_node_data)); if (node_data == NULL) { HECMW_set_error(errno, ""); goto error; } else { node_data->node_elem_index = NULL; node_data->node_elem_item = NULL; } rtc = set_node_belong_elem(global_mesh, node_data); if (rtc != RTC_NORMAL) goto error; graph = (struct link_list **)HECMW_malloc(sizeof(struct link_list *) * global_mesh->n_elem); if (graph == NULL) { HECMW_set_error(errno, ""); goto error; } else { for (i = 0; i < global_mesh->n_elem; i++) { graph[i] = NULL; } } for (i = 0; i < global_mesh->n_elem; i++) { graph[i] = (struct link_list *)HECMW_malloc(sizeof(struct link_list)); if (graph[i] == NULL) { HECMW_set_error(errno, ""); goto error; } else { graph[i]->list = NULL; } } for (i = 0; i < global_mesh->n_elem; i++) { graph[i]->list = (struct link_unit *)HECMW_malloc(sizeof(struct link_unit)); if (graph[i]->list == NULL) { HECMW_set_error(errno, ""); goto error; } else { graph[i]->n = 0; graph[i]->list->next = NULL; graph[i]->last = graph[i]->list; } } n_graph = create_elem_graph_link_list(global_mesh, node_data, graph); if (n_graph < 0) goto error; elem_graph_item = (int *)HECMW_malloc(sizeof(int) * n_graph); if (elem_graph_item == NULL) { HECMW_set_error(errno, ""); goto error; } rtc = create_elem_graph_compress(global_mesh, graph, elem_graph_index, elem_graph_item); if (rtc != RTC_NORMAL) goto error; HECMW_free(node_data->node_elem_index); HECMW_free(node_data->node_elem_item); HECMW_free(node_data); for (i = 0; i < global_mesh->n_elem; i++) { free_link_list(graph[i]->list); HECMW_free(graph[i]); } HECMW_free(graph); return elem_graph_item; error: if (node_data) { HECMW_free(node_data->node_elem_index); 
HECMW_free(node_data->node_elem_item); HECMW_free(node_data); } if (graph) { for (i = 0; i < global_mesh->n_elem; i++) { if (graph[i]) { free_link_list(graph[i]->list); HECMW_free(graph[i]); } } HECMW_free(graph); } HECMW_free(elem_graph_item); return NULL; } /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - * - - - - - - - - - */ static int pmetis_interface(const int n_vertex, const int n_domain, int *xadj, int *adjncy, int *part) { int edgecut = 0; /* number of edge-cut */ #ifdef HECMW_PART_WITH_METIS int n = n_vertex; /* number of vertices */ int *vwgt = NULL; /* weight for vertices */ int *adjwgt = NULL; /* weight for edges */ int nparts = n_domain; /* number of sub-domains */ #if defined(METIS_VER_MAJOR) && (METIS_VER_MAJOR == 5) int ncon = 1; /* number of balancing constraints */ int *vsize = NULL; real_t *tpwgts = NULL; real_t *ubvec = NULL; int *options = NULL; HECMW_log(HECMW_LOG_DEBUG, "Entering pmetis(v5)...\n"); METIS_PartGraphRecursive(&n, &ncon, xadj, adjncy, vwgt, vsize, adjwgt, &nparts, tpwgts, ubvec, options, &edgecut, part); HECMW_log(HECMW_LOG_DEBUG, "Returned from pmetis(v5)\n"); #else int wgtflag = 0; /* flag of weight for edges */ int numflag = 0; /* flag of stating number of index */ int options[5] = {0, 0, 0, 0, 0}; /* options for pMETIS */ HECMW_log(HECMW_LOG_DEBUG, "Entering pmetis(v4)...\n"); METIS_PartGraphRecursive(&n, xadj, adjncy, vwgt, adjwgt, &wgtflag, &numflag, &nparts, options, &edgecut, part); HECMW_log(HECMW_LOG_DEBUG, "Returned from pmetis(v4)\n"); #endif #endif return edgecut; } static int kmetis_interface(const int n_vertex, const int n_domain, int *xadj, int *adjncy, int *part) { int edgecut = 0; /* number of edge-cut */ #ifdef HECMW_PART_WITH_METIS int n = n_vertex; /* number of vertices */ int *vwgt = NULL; /* weight for vertices */ int *adjwgt = NULL; /* weight for edges */ int nparts = n_domain; /* number of sub-domains */ #if defined(METIS_VER_MAJOR) && (METIS_VER_MAJOR == 5) int ncon = 1; 
/* number of balancing constraints */ int *vsize = NULL; real_t *tpwgts = NULL; real_t *ubvec = NULL; int *options = NULL; HECMW_log(HECMW_LOG_DEBUG, "Entering kmetis(v5)...\n"); METIS_PartGraphKway(&n, &ncon, xadj, adjncy, vwgt, vsize, adjwgt, &nparts, tpwgts, ubvec, options, &edgecut, part); HECMW_log(HECMW_LOG_DEBUG, "Returned from kmetis(v5)\n"); #else int wgtflag = 0; /* flag of weight for edges */ int numflag = 0; /* flag of stating number of index */ int options[5] = {0, 0, 0, 0, 0}; /* options for kMETIS */ HECMW_log(HECMW_LOG_DEBUG, "Entering kmetis(v4)...\n"); METIS_PartGraphKway(&n, xadj, adjncy, vwgt, adjwgt, &wgtflag, &numflag, &nparts, options, &edgecut, part); HECMW_log(HECMW_LOG_DEBUG, "Returned from kmetis(v4)\n"); #endif #endif return edgecut; } static int pmetis_interface_with_weight(int n_vertex, int ncon, int n_domain, const int *xadj, const int *adjncy, const int *vwgt, int *part) { int edgecut = 0; /* number of edge-cut */ #ifdef HECMW_PART_WITH_METIS int n = n_vertex; /* number of vertices */ int *adjwgt = NULL; /* weight for edges */ int nparts = n_domain; /* number of sub-domains */ #if defined(METIS_VER_MAJOR) && (METIS_VER_MAJOR == 5) int *vsize = NULL; real_t *tpwgts = NULL; real_t *ubvec = NULL; int *options = NULL; HECMW_log(HECMW_LOG_DEBUG, "Entering pmetis(v5)...\n"); METIS_PartGraphRecursive(&n, &ncon, (int *)xadj, (int *)adjncy, (int *)vwgt, vsize, adjwgt, &nparts, tpwgts, ubvec, options, &edgecut, part); HECMW_log(HECMW_LOG_DEBUG, "Returned from pmetis(v5)\n"); #else int wgtflag = 0; /* flag of weight for edges */ int numflag = 0; /* flag of stating number of index */ int options[5] = {0, 0, 0, 0, 0}; /* options for pMETIS */ if (vwgt != NULL) wgtflag = 2; HECMW_log(HECMW_LOG_DEBUG, "Entering pmetis(v4)...\n"); if (ncon == 1) { METIS_PartGraphRecursive(&n, (int *)xadj, (int *)adjncy, (int *)vwgt, adjwgt, &wgtflag, &numflag, &nparts, options, &edgecut, part); } else { METIS_mCPartGraphRecursive(&n, &ncon, (int *)xadj, (int 
*)adjncy, (int *)vwgt, adjwgt, &wgtflag, &numflag, &nparts, options, &edgecut, part); } HECMW_log(HECMW_LOG_DEBUG, "Returned from pmetis(v4)\n"); #endif #endif return edgecut; } static int kmetis_interface_with_weight(int n_vertex, int ncon, int n_domain, const int *xadj, const int *adjncy, const int *vwgt, int *part) { int edgecut = 0; /* number of edge-cut */ #ifdef HECMW_PART_WITH_METIS int n = n_vertex; /* number of vertices */ int *adjwgt = NULL; /* weight for edges */ int nparts = n_domain; /* number of sub-domains */ #if defined(METIS_VER_MAJOR) && (METIS_VER_MAJOR == 5) int *vsize = NULL; real_t *tpwgts = NULL; real_t *ubvec = NULL; int *options = NULL; HECMW_log(HECMW_LOG_DEBUG, "Entering kmetis(v5)...\n"); METIS_PartGraphKway(&n, &ncon, (int *)xadj, (int *)adjncy, (int *)vwgt, vsize, adjwgt, &nparts, tpwgts, ubvec, options, &edgecut, part); HECMW_log(HECMW_LOG_DEBUG, "Returned from kmetis(v5)\n"); #else int wgtflag = 0; /* flag of weight for edges */ int numflag = 0; /* flag of stating number of index */ float *ubvec = NULL; int options[5] = {0, 0, 0, 0, 0}; /* options for kMETIS */ if (vwgt != NULL) wgtflag = 2; if (ncon > 1) { ubvec = (float *)HECMW_malloc(ncon * sizeof(float)); if (ubvec == NULL) { HECMW_set_error(errno, ""); return -1; } } HECMW_log(HECMW_LOG_DEBUG, "Entering kmetis(v4)...\n"); if (ncon == 1) { METIS_PartGraphKway(&n, (int *)xadj, (int *)adjncy, (int *)vwgt, adjwgt, &wgtflag, &numflag, &nparts, options, &edgecut, part); } else { METIS_mCPartGraphKway(&n, &ncon, (int *)xadj, (int *)adjncy, (int *)vwgt, adjwgt, &wgtflag, &numflag, &nparts, ubvec, options, &edgecut, part); } HECMW_log(HECMW_LOG_DEBUG, "Returned from kmetis(v4)\n"); HECMW_free(ubvec); #endif #endif return edgecut; } static int contact_agg_mark_node_group(int *mark, struct hecmwST_local_mesh *global_mesh, int gid, int agg_id, int *agg_dup) { struct hecmwST_node_grp *ngrp = global_mesh->node_group; int istart, iend, i; HECMW_assert(0 < gid && gid <= ngrp->n_grp); istart = 
ngrp->grp_index[gid - 1]; iend = ngrp->grp_index[gid]; for (i = istart; i < iend; i++) { int nid = ngrp->grp_item[i] - 1; HECMW_assert(0 <= nid && nid < global_mesh->n_node); if (0 <= mark[nid] && mark[nid] < agg_id) { /* the node is included in some other contact pair */ if (*agg_dup == -1) { *agg_dup = mark[nid]; } else if (mark[nid] != *agg_dup) { fprintf(stderr, "ERROR: node included in multiple node groups in different " "contact pairs,\n" " which is not supported by CONTACT=AGGREGATE\n"); HECMW_abort(HECMW_comm_get_comm()); } } mark[nid] = agg_id; } return RTC_NORMAL; } static int HECMW_get_num_surf_node(int etype, int sid) { switch (etype) { case HECMW_ETYPE_TET1: case HECMW_ETYPE_PTT1: return 3; case HECMW_ETYPE_TET2: case HECMW_ETYPE_PTT2: return 6; case HECMW_ETYPE_HEX1: case HECMW_ETYPE_PTQ1: return 4; case HECMW_ETYPE_HEX2: case HECMW_ETYPE_PTQ2: return 8; case HECMW_ETYPE_PRI1: if (1 <= sid && sid <= 3) return 4; if (4 <= sid && sid <= 5) return 3; case HECMW_ETYPE_PRI2: if (1 <= sid && sid <= 3) return 8; if (4 <= sid && sid <= 5) return 6; default: fprintf( stderr, "ERROR: parallel contact analysis of elem type %d not supported\n", etype); return -1; } return -1; } static const int *HECMW_get_surf_node(int etype, int sid) { HECMW_assert(0 < sid); static const int elem_surf_tet1[4][3] = { {1, 2, 3}, {0, 3, 2}, {0, 1, 3}, {0, 2, 1}}; static const int elem_surf_tet2[4][6] = {{1, 4, 2, 9, 3, 8}, {0, 7, 3, 9, 2, 5}, {0, 6, 1, 8, 3, 7}, {0, 5, 2, 4, 1, 6}}; static const int elem_surf_hex1[6][4] = {{3, 0, 4, 7}, {1, 2, 6, 5}, {0, 1, 5, 4}, {2, 3, 7, 6}, {3, 2, 1, 0}, {4, 5, 6, 7}}; static const int elem_surf_hex2[6][8] = { {3, 11, 0, 16, 4, 15, 7, 19}, {1, 9, 2, 18, 6, 13, 5, 17}, {0, 8, 1, 17, 5, 12, 4, 16}, {2, 10, 3, 19, 7, 14, 6, 18}, {3, 10, 2, 9, 1, 8, 0, 11}, {4, 12, 5, 13, 6, 14, 7, 15}}; static const int elem_surf_pri1[5][4] = { {1, 2, 5, 4}, {2, 0, 3, 5}, {0, 1, 4, 3}, {2, 1, 0, -1}, {3, 4, 5, -1}}; static const int elem_surf_pri2[5][8] = {{1, 6, 
2, 14, 5, 9, 4, 13}, {2, 7, 0, 12, 3, 10, 5, 14}, {0, 8, 1, 13, 4, 11, 3, 12}, {2, 6, 1, 8, 0, 7, -1, -1}, {3, 11, 4, 9, 5, 10, -1, -1}}; static const int elem_surf_ptt1[3] = {0, 1, 2}; static const int elem_surf_ptt2[6] = {0, 1, 2, 3, 4, 5}; static const int elem_surf_ptq1[4] = {0, 1, 2, 3}; static const int elem_surf_ptq2[8] = {0, 1, 2, 3, 4, 5, 6, 7}; switch (etype) { case HECMW_ETYPE_TET1: return elem_surf_tet1[sid - 1]; case HECMW_ETYPE_TET2: return elem_surf_tet2[sid - 1]; case HECMW_ETYPE_HEX1: return elem_surf_hex1[sid - 1]; case HECMW_ETYPE_HEX2: return elem_surf_hex2[sid - 1]; case HECMW_ETYPE_PRI1: return elem_surf_pri1[sid - 1]; case HECMW_ETYPE_PRI2: return elem_surf_pri2[sid - 1]; case HECMW_ETYPE_PTT1: return elem_surf_ptt1; case HECMW_ETYPE_PTT2: return elem_surf_ptt2; case HECMW_ETYPE_PTQ1: return elem_surf_ptq1; case HECMW_ETYPE_PTQ2: return elem_surf_ptq2; } fprintf(stderr, "ERROR: parallel contact analysis of element type %d not supported\n", etype); return NULL; } static int HECMW_fistr_get_num_surf_node(int etype, int sid) { switch (etype) { case HECMW_ETYPE_TET1: case HECMW_ETYPE_PTT1: return 3; case HECMW_ETYPE_TET2: case HECMW_ETYPE_PTT2: return 6; case HECMW_ETYPE_HEX1: case HECMW_ETYPE_PTQ1: return 4; case HECMW_ETYPE_HEX2: case HECMW_ETYPE_PTQ2: return 8; case HECMW_ETYPE_PRI1: if (1 <= sid && sid <= 2) return 3; if (3 <= sid && sid <= 5) return 4; case HECMW_ETYPE_PRI2: if (1 <= sid && sid <= 2) return 6; if (3 <= sid && sid <= 5) return 8; default: fprintf( stderr, "ERROR: parallel contact analysis of elem type %d not supported\n", etype); return -1; } return -1; } static const int *HECMW_fistr_get_surf_node(int etype, int sid) { HECMW_assert(0 < sid); static const int elem_surf_tet1[4][3] = { {0, 1, 2}, {0, 1, 3}, {1, 2, 3}, {2, 0, 3}}; static const int elem_surf_tet2[4][6] = {{0, 6, 1, 4, 2, 5}, {0, 6, 1, 8, 3, 7}, {1, 4, 2, 9, 3, 8}, {2, 5, 0, 9, 3, 7}}; static const int elem_surf_hex1[6][4] = {{0, 1, 2, 3}, {4, 5, 6, 7}, {0, 1, 5, 
4}, {1, 2, 6, 5}, {2, 3, 7, 6}, {3, 0, 4, 7}}; static const int elem_surf_hex2[6][8] = { {0, 8, 1, 9, 2, 10, 3, 11}, {4, 12, 5, 13, 6, 14, 7, 15}, {0, 8, 1, 17, 5, 12, 4, 16}, {1, 9, 2, 18, 6, 13, 5, 17}, {2, 10, 3, 19, 7, 14, 6, 18}, {3, 11, 0, 16, 4, 15, 7, 19}}; static const int elem_surf_pri1[5][4] = { {0, 1, 2, -1}, {3, 4, 5, -1}, {0, 1, 4, 3}, {1, 2, 5, 4}, {2, 0, 3, 5}}; static const int elem_surf_pri2[5][8] = {{0, 8, 1, 6, 2, 7, -1, -1}, {3, 11, 4, 9, 5, 10, -1, -1}, {0, 8, 1, 13, 4, 11, 3, 12}, {1, 6, 2, 14, 5, 9, 4, 13}, {2, 7, 0, 12, 3, 10, 5, 14}}; static const int elem_surf_ptt1[3] = {0, 1, 2}; static const int elem_surf_ptt2[6] = {0, 1, 2, 3, 4, 5}; static const int elem_surf_ptq1[4] = {0, 1, 2, 3}; static const int elem_surf_ptq2[8] = {0, 1, 2, 3, 4, 5, 6, 7}; switch (etype) { case HECMW_ETYPE_TET1: return elem_surf_tet1[sid - 1]; case HECMW_ETYPE_TET2: return elem_surf_tet2[sid - 1]; case HECMW_ETYPE_HEX1: return elem_surf_hex1[sid - 1]; case HECMW_ETYPE_HEX2: return elem_surf_hex2[sid - 1]; case HECMW_ETYPE_PRI1: return elem_surf_pri1[sid - 1]; case HECMW_ETYPE_PRI2: return elem_surf_pri2[sid - 1]; case HECMW_ETYPE_PTT1: return elem_surf_ptt1; case HECMW_ETYPE_PTT2: return elem_surf_ptt2; case HECMW_ETYPE_PTQ1: return elem_surf_ptq1; case HECMW_ETYPE_PTQ2: return elem_surf_ptq2; } fprintf(stderr, "ERROR: parallel contact analysis of element type %d not supported\n", etype); return NULL; } static int mark_contact_master_nodes(struct hecmwST_local_mesh *global_mesh, int *mark) { int i, j, k; struct hecmwST_contact_pair *cp = global_mesh->contact_pair; struct hecmwST_surf_grp *sgrp = global_mesh->surf_group; for (i = 0; i < global_mesh->n_node; i++) { mark[i] = 0; } for (i = 0; i < cp->n_pair; i++) { int gid = cp->master_grp_id[i]; int jstart = sgrp->grp_index[gid - 1]; int jend = sgrp->grp_index[gid]; for (j = jstart; j < jend; j++) { int eid = sgrp->grp_item[j * 2] - 1; int sid = sgrp->grp_item[j * 2 + 1]; int *nop = global_mesh->elem_node_item + 
global_mesh->elem_node_index[eid]; int etype = global_mesh->elem_type[eid]; /** IF HEC-MW NUMBERING **/ /* int num_snode = HECMW_get_num_surf_node(etype, sid); */ /* const int *snode = HECMW_get_surf_node(etype, sid); */ /** ELSE IF FrontISTR NUMBERING **/ int num_snode = HECMW_fistr_get_num_surf_node(etype, sid); const int *snode = HECMW_fistr_get_surf_node(etype, sid); /** END IF **/ if (num_snode < 0 || snode == NULL) return RTC_ERROR; for (k = 0; k < num_snode; k++) { int nid = nop[snode[k]] - 1; HECMW_assert(0 <= nid && nid < global_mesh->n_node); mark[nid] = 1; } } } return RTC_NORMAL; } static int contact_agg_mark_surf_group(int *mark, struct hecmwST_local_mesh *global_mesh, int gid, int agg_id, int *agg_dup) { struct hecmwST_surf_grp *sgrp = global_mesh->surf_group; int istart, iend, i, j; HECMW_assert(0 < gid && gid <= sgrp->n_grp); /* get all nodes in the surface and mark them!!! */ istart = sgrp->grp_index[gid - 1]; iend = sgrp->grp_index[gid]; for (i = istart; i < iend; i++) { int eid = sgrp->grp_item[i * 2] - 1; int sid = sgrp->grp_item[i * 2 + 1]; int *nop = global_mesh->elem_node_item + global_mesh->elem_node_index[eid]; int etype = global_mesh->elem_type[eid]; /** IF HEC-WM NUMBERING **/ /* int num_snode = HECMW_get_num_surf_node(etype, sid); */ /* const int *snode = HECMW_get_surf_node(etype, sid); */ /** ELSE IF FrontISTR NUMBERING **/ int num_snode = HECMW_fistr_get_num_surf_node(etype, sid); const int *snode = HECMW_fistr_get_surf_node(etype, sid); /** END IF **/ if (num_snode < 0 || snode == NULL) return RTC_ERROR; for (j = 0; j < num_snode; j++) { int nid = nop[snode[j]] - 1; HECMW_assert(0 <= nid && nid < global_mesh->n_node); if (0 <= mark[nid] && mark[nid] < agg_id) { /* the node is included in some other contact pair */ if (*agg_dup == -1) { *agg_dup = mark[nid]; } else if (mark[nid] != *agg_dup) { fprintf(stderr, "ERROR: node included in multiple surface groups in " "different contact pairs,\n" " which is not supported by 
CONTACT=AGGREGATE\n"); HECMW_abort(HECMW_comm_get_comm()); } } mark[nid] = agg_id; } } return RTC_NORMAL; } static int metis_partition_nb_contact_agg( struct hecmwST_local_mesh *global_mesh, const struct hecmw_part_cont_data *cont_data, const struct hecmw_part_edge_data *edge_data) { int n_edgecut; int *node_graph_index = NULL; /* index for nodal graph */ int *node_graph_item = NULL; /* member of nodal graph */ int *belong_domain = NULL; int rtc; int i; struct hecmwST_contact_pair *cp; int *mark; int agg_id, agg_dup, gid; int n_node2; const int *node_graph_index2; const int *node_graph_item2; int *node_weight2; struct hecmw_graph graph1, graph2; const int ncon = 1; HECMW_assert(global_mesh->hecmw_flag_partcontact == HECMW_FLAG_PARTCONTACT_AGGREGATE); node_graph_index = (int *)HECMW_calloc(global_mesh->n_node + 1, sizeof(int)); if (node_graph_index == NULL) { HECMW_set_error(errno, ""); goto error; } node_graph_item = (int *)HECMW_malloc(sizeof(int) * edge_data->n_edge * 2); if (node_graph_item == NULL) { HECMW_set_error(errno, ""); goto error; } HECMW_log(HECMW_LOG_DEBUG, "Starting creation of node graph...\n"); rtc = create_node_graph(global_mesh, edge_data, node_graph_index, node_graph_item); if (rtc != RTC_NORMAL) goto error; HECMW_log(HECMW_LOG_DEBUG, "Creation of node graph done\n"); HECMW_log(HECMW_LOG_DEBUG, "Partitioning mode: contact-aggregate\n"); HECMW_log(HECMW_LOG_DEBUG, "Starting aggregation of contact pairs...\n"); /* aggregate contact pair if requested */ cp = global_mesh->contact_pair; mark = (int *)HECMW_malloc(global_mesh->n_node * sizeof(int)); if (mark == NULL) { HECMW_set_error(errno, ""); goto error; } for (i = 0; i < global_mesh->n_node; i++) { mark[i] = -1; } agg_id = 0; /* mark contact pairs */ for (i = 0; i < cp->n_pair; i++) { agg_dup = -1; /* slave */ if (cp->type[i] == HECMW_CONTACT_TYPE_NODE_SURF) { gid = cp->slave_grp_id[i]; rtc = contact_agg_mark_node_group(mark, global_mesh, gid, agg_id, &agg_dup); if (rtc != RTC_NORMAL) goto 
error; } else { /* HECMW_CONTACT_TYPE_SURF_SURF */ gid = cp->slave_grp_id[i]; rtc = contact_agg_mark_surf_group(mark, global_mesh, gid, agg_id, &agg_dup); if (rtc != RTC_NORMAL) goto error; } /* master */ gid = cp->master_grp_id[i]; rtc = contact_agg_mark_surf_group(mark, global_mesh, gid, agg_id, &agg_dup); if (rtc != RTC_NORMAL) goto error; if (agg_dup >= 0) { for (i = 0; i < global_mesh->n_node; i++) { if (mark[i] == agg_id) { mark[i] = agg_dup; } } } else { agg_id++; } } /* mark other nodes */ for (i = 0; i < global_mesh->n_node; i++) { if (mark[i] < 0) { mark[i] = agg_id++; } } n_node2 = agg_id; /* degenerate node graph */ rtc = HECMW_graph_init_with_arrays(&graph1, global_mesh->n_node, node_graph_index, node_graph_item); if (rtc != RTC_NORMAL) goto error; rtc = HECMW_graph_init(&graph2); if (rtc != RTC_NORMAL) goto error; rtc = HECMW_graph_degeneGraph(&graph2, &graph1, n_node2, mark); if (rtc != RTC_NORMAL) goto error; HECMW_graph_finalize(&graph1); node_graph_index2 = HECMW_graph_getEdgeIndex(&graph2); node_graph_item2 = HECMW_graph_getEdgeItem(&graph2); node_weight2 = (int *)HECMW_calloc(n_node2, sizeof(int)); if (node_weight2 == NULL) { HECMW_set_error(errno, ""); goto error; } for (i = 0; i < global_mesh->n_node; i++) { node_weight2[mark[i]] += 1; } HECMW_log(HECMW_LOG_DEBUG, "Aggregation of contact pairs done\n"); belong_domain = (int *)HECMW_calloc(n_node2, sizeof(int)); if (belong_domain == NULL) { HECMW_set_error(errno, ""); goto error; } switch (cont_data->method) { case HECMW_PART_METHOD_PMETIS: /* pMETIS */ n_edgecut = pmetis_interface_with_weight( n_node2, ncon, global_mesh->n_subdomain, node_graph_index2, node_graph_item2, node_weight2, belong_domain); if (n_edgecut < 0) goto error; break; case HECMW_PART_METHOD_KMETIS: /* kMETIS */ n_edgecut = kmetis_interface_with_weight( n_node2, ncon, global_mesh->n_subdomain, node_graph_index2, node_graph_item2, node_weight2, belong_domain); if (n_edgecut < 0) goto error; break; default: 
HECMW_set_error(HECMW_PART_E_INVALID_PMETHOD, ""); goto error; } for (i = 0; i < global_mesh->n_node; i++) { global_mesh->node_ID[2 * i + 1] = belong_domain[mark[i]]; } HECMW_graph_finalize(&graph2); HECMW_free(node_graph_index); HECMW_free(node_graph_item); HECMW_free(mark); HECMW_free(node_weight2); HECMW_free(belong_domain); return n_edgecut; error: HECMW_free(node_graph_index); HECMW_free(node_graph_item); HECMW_free(mark); HECMW_free(node_weight2); HECMW_free(belong_domain); return -1; } static int metis_partition_nb_contact_dist( struct hecmwST_local_mesh *global_mesh, const struct hecmw_part_cont_data *cont_data, const struct hecmw_part_edge_data *edge_data) { int n_edgecut; int *node_graph_index = NULL; /* index for nodal graph */ int *node_graph_item = NULL; /* member of nodal graph */ int *belong_domain = NULL; int rtc; int i; int ncon; int *node_weight = NULL; int *mark = NULL; HECMW_assert( global_mesh->hecmw_flag_partcontact == HECMW_FLAG_PARTCONTACT_SIMPLE || global_mesh->hecmw_flag_partcontact == HECMW_FLAG_PARTCONTACT_DISTRIBUTE); node_graph_index = (int *)HECMW_calloc(global_mesh->n_node + 1, sizeof(int)); if (node_graph_index == NULL) { HECMW_set_error(errno, ""); goto error; } node_graph_item = (int *)HECMW_malloc(sizeof(int) * edge_data->n_edge * 2); if (node_graph_item == NULL) { HECMW_set_error(errno, ""); goto error; } HECMW_log(HECMW_LOG_DEBUG, "Starting creation of node graph...\n"); rtc = create_node_graph(global_mesh, edge_data, node_graph_index, node_graph_item); if (rtc != RTC_NORMAL) goto error; HECMW_log(HECMW_LOG_DEBUG, "Creation of node graph done\n"); if (global_mesh->hecmw_flag_partcontact == HECMW_FLAG_PARTCONTACT_SIMPLE) { HECMW_log(HECMW_LOG_DEBUG, "Partitioning mode: contact-simple\n"); ncon = 1; node_weight = NULL; } else /* HECMW_FLAG_PARTCONTACT_DISTRIBUTE */ { HECMW_log(HECMW_LOG_DEBUG, "Partitioning mode: contact-distribute\n"); ncon = 2; mark = (int *)HECMW_calloc(global_mesh->n_node, sizeof(int)); if (mark == NULL) { 
HECMW_set_error(errno, ""); goto error; } rtc = mark_contact_master_nodes(global_mesh, mark); if (rtc != RTC_NORMAL) goto error; node_weight = (int *)HECMW_calloc(global_mesh->n_node * ncon, sizeof(int)); if (node_weight == NULL) { HECMW_set_error(errno, ""); goto error; } for (i = 0; i < global_mesh->n_node; i++) { /* 1st condition: distribute nodes equally */ node_weight[i * ncon] = 1; /* 2nd condition: distribute master nodes equally */ node_weight[i * ncon + 1] = mark[i]; } HECMW_free(mark); } belong_domain = (int *)HECMW_calloc(global_mesh->n_node, sizeof(int)); if (belong_domain == NULL) { HECMW_set_error(errno, ""); goto error; } switch (cont_data->method) { case HECMW_PART_METHOD_PMETIS: /* pMETIS */ n_edgecut = pmetis_interface_with_weight( global_mesh->n_node, ncon, global_mesh->n_subdomain, node_graph_index, node_graph_item, node_weight, belong_domain); if (n_edgecut < 0) goto error; break; case HECMW_PART_METHOD_KMETIS: /* kMETIS */ n_edgecut = kmetis_interface_with_weight( global_mesh->n_node, ncon, global_mesh->n_subdomain, node_graph_index, node_graph_item, node_weight, belong_domain); if (n_edgecut < 0) goto error; break; default: HECMW_set_error(HECMW_PART_E_INVALID_PMETHOD, ""); goto error; } for (i = 0; i < global_mesh->n_node; i++) { global_mesh->node_ID[2 * i + 1] = belong_domain[i]; } HECMW_free(node_graph_index); HECMW_free(node_graph_item); HECMW_free(belong_domain); if (node_weight) HECMW_free(node_weight); return n_edgecut; error: HECMW_free(node_graph_index); HECMW_free(node_graph_item); HECMW_free(belong_domain); if (node_weight) HECMW_free(node_weight); if (mark) HECMW_free(mark); return -1; } static int metis_partition_nb_default( struct hecmwST_local_mesh *global_mesh, const struct hecmw_part_cont_data *cont_data, const struct hecmw_part_edge_data *edge_data) { int n_edgecut; int *node_graph_index = NULL; /* index for nodal graph */ int *node_graph_item = NULL; /* member of nodal graph */ int *belong_domain = NULL; int rtc; int i; 
node_graph_index = (int *)HECMW_calloc(global_mesh->n_node + 1, sizeof(int)); if (node_graph_index == NULL) { HECMW_set_error(errno, ""); goto error; } node_graph_item = (int *)HECMW_malloc(sizeof(int) * edge_data->n_edge * 2); if (node_graph_item == NULL) { HECMW_set_error(errno, ""); goto error; } HECMW_log(HECMW_LOG_DEBUG, "Starting creation of node graph...\n"); rtc = create_node_graph(global_mesh, edge_data, node_graph_index, node_graph_item); if (rtc != RTC_NORMAL) goto error; HECMW_log(HECMW_LOG_DEBUG, "Creation of node graph done\n"); belong_domain = (int *)HECMW_calloc(global_mesh->n_node, sizeof(int)); if (belong_domain == NULL) { HECMW_set_error(errno, ""); goto error; } HECMW_log(HECMW_LOG_DEBUG, "Partitioning mode: default\n"); switch (cont_data->method) { case HECMW_PART_METHOD_PMETIS: /* pMETIS */ n_edgecut = pmetis_interface(global_mesh->n_node, global_mesh->n_subdomain, node_graph_index, node_graph_item, belong_domain); if (n_edgecut < 0) goto error; break; case HECMW_PART_METHOD_KMETIS: /* kMETIS */ n_edgecut = kmetis_interface(global_mesh->n_node, global_mesh->n_subdomain, node_graph_index, node_graph_item, belong_domain); if (n_edgecut < 0) goto error; break; default: HECMW_set_error(HECMW_PART_E_INVALID_PMETHOD, ""); goto error; } for (i = 0; i < global_mesh->n_node; i++) { global_mesh->node_ID[2 * i + 1] = belong_domain[i]; } HECMW_free(node_graph_index); HECMW_free(node_graph_item); HECMW_free(belong_domain); return n_edgecut; error: HECMW_free(node_graph_index); HECMW_free(node_graph_item); HECMW_free(belong_domain); return -1; } static int metis_partition_nb(struct hecmwST_local_mesh *global_mesh, const struct hecmw_part_cont_data *cont_data, const struct hecmw_part_edge_data *edge_data) { if (global_mesh->contact_pair->n_pair > 0) { switch (global_mesh->hecmw_flag_partcontact) { case HECMW_FLAG_PARTCONTACT_AGGREGATE: return metis_partition_nb_contact_agg(global_mesh, cont_data, edge_data); case HECMW_FLAG_PARTCONTACT_DISTRIBUTE: case 
      HECMW_FLAG_PARTCONTACT_SIMPLE:
        return metis_partition_nb_contact_dist(global_mesh, cont_data,
                                               edge_data);
      default:
        return -1;
    }
  } else {
    return metis_partition_nb_default(global_mesh, cont_data, edge_data);
  }
}

/**
 * Element-based METIS partitioning: partitions the element graph and writes
 * the resulting subdomain of each element to elem_ID[2*i+1].
 * Returns the edge-cut count (>= 0) on success, -1 on error.
 */
static int metis_partition_eb(struct hecmwST_local_mesh *global_mesh,
                              const struct hecmw_part_cont_data *cont_data,
                              int *elem_graph_index, int *elem_graph_item) {
  int n_edgecut;
  int *belong_domain = NULL;
  int i;

  belong_domain = (int *)HECMW_calloc(global_mesh->n_elem, sizeof(int));
  if (belong_domain == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  switch (cont_data->method) {
    case HECMW_PART_METHOD_PMETIS: /* pMETIS */
      n_edgecut = pmetis_interface(global_mesh->n_elem,
                                   global_mesh->n_subdomain, elem_graph_index,
                                   elem_graph_item, belong_domain);
      if (n_edgecut < 0) goto error;
      break;

    case HECMW_PART_METHOD_KMETIS: /* kMETIS */
      n_edgecut = kmetis_interface(global_mesh->n_elem,
                                   global_mesh->n_subdomain, elem_graph_index,
                                   elem_graph_item, belong_domain);
      if (n_edgecut < 0) goto error;
      break;

    default:
      HECMW_set_error(HECMW_PART_E_INVALID_PMETHOD, "");
      goto error;
  }

  /* store the computed subdomain of each element */
  for (i = 0; i < global_mesh->n_elem; i++) {
    global_mesh->elem_ID[2 * i + 1] = belong_domain[i];
  }

  HECMW_free(belong_domain);
  return n_edgecut;

error:
  HECMW_free(belong_domain);
  return -1;
}

/*------------------------------------------------------------------------------------------------*/

/*
 * Decide the owning subdomain of every node (node-based partitioning) and
 * record it in node_ID[2*i+1].  Builds the mesh edge information, then
 * dispatches on the configured method (RCB or METIS); for RCB the edge cut
 * is counted here by comparing the domains of each edge's end nodes.
 */
static int set_node_belong_domain_nb(
    struct hecmwST_local_mesh *global_mesh,
    const struct hecmw_part_cont_data *cont_data) {
  struct hecmw_part_edge_data *edge_data = NULL;
  int n_edgecut;
  int rtc;
  long long int i;

  edge_data = (struct hecmw_part_edge_data *)HECMW_malloc(
      sizeof(struct hecmw_part_edge_data));
  if (edge_data == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  } else {
    edge_data->n_edge = 0;
    edge_data->edge_node_item = NULL;
  }

  HECMW_log(HECMW_LOG_DEBUG, "Starting creation of mesh edge info...\n");

  rtc = HECMW_mesh_edge_info(global_mesh, edge_data);
  if (rtc != 0) goto error;

  HECMW_log(HECMW_LOG_DEBUG,
"Creation of mesh edge info done\n"); switch (cont_data->method) { case HECMW_PART_METHOD_RCB: /* RCB */ rtc = rcb_partition(global_mesh->n_node, global_mesh->node, global_mesh->node_ID, cont_data); if (rtc != RTC_NORMAL) goto error; for (n_edgecut = 0, i = 0; i < edge_data->n_edge; i++) { if (global_mesh ->node_ID[2 * (edge_data->edge_node_item[2 * i] - 1) + 1] != global_mesh ->node_ID[2 * (edge_data->edge_node_item[2 * i + 1] - 1) + 1]) { n_edgecut++; } } break; case HECMW_PART_METHOD_KMETIS: /* kMETIS */ case HECMW_PART_METHOD_PMETIS: /* pMETIS */ n_edgecut = metis_partition_nb(global_mesh, cont_data, edge_data); if (n_edgecut < 0) goto error; break; default: HECMW_set_error(HECMW_PART_E_INVALID_PMETHOD, ""); goto error; } rtc = HECMW_part_set_log_n_edgecut(edge_data->n_edge, n_edgecut); if (rtc != RTC_NORMAL) goto error; /* commented out by K.Goto; begin */ /* rtc = eqn_block( global_mesh ); */ /* if( rtc != RTC_NORMAL ) goto error; */ /* commented out by K.Goto; end */ HECMW_free(edge_data->edge_node_item); HECMW_free(edge_data); return RTC_NORMAL; error: if (edge_data) { HECMW_free(edge_data->edge_node_item); } HECMW_free(edge_data); return RTC_ERROR; } static int set_node_belong_domain_eb(struct hecmwST_local_mesh *global_mesh) { int node; int i, j; for (i = 0; i < global_mesh->n_node; i++) { global_mesh->node_ID[2 * i + 1] = global_mesh->n_subdomain; } for (i = 0; i < global_mesh->n_elem; i++) { for (j = global_mesh->elem_node_index[i]; j < global_mesh->elem_node_index[i + 1]; j++) { node = global_mesh->elem_node_item[j]; if (global_mesh->elem_ID[2 * i + 1] < global_mesh->node_ID[2 * (node - 1) + 1]) { global_mesh->node_ID[2 * (node - 1) + 1] = global_mesh->elem_ID[2 * i + 1]; } } } return RTC_NORMAL; } static int set_local_node_id(struct hecmwST_local_mesh *global_mesh) { int *counter; int j, domain; counter = (int *)HECMW_calloc(global_mesh->n_subdomain, sizeof(int)); if (counter == NULL) { HECMW_set_error(errno, ""); goto error; } for (j = 0; j < 
global_mesh->n_node; j++) { domain = global_mesh->node_ID[2 * j + 1]; global_mesh->node_ID[2 * j] = ++counter[domain]; } HECMW_free(counter); return RTC_NORMAL; error: return RTC_ERROR; } static int wnumbering_node(struct hecmwST_local_mesh *global_mesh, const struct hecmw_part_cont_data *cont_data) { int rtc; int i; HECMW_free(global_mesh->node_ID); global_mesh->node_ID = (int *)HECMW_malloc(sizeof(int) * global_mesh->n_node * 2); if (global_mesh->node_ID == NULL) { HECMW_set_error(errno, ""); goto error; } else { for (i = 0; i < global_mesh->n_node; i++) { global_mesh->node_ID[2 * i] = i + 1; global_mesh->node_ID[2 * i + 1] = 0; } } if (global_mesh->n_subdomain == 1) return RTC_NORMAL; switch (global_mesh->hecmw_flag_parttype) { case HECMW_FLAG_PARTTYPE_NODEBASED: /* for node-based partitioning */ rtc = set_node_belong_domain_nb(global_mesh, cont_data); if (rtc != RTC_NORMAL) goto error; break; case HECMW_FLAG_PARTTYPE_ELEMBASED: /* for element-based partitioning */ rtc = set_node_belong_domain_eb(global_mesh); if (rtc != RTC_NORMAL) goto error; break; default: HECMW_set_error(HECMW_PART_E_INVALID_PTYPE, ""); goto error; } rtc = set_local_node_id(global_mesh); if (rtc != RTC_NORMAL) goto error; return RTC_NORMAL; error: return RTC_ERROR; } /*------------------------------------------------------------------------------------------------*/ static int set_elem_belong_domain_nb(struct hecmwST_local_mesh *global_mesh) { int node, node_domain, min_domain; int i, j; for (i = 0; i < global_mesh->n_elem; i++) { min_domain = global_mesh->n_subdomain; for (j = global_mesh->elem_node_index[i]; j < global_mesh->elem_node_index[i + 1]; j++) { node = global_mesh->elem_node_item[j]; node_domain = global_mesh->node_ID[2 * (node - 1) + 1]; if (node_domain < min_domain) { min_domain = node_domain; } } global_mesh->elem_ID[2 * i + 1] = min_domain; } return RTC_NORMAL; } static int count_edge_for_eb(const struct hecmwST_local_mesh *global_mesh, struct hecmw_part_edge_data 
*elem_data, int *elem_graph_index, int *elem_graph_item) { int rtc; long long int eid; int i, j; rtc = HECMW_mesh_hsort_edge_init(global_mesh->n_node, global_mesh->n_elem); if (rtc != RTC_NORMAL) goto error; for (i = 0; i < global_mesh->n_elem; i++) { for (j = elem_graph_index[i]; j < elem_graph_index[i + 1]; j++) { eid = HECMW_mesh_hsort_edge(i + 1, elem_graph_item[j] + 1); if (eid < 0) goto error; } } elem_data->n_edge = HECMW_mesh_hsort_edge_get_n(); if (elem_data->n_edge < 0) goto error; elem_data->edge_node_item = HECMW_mesh_hsort_edge_get_v(); if (elem_data->edge_node_item == NULL) goto error; HECMW_mesh_hsort_edge_final(); return RTC_NORMAL; error: HECMW_mesh_hsort_edge_final(); return RTC_ERROR; } static int set_elem_belong_domain_eb( struct hecmwST_local_mesh *global_mesh, const struct hecmw_part_cont_data *cont_data) { int n_edgecut = 0; int *elem_graph_index = NULL; int *elem_graph_item = NULL; struct hecmw_part_edge_data *elem_data = NULL; int rtc; long long int i; elem_graph_index = (int *)HECMW_calloc(global_mesh->n_elem + 1, sizeof(int)); if (elem_graph_index == NULL) { HECMW_set_error(errno, ""); goto error; } elem_data = (struct hecmw_part_edge_data *)HECMW_malloc( sizeof(struct hecmw_part_edge_data)); if (elem_data == NULL) { HECMW_set_error(errno, ""); goto error; } else { elem_data->n_edge = 0; elem_data->edge_node_item = NULL; } HECMW_log(HECMW_LOG_DEBUG, "Starting creation of elem graph...\n"); elem_graph_item = create_elem_graph(global_mesh, elem_graph_index); if (elem_graph_item == NULL) goto error; HECMW_log(HECMW_LOG_DEBUG, "Creation of elem graph done\n"); rtc = count_edge_for_eb(global_mesh, elem_data, elem_graph_index, elem_graph_item); if (rtc != RTC_NORMAL) goto error; switch (cont_data->method) { case HECMW_PART_METHOD_RCB: /* RCB */ rtc = rcb_partition_eb(global_mesh, cont_data); if (rtc != RTC_NORMAL) goto error; for (n_edgecut = 0, i = 0; i < elem_data->n_edge; i++) { if (global_mesh ->elem_ID[2 * (elem_data->edge_node_item[2 * i] 
- 1) + 1] != global_mesh ->elem_ID[2 * (elem_data->edge_node_item[2 * i + 1] - 1) + 1]) { n_edgecut++; } } break; case HECMW_PART_METHOD_PMETIS: /* pMETIS */ case HECMW_PART_METHOD_KMETIS: /* kMETIS */ n_edgecut = metis_partition_eb(global_mesh, cont_data, elem_graph_index, elem_graph_item); if (n_edgecut < 0) goto error; break; default: HECMW_set_error(HECMW_PART_E_INVALID_PMETHOD, ""); goto error; } rtc = HECMW_part_set_log_n_edgecut(elem_data->n_edge, n_edgecut); if (rtc != RTC_NORMAL) goto error; HECMW_free(elem_graph_index); HECMW_free(elem_graph_item); HECMW_free(elem_data->edge_node_item); HECMW_free(elem_data); return RTC_NORMAL; error: HECMW_free(elem_graph_index); HECMW_free(elem_graph_item); if (elem_data) { HECMW_free(elem_data->edge_node_item); } HECMW_free(elem_data); return RTC_ERROR; } static int set_local_elem_id(struct hecmwST_local_mesh *global_mesh) { int *counter; int j, domain; counter = (int *)HECMW_calloc(global_mesh->n_subdomain, sizeof(int)); if (counter == NULL) { HECMW_set_error(errno, ""); goto error; } for (j = 0; j < global_mesh->n_elem; j++) { domain = global_mesh->elem_ID[2 * j + 1]; global_mesh->elem_ID[2 * j] = ++counter[domain]; } HECMW_free(counter); return RTC_NORMAL; error: return RTC_ERROR; } static int wnumbering_elem(struct hecmwST_local_mesh *global_mesh, const struct hecmw_part_cont_data *cont_data) { int rtc; int i; HECMW_free(global_mesh->elem_ID); global_mesh->elem_ID = (int *)HECMW_malloc(sizeof(int) * global_mesh->n_elem * 2); if (global_mesh->elem_ID == NULL) { HECMW_set_error(errno, ""); goto error; } else { for (i = 0; i < global_mesh->n_elem; i++) { global_mesh->elem_ID[2 * i] = i + 1; global_mesh->elem_ID[2 * i + 1] = 0; } } if (global_mesh->n_subdomain == 1) return RTC_NORMAL; switch (global_mesh->hecmw_flag_parttype) { case HECMW_FLAG_PARTTYPE_NODEBASED: /* for node-based partitioning */ rtc = set_elem_belong_domain_nb(global_mesh); if (rtc != RTC_NORMAL) goto error; break; case HECMW_FLAG_PARTTYPE_ELEMBASED: 
      /* for element-based partitioning */
      rtc = set_elem_belong_domain_eb(global_mesh, cont_data);
      if (rtc != RTC_NORMAL) goto error;
      break;

    default:
      HECMW_set_error(HECMW_PART_E_INVALID_PTYPE, "");
      goto error;
  }

  /* assign local (per-subdomain) element ids into elem_ID[2*i] */
  rtc = set_local_elem_id(global_mesh);
  if (rtc != RTC_NORMAL) goto error;

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/*
 * Double numbering: give every node and element a (local id, domain) pair.
 * The primary entity of the partitioning type is numbered first -- nodes
 * first for node-based, elements first for element-based -- because the
 * second step derives its domains from the first.
 */
static int wnumbering(struct hecmwST_local_mesh *global_mesh,
                      const struct hecmw_part_cont_data *cont_data) {
  int rtc;

  HECMW_assert(global_mesh);
  HECMW_assert(cont_data);

  HECMW_log(HECMW_LOG_DEBUG, "Starting double numbering...");

  switch (global_mesh->hecmw_flag_parttype) {
    case HECMW_FLAG_PARTTYPE_NODEBASED: /* for node-based partitioning */
      rtc = wnumbering_node(global_mesh, cont_data);
      if (rtc != RTC_NORMAL) goto error;

      rtc = wnumbering_elem(global_mesh, cont_data);
      if (rtc != RTC_NORMAL) goto error;
      break;

    case HECMW_FLAG_PARTTYPE_ELEMBASED: /* for element-based partitioning */
      rtc = wnumbering_elem(global_mesh, cont_data);
      if (rtc != RTC_NORMAL) goto error;

      rtc = wnumbering_node(global_mesh, cont_data);
      if (rtc != RTC_NORMAL) goto error;
      break;

    default:
      HECMW_set_error(HECMW_PART_E_INVALID_PTYPE, "");
      goto error;
  }

  HECMW_log(HECMW_LOG_DEBUG, "Double numbering done");

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/*==================================================================================================
  create neighboring domain & communication information
==================================================================================================*/

/*K.
Inagaki */
/* Mark every internal node of `current_domain` with the INTERNAL bit, using
 * the precomputed per-domain internal node list (file-scope int_nlist). */
static int mask_node_by_domain(const struct hecmwST_local_mesh *global_mesh,
                               char *node_flag, int current_domain) {
  int i, node;

  for (i = 0; i < n_int_nlist[current_domain]; i++) {
    node = int_nlist[current_domain][i]; /* 1-based node id */
    MASK_BIT(node_flag[node - 1], INTERNAL);
  }

  return RTC_NORMAL;
}

/* Mark each element INTERNAL or EXTERNAL depending on whether its owning
 * domain (elem_ID[2*i+1]) equals `current_domain`. */
static int mask_elem_by_domain(const struct hecmwST_local_mesh *global_mesh,
                               char *elem_flag, int current_domain) {
  int i;

  for (i = 0; i < global_mesh->n_elem; i++) {
    (global_mesh->elem_ID[2 * i + 1] == current_domain)
        ? MASK_BIT(elem_flag[i], INTERNAL)
        : MASK_BIT(elem_flag[i], EXTERNAL);
  }

  return RTC_NORMAL;
}

/*K. Inagaki */
/* Variant of mask_elem_by_domain that marks only INTERNAL elements, taken
 * from the precomputed per-domain list (file-scope int_elist); EXTERNAL bits
 * are not set here. */
static int mask_elem_by_domain_mod(char *elem_flag, int current_domain) {
  int i, elem;

  for (i = 0; i < n_int_elist[current_domain]; i++) {
    elem = int_elist[current_domain][i]; /* 1-based element id */
    MASK_BIT(elem_flag[elem - 1], INTERNAL);
  }

  return RTC_NORMAL;
}

/* Mask MPC slave nodes: every slave gets the MASK bit; a slave whose
 * mpc-link crosses the domain boundary (slave/master INTERNAL bits differ)
 * additionally gets the MARK bit.  (current_domain is unused here.) */
static int mask_slave_node(const struct hecmwST_local_mesh *global_mesh,
                           char *node_flag, int current_domain) {
  int i;

  for (i = 0; i < global_mesh->mpc->n_mpc; i++) {
    int j0, je, slave, master, j, evalsum;

    j0 = global_mesh->mpc->mpc_index[i];
    je = global_mesh->mpc->mpc_index[i + 1];
    slave = global_mesh->mpc->mpc_item[j0];

    /* mask all slave nodes */
    MASK_BIT(node_flag[slave - 1], MASK);

    /* mark slave nodes that have mpc-link across the boundary */
    evalsum = 0;
    for (j = j0 + 1; j < je; j++) {
      master = global_mesh->mpc->mpc_item[j];
      if (EVAL_BIT(node_flag[slave - 1], INTERNAL) ^ /* exclusive or */
          EVAL_BIT(node_flag[master - 1], INTERNAL)) {
        evalsum++;
      }
    }
    if (evalsum) {
      MASK_BIT(node_flag[slave - 1], MARK);
    }
  }

  return RTC_NORMAL;
}

/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 * - - - - - - - - - */

/*K.
Inagaki */
/* Mark the overlap elements of `domain` as OVERLAP and BOUNDARY, using the
 * precomputed per-domain boundary element list (file-scope bnd_elist).
 * NOTE(review): the count is read from n_bnd_elist[2*domain+1] -- the list
 * apparently stores two counts per domain; confirm the pair layout against
 * the code that fills n_bnd_elist. */
static int mask_overlap_elem(char *elem_flag, int domain) {
  int i, elem;

  for (i = 0; i < n_bnd_elist[2 * domain + 1]; i++) {
    elem = bnd_elist[domain][i]; /* 1-based element id */
    MASK_BIT(elem_flag[elem - 1], OVERLAP);
    MASK_BIT(elem_flag[elem - 1], BOUNDARY);
  }

  return RTC_NORMAL;
}

/* Mark every node of every BOUNDARY element as OVERLAP and BOUNDARY. */
static int mask_boundary_node(const struct hecmwST_local_mesh *global_mesh,
                              char *node_flag, const char *elem_flag) {
  int node;
  int i, j;

  for (i = 0; i < global_mesh->n_elem; i++) {
    if (EVAL_BIT(elem_flag[i], BOUNDARY)) {
      for (j = global_mesh->elem_node_index[i];
           j < global_mesh->elem_node_index[i + 1]; j++) {
        node = global_mesh->elem_node_item[j];
        MASK_BIT(node_flag[node - 1], OVERLAP);
        MASK_BIT(node_flag[node - 1], BOUNDARY);
      }
    }
  }

  return RTC_NORMAL;
}

/*K. Inagaki */
/* Variant of mask_boundary_node that uses the precomputed boundary node list
 * (file-scope bnd_nlist) of `domain`.  (elem_flag is unused here.) */
static int mask_boundary_node_mod(const struct hecmwST_local_mesh *global_mesh,
                                  char *node_flag, char *elem_flag,
                                  int domain) {
  int i, node;

  for (i = 0; i < n_bnd_nlist[2 * domain + 1]; i++) {
    node = bnd_nlist[domain][i]; /* 1-based node id */
    MASK_BIT(node_flag[node - 1], OVERLAP);
    MASK_BIT(node_flag[node - 1], BOUNDARY);
  }

  return RTC_NORMAL;
}

/* Add non-link elements that touch a marked MPC slave boundary node
 * (BOUNDARY+MASK+MARK all set) to the OVERLAP/BOUNDARY set; *added returns
 * the number of elements newly added. */
static int mask_boundary_elem_with_slave(
    const struct hecmwST_local_mesh *global_mesh, const char *node_flag,
    char *elem_flag, int *added) {
  int node, evalsum;
  int i, j;

  *added = 0;
  for (i = 0; i < global_mesh->n_elem; i++) {
    if (EVAL_BIT(elem_flag[i], BOUNDARY)) continue;
    if (HECMW_is_etype_link(global_mesh->elem_type[i]))
      continue; /* skip link elements */
    evalsum = 0;
    for (j = global_mesh->elem_node_index[i];
         j < global_mesh->elem_node_index[i + 1]; j++) {
      node = global_mesh->elem_node_item[j];
      /* check if the node is on boundary and a slave having mpc-link across the
       * boundary */
      if (EVAL_BIT(node_flag[node - 1], BOUNDARY) &&
          EVAL_BIT(node_flag[node - 1], MASK) &&
          EVAL_BIT(node_flag[node - 1], MARK)) {
        evalsum++;
      }
    }
    if (evalsum) {
      MASK_BIT(elem_flag[i], OVERLAP);
      MASK_BIT(elem_flag[i], BOUNDARY);
      (*added)++;
    }
  }

  return RTC_NORMAL;
}

static int mask_boundary_link_elem_with_slave(
    const struct
hecmwST_local_mesh *global_mesh, const char *node_flag, char *elem_flag, int *added) { int node, evalsum; int i, j; *added = 0; for (i = 0; i < global_mesh->n_elem; i++) { if (EVAL_BIT(elem_flag[i], BOUNDARY)) continue; if (!HECMW_is_etype_link(global_mesh->elem_type[i])) continue; /* check only link elements */ evalsum = 0; for (j = global_mesh->elem_node_index[i]; j < global_mesh->elem_node_index[i + 1]; j++) { node = global_mesh->elem_node_item[j]; /* check if the node is on boundary and a slave */ if (EVAL_BIT(node_flag[node - 1], BOUNDARY) && EVAL_BIT(node_flag[node - 1], MASK)) { evalsum++; } } if (evalsum) { MASK_BIT(elem_flag[i], OVERLAP); MASK_BIT(elem_flag[i], BOUNDARY); (*added)++; } } return RTC_NORMAL; } static int mask_additional_overlap_elem( const struct hecmwST_local_mesh *global_mesh, const char *node_flag, char *elem_flag) { int node, evalsum; int i, j; for (i = 0; i < global_mesh->n_elem; i++) { evalsum = 0; for (j = global_mesh->elem_node_index[i]; j < global_mesh->elem_node_index[i + 1]; j++) { node = global_mesh->elem_node_item[j]; evalsum += (EVAL_BIT(node_flag[node - 1], BOUNDARY)); } if (evalsum) { MASK_BIT(elem_flag[i], OVERLAP); MASK_BIT(elem_flag[i], BOUNDARY); } } return RTC_NORMAL; } static int mask_contact_slave_surf(const struct hecmwST_local_mesh *global_mesh, char *elem_flag, char *node_flag) { int i, j, k; int elem, node, selem; int evalsum, evalsum2; int master_gid, slave_gid; int jstart, jend; struct hecmwST_contact_pair *cp; struct hecmwST_surf_grp *sgrp; struct hecmwST_node_grp *ngrp; cp = global_mesh->contact_pair; sgrp = global_mesh->surf_group; ngrp = global_mesh->node_group; for (i = 0; i < cp->n_pair; i++) { switch (cp->type[i]) { case HECMW_CONTACT_TYPE_NODE_SURF: /* if any elem of master surf is internal */ evalsum = 0; master_gid = cp->master_grp_id[i]; jstart = sgrp->grp_index[master_gid - 1]; jend = sgrp->grp_index[master_gid]; for (j = jstart; j < jend; j++) { elem = sgrp->grp_item[j * 2]; if 
(EVAL_BIT(elem_flag[elem - 1], INTERNAL)) { evalsum++; break; } } if (evalsum) { /* mask all external slave nodes as BOUNDARY (but not OVERLAP) */ slave_gid = cp->slave_grp_id[i]; jstart = ngrp->grp_index[slave_gid - 1]; jend = ngrp->grp_index[slave_gid]; for (j = jstart; j < jend; j++) { node = ngrp->grp_item[j]; if (!EVAL_BIT(node_flag[node - 1], INTERNAL)) { MASK_BIT(node_flag[node - 1], BOUNDARY); } } } /* if any elem of master surf is external */ evalsum = 0; master_gid = cp->master_grp_id[i]; jstart = sgrp->grp_index[master_gid - 1]; jend = sgrp->grp_index[master_gid]; for (j = jstart; j < jend; j++) { elem = sgrp->grp_item[j * 2]; if (!EVAL_BIT(elem_flag[elem - 1], INTERNAL)) { evalsum++; break; } } if (evalsum) { /* mask all internal slave nodes as BOUNDARY (but not OVERLAP) */ slave_gid = cp->slave_grp_id[i]; jstart = ngrp->grp_index[slave_gid - 1]; jend = ngrp->grp_index[slave_gid]; for (j = jstart; j < jend; j++) { node = ngrp->grp_item[j]; if (EVAL_BIT(node_flag[node - 1], INTERNAL)) { MASK_BIT(node_flag[node - 1], BOUNDARY); } } } break; case HECMW_CONTACT_TYPE_SURF_SURF: /* if any elem of master surf is internal or boundary */ evalsum = 0; master_gid = cp->master_grp_id[i]; jstart = sgrp->grp_index[master_gid - 1]; jend = sgrp->grp_index[master_gid]; for (j = jstart; j < jend; j++) { elem = sgrp->grp_item[j * 2]; if (EVAL_BIT(elem_flag[elem - 1], INTERNAL) || EVAL_BIT(elem_flag[elem - 1], BOUNDARY)) { evalsum++; break; } } if (evalsum) { /* mask all external slave elems/nodes as BOUNDARY (but not OVERLAP) */ slave_gid = cp->slave_grp_id[i]; jstart = sgrp->grp_index[slave_gid - 1]; jend = sgrp->grp_index[slave_gid]; for (j = jstart; j < jend; j++) { selem = sgrp->grp_item[j * 2]; if (!EVAL_BIT(elem_flag[selem - 1], INTERNAL)) { MASK_BIT(elem_flag[selem - 1], BOUNDARY); for (k = global_mesh->elem_node_index[selem - 1]; k < global_mesh->elem_node_index[selem]; k++) { node = global_mesh->elem_node_item[k]; MASK_BIT(node_flag[node - 1], BOUNDARY); } } } } 
/* if any elem of master surf is external or boundary */ evalsum = 0; master_gid = cp->master_grp_id[i]; jstart = sgrp->grp_index[master_gid - 1]; jend = sgrp->grp_index[master_gid]; for (j = jstart; j < jend; j++) { elem = sgrp->grp_item[j * 2]; if (!EVAL_BIT(elem_flag[elem - 1], INTERNAL) || EVAL_BIT(elem_flag[elem - 1], BOUNDARY)) { evalsum++; break; } } if (evalsum) { /* mask all internal slave nodes as BOUNDARY (but not OVERLAP) */ slave_gid = cp->slave_grp_id[i]; jstart = sgrp->grp_index[slave_gid - 1]; jend = sgrp->grp_index[slave_gid]; for (j = jstart; j < jend; j++) { evalsum2 = 0; selem = sgrp->grp_item[j * 2]; for (k = global_mesh->elem_node_index[selem - 1]; k < global_mesh->elem_node_index[selem]; k++) { node = global_mesh->elem_node_item[k]; if (EVAL_BIT(node_flag[node - 1], INTERNAL)) { evalsum2++; break; } } if (evalsum2) { MASK_BIT(elem_flag[selem - 1], BOUNDARY); for (k = global_mesh->elem_node_index[selem - 1]; k < global_mesh->elem_node_index[selem]; k++) { node = global_mesh->elem_node_item[k]; MASK_BIT(node_flag[node - 1], BOUNDARY); } } } } break; default: return RTC_ERROR; } } return RTC_NORMAL; } static int mask_mesh_status_nb(const struct hecmwST_local_mesh *global_mesh, char *node_flag, char *elem_flag, int current_domain) { int rtc; int i; rtc = mask_node_by_domain(global_mesh, node_flag, current_domain); if (rtc != RTC_NORMAL) goto error; rtc = mask_elem_by_domain_mod(elem_flag, current_domain); if (rtc != RTC_NORMAL) goto error; rtc = mask_overlap_elem(elem_flag, current_domain); if (rtc != RTC_NORMAL) goto error; rtc = mask_boundary_node_mod(global_mesh, node_flag, elem_flag, current_domain); if (rtc != RTC_NORMAL) goto error; if (global_mesh->mpc->n_mpc > 0) { int added = 0; rtc = mask_slave_node(global_mesh, node_flag, current_domain); if (rtc != RTC_NORMAL) goto error; rtc = mask_boundary_elem_with_slave(global_mesh, node_flag, elem_flag, &added); if (rtc != RTC_NORMAL) goto error; if (added > 0) { rtc = 
mask_boundary_node(global_mesh, node_flag, elem_flag); if (rtc != RTC_NORMAL) goto error; } added = 0; rtc = mask_boundary_link_elem_with_slave(global_mesh, node_flag, elem_flag, &added); if (rtc != RTC_NORMAL) goto error; if (added > 0) { rtc = mask_boundary_node(global_mesh, node_flag, elem_flag); if (rtc != RTC_NORMAL) goto error; } for (i = 0; i < global_mesh->n_node; i++) { CLEAR_BIT(node_flag[i], MASK); CLEAR_BIT(node_flag[i], MARK); } } for (i = 1; i < global_mesh->hecmw_flag_partdepth; i++) { rtc = mask_additional_overlap_elem(global_mesh, node_flag, elem_flag); if (rtc != RTC_NORMAL) goto error; rtc = mask_boundary_node(global_mesh, node_flag, elem_flag); if (rtc != RTC_NORMAL) goto error; } if (global_mesh->contact_pair->n_pair > 0) { rtc = mask_contact_slave_surf(global_mesh, elem_flag, node_flag); if (rtc != RTC_NORMAL) goto error; } return RTC_NORMAL; error: return RTC_ERROR; } /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - * - - - - - - - - - */ static int mask_overlap_node_mark(const struct hecmwST_local_mesh *global_mesh, char *node_flag, const char *elem_flag) { int node; int i, j; for (i = 0; i < global_mesh->n_elem; i++) { if (EVAL_BIT(elem_flag[i], INTERNAL)) { for (j = global_mesh->elem_node_index[i]; j < global_mesh->elem_node_index[i + 1]; j++) { node = global_mesh->elem_node_item[j]; MASK_BIT(node_flag[node - 1], MARK); } } else { for (j = global_mesh->elem_node_index[i]; j < global_mesh->elem_node_index[i + 1]; j++) { node = global_mesh->elem_node_item[j]; MASK_BIT(node_flag[node - 1], MASK); } } } return RTC_NORMAL; } static int mask_overlap_node_inner(const struct hecmwST_local_mesh *global_mesh, char *node_flag) { int i; for (i = 0; i < global_mesh->n_node; i++) { if (EVAL_BIT(node_flag[i], MARK) && EVAL_BIT(node_flag[i], MASK)) { MASK_BIT(node_flag[i], OVERLAP); MASK_BIT(node_flag[i], BOUNDARY); } } return RTC_NORMAL; } static int mask_overlap_node(const struct hecmwST_local_mesh *global_mesh, char 
*node_flag, const char *elem_flag) { int rtc; int i; rtc = mask_overlap_node_mark(global_mesh, node_flag, elem_flag); if (rtc != RTC_NORMAL) goto error; rtc = mask_overlap_node_inner(global_mesh, node_flag); if (rtc != RTC_NORMAL) goto error; for (i = 0; i < global_mesh->n_node; i++) { CLEAR_BIT(node_flag[i], MASK); CLEAR_BIT(node_flag[i], MARK); } return RTC_NORMAL; error: return RTC_ERROR; } static int mask_boundary_elem(const struct hecmwST_local_mesh *global_mesh, const char *node_flag, char *elem_flag) { int node, evalsum; int i, j; for (i = 0; i < global_mesh->n_elem; i++) { evalsum = 0; for (j = global_mesh->elem_node_index[i]; j < global_mesh->elem_node_index[i + 1]; j++) { node = global_mesh->elem_node_item[j]; if (EVAL_BIT(node_flag[node - 1], BOUNDARY)) evalsum++; } if (evalsum) { MASK_BIT(elem_flag[i], OVERLAP); MASK_BIT(elem_flag[i], BOUNDARY); } } return RTC_NORMAL; } static int mask_mesh_status_eb(const struct hecmwST_local_mesh *global_mesh, char *node_flag, char *elem_flag, int current_domain) { int rtc; int i; for (i = 0; i < global_mesh->n_node; i++) { CLEAR_BIT(node_flag[i], INTERNAL); CLEAR_BIT(node_flag[i], EXTERNAL); CLEAR_BIT(node_flag[i], BOUNDARY); } for (i = 0; i < global_mesh->n_elem; i++) { CLEAR_BIT(elem_flag[i], INTERNAL); CLEAR_BIT(elem_flag[i], EXTERNAL); CLEAR_BIT(elem_flag[i], BOUNDARY); } rtc = mask_node_by_domain(global_mesh, node_flag, current_domain); if (rtc != RTC_NORMAL) goto error; rtc = mask_elem_by_domain(global_mesh, elem_flag, current_domain); if (rtc != RTC_NORMAL) goto error; rtc = mask_overlap_node(global_mesh, node_flag, elem_flag); if (rtc != RTC_NORMAL) goto error; rtc = mask_boundary_elem(global_mesh, node_flag, elem_flag); if (rtc != RTC_NORMAL) goto error; return RTC_NORMAL; error: return RTC_ERROR; } /*------------------------------------------------------------------------------------------------*/ static int mask_neighbor_domain_nb(const struct hecmwST_local_mesh *global_mesh, const char *node_flag, char 
*domain_flag) { int i; for (i = 0; i < global_mesh->n_node; i++) { if (!EVAL_BIT(node_flag[i], INTERNAL) && EVAL_BIT(node_flag[i], BOUNDARY)) { MASK_BIT(domain_flag[global_mesh->node_ID[2 * i + 1]], MASK); } } return RTC_NORMAL; } /*K. Inagaki */ static int mask_neighbor_domain_nb_mod( const struct hecmwST_local_mesh *global_mesh, const char *node_flag, char *domain_flag, int domain) { int i, node; for (i = n_bnd_nlist[2 * domain]; i < n_bnd_nlist[2 * domain + 1]; i++) { node = bnd_nlist[domain][i]; MASK_BIT(domain_flag[global_mesh->node_ID[2 * node - 1]], MASK); } return RTC_NORMAL; } static int mask_neighbor_domain_nb_contact( const struct hecmwST_local_mesh *global_mesh, const char *node_flag, const char *elem_flag, char *domain_flag) { int i, j, k; int elem, node, selem; int evalsum; int master_gid, slave_gid; int jstart, jend; struct hecmwST_contact_pair *cp; struct hecmwST_surf_grp *sgrp; struct hecmwST_node_grp *ngrp; cp = global_mesh->contact_pair; sgrp = global_mesh->surf_group; ngrp = global_mesh->node_group; for (i = 0; i < cp->n_pair; i++) { /* if any slave node is internal */ evalsum = 0; switch (cp->type[i]) { case HECMW_CONTACT_TYPE_NODE_SURF: slave_gid = cp->slave_grp_id[i]; jstart = ngrp->grp_index[slave_gid - 1]; jend = ngrp->grp_index[slave_gid]; for (j = jstart; j < jend; j++) { node = ngrp->grp_item[j]; if (EVAL_BIT(node_flag[node - 1], INTERNAL)) { evalsum++; break; } } break; case HECMW_CONTACT_TYPE_SURF_SURF: slave_gid = cp->slave_grp_id[i]; jstart = sgrp->grp_index[slave_gid - 1]; jend = sgrp->grp_index[slave_gid]; for (j = jstart; j < jend; j++) { selem = sgrp->grp_item[j * 2]; for (k = global_mesh->elem_node_index[selem - 1]; k < global_mesh->elem_node_index[selem]; k++) { node = global_mesh->elem_node_item[k]; if (EVAL_BIT(node_flag[node - 1], INTERNAL)) { evalsum++; break; } } if (evalsum) break; } break; default: return RTC_ERROR; } /* the domain to which elems of the master surf belong is neighbor */ if (evalsum) { master_gid = 
cp->master_grp_id[i]; jstart = sgrp->grp_index[master_gid - 1]; jend = sgrp->grp_index[master_gid]; for (j = jstart; j < jend; j++) { elem = sgrp->grp_item[j * 2]; if (!EVAL_BIT(elem_flag[elem - 1], INTERNAL)) { MASK_BIT(domain_flag[global_mesh->elem_ID[2 * (elem - 1) + 1]], MASK); } } } } return RTC_NORMAL; } static int mask_neighbor_domain_eb(const struct hecmwST_local_mesh *global_mesh, const char *elem_flag, char *domain_flag) { int i; for (i = 0; i < global_mesh->n_elem; i++) { if (EVAL_BIT(elem_flag[i], EXTERNAL) && EVAL_BIT(elem_flag[i], BOUNDARY)) { MASK_BIT(domain_flag[global_mesh->elem_ID[2 * i + 1]], MASK); } } return RTC_NORMAL; } static int count_neighbor_domain(const struct hecmwST_local_mesh *global_mesh, const char *domain_flag) { int counter; int i; for (counter = 0, i = 0; i < global_mesh->n_subdomain; i++) { if (EVAL_BIT(domain_flag[i], MASK)) counter++; } return counter; } static int set_neighbor_domain(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, const char *domain_flag) { int counter; int i; for (counter = 0, i = 0; i < global_mesh->n_subdomain; i++) { if (EVAL_BIT(domain_flag[i], MASK)) { local_mesh->neighbor_pe[counter++] = i; } } return counter; } static int create_neighbor_info(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, char *node_flag, char *elem_flag, int current_domain) { int rtc; char *domain_flag = NULL; HECMW_assert(global_mesh); HECMW_assert(local_mesh); HECMW_assert(node_flag); HECMW_assert(elem_flag); HECMW_log(HECMW_LOG_DEBUG, "Starting creation of neighboring domain information..."); local_mesh->n_neighbor_pe = 0; local_mesh->neighbor_pe = NULL; domain_flag = (char *)HECMW_calloc(global_mesh->n_subdomain, sizeof(char)); if (domain_flag == NULL) { HECMW_set_error(errno, ""); goto error; } switch (global_mesh->hecmw_flag_parttype) { case HECMW_FLAG_PARTTYPE_NODEBASED: /* for node-based partitioning */ rtc = mask_mesh_status_nb(global_mesh, node_flag, 
elem_flag, current_domain); if (rtc != RTC_NORMAL) goto error; if (is_spdup_available(global_mesh)) { rtc = mask_neighbor_domain_nb_mod(global_mesh, node_flag, domain_flag, current_domain); } else { rtc = mask_neighbor_domain_nb(global_mesh, node_flag, domain_flag); } if (rtc != RTC_NORMAL) goto error; rtc = mask_neighbor_domain_nb_contact(global_mesh, node_flag, elem_flag, domain_flag); if (rtc != RTC_NORMAL) goto error; break; case HECMW_FLAG_PARTTYPE_ELEMBASED: /* for element-based partitioning */ rtc = mask_mesh_status_eb(global_mesh, node_flag, elem_flag, current_domain); if (rtc != RTC_NORMAL) goto error; rtc = mask_neighbor_domain_eb(global_mesh, elem_flag, domain_flag); if (rtc != RTC_NORMAL) goto error; break; default: HECMW_set_error(HECMW_PART_E_INVALID_PTYPE, ""); goto error; } local_mesh->n_neighbor_pe = count_neighbor_domain(global_mesh, domain_flag); if (local_mesh->n_neighbor_pe < 0) { HECMW_set_error(HECMW_PART_E_NNEIGHBORPE_LOWER, ""); goto error; } if (local_mesh->n_neighbor_pe == 0) { local_mesh->neighbor_pe = NULL; HECMW_free(domain_flag); return RTC_NORMAL; } local_mesh->neighbor_pe = (int *)HECMW_malloc(sizeof(int) * local_mesh->n_neighbor_pe); if (local_mesh->neighbor_pe == NULL) { HECMW_set_error(errno, ""); goto error; } rtc = set_neighbor_domain(global_mesh, local_mesh, domain_flag); HECMW_assert(rtc == local_mesh->n_neighbor_pe); HECMW_free(domain_flag); HECMW_log(HECMW_LOG_DEBUG, "Creation of neighboring domain information done"); return RTC_NORMAL; error: HECMW_free(domain_flag); HECMW_free(local_mesh->neighbor_pe); local_mesh->n_neighbor_pe = 0; local_mesh->neighbor_pe = NULL; return RTC_ERROR; } /*================================================================================================*/ static int mask_comm_node(const struct hecmwST_local_mesh *global_mesh, char *node_flag_current, char *node_flag_neighbor) { int i; for (i = 0; i < global_mesh->n_node; i++) { if (EVAL_BIT(node_flag_current[i], BOUNDARY) && 
EVAL_BIT(node_flag_neighbor[i], BOUNDARY)) {
      /* node is a boundary node of both domains -> communication node */
      MASK_BIT(node_flag_current[i], MASK);
    }
  }

  return RTC_NORMAL;
}

/*K. Inagaki */
/* Speed-optimized variant of mask_comm_node(): instead of scanning every
 * global node, visit only the pre-computed boundary-node list of
 * current_domain (bnd_nlist; node IDs are 1-based). */
static int mask_comm_node_mod(const struct hecmwST_local_mesh *global_mesh,
                              char *node_flag_current,
                              char *node_flag_neighbor, int current_domain) {
  int i, node;

  for (i = 0; i < n_bnd_nlist[2 * current_domain + 1]; i++) {
    node = bnd_nlist[current_domain][i];
    if (EVAL_BIT(node_flag_neighbor[node - 1], BOUNDARY)) {
      MASK_BIT(node_flag_current[node - 1], MASK);
    }
  }

  return RTC_NORMAL;
}

/* Mask elements that are boundary elements of both the current and the
 * neighboring domain (candidate shared elements); scans all elements. */
static int mask_comm_elem(const struct hecmwST_local_mesh *global_mesh,
                          char *elem_flag_current, char *elem_flag_neighbor) {
  int i;

  for (i = 0; i < global_mesh->n_elem; i++) {
    if (EVAL_BIT(elem_flag_current[i], BOUNDARY) &&
        EVAL_BIT(elem_flag_neighbor[i], BOUNDARY)) {
      MASK_BIT(elem_flag_current[i], MASK);
    }
  }

  return RTC_NORMAL;
}

/*K. Inagaki */
/* Speed-optimized variant of mask_comm_elem(): visits only the pre-computed
 * boundary-element list of current_domain (bnd_elist; 1-based IDs). */
static int mask_comm_elem_mod(const struct hecmwST_local_mesh *global_mesh,
                              char *elem_flag_current,
                              char *elem_flag_neighbor, int current_domain) {
  int i, elem;

  for (i = 0; i < n_bnd_elist[2 * current_domain + 1]; i++) {
    elem = bnd_elist[current_domain][i];
    if (EVAL_BIT(elem_flag_neighbor[elem - 1], BOUNDARY)) {
      MASK_BIT(elem_flag_current[elem - 1], MASK);
    }
  }

  return RTC_NORMAL;
}

/*K. 
Inagaki */
/* Count internal nodes of 'domain' carrying the MASK bit (nodes selected
 * for a communication-table entry).  Uses the file-scope speed-up lists
 * n_int_nlist / int_nlist; node IDs in the list are 1-based. */
static int count_masked_comm_node(const struct hecmwST_local_mesh *global_mesh,
                                  const char *node_flag, int domain) {
  int counter;
  int i, node;

  for (counter = 0, i = 0; i < n_int_nlist[domain]; i++) {
    node = int_nlist[domain][i];
    if (EVAL_BIT(node_flag[node - 1], MASK)) counter++;
  }

  return counter;
}

/* Count elements owned by 'domain' (elem_ID[2*i+1] == domain) that carry
 * the MASK bit; scans every element of the global mesh. */
static int count_masked_comm_elem(const struct hecmwST_local_mesh *global_mesh,
                                  const char *elem_flag, int domain) {
  int counter;
  int i;

  for (counter = 0, i = 0; i < global_mesh->n_elem; i++) {
    if (EVAL_BIT(elem_flag[i], MASK) && global_mesh->elem_ID[2 * i + 1] == domain)
      counter++;
  }

  return counter;
}

/* Count all nodes of the global mesh carrying the MASK bit. */
static int count_masked_shared_node(
    const struct hecmwST_local_mesh *global_mesh, const char *node_flag) {
  int counter;
  int i;

  for (counter = 0, i = 0; i < global_mesh->n_node; i++) {
    if (EVAL_BIT(node_flag[i], MASK)) counter++;
  }

  return counter;
}

/* Count all elements of the global mesh carrying the MASK bit. */
static int count_masked_shared_elem(
    const struct hecmwST_local_mesh *global_mesh, const char *elem_flag) {
  int counter;
  int i;

  for (counter = 0, i = 0; i < global_mesh->n_elem; i++) {
    if (EVAL_BIT(elem_flag[i], MASK)) counter++;
  }

  return counter;
}

/*K. Inagaki */
/* Speed-optimized variant of count_masked_shared_elem(): visits only the
 * boundary-element list of 'domain' (bnd_elist; 1-based IDs) instead of
 * every element. */
static int count_masked_shared_elem_mod(
    const struct hecmwST_local_mesh *global_mesh, const char *elem_flag,
    int domain) {
  int counter;
  int i, elem;

  for (counter = 0, i = 0; i < n_bnd_elist[2 * domain + 1]; i++) {
    elem = bnd_elist[domain][i];
    if (EVAL_BIT(elem_flag[elem - 1], MASK)) counter++;
  }

  return counter;
}

/*K. 
Inagaki */ static int create_comm_node_pre(const struct hecmwST_local_mesh *global_mesh, const char *node_flag, int **comm_node, int neighbor_idx, int domain) { int counter; int i, node; for (counter = 0, i = 0; i < n_int_nlist[domain]; i++) { node = int_nlist[domain][i]; if (EVAL_BIT(node_flag[node - 1], MASK)) { comm_node[neighbor_idx][counter++] = node; } } return counter; } static int create_comm_elem_pre(const struct hecmwST_local_mesh *global_mesh, const char *elem_flag, int **comm_elem, int neighbor_idx, int domain) { int counter; int i; for (counter = 0, i = 0; i < global_mesh->n_elem; i++) { if (EVAL_BIT(elem_flag[i], MASK) && global_mesh->elem_ID[2 * i + 1] == domain) { comm_elem[neighbor_idx][counter++] = i + 1; } } return counter; } static int create_shared_node_pre(const struct hecmwST_local_mesh *global_mesh, const char *node_flag, int **shared_node, int neighbor_idx) { int counter; int i; for (counter = 0, i = 0; i < global_mesh->n_node; i++) { if (EVAL_BIT(node_flag[i], MASK)) { shared_node[neighbor_idx][counter++] = i + 1; } } return counter; } static int create_shared_elem_pre(const struct hecmwST_local_mesh *global_mesh, const char *elem_flag, int **shared_elem, int neighbor_idx) { int counter; int i; for (counter = 0, i = 0; i < global_mesh->n_elem; i++) { if (EVAL_BIT(elem_flag[i], MASK)) { shared_elem[neighbor_idx][counter++] = i + 1; } } return counter; } /*K. Inagaki */ static int create_shared_elem_pre_mod( const struct hecmwST_local_mesh *global_mesh, const char *elem_flag, int **shared_elem, int neighbor_idx, int neighbor_domain) { int counter; int i, idx1, idx2, elem1, elem2, n_bnd, n_out, maxe; n_bnd = n_bnd_elist[2 * neighbor_domain]; n_out = n_bnd_elist[2 * neighbor_domain + 1] - n_bnd_elist[2 * neighbor_domain]; maxe = global_mesh->n_elem + 1; elem1 = (n_bnd == 0) ? maxe : bnd_elist[neighbor_domain][0]; elem2 = (n_out == 0) ? 
maxe : bnd_elist[neighbor_domain][n_bnd]; for (counter = 0, idx1 = 0, idx2 = 0, i = 0; i < n_bnd + n_out; i++) { if (elem1 < elem2) { if (EVAL_BIT(elem_flag[elem1 - 1], MASK)) { shared_elem[neighbor_idx][counter++] = elem1; } idx1++; elem1 = (idx1 == n_bnd) ? maxe : bnd_elist[neighbor_domain][idx1]; } else { if (EVAL_BIT(elem_flag[elem2 - 1], MASK)) { shared_elem[neighbor_idx][counter++] = elem2; } idx2++; elem2 = (idx2 == n_out) ? maxe : bnd_elist[neighbor_domain][idx2 + n_bnd]; } } return counter; } static int create_comm_item(int n_neighbor_pe, int **comm_item_pre, int *comm_index, int *comm_item) { int i, j, js, je; for (i = 0; i < n_neighbor_pe; i++) { js = comm_index[i]; je = comm_index[i + 1]; for (j = 0; j < je - js; j++) { comm_item[js + j] = comm_item_pre[i][j]; } } return RTC_NORMAL; } /*------------------------------------------------------------------------------------------------*/ static int create_import_info_nb(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, const char *node_flag, int **import_node, int neighbor_idx, int neighbor_domain) { int n_import_node, rtc; n_import_node = count_masked_comm_node(global_mesh, node_flag, neighbor_domain); HECMW_assert(n_import_node >= 0); local_mesh->import_index[neighbor_idx + 1] = local_mesh->import_index[neighbor_idx] + n_import_node; import_node[neighbor_idx] = (int *)HECMW_malloc(sizeof(int) * n_import_node); if (import_node[neighbor_idx] == NULL) { HECMW_set_error(errno, ""); goto error; } rtc = create_comm_node_pre(global_mesh, node_flag, import_node, neighbor_idx, neighbor_domain); HECMW_assert(rtc == n_import_node); return RTC_NORMAL; error: return RTC_ERROR; } static int create_export_info_nb(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, const char *node_flag, int **export_node, int neighbor_idx, int current_domain, int neighbor_domain) { int n_export_node, rtc; n_export_node = count_masked_comm_node(global_mesh, node_flag, 
current_domain); HECMW_assert(n_export_node >= 0); local_mesh->export_index[neighbor_idx + 1] = local_mesh->export_index[neighbor_idx] + n_export_node; export_node[neighbor_idx] = (int *)HECMW_malloc(sizeof(int) * n_export_node); if (export_node[neighbor_idx] == NULL) { HECMW_set_error(errno, ""); goto error; } rtc = create_comm_node_pre(global_mesh, node_flag, export_node, neighbor_idx, current_domain); HECMW_assert(rtc == n_export_node); return RTC_NORMAL; error: return RTC_ERROR; } static int create_shared_info_nb(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, const char *elem_flag, int **shared_elem, int neighbor_idx, int neighbor_domain) { int n_shared_elem, rtc; if (is_spdup_available(global_mesh)) { n_shared_elem = count_masked_shared_elem_mod(global_mesh, elem_flag, neighbor_domain); } else { n_shared_elem = count_masked_shared_elem(global_mesh, elem_flag); } HECMW_assert(n_shared_elem >= 0); local_mesh->shared_index[neighbor_idx + 1] = local_mesh->shared_index[neighbor_idx] + n_shared_elem; shared_elem[neighbor_idx] = (int *)HECMW_malloc(sizeof(int) * n_shared_elem); if (shared_elem[neighbor_idx] == NULL) { HECMW_set_error(errno, ""); goto error; } if (is_spdup_available(global_mesh)) { rtc = create_shared_elem_pre_mod(global_mesh, elem_flag, shared_elem, neighbor_idx, neighbor_domain); } else { rtc = create_shared_elem_pre(global_mesh, elem_flag, shared_elem, neighbor_idx); } HECMW_assert(rtc == n_shared_elem); return RTC_NORMAL; error: return RTC_ERROR; } static int create_comm_info_nb(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, char *node_flag, char *elem_flag, char *node_flag_neighbor, char *elem_flag_neighbor, int current_domain) { int **import_node = NULL; int **export_node = NULL; int **shared_elem = NULL; int neighbor_domain; int size; int rtc; int i, j; local_mesh->import_index = NULL; local_mesh->export_index = NULL; local_mesh->shared_index = NULL; 
local_mesh->import_item = NULL; local_mesh->export_item = NULL; local_mesh->shared_item = NULL; import_node = (int **)HECMW_malloc(sizeof(int *) * local_mesh->n_neighbor_pe); if (import_node == NULL) { HECMW_set_error(errno, ""); goto error; } else { for (i = 0; i < local_mesh->n_neighbor_pe; i++) { import_node[i] = NULL; } } export_node = (int **)HECMW_malloc(sizeof(int *) * local_mesh->n_neighbor_pe); if (export_node == NULL) { HECMW_set_error(errno, ""); goto error; } else { for (i = 0; i < local_mesh->n_neighbor_pe; i++) { export_node[i] = NULL; } } shared_elem = (int **)HECMW_malloc(sizeof(int *) * local_mesh->n_neighbor_pe); if (shared_elem == NULL) { HECMW_set_error(errno, ""); goto error; } else { for (i = 0; i < local_mesh->n_neighbor_pe; i++) { shared_elem[i] = NULL; } } local_mesh->import_index = (int *)HECMW_calloc(local_mesh->n_neighbor_pe + 1, sizeof(int)); if (local_mesh->import_index == NULL) { HECMW_set_error(errno, ""); goto error; } local_mesh->export_index = (int *)HECMW_calloc(local_mesh->n_neighbor_pe + 1, sizeof(int)); if (local_mesh->export_index == NULL) { HECMW_set_error(errno, ""); goto error; } local_mesh->shared_index = (int *)HECMW_calloc(local_mesh->n_neighbor_pe + 1, sizeof(int)); if (local_mesh->shared_index == NULL) { HECMW_set_error(errno, ""); goto error; } for (i = 0; i < local_mesh->n_neighbor_pe; i++) { neighbor_domain = local_mesh->neighbor_pe[i]; rtc = mask_mesh_status_nb(global_mesh, node_flag_neighbor, elem_flag_neighbor, neighbor_domain); if (rtc != RTC_NORMAL) goto error; if (is_spdup_available(global_mesh)) { rtc = mask_comm_node_mod(global_mesh, node_flag, node_flag_neighbor, current_domain); } else { rtc = mask_comm_node(global_mesh, node_flag, node_flag_neighbor); } if (rtc != RTC_NORMAL) goto error; if (is_spdup_available(global_mesh)) { rtc = mask_comm_elem_mod(global_mesh, elem_flag, elem_flag_neighbor, current_domain); } else { rtc = mask_comm_elem(global_mesh, elem_flag, elem_flag_neighbor); } if (rtc != 
RTC_NORMAL) goto error; rtc = create_import_info_nb(global_mesh, local_mesh, node_flag, import_node, i, neighbor_domain); if (rtc != RTC_NORMAL) goto error; rtc = create_export_info_nb(global_mesh, local_mesh, node_flag, export_node, i, current_domain, neighbor_domain); if (rtc != RTC_NORMAL) goto error; rtc = create_shared_info_nb(global_mesh, local_mesh, elem_flag, shared_elem, i, neighbor_domain); if (rtc != RTC_NORMAL) goto error; if (is_spdup_available(global_mesh)) { /*K. Inagaki */ rtc = spdup_clear_IEB(node_flag_neighbor, elem_flag_neighbor, neighbor_domain); if (rtc != RTC_NORMAL) goto error; rtc = spdup_clear_MMbnd(node_flag_neighbor, elem_flag_neighbor, neighbor_domain); if (rtc != RTC_NORMAL) goto error; rtc = spdup_clear_MMbnd(node_flag, elem_flag, current_domain); if (rtc != RTC_NORMAL) goto error; } else { for (j = 0; j < global_mesh->n_node; j++) { CLEAR_MM(node_flag[j]); } for (j = 0; j < global_mesh->n_elem; j++) { CLEAR_MM(elem_flag[j]); } memset(node_flag_neighbor, 0, sizeof(char) * global_mesh->n_node); memset(elem_flag_neighbor, 0, sizeof(char) * global_mesh->n_elem); } } size = sizeof(int) * local_mesh->import_index[local_mesh->n_neighbor_pe]; local_mesh->import_item = (int *)HECMW_malloc(size); if (local_mesh->import_item == NULL) { HECMW_set_error(errno, ""); goto error; } rtc = create_comm_item(local_mesh->n_neighbor_pe, import_node, local_mesh->import_index, local_mesh->import_item); if (rtc != RTC_NORMAL) goto error; for (i = 0; i < local_mesh->n_neighbor_pe; i++) { HECMW_free(import_node[i]); } HECMW_free(import_node); import_node = NULL; size = sizeof(int) * local_mesh->export_index[local_mesh->n_neighbor_pe]; local_mesh->export_item = (int *)HECMW_malloc(size); if (local_mesh->export_item == NULL) { HECMW_set_error(errno, ""); goto error; } rtc = create_comm_item(local_mesh->n_neighbor_pe, export_node, local_mesh->export_index, local_mesh->export_item); if (rtc != RTC_NORMAL) goto error; for (i = 0; i < local_mesh->n_neighbor_pe; i++) 
{ HECMW_free(export_node[i]); } HECMW_free(export_node); export_node = NULL; size = sizeof(int) * local_mesh->shared_index[local_mesh->n_neighbor_pe]; local_mesh->shared_item = (int *)HECMW_malloc(size); if (local_mesh->shared_item == NULL) { HECMW_set_error(errno, ""); goto error; } rtc = create_comm_item(local_mesh->n_neighbor_pe, shared_elem, local_mesh->shared_index, local_mesh->shared_item); if (rtc != RTC_NORMAL) goto error; for (i = 0; i < local_mesh->n_neighbor_pe; i++) { HECMW_free(shared_elem[i]); } HECMW_free(shared_elem); shared_elem = NULL; return RTC_NORMAL; error: if (import_node) { int i; for (i = 0; i < local_mesh->n_neighbor_pe; i++) { HECMW_free(import_node[i]); } HECMW_free(import_node); } if (export_node) { int i; for (i = 0; i < local_mesh->n_neighbor_pe; i++) { HECMW_free(export_node[i]); } HECMW_free(export_node); } if (shared_elem) { int i; for (i = 0; i < local_mesh->n_neighbor_pe; i++) { HECMW_free(shared_elem[i]); } HECMW_free(shared_elem); } HECMW_free(local_mesh->import_index); HECMW_free(local_mesh->export_index); HECMW_free(local_mesh->shared_index); HECMW_free(local_mesh->import_item); HECMW_free(local_mesh->export_item); HECMW_free(local_mesh->shared_item); local_mesh->import_index = NULL; local_mesh->export_index = NULL; local_mesh->shared_index = NULL; local_mesh->import_item = NULL; local_mesh->export_item = NULL; local_mesh->shared_item = NULL; return RTC_ERROR; } /*------------------------------------------------------------------------------------------------*/ static int create_import_info_eb(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, const char *elem_flag, int **import_elem, int neighbor_idx, int neighbor_domain) { int n_import_elem, rtc; n_import_elem = count_masked_comm_elem(global_mesh, elem_flag, neighbor_domain); HECMW_assert(n_import_elem >= 0); local_mesh->import_index[neighbor_idx + 1] = local_mesh->import_index[neighbor_idx] + n_import_elem; import_elem[neighbor_idx] = (int 
*)HECMW_malloc(sizeof(int) * n_import_elem); if (import_elem[neighbor_idx] == NULL) { HECMW_set_error(errno, ""); goto error; } rtc = create_comm_elem_pre(global_mesh, elem_flag, import_elem, neighbor_idx, neighbor_domain); HECMW_assert(rtc == n_import_elem); return RTC_NORMAL; error: return RTC_ERROR; } static int create_export_info_eb(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, const char *elem_flag, int **export_elem, int neighbor_idx, int current_domain, int neighbor_domain) { int n_export_elem, rtc; n_export_elem = count_masked_comm_elem(global_mesh, elem_flag, current_domain); HECMW_assert(n_export_elem >= 0); local_mesh->export_index[neighbor_idx + 1] = local_mesh->export_index[neighbor_idx] + n_export_elem; export_elem[neighbor_idx] = (int *)HECMW_malloc(sizeof(int) * n_export_elem); if (export_elem[neighbor_idx] == NULL) { HECMW_set_error(errno, ""); goto error; } rtc = create_comm_elem_pre(global_mesh, elem_flag, export_elem, neighbor_idx, current_domain); HECMW_assert(rtc == n_export_elem); return RTC_NORMAL; error: return RTC_ERROR; } static int create_shared_info_eb(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, const char *node_flag, int **shared_node, int neighbor_idx, int neighbor_domain) { int n_shared_node, rtc; n_shared_node = count_masked_shared_node(global_mesh, node_flag); HECMW_assert(n_shared_node >= 0); local_mesh->shared_index[neighbor_idx + 1] = local_mesh->shared_index[neighbor_idx] + n_shared_node; shared_node[neighbor_idx] = (int *)HECMW_malloc(sizeof(int) * n_shared_node); if (shared_node[neighbor_idx] == NULL) { HECMW_set_error(errno, ""); goto error; } rtc = create_shared_node_pre(global_mesh, node_flag, shared_node, neighbor_idx); HECMW_assert(rtc == n_shared_node); return RTC_NORMAL; error: return RTC_ERROR; } /*------------------------------------------------------------------------------------------------*/ static int create_comm_info_eb(const struct 
hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, char *node_flag, char *elem_flag, char *node_flag_neighbor, char *elem_flag_neighbor, int current_domain) { int **import_elem = NULL; int **export_elem = NULL; int **shared_node = NULL; int neighbor_domain; int size; int rtc; int i, j; /* allocation */ local_mesh->import_index = NULL; local_mesh->export_index = NULL; local_mesh->shared_index = NULL; local_mesh->import_item = NULL; local_mesh->export_item = NULL; local_mesh->shared_item = NULL; import_elem = (int **)HECMW_malloc(sizeof(int *) * local_mesh->n_neighbor_pe); if (import_elem == NULL) { HECMW_set_error(errno, ""); goto error; } else { for (i = 0; i < local_mesh->n_neighbor_pe; i++) { import_elem[i] = NULL; } } export_elem = (int **)HECMW_malloc(sizeof(int *) * local_mesh->n_neighbor_pe); if (export_elem == NULL) { HECMW_set_error(errno, ""); goto error; } else { for (i = 0; i < local_mesh->n_neighbor_pe; i++) { export_elem[i] = NULL; } } shared_node = (int **)HECMW_malloc(sizeof(int *) * local_mesh->n_neighbor_pe); if (shared_node == NULL) { HECMW_set_error(errno, ""); goto error; } else { for (i = 0; i < local_mesh->n_neighbor_pe; i++) { shared_node[i] = NULL; } } local_mesh->import_index = (int *)HECMW_calloc(local_mesh->n_neighbor_pe + 1, sizeof(int)); if (local_mesh->import_index == NULL) { HECMW_set_error(errno, ""); goto error; } local_mesh->export_index = (int *)HECMW_calloc(local_mesh->n_neighbor_pe + 1, sizeof(int)); if (local_mesh->export_index == NULL) { HECMW_set_error(errno, ""); goto error; } local_mesh->shared_index = (int *)HECMW_calloc(local_mesh->n_neighbor_pe + 1, sizeof(int)); if (local_mesh->shared_index == NULL) { HECMW_set_error(errno, ""); goto error; } /* create communication table */ for (i = 0; i < local_mesh->n_neighbor_pe; i++) { neighbor_domain = local_mesh->neighbor_pe[i]; for (j = 0; j < global_mesh->n_node; j++) { CLEAR_BIT(node_flag[j], MASK); CLEAR_BIT(node_flag[j], MARK); } for (j = 0; j < 
global_mesh->n_elem; j++) { CLEAR_BIT(elem_flag[j], MASK); CLEAR_BIT(elem_flag[j], MARK); } memset(node_flag_neighbor, 0, sizeof(char) * global_mesh->n_node); memset(elem_flag_neighbor, 0, sizeof(char) * global_mesh->n_elem); /* mask boundary node & element */ rtc = mask_mesh_status_eb(global_mesh, node_flag_neighbor, elem_flag_neighbor, neighbor_domain); if (rtc != RTC_NORMAL) goto error; rtc = mask_comm_node(global_mesh, node_flag, node_flag_neighbor); if (rtc != RTC_NORMAL) goto error; rtc = mask_comm_elem(global_mesh, elem_flag, elem_flag_neighbor); if (rtc != RTC_NORMAL) goto error; /* create import element information (preliminary) */ rtc = create_import_info_eb(global_mesh, local_mesh, elem_flag, import_elem, i, neighbor_domain); if (rtc != RTC_NORMAL) goto error; /* create export element information (preliminary) */ rtc = create_export_info_eb(global_mesh, local_mesh, elem_flag, export_elem, i, current_domain, neighbor_domain); if (rtc != RTC_NORMAL) goto error; /* create shared node information (preliminary) */ rtc = create_shared_info_eb(global_mesh, local_mesh, node_flag, shared_node, i, neighbor_domain); if (rtc != RTC_NORMAL) goto error; } /* create import element information */ size = sizeof(int) * local_mesh->import_index[local_mesh->n_neighbor_pe]; local_mesh->import_item = (int *)HECMW_malloc(size); if (local_mesh->import_item == NULL) { HECMW_set_error(errno, ""); goto error; } rtc = create_comm_item(local_mesh->n_neighbor_pe, import_elem, local_mesh->import_index, local_mesh->import_item); if (rtc != RTC_NORMAL) goto error; for (i = 0; i < local_mesh->n_neighbor_pe; i++) { HECMW_free(import_elem[i]); } HECMW_free(import_elem); import_elem = NULL; /* create export node information */ size = sizeof(int) * local_mesh->export_index[local_mesh->n_neighbor_pe]; local_mesh->export_item = (int *)HECMW_malloc(size); if (local_mesh->export_item == NULL) { HECMW_set_error(errno, ""); goto error; } rtc = create_comm_item(local_mesh->n_neighbor_pe, 
export_elem, local_mesh->export_index, local_mesh->export_item); if (rtc != RTC_NORMAL) goto error; for (i = 0; i < local_mesh->n_neighbor_pe; i++) { HECMW_free(export_elem[i]); } HECMW_free(export_elem); export_elem = NULL; /* create shared element information */ size = sizeof(int) * local_mesh->shared_index[local_mesh->n_neighbor_pe]; local_mesh->shared_item = (int *)HECMW_malloc(size); if (local_mesh->shared_item == NULL) { HECMW_set_error(errno, ""); goto error; } rtc = create_comm_item(local_mesh->n_neighbor_pe, shared_node, local_mesh->shared_index, local_mesh->shared_item); if (rtc != RTC_NORMAL) goto error; for (i = 0; i < local_mesh->n_neighbor_pe; i++) { HECMW_free(shared_node[i]); } HECMW_free(shared_node); shared_node = NULL; return RTC_NORMAL; error: if (import_elem) { int i; for (i = 0; i < local_mesh->n_neighbor_pe; i++) { HECMW_free(import_elem[i]); } HECMW_free(import_elem); } if (export_elem) { int i; for (i = 0; i < local_mesh->n_neighbor_pe; i++) { HECMW_free(export_elem[i]); } HECMW_free(export_elem); } if (shared_node) { int i; for (i = 0; i < local_mesh->n_neighbor_pe; i++) { HECMW_free(shared_node[i]); } HECMW_free(shared_node); } HECMW_free(local_mesh->import_index); HECMW_free(local_mesh->export_index); HECMW_free(local_mesh->shared_index); HECMW_free(local_mesh->import_item); HECMW_free(local_mesh->export_item); HECMW_free(local_mesh->shared_item); local_mesh->import_index = NULL; local_mesh->export_index = NULL; local_mesh->shared_index = NULL; local_mesh->import_item = NULL; local_mesh->export_item = NULL; local_mesh->shared_item = NULL; return RTC_ERROR; } /*================================================================================================*/ static int create_comm_info(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, char *node_flag, char *elem_flag, char *node_flag_neighbor, char *elem_flag_neighbor, int current_domain) { int rtc; HECMW_assert(global_mesh); HECMW_assert(local_mesh); 
HECMW_assert(node_flag); HECMW_assert(elem_flag); HECMW_log(HECMW_LOG_DEBUG, "Starting creation of interface table..."); switch (global_mesh->hecmw_flag_parttype) { case HECMW_FLAG_PARTTYPE_NODEBASED: /* for node-based partitioning */ rtc = create_comm_info_nb(global_mesh, local_mesh, node_flag, elem_flag, node_flag_neighbor, elem_flag_neighbor, current_domain); if (rtc != RTC_NORMAL) goto error; break; case HECMW_FLAG_PARTTYPE_ELEMBASED: /* for element-based partitioning */ rtc = create_comm_info_eb(global_mesh, local_mesh, node_flag, elem_flag, node_flag_neighbor, elem_flag_neighbor, current_domain); if (rtc != RTC_NORMAL) goto error; break; default: HECMW_set_error(HECMW_PART_E_INVALID_PTYPE, ""); goto error; } HECMW_log(HECMW_LOG_DEBUG, "Creation of interface table done"); return RTC_NORMAL; error: return RTC_ERROR; } /*================================================================================================== create distributed mesh information ==================================================================================================*/ /*K. 
Inagaki */
/* Assign consecutive local IDs (1-based) to the internal nodes of 'domain',
 * writing them into node_global2local, and record the count in
 * local_mesh->nn_internal.  Uses the pre-computed internal-node list
 * int_nlist / n_int_nlist (node IDs are 1-based). */
static int set_node_global2local_internal(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh, int *node_global2local,
    const char *node_flag, int domain) {
  int counter;
  int i, node;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(node_global2local);
  HECMW_assert(node_flag);
  HECMW_assert(global_mesh->n_node > 0);

  for (counter = 0, i = 0; i < n_int_nlist[domain]; i++) {
    node = int_nlist[domain][i];
    node_global2local[node - 1] = ++counter;
  }
  local_mesh->nn_internal = counter;

  return RTC_NORMAL;
}

/* Assign local IDs to the external nodes, continuing after the internal
 * numbering.  Ordinary external nodes (BOUNDARY && OVERLAP) are numbered
 * first (up to nn_middle), then external contact slave nodes (BOUNDARY but
 * not OVERLAP); finally n_node / n_node_gross are set to the total. */
static int set_node_global2local_external(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh, int *node_global2local,
    const char *node_flag) {
  int counter;
  int i;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(node_global2local);
  HECMW_assert(node_flag);
  HECMW_assert(global_mesh->n_node > 0);

  /* ordinary external nodes are marked as BOUNDARY && OVERLAP */
  for (counter = local_mesh->nn_internal, i = 0; i < global_mesh->n_node;
       i++) {
    if (!EVAL_BIT(node_flag[i], INTERNAL) && EVAL_BIT(node_flag[i], BOUNDARY) &&
        EVAL_BIT(node_flag[i], OVERLAP)) {
      node_global2local[i] = ++counter;
    }
  }
  local_mesh->nn_middle = counter;

  /* added external contact slave nodes are marked as BOUNDARY but not
   * OVERLAP */
  for (i = 0; i < global_mesh->n_node; i++) {
    if (!EVAL_BIT(node_flag[i], INTERNAL) && EVAL_BIT(node_flag[i], BOUNDARY) &&
        !EVAL_BIT(node_flag[i], OVERLAP)) {
      node_global2local[i] = ++counter;
    }
  }
  local_mesh->n_node = counter;
  local_mesh->n_node_gross = counter;

  HECMW_assert(local_mesh->n_node > 0);

  return RTC_NORMAL;
}

/*K. 
Inagaki */ static int set_node_global2local_external_mod( const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, int *node_global2local, const char *node_flag, int domain) { int counter; int i, node; HECMW_assert(global_mesh); HECMW_assert(local_mesh); HECMW_assert(node_global2local); HECMW_assert(node_flag); HECMW_assert(global_mesh->n_node > 0); for (counter = local_mesh->nn_internal, i = n_bnd_nlist[2 * domain]; i < n_bnd_nlist[2 * domain + 1]; i++) { node = bnd_nlist[domain][i]; node_global2local[node - 1] = ++counter; } local_mesh->n_node = counter; local_mesh->n_node_gross = counter; HECMW_assert(local_mesh->n_node > 0); return RTC_NORMAL; } static int set_node_global2local_all( const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, int *node_global2local, const char *node_flag) { int counter; int i; HECMW_assert(global_mesh); HECMW_assert(local_mesh); HECMW_assert(node_global2local); HECMW_assert(node_flag); HECMW_assert(global_mesh->n_node > 0); for (counter = 0, i = 0; i < global_mesh->n_node; i++) { if (EVAL_BIT(node_flag[i], INTERNAL) || EVAL_BIT(node_flag[i], BOUNDARY)) { node_global2local[i] = ++counter; } } local_mesh->n_node = counter; local_mesh->n_node_gross = counter; HECMW_assert(local_mesh->n_node > 0); return RTC_NORMAL; } static int const_nn_internal(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, const char *node_flag) { int counter; int i; HECMW_assert(global_mesh); HECMW_assert(local_mesh); HECMW_assert(node_flag); HECMW_assert(global_mesh->n_node > 0); for (counter = 0, i = 0; i < global_mesh->n_node; i++) { if (EVAL_BIT(node_flag[i], INTERNAL)) counter++; } local_mesh->nn_internal = counter; return 0; } static int const_node_internal_list( const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, int *node_global2local, const char *node_flag) { int counter; int i; HECMW_assert(global_mesh); HECMW_assert(local_mesh); 
HECMW_assert(node_global2local);
  HECMW_assert(node_flag);
  HECMW_assert(global_mesh->n_node > 0);

  /* tail of the preceding function (its signature is above this chunk):
     collects local ids of nodes flagged INTERNAL into node_internal_list */
  if (local_mesh->nn_internal == 0) {
    local_mesh->node_internal_list = NULL;
    return RTC_NORMAL;
  }

  local_mesh->node_internal_list =
      (int *)HECMW_malloc(sizeof(int) * local_mesh->nn_internal);
  if (local_mesh->node_internal_list == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (counter = 0, i = 0; i < global_mesh->n_node; i++) {
    if (EVAL_BIT(node_flag[i], INTERNAL)) {
      local_mesh->node_internal_list[counter++] = node_global2local[i];
    }
  }
  HECMW_assert(counter == local_mesh->nn_internal);

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/*K. Inagaki */
/* Builds the global-to-local node numbering for the current domain,
 * dispatching on the partitioning type (node-based vs element-based). */
static int set_node_global2local(const struct hecmwST_local_mesh *global_mesh,
                                 struct hecmwST_local_mesh *local_mesh,
                                 int *node_global2local, const char *node_flag,
                                 int current_domain) {
  int rtc;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(node_global2local);
  HECMW_assert(node_flag);

  switch (global_mesh->hecmw_flag_parttype) {
    case HECMW_FLAG_PARTTYPE_NODEBASED:
      /* number internal nodes first */
      rtc = set_node_global2local_internal(global_mesh, local_mesh,
                                           node_global2local, node_flag,
                                           current_domain);
      if (rtc != RTC_NORMAL) goto error;

      /* then external nodes; fast path when per-domain lists are available */
      if (is_spdup_available(global_mesh)) {
        rtc = set_node_global2local_external_mod(global_mesh, local_mesh,
                                                 node_global2local, node_flag,
                                                 current_domain);
      } else {
        rtc = set_node_global2local_external(global_mesh, local_mesh,
                                             node_global2local, node_flag);
      }
      if (rtc != RTC_NORMAL) goto error;

      local_mesh->node_internal_list = NULL;
      break;

    case HECMW_FLAG_PARTTYPE_ELEMBASED:
      rtc = const_nn_internal(global_mesh, local_mesh, node_flag);
      if (rtc != RTC_NORMAL) goto error;

      rtc = set_node_global2local_all(global_mesh, local_mesh,
                                      node_global2local, node_flag);
      if (rtc != RTC_NORMAL) goto error;

      rtc = const_node_internal_list(global_mesh, local_mesh,
                                     node_global2local, node_flag);
      if (rtc != RTC_NORMAL) goto error;
      break;

    default:
      HECMW_set_error(HECMW_PART_E_INVALID_PTYPE, "%d",
                      global_mesh->hecmw_flag_parttype);
      goto error;
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/*K. Inagaki */
/* Resets node_global2local for the given domain.  When speedup lists are
 * available only the entries this domain actually set are cleared;
 * otherwise the whole array is zeroed. */
static int clear_node_global2local(const struct hecmwST_local_mesh *global_mesh,
                                   struct hecmwST_local_mesh *local_mesh,
                                   int *node_global2local, int domain) {
  int rtc; /* NOTE(review): unused in this function */
  int i, node;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(node_global2local);

  if (is_spdup_available(global_mesh)) {
    /* internal nodes of this domain */
    for (i = 0; i < n_int_nlist[domain]; i++) {
      node = int_nlist[domain][i];
      node_global2local[node - 1] = 0;
    }
    /* external (boundary) nodes of this domain */
    for (i = n_bnd_nlist[2 * domain]; i < n_bnd_nlist[2 * domain + 1]; i++) {
      node = bnd_nlist[domain][i];
      node_global2local[node - 1] = 0;
    }
  } else {
    for (i = 0; i < global_mesh->n_node; i++) {
      node_global2local[i] = 0;
    }
  }

  return RTC_NORMAL;
}

/*------------------------------------------------------------------------------------------------*/
/* Builds the inverse (local -> global) node mapping from node_global2local. */
static int set_node_local2global(const struct hecmwST_local_mesh *global_mesh,
                                 struct hecmwST_local_mesh *local_mesh,
                                 const int *node_global2local,
                                 int *node_local2global) {
  int counter;
  int i;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(node_global2local);
  HECMW_assert(node_local2global);
  HECMW_assert(global_mesh->n_node > 0);

  for (counter = 0, i = 0; i < global_mesh->n_node; i++) {
    if (node_global2local[i]) {
      node_local2global[node_global2local[i] - 1] = i + 1;
      counter++;
    }
  }
  HECMW_assert(counter == local_mesh->n_node);

  return RTC_NORMAL;
}

/*K.
Inagaki */
/* Speedup variant of set_node_local2global: merges the sorted per-domain
 * internal node list and the sorted external node list in ascending order,
 * so the local->global table is filled without scanning all nodes. */
static int set_node_local2global_mod(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh, const int *node_global2local,
    int *node_local2global, int domain) {
  int counter;
  int i, idx1, idx2, node1, node2, n_int, n_bnd, n_out, maxn;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(node_global2local);
  HECMW_assert(node_local2global);
  HECMW_assert(global_mesh->n_node > 0);

  n_int = n_int_nlist[domain];     /* # internal nodes of this domain */
  n_bnd = n_bnd_nlist[2 * domain]; /* offset where external nodes begin */
  n_out = n_bnd_nlist[2 * domain + 1] - n_bnd_nlist[2 * domain];
  maxn = global_mesh->n_node + 1;  /* sentinel: larger than any node id */
  node1 = (n_int == 0) ? maxn : int_nlist[domain][0];
  node2 = (n_out == 0) ? maxn : bnd_nlist[domain][n_bnd];

  /* classic two-way merge over the two ascending id lists */
  for (counter = 0, idx1 = 0, idx2 = 0, i = 0; i < n_int + n_out; i++) {
    if (node1 < node2) {
      node_local2global[node_global2local[node1 - 1] - 1] = node1;
      idx1++;
      node1 = (idx1 == n_int) ? maxn : int_nlist[domain][idx1];
    } else {
      node_local2global[node_global2local[node2 - 1] - 1] = node2;
      idx2++;
      node2 = (idx2 == n_out) ? maxn : bnd_nlist[domain][idx2 + n_bnd];
    }
    counter++;
  }
  HECMW_assert(counter == local_mesh->n_node);

  return RTC_NORMAL;
}

/*------------------------------------------------------------------------------------------------*/
/* Numbers elements flagged INTERNAL consecutively from 1 and records the
 * count in local_mesh->ne_internal. */
static int set_elem_global2local_internal(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh, int *elem_global2local,
    const char *elem_flag) {
  int counter;
  int i;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(elem_global2local);
  HECMW_assert(elem_flag);
  HECMW_assert(global_mesh->n_elem);

  for (counter = 0, i = 0; i < global_mesh->n_elem; i++) {
    if (EVAL_BIT(elem_flag[i], INTERNAL)) {
      elem_global2local[i] = ++counter;
    }
  }
  local_mesh->ne_internal = counter;

  return RTC_NORMAL;
}

/* Numbers boundary-only elements (BOUNDARY but not INTERNAL) after the
 * internal ones; sets local_mesh->n_elem and n_elem_gross to the total. */
static int set_elem_global2local_external(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh, int *elem_global2local,
    const char *elem_flag) {
  int counter;
  int i;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(elem_global2local);
  HECMW_assert(elem_flag);
  HECMW_assert(global_mesh->n_elem);

  /* continue the numbering started by set_elem_global2local_internal */
  for (counter = local_mesh->ne_internal, i = 0; i < global_mesh->n_elem;
       i++) {
    if (!EVAL_BIT(elem_flag[i], INTERNAL) && EVAL_BIT(elem_flag[i], BOUNDARY)) {
      elem_global2local[i] = ++counter;
    }
  }
  local_mesh->n_elem = counter;
  local_mesh->n_elem_gross = counter;

  HECMW_assert(local_mesh->n_elem > 0);

  return RTC_NORMAL;
}

/* Numbers every element that is INTERNAL or BOUNDARY in a single pass. */
static int set_elem_global2local_all(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh, int *elem_global2local,
    const char *elem_flag) {
  int counter;
  int i;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(elem_global2local);
  HECMW_assert(elem_flag);
  HECMW_assert(global_mesh->n_elem > 0);

  for (counter = 0, i = 0; i < global_mesh->n_elem; i++) {
    if (EVAL_BIT(elem_flag[i], INTERNAL) || EVAL_BIT(elem_flag[i], BOUNDARY)) {
      elem_global2local[i] = ++counter;
    }
  }
  local_mesh->n_elem = counter;
  local_mesh->n_elem_gross = counter;
HECMW_assert(local_mesh->n_elem > 0);

  return RTC_NORMAL;
}

/*K. Inagaki */
/* Speedup variant of set_elem_global2local_all: merges the sorted
 * per-domain internal and external element lists and numbers the merged
 * sequence consecutively. */
static int set_elem_global2local_all_mod(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh, int *elem_global2local,
    const char *elem_flag, int domain) {
  int counter;
  int i, idx1, idx2, elem1, elem2, n_int, n_bnd, n_out, maxe;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(elem_global2local);
  HECMW_assert(elem_flag);
  HECMW_assert(global_mesh->n_elem > 0);

  n_int = n_int_elist[domain];
  n_bnd = n_bnd_elist[2 * domain];
  n_out = n_bnd_elist[2 * domain + 1] - n_bnd_elist[2 * domain];
  maxe = global_mesh->n_elem + 1; /* sentinel: larger than any element id */
  elem1 = (n_int == 0) ? maxe : int_elist[domain][0];
  elem2 = (n_out == 0) ? maxe : bnd_elist[domain][n_bnd];

  for (counter = 0, idx1 = 0, idx2 = 0, i = 0; i < n_int + n_out; i++) {
    if (elem1 < elem2) {
      elem_global2local[elem1 - 1] = ++counter;
      idx1++;
      elem1 = (idx1 == n_int) ? maxe : int_elist[domain][idx1];
    } else {
      elem_global2local[elem2 - 1] = ++counter;
      idx2++;
      elem2 = (idx2 == n_out) ? maxe : bnd_elist[domain][idx2 + n_bnd];
    }
  }
  local_mesh->n_elem = counter;
  local_mesh->n_elem_gross = counter;

  HECMW_assert(local_mesh->n_elem > 0);

  return RTC_NORMAL;
}

/* Counts elements flagged INTERNAL into local_mesh->ne_internal. */
static int const_ne_internal(const struct hecmwST_local_mesh *global_mesh,
                             struct hecmwST_local_mesh *local_mesh,
                             const char *elem_flag) {
  int counter;
  int i;

  HECMW_assert(global_mesh->n_elem > 0);

  for (counter = 0, i = 0; i < global_mesh->n_elem; i++) {
    if (EVAL_BIT(elem_flag[i], INTERNAL)) counter++;
  }
  local_mesh->ne_internal = counter;

  return RTC_NORMAL;
}

/*K. Inagaki */
/* Collects the local ids of this domain's internal elements (taken from the
 * precomputed int_elist) into local_mesh->elem_internal_list. */
static int const_elem_internal_list(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh, int *elem_global2local,
    const char *elem_flag, int domain) {
  int counter;
  int i, elem;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(elem_global2local);
  HECMW_assert(elem_flag);
  HECMW_assert(global_mesh->n_elem > 0);

  if (local_mesh->ne_internal == 0) {
    local_mesh->elem_internal_list = NULL;
    return RTC_NORMAL;
  }

  local_mesh->elem_internal_list =
      (int *)HECMW_malloc(sizeof(int) * local_mesh->ne_internal);
  if (local_mesh->elem_internal_list == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (counter = 0, i = 0; i < n_int_elist[domain]; i++) {
    elem = int_elist[domain][i];
    local_mesh->elem_internal_list[counter++] = elem_global2local[elem - 1];
  }
  HECMW_assert(counter == local_mesh->ne_internal);

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Builds the global-to-local element numbering for the current domain,
 * dispatching on the partitioning type. */
static int set_elem_global2local(const struct hecmwST_local_mesh *global_mesh,
                                 struct hecmwST_local_mesh *local_mesh,
                                 int *elem_global2local, const char *elem_flag,
                                 int current_domain) {
  int rtc;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(elem_global2local);
  HECMW_assert(elem_flag);

  switch (global_mesh->hecmw_flag_parttype) {
    case HECMW_FLAG_PARTTYPE_NODEBASED: /* for node-based partitioning */
      local_mesh->ne_internal = n_int_elist[current_domain];

      if (is_spdup_available(global_mesh)) {
        rtc = set_elem_global2local_all_mod(global_mesh, local_mesh,
                                            elem_global2local, elem_flag,
                                            current_domain);
      } else {
        rtc = set_elem_global2local_all(global_mesh, local_mesh,
                                        elem_global2local, elem_flag);
      }
      if (rtc != RTC_NORMAL) goto error;

      rtc = const_elem_internal_list(global_mesh, local_mesh,
                                     elem_global2local, elem_flag,
                                     current_domain);
      if (rtc != RTC_NORMAL) goto error;
      break;

    case HECMW_FLAG_PARTTYPE_ELEMBASED: /* for element-based partitioning */
      rtc = set_elem_global2local_internal(global_mesh, local_mesh,
                                           elem_global2local, elem_flag);
      if (rtc != RTC_NORMAL) goto error;

      rtc = set_elem_global2local_external(global_mesh, local_mesh,
                                           elem_global2local, elem_flag);
      if (rtc != RTC_NORMAL) goto error;

      local_mesh->elem_internal_list = NULL;
      break;

    default:
      HECMW_set_error(HECMW_PART_E_INVALID_PTYPE, "%d",
                      global_mesh->hecmw_flag_parttype);
      goto error;
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Resets elem_global2local for the given domain; clears only the entries
 * this domain set when the speedup lists are available. */
static int clear_elem_global2local(const struct hecmwST_local_mesh *global_mesh,
                                   struct hecmwST_local_mesh *local_mesh,
                                   int *elem_global2local, int domain) {
  int rtc; /* NOTE(review): unused in this function */
  int i, elem;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(elem_global2local);

  if (is_spdup_available(global_mesh)) {
    for (i = 0; i < n_int_elist[domain]; i++) {
      elem = int_elist[domain][i];
      elem_global2local[elem - 1] = 0;
    }
    for (i = n_bnd_elist[2 * domain]; i < n_bnd_elist[2 * domain + 1]; i++) {
      elem = bnd_elist[domain][i];
      elem_global2local[elem - 1] = 0;
    }
  } else {
    for (i = 0; i < global_mesh->n_elem; i++) {
      elem_global2local[i] = 0;
    }
  }

  return RTC_NORMAL;
}

/*------------------------------------------------------------------------------------------------*/
/* Builds the inverse (local -> global) element mapping. */
static int set_elem_local2global(const struct hecmwST_local_mesh *global_mesh,
                                 struct hecmwST_local_mesh *local_mesh,
                                 const int *elem_global2local,
                                 int *elem_local2global) {
  int counter;
  int i;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(elem_global2local);
  HECMW_assert(elem_local2global);
  HECMW_assert(global_mesh->n_elem > 0);

  for (counter = 0, i = 0; i < global_mesh->n_elem; i++) {
    if (elem_global2local[i]) {
      elem_local2global[elem_global2local[i] - 1] = i + 1;
      counter++;
    }
  }
  HECMW_assert(counter == local_mesh->n_elem);

  return RTC_NORMAL;
}

/*K.
Inagaki */
/* Speedup variant of set_elem_local2global: merges the sorted per-domain
 * internal and external element lists in ascending order. */
static int set_elem_local2global_mod(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh, const int *elem_global2local,
    int *elem_local2global, int domain) {
  int counter;
  int i, idx1, idx2, elem1, elem2, n_int, n_bnd, n_out, maxe;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(elem_global2local);
  HECMW_assert(elem_local2global);
  HECMW_assert(global_mesh->n_elem > 0);

  n_int = n_int_elist[domain];
  n_bnd = n_bnd_elist[2 * domain];
  n_out = n_bnd_elist[2 * domain + 1] - n_bnd_elist[2 * domain];
  maxe = global_mesh->n_elem + 1; /* sentinel: larger than any element id */
  elem1 = (n_int == 0) ? maxe : int_elist[domain][0];
  elem2 = (n_out == 0) ? maxe : bnd_elist[domain][n_bnd];

  for (counter = 0, idx1 = 0, idx2 = 0, i = 0; i < n_int + n_out; i++) {
    if (elem1 < elem2) {
      elem_local2global[elem_global2local[elem1 - 1] - 1] = elem1;
      idx1++;
      elem1 = (idx1 == n_int) ? maxe : int_elist[domain][idx1];
    } else {
      elem_local2global[elem_global2local[elem2 - 1] - 1] = elem2;
      idx2++;
      elem2 = (idx2 == n_out) ? maxe : bnd_elist[domain][idx2 + n_bnd];
    }
    counter++;
  }
  HECMW_assert(counter == local_mesh->n_elem);

  return RTC_NORMAL;
}

/*================================================================================================*/
/* The const_* helpers below copy one global-mesh field into the local mesh. */

static int const_gridfile(const struct hecmwST_local_mesh *global_mesh,
                          struct hecmwST_local_mesh *local_mesh) {
  strcpy(local_mesh->gridfile, global_mesh->gridfile);

  return RTC_NORMAL;
}

static int const_hecmw_n_file(const struct hecmwST_local_mesh *global_mesh,
                              struct hecmwST_local_mesh *local_mesh) {
  local_mesh->hecmw_n_file = global_mesh->hecmw_n_file;

  return RTC_NORMAL;
}

/* NOTE(review): shallow copy — local mesh shares the files array. */
static int const_files(const struct hecmwST_local_mesh *global_mesh,
                       struct hecmwST_local_mesh *local_mesh) {
  local_mesh->files = global_mesh->files;

  return RTC_NORMAL;
}

static int const_header(const struct hecmwST_local_mesh *global_mesh,
                        struct hecmwST_local_mesh *local_mesh) {
  strcpy(local_mesh->header, global_mesh->header);

  return RTC_NORMAL;
}

static int const_hecmw_flag_adapt(const struct hecmwST_local_mesh *global_mesh,
                                  struct hecmwST_local_mesh *local_mesh) {
  local_mesh->hecmw_flag_adapt = global_mesh->hecmw_flag_adapt;

  return RTC_NORMAL;
}

static int const_hecmw_flag_initcon(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh) {
  local_mesh->hecmw_flag_initcon = global_mesh->hecmw_flag_initcon;

  return RTC_NORMAL;
}

static int const_hecmw_flag_parttype(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh) {
  local_mesh->hecmw_flag_parttype = global_mesh->hecmw_flag_parttype;

  return RTC_NORMAL;
}

static int const_hecmw_flag_partdepth(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh) {
  local_mesh->hecmw_flag_partdepth = global_mesh->hecmw_flag_partdepth;

  return RTC_NORMAL;
}

static int const_hecmw_flag_version(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh) {
  local_mesh->hecmw_flag_version = global_mesh->hecmw_flag_version;

  return
RTC_NORMAL;
}

static int const_hecmw_flag_partcontact(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh) {
  local_mesh->hecmw_flag_partcontact = global_mesh->hecmw_flag_partcontact;

  return RTC_NORMAL;
}

static int const_zero_temp(const struct hecmwST_local_mesh *global_mesh,
                           struct hecmwST_local_mesh *local_mesh) {
  local_mesh->zero_temp = global_mesh->zero_temp;

  return RTC_NORMAL;
}

/* Copies all global (mesh-wide) header/flag information into the local
 * mesh by calling the individual const_* helpers in sequence. */
static int const_global_info(const struct hecmwST_local_mesh *global_mesh,
                             struct hecmwST_local_mesh *local_mesh) {
  int rtc;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);

  rtc = const_gridfile(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_hecmw_n_file(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_files(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_header(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_hecmw_flag_adapt(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_hecmw_flag_initcon(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_hecmw_flag_parttype(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_hecmw_flag_partdepth(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_hecmw_flag_version(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_hecmw_flag_partcontact(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_zero_temp(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

static int const_n_dof(const struct hecmwST_local_mesh *global_mesh,
                       struct hecmwST_local_mesh *local_mesh) {
  HECMW_assert(global_mesh->n_dof > 0);

  local_mesh->n_dof = global_mesh->n_dof;

  HECMW_assert(local_mesh->n_dof > 0);

  return RTC_NORMAL;
}

static int const_n_dof_grp(const struct
                           hecmwST_local_mesh *global_mesh,
                           struct hecmwST_local_mesh *local_mesh) {
  HECMW_assert(global_mesh->n_dof_grp);

  local_mesh->n_dof_grp = global_mesh->n_dof_grp;

  /* NOTE(review): this re-checks global_mesh; a post-condition on
     local_mesh->n_dof_grp may have been intended — confirm. */
  HECMW_assert(global_mesh->n_dof_grp);

  return RTC_NORMAL;
}

/* Builds local node_dof_index (CSR-style counts per DOF group) by counting
 * INTERNAL nodes inside each global DOF-group range. */
static int const_node_dof_index(const struct hecmwST_local_mesh *global_mesh,
                                struct hecmwST_local_mesh *local_mesh,
                                const char *node_flag) {
  int counter;
  int i, j;

  HECMW_assert(local_mesh->n_dof_grp > 0);
  HECMW_assert(global_mesh->node_dof_index);

  local_mesh->node_dof_index =
      (int *)HECMW_calloc(local_mesh->n_dof_grp + 1, sizeof(int));
  if (local_mesh->node_dof_index == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (counter = 0, i = 0; i < global_mesh->n_dof_grp; i++) {
    for (j = global_mesh->node_dof_index[i];
         j < global_mesh->node_dof_index[i + 1]; j++) {
      if (EVAL_BIT(node_flag[j], INTERNAL)) counter++;
    }
    local_mesh->node_dof_index[i + 1] = counter;
  }
  HECMW_assert(local_mesh->node_dof_index[local_mesh->n_dof_grp] ==
               local_mesh->nn_internal);

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/*K.
Inagaki */
/* Speedup variant of const_node_dof_index: counts this domain's internal
 * nodes (from int_nlist) whose global ids fall inside each DOF-group range.
 * NOTE(review): node ids are compared against node_dof_index boundaries,
 * mirroring the index-range logic of the non-mod version — confirm ids and
 * indices are interchangeable here. */
static int const_node_dof_index_mod(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh, const char *node_flag, int domain) {
  int counter;
  int i, j, node;

  HECMW_assert(local_mesh->n_dof_grp > 0);
  HECMW_assert(global_mesh->node_dof_index);

  local_mesh->node_dof_index =
      (int *)HECMW_calloc(local_mesh->n_dof_grp + 1, sizeof(int));
  if (local_mesh->node_dof_index == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (counter = 0, i = 0; i < global_mesh->n_dof_grp; i++) {
    for (j = 0; j < n_int_nlist[domain]; j++) {
      node = int_nlist[domain][j];
      if (node <= global_mesh->node_dof_index[i]) continue;
      if (node > global_mesh->node_dof_index[i + 1]) continue;
      counter++;
    }
    local_mesh->node_dof_index[i + 1] = counter;
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Shares the DOF item array with the global mesh (shallow copy).
 * NOTE(review): returns literal 0 (== RTC_NORMAL) unlike its siblings. */
static int const_node_dof_item(const struct hecmwST_local_mesh *global_mesh,
                               struct hecmwST_local_mesh *local_mesh) {
  HECMW_assert(global_mesh->node_dof_item);

  local_mesh->node_dof_item = global_mesh->node_dof_item;

  return 0;
}

/* Copies the 3-component coordinates of every local node. */
static int const_node(const struct hecmwST_local_mesh *global_mesh,
                      struct hecmwST_local_mesh *local_mesh,
                      const int *node_local2global) {
  int i;

  HECMW_assert(local_mesh->n_node > 0);
  HECMW_assert(global_mesh->node);

  local_mesh->node =
      (double *)HECMW_malloc(sizeof(double) * local_mesh->n_node * 3);
  if (local_mesh->node == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (i = 0; i < local_mesh->n_node; i++) {
    local_mesh->node[3 * i] = global_mesh->node[3 * (node_local2global[i] - 1)];
    local_mesh->node[3 * i + 1] =
        global_mesh->node[3 * (node_local2global[i] - 1) + 1];
    local_mesh->node[3 * i + 2] =
        global_mesh->node[3 * (node_local2global[i] - 1) + 2];
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Copies the 2-entry node_ID pairs for every local node. */
static int const_node_id(const struct hecmwST_local_mesh *global_mesh,
                         struct hecmwST_local_mesh *local_mesh,
                         const int *node_local2global) {
  int i;

  HECMW_assert(local_mesh->n_node > 0);
  HECMW_assert(global_mesh->node_ID);

  local_mesh->node_ID =
      (int *)HECMW_malloc(sizeof(int) * local_mesh->n_node * 2);
  if (local_mesh->node_ID == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (i = 0; i < local_mesh->n_node; i++) {
    local_mesh->node_ID[2 * i] =
        global_mesh->node_ID[2 * (node_local2global[i] - 1)];
    local_mesh->node_ID[2 * i + 1] =
        global_mesh->node_ID[2 * (node_local2global[i] - 1) + 1];
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Copies the global node id of every local node. */
static int const_global_node_id(const struct hecmwST_local_mesh *global_mesh,
                                struct hecmwST_local_mesh *local_mesh,
                                const int *node_local2global) {
  int i;

  HECMW_assert(local_mesh->n_node > 0);
  HECMW_assert(global_mesh->global_node_ID);

  local_mesh->global_node_ID =
      (int *)HECMW_malloc(sizeof(int) * local_mesh->n_node);
  if (local_mesh->global_node_ID == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (i = 0; i < local_mesh->n_node; i++) {
    local_mesh->global_node_ID[i] =
        global_mesh->global_node_ID[node_local2global[i] - 1];
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Builds the local initial-condition index from the per-node counts of the
 * corresponding global nodes. */
static int const_node_init_val_index(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh, const int *node_local2global) {
  int old_idx;
  int i;

  HECMW_assert(local_mesh->hecmw_flag_initcon);
  HECMW_assert(local_mesh->n_node > 0);
  HECMW_assert(global_mesh->node_init_val_index);

  local_mesh->node_init_val_index =
      (int *)HECMW_calloc(local_mesh->n_node + 1, sizeof(int));
  if (local_mesh->node_init_val_index == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (i = 0; i < local_mesh->n_node; i++) {
    old_idx = node_local2global[i] - 1;

    local_mesh->node_init_val_index[i + 1] =
        local_mesh->node_init_val_index[i] +
        global_mesh->node_init_val_index[old_idx + 1] -
        global_mesh->node_init_val_index[old_idx];
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Copies the initial-condition values for every local node. */
static int const_node_init_val_item(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh, const int *node_local2global) {
  int size;
  int counter;
  int i, j, gstart, gend, lstart, lend;
HECMW_assert(local_mesh->hecmw_flag_initcon);
  HECMW_assert(local_mesh->n_node > 0);
  HECMW_assert(local_mesh->node_init_val_index);
  HECMW_assert(global_mesh->node_init_val_item);

  if (local_mesh->node_init_val_index[local_mesh->n_node] == 0) {
    local_mesh->node_init_val_item = NULL;
    return 0; /* NOTE(review): literal 0 (== RTC_NORMAL) */
  }

  size = sizeof(double) * local_mesh->node_init_val_index[local_mesh->n_node];
  local_mesh->node_init_val_item = (double *)HECMW_malloc(size);
  if (local_mesh->node_init_val_item == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  /* copy each local node's slice of initial values from the global array */
  for (counter = 0, i = 0; i < local_mesh->n_node; i++) {
    gstart = global_mesh->node_init_val_index[node_local2global[i] - 1];
    gend = global_mesh->node_init_val_index[node_local2global[i]];
    lstart = local_mesh->node_init_val_index[i];
    lend = local_mesh->node_init_val_index[i + 1];

    HECMW_assert(gend - gstart == lend - lstart);

    for (j = 0; j < lend - lstart; j++) {
      local_mesh->node_init_val_item[lstart + j] =
          global_mesh->node_init_val_item[gstart + j];
      counter++;
    }
    HECMW_assert(counter == local_mesh->node_init_val_index[i + 1]);
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Builds all node-related data of the local mesh: DOF info, coordinates,
 * ids, and (optionally) initial conditions. */
static int const_node_info(const struct hecmwST_local_mesh *global_mesh,
                           struct hecmwST_local_mesh *local_mesh,
                           const int *node_local2global, const char *node_flag,
                           int current_domain) {
  int rtc;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(node_local2global);
  HECMW_assert(node_flag);

  rtc = const_n_dof(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_n_dof_grp(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  switch (global_mesh->hecmw_flag_parttype) {
    case HECMW_FLAG_PARTTYPE_NODEBASED:
      rtc = const_node_dof_index_mod(global_mesh, local_mesh, node_flag,
                                     current_domain);
      break;

    case HECMW_FLAG_PARTTYPE_ELEMBASED:
      rtc = const_node_dof_index(global_mesh, local_mesh, node_flag);
      break;

    default:
      /* NOTE(review): siblings report HECMW_PART_E_INVALID_PTYPE here */
      HECMW_set_error(errno, "");
      goto error;
  }
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_node_dof_item(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_node(global_mesh, local_mesh, node_local2global);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_node_id(global_mesh, local_mesh, node_local2global);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_global_node_id(global_mesh, local_mesh, node_local2global);
  if (rtc != RTC_NORMAL) goto error;

  if (local_mesh->hecmw_flag_initcon) {
    rtc = const_node_init_val_index(global_mesh, local_mesh, node_local2global);
    if (rtc != RTC_NORMAL) goto error;

    rtc = const_node_init_val_item(global_mesh, local_mesh, node_local2global);
    if (rtc != RTC_NORMAL) goto error;
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

static int const_n_elem_type(const struct hecmwST_local_mesh *global_mesh,
                             struct hecmwST_local_mesh *local_mesh) {
  HECMW_assert(global_mesh->n_elem_type > 0);

  local_mesh->n_elem_type = global_mesh->n_elem_type;

  HECMW_assert(local_mesh->n_elem_type > 0);

  return RTC_NORMAL;
}

/* Copies the element type of every local element. */
static int const_elem_type(const struct hecmwST_local_mesh *global_mesh,
                           struct hecmwST_local_mesh *local_mesh,
                           const int *elem_local2global) {
  int i;

  HECMW_assert(local_mesh->n_elem > 0);
  HECMW_assert(global_mesh->elem_type);

  local_mesh->elem_type = (int *)HECMW_malloc(sizeof(int) * local_mesh->n_elem);
  if (local_mesh->elem_type == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (i = 0; i < local_mesh->n_elem; i++) {
    local_mesh->elem_type[i] = global_mesh->elem_type[elem_local2global[i] - 1];
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Builds the local elem_type_index by counting the local elements that fall
 * in each global element-type range. */
static int const_elem_type_index(const struct hecmwST_local_mesh *global_mesh,
                                 struct hecmwST_local_mesh *local_mesh,
                                 const int *elem_global2local) {
  int counter;
  int i, j;

  HECMW_assert(local_mesh->n_elem_type > 0);
  HECMW_assert(global_mesh->n_elem_type > 0);
  HECMW_assert(global_mesh->elem_type_index);

  local_mesh->elem_type_index =
      (int *)HECMW_calloc(local_mesh->n_elem_type + 1, sizeof(int));
  if (local_mesh->elem_type_index
== NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (counter = 0, i = 0; i < global_mesh->n_elem_type; i++) {
    for (j = global_mesh->elem_type_index[i];
         j < global_mesh->elem_type_index[i + 1]; j++) {
      if (elem_global2local[j]) counter++;
    }
    local_mesh->elem_type_index[i + 1] = counter;
  }
  HECMW_assert(local_mesh->elem_type_index[local_mesh->n_elem_type] ==
               local_mesh->n_elem);

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/*K. Inagaki */
/* Speedup variant of const_elem_type_index: for each element type, merges
 * this domain's internal/external element lists and counts the elements
 * whose (0-based) global id falls in the type's range.
 * NOTE(review): the merge is re-run once per element type — O(types * lists). */
static int const_elem_type_index_mod(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh, const int *elem_global2local,
    int domain) {
  int counter;
  int i, j, idx1, idx2, elem_tmp, elem1, elem2, n_int, n_bnd, n_out, maxe;

  HECMW_assert(local_mesh->n_elem_type > 0);
  HECMW_assert(global_mesh->n_elem_type > 0);
  HECMW_assert(global_mesh->elem_type_index);

  local_mesh->elem_type_index =
      (int *)HECMW_calloc(local_mesh->n_elem_type + 1, sizeof(int));
  if (local_mesh->elem_type_index == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  n_int = n_int_elist[domain];
  n_bnd = n_bnd_elist[2 * domain];
  n_out = n_bnd_elist[2 * domain + 1] - n_bnd_elist[2 * domain];
  maxe = global_mesh->n_elem + 1; /* sentinel: larger than any element id */

  for (counter = 0, i = 0; i < global_mesh->n_elem_type; i++) {
    elem1 = (n_int == 0) ? maxe : int_elist[domain][0];
    elem2 = (n_out == 0) ? maxe : bnd_elist[domain][n_bnd];
    for (idx1 = 0, idx2 = 0, j = 0; j < n_int + n_out; j++) {
      if (elem1 < elem2) {
        elem_tmp = elem1 - 1;
        idx1++;
        elem1 = (idx1 == n_int) ? maxe : int_elist[domain][idx1];
      } else {
        elem_tmp = elem2 - 1;
        idx2++;
        elem2 = (idx2 == n_out) ? maxe : bnd_elist[domain][idx2 + n_bnd];
      }
      if (elem_tmp >= global_mesh->elem_type_index[i] &&
          elem_tmp < global_mesh->elem_type_index[i + 1]) {
        counter++;
      }
    }
    local_mesh->elem_type_index[i + 1] = counter;
  }
  HECMW_assert(local_mesh->elem_type_index[local_mesh->n_elem_type] ==
               local_mesh->n_elem);

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Shares the element-type item array with the global mesh (shallow copy). */
static int const_elem_type_item(const struct hecmwST_local_mesh *global_mesh,
                                struct hecmwST_local_mesh *local_mesh) {
  HECMW_assert(global_mesh->elem_type_item);

  local_mesh->elem_type_item = global_mesh->elem_type_item;

  return RTC_NORMAL;
}

/* Builds the local element-connectivity index from the per-element node
 * counts of the corresponding global elements. */
static int const_elem_node_index(const struct hecmwST_local_mesh *global_mesh,
                                 struct hecmwST_local_mesh *local_mesh,
                                 const int *elem_local2global) {
  int old_idx;
  int i;

  HECMW_assert(local_mesh->n_elem > 0);
  HECMW_assert(global_mesh->elem_node_index);

  local_mesh->elem_node_index =
      (int *)HECMW_calloc(local_mesh->n_elem + 1, sizeof(int));
  if (local_mesh->elem_node_index == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (i = 0; i < local_mesh->n_elem; i++) {
    old_idx = elem_local2global[i] - 1;

    local_mesh->elem_node_index[i + 1] = local_mesh->elem_node_index[i] +
                                         global_mesh->elem_node_index[old_idx + 1] -
                                         global_mesh->elem_node_index[old_idx];
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Copies element connectivity, translating node ids to local numbering. */
static int const_elem_node_item(const struct hecmwST_local_mesh *global_mesh,
                                struct hecmwST_local_mesh *local_mesh,
                                const int *node_global2local,
                                const int *elem_local2global) {
  int node;
  int size;
  int counter;
  int i, j, gstart, gend, lstart, lend;

  HECMW_assert(local_mesh->n_elem > 0);
  HECMW_assert(local_mesh->elem_node_index);
  HECMW_assert(local_mesh->elem_node_index[local_mesh->n_elem] > 0);
  HECMW_assert(global_mesh->elem_node_item);

  size = sizeof(int) * local_mesh->elem_node_index[local_mesh->n_elem];
  local_mesh->elem_node_item = (int *)HECMW_malloc(size);
  if (local_mesh->elem_node_item == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (counter = 0, i = 0; i < local_mesh->n_elem; i++) {
gstart = global_mesh->elem_node_index[elem_local2global[i] - 1];
    gend = global_mesh->elem_node_index[elem_local2global[i]];
    lstart = local_mesh->elem_node_index[i];
    lend = local_mesh->elem_node_index[i + 1];

    /* translate each global node id to its local id */
    for (j = 0; j < lend - lstart; j++) {
      node = global_mesh->elem_node_item[gstart + j];
      local_mesh->elem_node_item[lstart + j] = node_global2local[node - 1];
      counter++;
    }
    HECMW_assert(counter == local_mesh->elem_node_index[i + 1]);
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Copies the 2-entry elem_ID pairs for every local element. */
static int const_elem_id(const struct hecmwST_local_mesh *global_mesh,
                         struct hecmwST_local_mesh *local_mesh,
                         const int *elem_local2global) {
  int i;

  HECMW_assert(local_mesh->n_elem > 0);
  HECMW_assert(global_mesh->elem_ID);

  local_mesh->elem_ID =
      (int *)HECMW_malloc(sizeof(int) * local_mesh->n_elem * 2);
  if (local_mesh->elem_ID == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (i = 0; i < local_mesh->n_elem; i++) {
    local_mesh->elem_ID[2 * i] =
        global_mesh->elem_ID[2 * (elem_local2global[i] - 1)];
    local_mesh->elem_ID[2 * i + 1] =
        global_mesh->elem_ID[2 * (elem_local2global[i] - 1) + 1];
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Copies the global element id of every local element. */
static int const_global_elem_id(const struct hecmwST_local_mesh *global_mesh,
                                struct hecmwST_local_mesh *local_mesh,
                                const int *elem_local2global) {
  int i;

  HECMW_assert(local_mesh->n_elem);
  HECMW_assert(global_mesh->global_elem_ID);

  local_mesh->global_elem_ID =
      (int *)HECMW_malloc(sizeof(int) * local_mesh->n_elem);
  if (local_mesh->global_elem_ID == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (i = 0; i < local_mesh->n_elem; i++) {
    local_mesh->global_elem_ID[i] =
        global_mesh->global_elem_ID[elem_local2global[i] - 1];
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Copies the section id of every local element. */
static int const_section_id(const struct hecmwST_local_mesh *global_mesh,
                            struct hecmwST_local_mesh *local_mesh,
                            const int *elem_local2global) {
  int i;

  HECMW_assert(local_mesh->n_elem);
  HECMW_assert(global_mesh->section_ID);

  local_mesh->section_ID = (int *)HECMW_malloc(sizeof(int) *
                                               local_mesh->n_elem);
  if (local_mesh->section_ID == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (i = 0; i < local_mesh->n_elem; i++) {
    local_mesh->section_ID[i] =
        global_mesh->section_ID[elem_local2global[i] - 1];
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Builds the local material-id index from the per-element counts of the
 * corresponding global elements. */
static int const_elem_mat_id_index(const struct hecmwST_local_mesh *global_mesh,
                                   struct hecmwST_local_mesh *local_mesh,
                                   const int *elem_local2global) {
  int old_idx;
  int i;

  HECMW_assert(local_mesh->n_elem > 0);
  HECMW_assert(global_mesh->elem_mat_ID_index);

  local_mesh->elem_mat_ID_index =
      (int *)HECMW_calloc(local_mesh->n_elem + 1, sizeof(int));
  if (local_mesh->elem_mat_ID_index == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (i = 0; i < local_mesh->n_elem; i++) {
    old_idx = elem_local2global[i] - 1;

    local_mesh->elem_mat_ID_index[i + 1] =
        local_mesh->elem_mat_ID_index[i] +
        global_mesh->elem_mat_ID_index[old_idx + 1] -
        global_mesh->elem_mat_ID_index[old_idx];
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Derives n_elem_mat_ID from the last entry of elem_mat_ID_index. */
static int const_n_elem_mat_id(struct hecmwST_local_mesh *local_mesh) {
  HECMW_assert(local_mesh->n_elem > 0);
  HECMW_assert(local_mesh->elem_mat_ID_index);

  local_mesh->n_elem_mat_ID = local_mesh->elem_mat_ID_index[local_mesh->n_elem];

  return RTC_NORMAL;
}

/* Copies the material ids for every local element. */
static int const_elem_mat_id_item(const struct hecmwST_local_mesh *global_mesh,
                                  struct hecmwST_local_mesh *local_mesh,
                                  const int *elem_local2global) {
  int size;
  int counter;
  int i, j, gstart, gend, lstart, lend;

  HECMW_assert(local_mesh->n_elem > 0);
  HECMW_assert(local_mesh->elem_mat_ID_index[local_mesh->n_elem] >= 0);

  if (local_mesh->elem_mat_ID_index[local_mesh->n_elem] == 0) {
    local_mesh->elem_mat_ID_item = NULL;
    return RTC_NORMAL;
  }

  size = sizeof(int) * local_mesh->elem_mat_ID_index[local_mesh->n_elem];
  local_mesh->elem_mat_ID_item = (int *)HECMW_malloc(size);
  if (local_mesh->elem_mat_ID_item == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (counter = 0, i = 0; i < local_mesh->n_elem; i++) {
    gstart =
global_mesh->elem_mat_ID_index[elem_local2global[i] - 1]; gend = global_mesh->elem_mat_ID_index[elem_local2global[i]]; lstart = local_mesh->elem_mat_ID_index[i]; lend = local_mesh->elem_mat_ID_index[i + 1]; HECMW_assert(lend - lstart == gend - gstart); for (j = 0; j < lend - lstart; j++) { local_mesh->elem_mat_ID_item[lstart + j] = global_mesh->elem_mat_ID_item[gstart + j]; counter++; } HECMW_assert(counter == local_mesh->elem_mat_ID_index[i + 1]); } HECMW_assert(counter == local_mesh->n_elem_mat_ID); return RTC_NORMAL; error: return RTC_ERROR; } static int const_elem_info(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, const int *node_global2local, const int *elem_global2local, const int *elem_local2global, int current_domain) { int rtc; HECMW_assert(global_mesh); HECMW_assert(local_mesh); HECMW_assert(node_global2local); HECMW_assert(elem_global2local); HECMW_assert(elem_local2global); rtc = const_n_elem_type(global_mesh, local_mesh); if (rtc != RTC_NORMAL) goto error; rtc = const_elem_type(global_mesh, local_mesh, elem_local2global); if (rtc != RTC_NORMAL) goto error; if (is_spdup_available(global_mesh)) { rtc = const_elem_type_index_mod(global_mesh, local_mesh, elem_global2local, current_domain); } else { rtc = const_elem_type_index(global_mesh, local_mesh, elem_global2local); } if (rtc != RTC_NORMAL) goto error; rtc = const_elem_type_item(global_mesh, local_mesh); if (rtc != RTC_NORMAL) goto error; rtc = const_elem_node_index(global_mesh, local_mesh, elem_local2global); if (rtc != RTC_NORMAL) goto error; rtc = const_elem_node_item(global_mesh, local_mesh, node_global2local, elem_local2global); if (rtc != RTC_NORMAL) goto error; rtc = const_elem_id(global_mesh, local_mesh, elem_local2global); if (rtc != RTC_NORMAL) goto error; rtc = const_global_elem_id(global_mesh, local_mesh, elem_local2global); if (rtc != RTC_NORMAL) goto error; rtc = const_section_id(global_mesh, local_mesh, elem_local2global); if (rtc != RTC_NORMAL) 
goto error; rtc = const_elem_mat_id_index(global_mesh, local_mesh, elem_local2global); if (rtc != RTC_NORMAL) goto error; rtc = const_n_elem_mat_id(local_mesh); if (rtc != RTC_NORMAL) goto error; rtc = const_elem_mat_id_item(global_mesh, local_mesh, elem_local2global); if (rtc != RTC_NORMAL) goto error; return RTC_NORMAL; error: return RTC_ERROR; } /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - * - - - - - - - - - */ static int const_hecmw_comm(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh) { local_mesh->HECMW_COMM = global_mesh->HECMW_COMM; return RTC_NORMAL; } static int const_zero(struct hecmwST_local_mesh *local_mesh, int current_domain) { local_mesh->zero = (current_domain == 0) ? 1 : 0; return RTC_NORMAL; } static int const_petot(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh) { local_mesh->PETOT = global_mesh->n_subdomain; return RTC_NORMAL; } static int const_pesmptot(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh) { local_mesh->PEsmpTOT = global_mesh->PEsmpTOT; return RTC_NORMAL; } static int const_my_rank(struct hecmwST_local_mesh *local_mesh, int current_domain) { local_mesh->my_rank = current_domain; return RTC_NORMAL; } static int const_errnof(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh) { local_mesh->errnof = global_mesh->errnof; return RTC_NORMAL; } static int const_n_subdomain(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh) { local_mesh->n_subdomain = global_mesh->n_subdomain; return RTC_NORMAL; } static int const_import_item(struct hecmwST_local_mesh *local_mesh, const int *global2local) { int new_id; int i; if (local_mesh->n_neighbor_pe == 0) { local_mesh->import_item = NULL; return RTC_NORMAL; } HECMW_assert(local_mesh->n_neighbor_pe > 0); HECMW_assert(local_mesh->import_index); 
HECMW_assert(local_mesh->import_index[local_mesh->n_neighbor_pe] > 0);
  HECMW_assert(local_mesh->import_item);

  /* rewrite every imported entry from global to local numbering */
  for (i = 0; i < local_mesh->import_index[local_mesh->n_neighbor_pe]; i++) {
    new_id = global2local[local_mesh->import_item[i] - 1];
    local_mesh->import_item[i] = new_id;
  }

  return RTC_NORMAL;
}

/* Renumber the export table in place from global to local IDs. */
static int const_export_item(struct hecmwST_local_mesh *local_mesh,
                             const int *global2local) {
  int new_id;
  int i;

  if (local_mesh->n_neighbor_pe == 0) {
    local_mesh->export_item = NULL;
    return RTC_NORMAL;
  }

  HECMW_assert(local_mesh->n_neighbor_pe > 0);
  HECMW_assert(local_mesh->export_index);
  HECMW_assert(local_mesh->export_index[local_mesh->n_neighbor_pe] > 0);
  HECMW_assert(local_mesh->export_item);

  for (i = 0; i < local_mesh->export_index[local_mesh->n_neighbor_pe]; i++) {
    new_id = global2local[local_mesh->export_item[i] - 1];
    local_mesh->export_item[i] = new_id;
  }

  return RTC_NORMAL;
}

/* Renumber the shared table in place from global to local IDs. */
static int const_shared_item(struct hecmwST_local_mesh *local_mesh,
                             const int *global2local) {
  int new_id;
  int i;

  if (local_mesh->n_neighbor_pe == 0) {
    local_mesh->shared_item = NULL;
    return RTC_NORMAL;
  }

  HECMW_assert(local_mesh->n_neighbor_pe > 0);
  HECMW_assert(local_mesh->shared_index);
  HECMW_assert(local_mesh->shared_index[local_mesh->n_neighbor_pe] > 0);
  HECMW_assert(local_mesh->shared_item);

  for (i = 0; i < local_mesh->shared_index[local_mesh->n_neighbor_pe]; i++) {
    new_id = global2local[local_mesh->shared_item[i] - 1];
    local_mesh->shared_item[i] = new_id;
  }

  return RTC_NORMAL;
}

/* Fill all communication-related fields of the local mesh. For node-based
 * partitioning the import/export tables hold nodes and the shared table holds
 * elements; for element-based partitioning the roles are swapped. */
static int const_comm_info(const struct hecmwST_local_mesh *global_mesh,
                           struct hecmwST_local_mesh *local_mesh,
                           const int *node_global2local,
                           const int *elem_global2local, int current_domain) {
  int rtc;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(node_global2local);
  HECMW_assert(elem_global2local);

  rtc = const_hecmw_comm(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_zero(local_mesh, current_domain);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_petot(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_pesmptot(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_my_rank(local_mesh, current_domain);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_errnof(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_n_subdomain(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  switch (global_mesh->hecmw_flag_parttype) {
    case HECMW_FLAG_PARTTYPE_NODEBASED:
      rtc = const_import_item(local_mesh, node_global2local);
      if (rtc != RTC_NORMAL) goto error;
      rtc = const_export_item(local_mesh, node_global2local);
      if (rtc != RTC_NORMAL) goto error;
      rtc = const_shared_item(local_mesh, elem_global2local);
      if (rtc != RTC_NORMAL) goto error;
      break;

    case HECMW_FLAG_PARTTYPE_ELEMBASED:
      rtc = const_import_item(local_mesh, elem_global2local);
      if (rtc != RTC_NORMAL) goto error;
      rtc = const_export_item(local_mesh, elem_global2local);
      if (rtc != RTC_NORMAL) goto error;
      rtc = const_shared_item(local_mesh, node_global2local);
      if (rtc != RTC_NORMAL) goto error;
      break;

    default:
      HECMW_set_error(HECMW_PART_E_INVALID_PTYPE, "%d",
                      global_mesh->hecmw_flag_parttype);
      goto error;
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 * adaptation data (shallow shares from the global mesh)
 * - - - - - - - - - */

static int const_n_adapt(const struct hecmwST_local_mesh *global_mesh,
                         struct hecmwST_local_mesh *local_mesh) {
  local_mesh->n_adapt = global_mesh->n_adapt;
  return RTC_NORMAL;
}

static int const_coarse_grid_level(const struct hecmwST_local_mesh *global_mesh,
                                   struct hecmwST_local_mesh *local_mesh) {
  local_mesh->coarse_grid_level = global_mesh->coarse_grid_level;
  return RTC_NORMAL;
}

/* NOTE: pointer share, not a deep copy — local and global meshes alias. */
static int const_when_i_was_refined_node(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh) {
  local_mesh->when_i_was_refined_node = global_mesh->when_i_was_refined_node;
  return RTC_NORMAL;
}

static int const_when_i_was_refined_elem(
    const
struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh) {
  /* pointer share, not a deep copy */
  local_mesh->when_i_was_refined_elem = global_mesh->when_i_was_refined_elem;
  return RTC_NORMAL;
}

static int const_adapt_parent_type(const struct hecmwST_local_mesh *global_mesh,
                                   struct hecmwST_local_mesh *local_mesh) {
  local_mesh->adapt_parent_type = global_mesh->adapt_parent_type;
  return RTC_NORMAL;
}

static int const_adapt_type(const struct hecmwST_local_mesh *global_mesh,
                            struct hecmwST_local_mesh *local_mesh) {
  local_mesh->adapt_type = global_mesh->adapt_type;
  return RTC_NORMAL;
}

static int const_adapt_level(const struct hecmwST_local_mesh *global_mesh,
                             struct hecmwST_local_mesh *local_mesh) {
  local_mesh->adapt_level = global_mesh->adapt_level;
  return RTC_NORMAL;
}

static int const_adapt_parent(const struct hecmwST_local_mesh *global_mesh,
                              struct hecmwST_local_mesh *local_mesh) {
  local_mesh->adapt_parent = global_mesh->adapt_parent;
  return RTC_NORMAL;
}

static int const_adapt_children_index(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh) {
  local_mesh->adapt_children_index = global_mesh->adapt_children_index;
  return RTC_NORMAL;
}

static int const_adapt_children_item(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh) {
  local_mesh->adapt_children_item = global_mesh->adapt_children_item;
  return RTC_NORMAL;
}

/* Copy/share every adaptation-related field of the local mesh. */
static int const_adapt_info(const struct hecmwST_local_mesh *global_mesh,
                            struct hecmwST_local_mesh *local_mesh) {
  int rtc;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);

  rtc = const_n_adapt(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_coarse_grid_level(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_when_i_was_refined_node(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_when_i_was_refined_elem(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_adapt_parent_type(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_adapt_type(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_adapt_level(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_adapt_parent(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_adapt_children_index(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_adapt_children_item(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 * section data (shallow shares — sections are global, not partitioned)
 * - - - - - - - - - */

static int const_n_sect(const struct hecmwST_local_mesh *global_mesh,
                        struct hecmwST_local_mesh *local_mesh) {
  local_mesh->section->n_sect = global_mesh->section->n_sect;
  return RTC_NORMAL;
}

static int const_sect_type(const struct hecmwST_local_mesh *global_mesh,
                           struct hecmwST_local_mesh *local_mesh) {
  local_mesh->section->sect_type = global_mesh->section->sect_type;
  return RTC_NORMAL;
}

static int const_sect_opt(const struct hecmwST_local_mesh *global_mesh,
                          struct hecmwST_local_mesh *local_mesh) {
  local_mesh->section->sect_opt = global_mesh->section->sect_opt;
  return RTC_NORMAL;
}

static int const_sect_mat_id_index(const struct hecmwST_local_mesh *global_mesh,
                                   struct hecmwST_local_mesh *local_mesh) {
  local_mesh->section->sect_mat_ID_index =
      global_mesh->section->sect_mat_ID_index;
  return RTC_NORMAL;
}

static int const_sect_mat_id_item(const struct hecmwST_local_mesh *global_mesh,
                                  struct hecmwST_local_mesh *local_mesh) {
  local_mesh->section->sect_mat_ID_item =
      global_mesh->section->sect_mat_ID_item;
  return RTC_NORMAL;
}

static int const_sect_i_index(const struct hecmwST_local_mesh *global_mesh,
                              struct hecmwST_local_mesh *local_mesh) {
  local_mesh->section->sect_I_index = global_mesh->section->sect_I_index;
  return RTC_NORMAL;
}

static int const_sect_i_item(const struct hecmwST_local_mesh *global_mesh,
                             struct hecmwST_local_mesh *local_mesh) {
local_mesh->section->sect_I_item = global_mesh->section->sect_I_item;
  return RTC_NORMAL;
}

static int const_sect_r_index(const struct hecmwST_local_mesh *global_mesh,
                              struct hecmwST_local_mesh *local_mesh) {
  local_mesh->section->sect_R_index = global_mesh->section->sect_R_index;
  return RTC_NORMAL;
}

static int const_sect_r_item(const struct hecmwST_local_mesh *global_mesh,
                             struct hecmwST_local_mesh *local_mesh) {
  local_mesh->section->sect_R_item = global_mesh->section->sect_R_item;
  return RTC_NORMAL;
}

/* Share all section data with the local mesh (sections are not partitioned). */
static int const_sect_info(const struct hecmwST_local_mesh *global_mesh,
                           struct hecmwST_local_mesh *local_mesh) {
  int rtc;

  HECMW_assert(global_mesh);
  HECMW_assert(local_mesh);
  HECMW_assert(global_mesh->section);
  HECMW_assert(local_mesh->section);

  rtc = const_n_sect(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_sect_type(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_sect_opt(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_sect_mat_id_index(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_sect_mat_id_item(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_sect_i_index(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_sect_i_item(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_sect_r_index(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_sect_r_item(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 * material data (shallow shares — materials are global, not partitioned)
 * - - - - - - - - - */

static int const_n_mat(const struct hecmwST_local_mesh *global_mesh,
                       struct hecmwST_local_mesh *local_mesh) {
  local_mesh->material->n_mat = global_mesh->material->n_mat;
  return RTC_NORMAL;
}

static int const_n_mat_item(const struct hecmwST_local_mesh *global_mesh,
                            struct hecmwST_local_mesh *local_mesh) {
  local_mesh->material->n_mat_item = global_mesh->material->n_mat_item;
  return RTC_NORMAL;
}

static int const_n_mat_subitem(const struct hecmwST_local_mesh *global_mesh,
                               struct hecmwST_local_mesh *local_mesh) {
  local_mesh->material->n_mat_subitem = global_mesh->material->n_mat_subitem;
  return RTC_NORMAL;
}

static int const_n_mat_table(const struct hecmwST_local_mesh *global_mesh,
                             struct hecmwST_local_mesh *local_mesh) {
  local_mesh->material->n_mat_table = global_mesh->material->n_mat_table;
  return RTC_NORMAL;
}

static int const_mat_name(const struct hecmwST_local_mesh *global_mesh,
                          struct hecmwST_local_mesh *local_mesh) {
  local_mesh->material->mat_name = global_mesh->material->mat_name;
  return RTC_NORMAL;
}

static int const_mat_item_index(const struct hecmwST_local_mesh *global_mesh,
                                struct hecmwST_local_mesh *local_mesh) {
  local_mesh->material->mat_item_index = global_mesh->material->mat_item_index;
  return RTC_NORMAL;
}

static int const_mat_subitem_index(const struct hecmwST_local_mesh *global_mesh,
                                   struct hecmwST_local_mesh *local_mesh) {
  local_mesh->material->mat_subitem_index =
      global_mesh->material->mat_subitem_index;
  return RTC_NORMAL;
}

static int const_mat_table_index(const struct hecmwST_local_mesh *global_mesh,
                                 struct hecmwST_local_mesh *local_mesh) {
  local_mesh->material->mat_table_index =
      global_mesh->material->mat_table_index;
  return RTC_NORMAL;
}

static int const_mat_val(const struct hecmwST_local_mesh *global_mesh,
                         struct hecmwST_local_mesh *local_mesh) {
  local_mesh->material->mat_val = global_mesh->material->mat_val;
  return RTC_NORMAL;
}

static int const_mat_temp(const struct hecmwST_local_mesh *global_mesh,
                          struct hecmwST_local_mesh *local_mesh) {
  local_mesh->material->mat_temp = global_mesh->material->mat_temp;
  return RTC_NORMAL;
}

/* Share all material data with the local mesh. */
static int const_mat_info(const struct hecmwST_local_mesh *global_mesh,
                          struct hecmwST_local_mesh *local_mesh) {
  int rtc;

  HECMW_assert(global_mesh);
  HECMW_assert(global_mesh->material);
  HECMW_assert(local_mesh);
  HECMW_assert(local_mesh->material);

  rtc = const_n_mat(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_n_mat_item(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_n_mat_subitem(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_n_mat_table(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_mat_name(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_mat_item_index(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_mat_subitem_index(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_mat_table_index(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_mat_val(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_mat_temp(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 * MPC (multi-point constraint) data
 * - - - - - - - - - */

/* Count the MPC groups whose nodes ALL live in this subdomain; mark each such
 * group by setting MASK in mpc_flag[i]. A group is kept only when every node
 * it references maps to a positive local ID. */
static int const_n_mpc(const struct hecmwST_local_mesh *global_mesh,
                       struct hecmwST_local_mesh *local_mesh,
                       const int *node_global2local, char *mpc_flag) {
  struct hecmwST_mpc *mpc_global = global_mesh->mpc;
  struct hecmwST_mpc *mpc_local = local_mesh->mpc;
  int node, diff, evalsum, counter;
  int i, j;

  for (counter = 0, i = 0; i < mpc_global->n_mpc; i++) {
    diff = mpc_global->mpc_index[i + 1] - mpc_global->mpc_index[i];
    evalsum = 0;
    for (j = mpc_global->mpc_index[i]; j < mpc_global->mpc_index[i + 1]; j++) {
      node = mpc_global->mpc_item[j];
      if (node_global2local[node - 1] > 0) evalsum++;
    }
    if (evalsum == diff) { /* every node of the group is local */
      MASK_BIT(mpc_flag[i], MASK);
      counter++;
    }
  }
  mpc_local->n_mpc = counter;

  return RTC_NORMAL;
}

/* Build the local MPC CSR index over the groups flagged by const_n_mpc(). */
static int const_mpc_index(const struct hecmwST_local_mesh *global_mesh,
                           struct hecmwST_local_mesh *local_mesh,
                           const char *mpc_flag) {
  struct hecmwST_mpc *mpc_global = global_mesh->mpc;
  struct hecmwST_mpc *mpc_local = local_mesh->mpc;
  int counter;
  int i;
mpc_local->mpc_index = (int *)HECMW_calloc(mpc_local->n_mpc + 1, sizeof(int));
  if (local_mesh->mpc->mpc_index == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (counter = 0, i = 0; i < mpc_global->n_mpc; i++) {
    if (EVAL_BIT(mpc_flag[i], MASK)) {
      mpc_local->mpc_index[counter + 1] = mpc_local->mpc_index[counter] +
                                          mpc_global->mpc_index[i + 1] -
                                          mpc_global->mpc_index[i];
      counter++;
    }
  }
  HECMW_assert(counter == mpc_local->n_mpc);

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Copy the node items of every kept MPC group, renumbered to local IDs. */
static int const_mpc_item(const struct hecmwST_local_mesh *global_mesh,
                          struct hecmwST_local_mesh *local_mesh,
                          const int *node_global2local, const char *mpc_flag) {
  struct hecmwST_mpc *mpc_global = global_mesh->mpc;
  struct hecmwST_mpc *mpc_local = local_mesh->mpc;
  int mcounter, icounter;
  int i, j;

  mpc_local->mpc_item =
      (int *)HECMW_malloc(sizeof(int) * mpc_local->mpc_index[mpc_local->n_mpc]);
  if (mpc_local->mpc_item == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (mcounter = 0, icounter = 0, i = 0; i < mpc_global->n_mpc; i++) {
    if (EVAL_BIT(mpc_flag[i], MASK)) {
      for (j = mpc_global->mpc_index[i]; j < mpc_global->mpc_index[i + 1];
           j++) {
        mpc_local->mpc_item[mcounter++] =
            node_global2local[mpc_global->mpc_item[j] - 1];
      }
      HECMW_assert(mcounter == mpc_local->mpc_index[++icounter]);
    }
  }
  HECMW_assert(icounter == mpc_local->n_mpc);
  HECMW_assert(mcounter == mpc_local->mpc_index[mpc_local->n_mpc]);

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Copy the DOF codes of every kept MPC group (no renumbering needed). */
static int const_mpc_dof(const struct hecmwST_local_mesh *global_mesh,
                         struct hecmwST_local_mesh *local_mesh,
                         const char *mpc_flag) {
  struct hecmwST_mpc *mpc_global = global_mesh->mpc;
  struct hecmwST_mpc *mpc_local = local_mesh->mpc;
  int mcounter, icounter;
  int i, j;

  mpc_local->mpc_dof =
      (int *)HECMW_malloc(sizeof(int) * mpc_local->mpc_index[mpc_local->n_mpc]);
  if (local_mesh->mpc->mpc_dof == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (mcounter = 0, icounter = 0, i = 0; i < mpc_global->n_mpc; i++) {
    if (EVAL_BIT(mpc_flag[i], MASK)) {
      for (j = mpc_global->mpc_index[i]; j < mpc_global->mpc_index[i + 1];
           j++) {
        mpc_local->mpc_dof[mcounter++] = mpc_global->mpc_dof[j];
      }
      HECMW_assert(mcounter == mpc_local->mpc_index[++icounter]);
    }
  }
  HECMW_assert(icounter == mpc_local->n_mpc);
  HECMW_assert(mcounter == mpc_local->mpc_index[mpc_local->n_mpc]);

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Copy the coefficient values of every kept MPC group. */
static int const_mpc_val(const struct hecmwST_local_mesh *global_mesh,
                         struct hecmwST_local_mesh *local_mesh,
                         const char *mpc_flag) {
  struct hecmwST_mpc *mpc_global = global_mesh->mpc;
  struct hecmwST_mpc *mpc_local = local_mesh->mpc;
  int size;
  int mcounter, icounter;
  int i, j;

  size = sizeof(double) * mpc_local->mpc_index[mpc_local->n_mpc];
  mpc_local->mpc_val = (double *)HECMW_malloc(size);
  if (local_mesh->mpc->mpc_val == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (mcounter = 0, icounter = 0, i = 0; i < mpc_global->n_mpc; i++) {
    if (EVAL_BIT(mpc_flag[i], MASK)) {
      for (j = mpc_global->mpc_index[i]; j < mpc_global->mpc_index[i + 1];
           j++) {
        mpc_local->mpc_val[mcounter++] = mpc_global->mpc_val[j];
      }
      HECMW_assert(mcounter == mpc_local->mpc_index[++icounter]);
    }
  }
  HECMW_assert(icounter == local_mesh->mpc->n_mpc);
  HECMW_assert(mcounter == local_mesh->mpc->mpc_index[local_mesh->mpc->n_mpc]);

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Copy the constant term of every kept MPC group. */
static int const_mpc_const(const struct hecmwST_local_mesh *global_mesh,
                           struct hecmwST_local_mesh *local_mesh,
                           const char *mpc_flag) {
  struct hecmwST_mpc *mpc_global = global_mesh->mpc;
  struct hecmwST_mpc *mpc_local = local_mesh->mpc;
  int size;
  int icounter;
  int i;

  size = sizeof(double) * mpc_local->n_mpc;
  mpc_local->mpc_const = (double *)HECMW_malloc(size);
  if (local_mesh->mpc->mpc_const == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (icounter = 0, i = 0; i < mpc_global->n_mpc; i++) {
    if (EVAL_BIT(mpc_flag[i], MASK)) {
      mpc_local->mpc_const[icounter] = mpc_global->mpc_const[i];
      icounter++;
    }
  }
  HECMW_assert(icounter == local_mesh->mpc->n_mpc);

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Driver: select the MPC groups fully contained in this subdomain (via a
 * temporary mpc_flag bitmap) and copy their index/item/dof/val/const arrays.
 * Falls back to an empty MPC struct when nothing is kept. */
static int const_mpc_info(const struct hecmwST_local_mesh *global_mesh,
                          struct hecmwST_local_mesh *local_mesh,
                          const int *node_global2local) {
  char *mpc_flag = NULL;
  int rtc;

  HECMW_assert(global_mesh);
  HECMW_assert(global_mesh->mpc);
  HECMW_assert(local_mesh);
  HECMW_assert(local_mesh->mpc);
  HECMW_assert(node_global2local);

  if (global_mesh->mpc->n_mpc == 0) {
    init_struct_mpc(local_mesh);
    return RTC_NORMAL;
  }

  mpc_flag = (char *)HECMW_calloc(global_mesh->mpc->n_mpc, sizeof(char));
  if (mpc_flag == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  rtc = const_n_mpc(global_mesh, local_mesh, node_global2local, mpc_flag);
  if (rtc != RTC_NORMAL) goto error;

  if (local_mesh->mpc->n_mpc == 0) {
    init_struct_mpc(local_mesh);
    HECMW_free(mpc_flag);
    return RTC_NORMAL;
  }

  rtc = const_mpc_index(global_mesh, local_mesh, mpc_flag);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_mpc_item(global_mesh, local_mesh, node_global2local, mpc_flag);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_mpc_dof(global_mesh, local_mesh, mpc_flag);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_mpc_val(global_mesh, local_mesh, mpc_flag);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_mpc_const(global_mesh, local_mesh, mpc_flag);
  if (rtc != RTC_NORMAL) goto error;

  HECMW_free(mpc_flag);

  return RTC_NORMAL;

error:
  HECMW_free(mpc_flag);

  return RTC_ERROR;
}

/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 * amplitude data (shallow shares — amplitudes are global, not partitioned)
 * - - - - - - - - - */

static int const_n_amp(const struct hecmwST_local_mesh *global_mesh,
                       struct hecmwST_local_mesh *local_mesh) {
  local_mesh->amp->n_amp = global_mesh->amp->n_amp;
  return RTC_NORMAL;
}

static int const_amp_name(const struct hecmwST_local_mesh *global_mesh,
                          struct hecmwST_local_mesh *local_mesh) {
  local_mesh->amp->amp_name = global_mesh->amp->amp_name;
  return RTC_NORMAL;
}

static int const_amp_type_definition(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh) {
  local_mesh->amp->amp_type_definition =
global_mesh->amp->amp_type_definition;
  return RTC_NORMAL;
}

static int const_amp_type_time(const struct hecmwST_local_mesh *global_mesh,
                               struct hecmwST_local_mesh *local_mesh) {
  local_mesh->amp->amp_type_time = global_mesh->amp->amp_type_time;
  return RTC_NORMAL;
}

static int const_amp_type_value(const struct hecmwST_local_mesh *global_mesh,
                                struct hecmwST_local_mesh *local_mesh) {
  local_mesh->amp->amp_type_value = global_mesh->amp->amp_type_value;
  return RTC_NORMAL;
}

static int const_amp_index(const struct hecmwST_local_mesh *global_mesh,
                           struct hecmwST_local_mesh *local_mesh) {
  local_mesh->amp->amp_index = global_mesh->amp->amp_index;
  return RTC_NORMAL;
}

static int const_amp_val(const struct hecmwST_local_mesh *global_mesh,
                         struct hecmwST_local_mesh *local_mesh) {
  local_mesh->amp->amp_val = global_mesh->amp->amp_val;
  return RTC_NORMAL;
}

static int const_amp_table(const struct hecmwST_local_mesh *global_mesh,
                           struct hecmwST_local_mesh *local_mesh) {
  local_mesh->amp->amp_table = global_mesh->amp->amp_table;
  return RTC_NORMAL;
}

/* Share all amplitude data with the local mesh; empty struct when n_amp==0. */
static int const_amp_info(const struct hecmwST_local_mesh *global_mesh,
                          struct hecmwST_local_mesh *local_mesh) {
  int rtc;

  HECMW_assert(global_mesh);
  HECMW_assert(global_mesh->amp);
  HECMW_assert(local_mesh);
  HECMW_assert(local_mesh->amp);

  if (global_mesh->amp->n_amp == 0) {
    init_struct_amp(local_mesh);
    return RTC_NORMAL;
  }

  rtc = const_n_amp(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_amp_name(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_amp_type_definition(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_amp_type_time(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_amp_type_value(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_amp_index(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_amp_val(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_amp_table(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 * node group data
 * - - - - - - - - - */

/* For the EQUATION_BLOCK node group: return a malloc'd array with one entry
 * per block, holding the block size when the whole block is internal to this
 * subdomain and 0 otherwise. grp_item entries are cumulative block ends.
 * Returns NULL on allocation failure. */
static int *const_node_grp_mask_eqn(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh, const int *node_global2local,
    int eqn_block_idx) {
  struct hecmwST_node_grp *node_group_global = global_mesh->node_group;
  int *n_eqn_item = NULL;
  int diff, evalsum;
  int i, j, is, ie, js;

  is = node_group_global->grp_index[eqn_block_idx];
  ie = node_group_global->grp_index[eqn_block_idx + 1];

  n_eqn_item = (int *)HECMW_malloc(sizeof(int) * (ie - is));
  if (n_eqn_item == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (js = 0, i = 0; i < ie - is; i++) {
    diff = node_group_global->grp_item[is + i] - js;
    /* count nodes of this block that are internal (local id in 1..nn_internal) */
    for (evalsum = 0, j = js; j < node_group_global->grp_item[is + i]; j++) {
      if (node_global2local[j] > 0 &&
          node_global2local[j] <= local_mesh->nn_internal)
        evalsum++;
    }
    if (evalsum) {
      HECMW_assert(evalsum == diff); /* a block must be all-or-nothing */
      n_eqn_item[i] = diff;
    } else {
      n_eqn_item[i] = 0;
    }
    js = node_group_global->grp_item[is + i];
  }

  return n_eqn_item;

error:
  return NULL;
}

static int const_node_n_grp(const struct hecmwST_local_mesh *global_mesh,
                            struct hecmwST_local_mesh *local_mesh) {
  local_mesh->node_group->n_grp = global_mesh->node_group->n_grp;
  return RTC_NORMAL;
}

static int const_node_grp_name(const struct hecmwST_local_mesh *global_mesh,
                               struct hecmwST_local_mesh *local_mesh) {
  local_mesh->node_group->grp_name = global_mesh->node_group->grp_name;
  return RTC_NORMAL;
}

/* Build the local node-group CSR index: count, per group, the member nodes
 * present in this subdomain (the EQUATION_BLOCK group counts whole blocks). */
static int const_node_grp_index(const struct hecmwST_local_mesh *global_mesh,
                                struct hecmwST_local_mesh *local_mesh,
                                const int *node_global2local,
                                const int *n_eqn_item, int eqn_block_idx) {
  struct hecmwST_node_grp *node_group_global = global_mesh->node_group;
  struct hecmwST_node_grp *node_group_local = local_mesh->node_group;
  int node;
  int counter, diff;
  int i, j;

  node_group_local->grp_index = (int
*)HECMW_calloc(node_group_local->n_grp + 1, sizeof(int));
  if (node_group_local->grp_index == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (counter = 0, i = 0; i < node_group_global->n_grp; i++) {
    if (i != eqn_block_idx) {
      for (j = node_group_global->grp_index[i];
           j < node_group_global->grp_index[i + 1]; j++) {
        node = node_group_global->grp_item[j];
        if (node_global2local[node - 1]) counter++;
      }
    } else {
      /* EQUATION_BLOCK group: one entry per fully-local block */
      diff = node_group_global->grp_index[i + 1] -
             node_group_global->grp_index[i];
      for (j = 0; j < diff; j++) {
        if (n_eqn_item[j] > 0) counter++;
      }
    }
    node_group_local->grp_index[i + 1] = counter;
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/*K. Inagaki */
/* Speed-up variant of const_node_grp_index(): uses the precomputed per-domain
 * counters (n_int_nlist/n_bnd_nlist/ngrp_idx) instead of scanning every node.
 * A group covering all n_node nodes is sized as internal + external-boundary
 * nodes of this domain.
 * NOTE(review): local `node` is only referenced by commented-out code here. */
static int const_node_grp_index_mod(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh, const int *node_global2local,
    const int *n_eqn_item, int eqn_block_idx, int domain) {
  struct hecmwST_node_grp *node_group_global = global_mesh->node_group;
  struct hecmwST_node_grp *node_group_local = local_mesh->node_group;
  int node;
  int counter, diff;
  int i, j;

  node_group_local->grp_index =
      (int *)HECMW_calloc(node_group_local->n_grp + 1, sizeof(int));
  if (node_group_local->grp_index == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (counter = 0, i = 0; i < node_group_global->n_grp; i++) {
    if (i != eqn_block_idx) {
      if (node_group_global->grp_index[i + 1] -
              node_group_global->grp_index[i] ==
          global_mesh->n_node) {
        /* group contains every node: internal + outside-boundary count */
        counter += n_int_nlist[domain];
        counter += n_bnd_nlist[2 * domain + 1] - n_bnd_nlist[2 * domain];
      } else {
        counter += ngrp_idx[domain][i + 1] - ngrp_idx[domain][i];
        /* for( j=node_group_global->grp_index[i];
        j<node_group_global->grp_index[i+1]; j++ ) { node =
        node_group_global->grp_item[j]; if( node_global2local[node-1] )
        counter++; } */
      }
    } else {
      diff = node_group_global->grp_index[i + 1] -
             node_group_global->grp_index[i];
      for (j = 0; j < diff; j++) {
        if (n_eqn_item[j] > 0) counter++;
      }
    }
    node_group_local->grp_index[i + 1] = counter;
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Fill the local node-group item array: ordinary groups get local node IDs;
 * the EQUATION_BLOCK group gets cumulative local block ends, with asserts
 * checking that each kept block maps onto a contiguous local ID range. */
static int const_node_grp_item(const struct hecmwST_local_mesh *global_mesh,
                               struct hecmwST_local_mesh *local_mesh,
                               const int *node_global2local,
                               const int *n_eqn_item, int eqn_block_idx) {
  struct hecmwST_node_grp *node_group_global = global_mesh->node_group;
  struct hecmwST_node_grp *node_group_local = local_mesh->node_group;
  int node;
  int size;
  int counter;
  int i, j, k, js, je, ks, ls;

  size = sizeof(int) * node_group_local->grp_index[node_group_local->n_grp];
  node_group_local->grp_item = (int *)HECMW_malloc(size);
  if (node_group_local->grp_item == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (counter = 0, i = 0; i < node_group_global->n_grp; i++) {
    if (i != eqn_block_idx) {
      for (j = node_group_global->grp_index[i];
           j < node_group_global->grp_index[i + 1]; j++) {
        node = node_group_global->grp_item[j];
        if (node_global2local[node - 1]) {
          node_group_local->grp_item[counter++] = node_global2local[node - 1];
        }
      }
    } else {
      js = node_group_global->grp_index[i];
      je = node_group_global->grp_index[i + 1];
      for (ks = 0, ls = 0, j = js; j < je; j++) {
        if (n_eqn_item[j - js]) {
          HECMW_assert(n_eqn_item[j - js] ==
                       node_group_global->grp_item[j] - ks);
          node_group_local->grp_item[counter] = ls + n_eqn_item[j - js];
          for (k = ks; k < node_group_global->grp_item[j]; k++) {
            HECMW_assert(ls < node_global2local[k] &&
                         node_global2local[k] <=
                             node_group_local->grp_item[counter]);
          }
          ls = node_group_local->grp_item[counter];
          counter++;
        }
        ks = node_group_global->grp_item[j];
      }
    }
    HECMW_assert(counter == node_group_local->grp_index[i + 1]);
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/*K. 
Inagaki */
/* Speed-up variant of const_node_grp_item(): for a group spanning all nodes,
 * merge the sorted internal-node list (int_nlist) and the sorted outside-
 * boundary node list (bnd_nlist, second half) so the emitted local IDs keep
 * ascending global order; other groups are read from precomputed ngrp_item. */
static int const_node_grp_item_mod(const struct hecmwST_local_mesh *global_mesh,
                                   struct hecmwST_local_mesh *local_mesh,
                                   const int *node_global2local,
                                   const int *n_eqn_item, int eqn_block_idx,
                                   int domain) {
  struct hecmwST_node_grp *node_group_global = global_mesh->node_group;
  struct hecmwST_node_grp *node_group_local = local_mesh->node_group;
  int node;
  int size;
  int counter;
  int i, j, k, js, je, ks, ls;
  int idx1, idx2, node1, node2, n_int, n_bnd, n_out, maxn;

  size = sizeof(int) * node_group_local->grp_index[node_group_local->n_grp];
  node_group_local->grp_item = (int *)HECMW_malloc(size);
  if (node_group_local->grp_item == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  n_int = n_int_nlist[domain];                               /* internal nodes   */
  n_bnd = n_bnd_nlist[2 * domain];                           /* boundary offset  */
  n_out = n_bnd_nlist[2 * domain + 1] - n_bnd_nlist[2 * domain]; /* outside bnd  */
  maxn = global_mesh->n_node + 1; /* sentinel larger than any global node ID */

  for (counter = 0, i = 0; i < node_group_global->n_grp; i++) {
    if (i != eqn_block_idx) {
      if (node_group_global->grp_index[i + 1] -
              node_group_global->grp_index[i] ==
          global_mesh->n_node) {
        /* two-way merge of the internal and outside-boundary node lists */
        idx1 = 0;
        idx2 = 0;
        node1 = (n_int == 0) ? maxn : int_nlist[domain][0];
        node2 = (n_out == 0) ? maxn : bnd_nlist[domain][n_bnd];
        for (j = 0; j < n_int + n_out; j++) {
          if (node1 < node2) {
            node_group_local->grp_item[counter++] =
                node_global2local[node1 - 1];
            idx1++;
            node1 = (idx1 == n_int) ? maxn : int_nlist[domain][idx1];
          } else {
            node_group_local->grp_item[counter++] =
                node_global2local[node2 - 1];
            idx2++;
            node2 = (idx2 == n_out) ? maxn : bnd_nlist[domain][idx2 + n_bnd];
          }
        }
      } else {
        if (ngrp_idx[domain][i + 1] - ngrp_idx[domain][i] == 0) continue;
        for (j = ngrp_idx[domain][i]; j < ngrp_idx[domain][i + 1]; j++) {
          node = ngrp_item[domain][j];
          node_group_local->grp_item[counter++] = node_global2local[node - 1];
        }
      }
    } else {
      /* EQUATION_BLOCK group handled exactly as in const_node_grp_item() */
      js = node_group_global->grp_index[i];
      je = node_group_global->grp_index[i + 1];
      for (ks = 0, ls = 0, j = js; j < je; j++) {
        if (n_eqn_item[j - js]) {
          HECMW_assert(n_eqn_item[j - js] ==
                       node_group_global->grp_item[j] - ks);
          node_group_local->grp_item[counter] = ls + n_eqn_item[j - js];
          for (k = ks; k < node_group_global->grp_item[j]; k++) {
            HECMW_assert(ls < node_global2local[k] &&
                         node_global2local[k] <=
                             node_group_local->grp_item[counter]);
          }
          ls = node_group_local->grp_item[counter];
          counter++;
        }
        ks = node_group_global->grp_item[j];
      }
    }
    HECMW_assert(counter == node_group_local->grp_index[i + 1]);
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Driver for the node-group data of the local mesh. Builds the optional
 * EQUATION_BLOCK mask first, then the CSR index and items (speed-up variants
 * when the per-domain lists are available). */
static int const_node_grp_info(const struct hecmwST_local_mesh *global_mesh,
                               struct hecmwST_local_mesh *local_mesh,
                               const int *node_global2local,
                               int current_domain) {
  int *n_eqn_item = NULL;
  int eqn_block_idx;
  int rtc;

  HECMW_assert(global_mesh);
  HECMW_assert(global_mesh->node_group);
  HECMW_assert(local_mesh);
  HECMW_assert(local_mesh->node_group);
  HECMW_assert(node_global2local);

  if (global_mesh->node_group->n_grp == 0) {
    init_struct_node_grp(local_mesh);
    return RTC_NORMAL;
  }

  eqn_block_idx = search_eqn_block_idx(global_mesh);

  if (eqn_block_idx >= 0) {
    n_eqn_item = const_node_grp_mask_eqn(global_mesh, local_mesh,
                                         node_global2local, eqn_block_idx);
    if (n_eqn_item == NULL) goto error;
  }

  rtc = const_node_n_grp(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_node_grp_name(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  if (is_spdup_available(global_mesh)) {
    rtc = const_node_grp_index_mod(global_mesh, local_mesh, node_global2local,
                                   n_eqn_item, eqn_block_idx, current_domain);
    if (rtc != RTC_NORMAL) goto
error; rtc = const_node_grp_item_mod(global_mesh, local_mesh, node_global2local, n_eqn_item, eqn_block_idx, current_domain); if (rtc != RTC_NORMAL) goto error; } else { rtc = const_node_grp_index(global_mesh, local_mesh, node_global2local, n_eqn_item, eqn_block_idx); if (rtc != RTC_NORMAL) goto error; rtc = const_node_grp_item(global_mesh, local_mesh, node_global2local, n_eqn_item, eqn_block_idx); if (rtc != RTC_NORMAL) goto error; } HECMW_free(n_eqn_item); return RTC_NORMAL; error: HECMW_free(n_eqn_item); return RTC_ERROR; } /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - * - - - - - - - - - */ static int const_elem_n_grp(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh) { local_mesh->elem_group->n_grp = global_mesh->elem_group->n_grp; return RTC_NORMAL; } static int const_elem_grp_name(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh) { local_mesh->elem_group->grp_name = global_mesh->elem_group->grp_name; return RTC_NORMAL; } static int const_elem_grp_index(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, const int *elem_global2local) { struct hecmwST_elem_grp *elem_group_global = global_mesh->elem_group; struct hecmwST_elem_grp *elem_group_local = local_mesh->elem_group; int elem; int counter; int i, j; elem_group_local->grp_index = (int *)HECMW_calloc(elem_group_local->n_grp + 1, sizeof(int)); if (elem_group_local->grp_index == NULL) { HECMW_set_error(errno, ""); goto error; } for (counter = 0, i = 0; i < elem_group_global->n_grp; i++) { for (j = elem_group_global->grp_index[i]; j < elem_group_global->grp_index[i + 1]; j++) { elem = elem_group_global->grp_item[j]; if (elem_global2local[elem - 1]) counter++; } elem_group_local->grp_index[i + 1] = counter; } return RTC_NORMAL; error: return RTC_ERROR; } /*K. 
Inagaki */ static int const_elem_grp_index_mod( const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, const int *elem_global2local, int domain) { struct hecmwST_elem_grp *elem_group_global = global_mesh->elem_group; struct hecmwST_elem_grp *elem_group_local = local_mesh->elem_group; int elem; int counter; int i, j, idx1, idx2, elem1, elem2; elem_group_local->grp_index = (int *)HECMW_calloc(elem_group_local->n_grp + 1, sizeof(int)); if (elem_group_local->grp_index == NULL) { HECMW_set_error(errno, ""); goto error; } for (counter = 0, i = 0; i < elem_group_global->n_grp; i++) { if (elem_group_global->grp_index[i + 1] - elem_group_global->grp_index[i] == global_mesh->n_elem) { counter += n_int_elist[domain]; counter += n_bnd_elist[2 * domain + 1] - n_bnd_elist[2 * domain]; } else { counter += egrp_idx[domain][i + 1] - egrp_idx[domain][i]; } elem_group_local->grp_index[i + 1] = counter; } return RTC_NORMAL; error: return RTC_ERROR; } static int const_elem_grp_item(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, const int *elem_global2local) { struct hecmwST_elem_grp *elem_group_global = global_mesh->elem_group; struct hecmwST_elem_grp *elem_group_local = local_mesh->elem_group; int elem; int size; int counter; int i, j; size = sizeof(int) * elem_group_local->grp_index[elem_group_local->n_grp]; elem_group_local->grp_item = (int *)HECMW_malloc(size); if (local_mesh->elem_group->grp_item == NULL) { HECMW_set_error(errno, ""); goto error; } for (counter = 0, i = 0; i < elem_group_global->n_grp; i++) { for (j = elem_group_global->grp_index[i]; j < elem_group_global->grp_index[i + 1]; j++) { elem = elem_group_global->grp_item[j]; if (elem_global2local[elem - 1]) { elem_group_local->grp_item[counter++] = elem_global2local[elem - 1]; } } HECMW_assert(counter == elem_group_local->grp_index[i + 1]); } return RTC_NORMAL; error: return RTC_ERROR; } /*K. 
Inagaki */
/* Fast-path construction of local element-group items (speed-up mode).
 * For a group that contains every global element, the local members are
 * produced by merging two already-sorted per-domain lists (internal and
 * boundary elements) with a two-pointer merge; otherwise the precomputed
 * egrp_idx/egrp_item tables are copied directly.
 * Returns RTC_NORMAL on success, RTC_ERROR on allocation failure. */
static int const_elem_grp_item_mod(const struct hecmwST_local_mesh *global_mesh,
                                   struct hecmwST_local_mesh *local_mesh,
                                   const int *elem_global2local, int domain) {
  struct hecmwST_elem_grp *elem_group_global = global_mesh->elem_group;
  struct hecmwST_elem_grp *elem_group_local = local_mesh->elem_group;
  int elem;
  int size;
  int counter;
  int i, j, idx1, idx2, elem1, elem2, n_int, n_bnd, n_out, maxe;

  size = sizeof(int) * elem_group_local->grp_index[elem_group_local->n_grp];
  elem_group_local->grp_item = (int *)HECMW_malloc(size);
  if (local_mesh->elem_group->grp_item == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  n_int = n_int_elist[domain];                 /* # internal elements */
  n_bnd = n_bnd_elist[2 * domain];             /* offset of external part */
  n_out = n_bnd_elist[2 * domain + 1] - n_bnd_elist[2 * domain];
  /* sentinel: one past the largest valid global element id, so an exhausted
   * list always loses the merge comparison below */
  maxe = global_mesh->n_elem + 1;

  for (counter = 0, i = 0; i < elem_group_global->n_grp; i++) {
    if (elem_group_global->grp_index[i + 1] -
            elem_group_global->grp_index[i] ==
        global_mesh->n_elem) {
      /* Group holds all elements: merge int_elist and the external slice of
       * bnd_elist in ascending global-id order.  elem1/elem2 are the current
       * heads of the two lists; idx1/idx2 the read positions. */
      elem1 = (n_int == 0) ? maxe : int_elist[domain][0];
      elem2 = (n_out == 0) ? maxe : bnd_elist[domain][n_bnd];
      for (idx1 = 0, idx2 = 0, j = 0; j < n_int + n_out; j++) {
        if (elem1 < elem2) {
          elem_group_local->grp_item[counter++] = elem_global2local[elem1 - 1];
          idx1++;
          elem1 = (idx1 == n_int) ? maxe : int_elist[domain][idx1];
        } else {
          elem_group_local->grp_item[counter++] = elem_global2local[elem2 - 1];
          idx2++;
          elem2 = (idx2 == n_out) ? maxe : bnd_elist[domain][idx2 + n_bnd];
        }
      }
    } else {
      /* Ordinary group: members were precomputed into egrp_item. */
      if (egrp_idx[domain][i + 1] - egrp_idx[domain][i] == 0) continue;
      for (j = egrp_idx[domain][i]; j < egrp_idx[domain][i + 1]; j++) {
        elem = egrp_item[domain][j];
        elem_group_local->grp_item[counter++] = elem_global2local[elem - 1];
      }
    }
    HECMW_assert(counter == elem_group_local->grp_index[i + 1]);
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Driver for local element-group construction: copies group count and
 * names, then builds index/item arrays via the speed-up variants when
 * available, or the generic scans otherwise.  An empty global group table
 * just initializes an empty local one. */
static int const_elem_grp_info(const struct hecmwST_local_mesh *global_mesh,
                               struct hecmwST_local_mesh *local_mesh,
                               const int *elem_global2local,
                               int current_domain) {
  int rtc;

  HECMW_assert(global_mesh);
  HECMW_assert(global_mesh->elem_group);
  HECMW_assert(local_mesh);
  HECMW_assert(local_mesh->elem_group);
  HECMW_assert(elem_global2local);

  if (global_mesh->elem_group->n_grp == 0) {
    init_struct_elem_grp(local_mesh);
    return RTC_NORMAL;
  }

  rtc = const_elem_n_grp(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_elem_grp_name(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  if (is_spdup_available(global_mesh)) {
    rtc = const_elem_grp_index_mod(global_mesh, local_mesh, elem_global2local,
                                   current_domain);
    if (rtc != RTC_NORMAL) goto error;

    rtc = const_elem_grp_item_mod(global_mesh, local_mesh, elem_global2local,
                                  current_domain);
    if (rtc != RTC_NORMAL) goto error;
  } else {
    rtc = const_elem_grp_index(global_mesh, local_mesh, elem_global2local);
    if (rtc != RTC_NORMAL) goto error;

    rtc = const_elem_grp_item(global_mesh, local_mesh, elem_global2local);
    if (rtc != RTC_NORMAL) goto error;
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 * - - - - - - - - - */

/* Copies the surface-group count into the local mesh. */
static int const_surf_n_grp(const struct hecmwST_local_mesh *global_mesh,
                            struct hecmwST_local_mesh *local_mesh) {
  local_mesh->surf_group->n_grp = global_mesh->surf_group->n_grp;

  return RTC_NORMAL;
}

/* Shares the global surface-group name array with the local mesh
 * (shallow copy of the pointer, as for node/element group names). */
static int const_surf_grp_name(const struct hecmwST_local_mesh *global_mesh,
                               struct hecmwST_local_mesh *local_mesh) {
local_mesh->surf_group->grp_name = global_mesh->surf_group->grp_name;

  return RTC_NORMAL;
}

/* Builds grp_index for local surface groups by counting, per group, the
 * (element, surface) pairs whose element maps into this domain.
 * grp_item stores pairs, so only the element id at 2*j is inspected.
 * Returns RTC_NORMAL on success, RTC_ERROR on allocation failure. */
static int const_surf_grp_index(const struct hecmwST_local_mesh *global_mesh,
                                struct hecmwST_local_mesh *local_mesh,
                                const int *elem_global2local) {
  struct hecmwST_surf_grp *surf_group_global = global_mesh->surf_group;
  struct hecmwST_surf_grp *surf_group_local = local_mesh->surf_group;
  int elem;
  int counter;
  int i, j;

  surf_group_local->grp_index =
      (int *)HECMW_calloc(surf_group_local->n_grp + 1, sizeof(int));
  if (surf_group_local->grp_index == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (counter = 0, i = 0; i < surf_group_global->n_grp; i++) {
    for (j = surf_group_global->grp_index[i];
         j < surf_group_global->grp_index[i + 1]; j++) {
      elem = surf_group_global->grp_item[2 * j];  /* element id of the pair */
      if (elem_global2local[elem - 1]) counter++;
    }
    surf_group_local->grp_index[i + 1] = counter;
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Builds grp_item for local surface groups: each kept pair stores the
 * local element id and the (unchanged) surface number.  The allocation is
 * 2 ints per pair; the trailing assertion cross-checks against grp_index.
 * Returns RTC_NORMAL on success, RTC_ERROR on allocation failure. */
static int const_surf_grp_item(const struct hecmwST_local_mesh *global_mesh,
                               struct hecmwST_local_mesh *local_mesh,
                               const int *elem_global2local) {
  struct hecmwST_surf_grp *surf_group_global = global_mesh->surf_group;
  struct hecmwST_surf_grp *surf_group_local = local_mesh->surf_group;
  int elem, surf;
  int size;
  int counter;
  int i, j;

  size = sizeof(int) * surf_group_local->grp_index[surf_group_local->n_grp] * 2;
  surf_group_local->grp_item = (int *)HECMW_malloc(size);
  if (surf_group_local->grp_item == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }

  for (counter = 0, i = 0; i < surf_group_global->n_grp; i++) {
    for (j = surf_group_global->grp_index[i];
         j < surf_group_global->grp_index[i + 1]; j++) {
      elem = surf_group_global->grp_item[2 * j];
      surf = surf_group_global->grp_item[2 * j + 1];
      if (elem_global2local[elem - 1]) {
        surf_group_local->grp_item[2 * counter] = elem_global2local[elem - 1];
        surf_group_local->grp_item[2 * counter + 1] = surf;
        counter++;
      }
    }
    HECMW_assert(counter == surf_group_local->grp_index[i + 1]);
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/* Driver for local surface-group construction (count, names, index, items).
 * An empty global table just initializes an empty local one. */
static int const_surf_grp_info(const struct hecmwST_local_mesh *global_mesh,
                               struct hecmwST_local_mesh *local_mesh,
                               const int *elem_global2local) {
  int rtc;

  HECMW_assert(global_mesh);
  HECMW_assert(global_mesh->surf_group);
  HECMW_assert(local_mesh);
  HECMW_assert(local_mesh->surf_group);
  HECMW_assert(elem_global2local);

  if (global_mesh->surf_group->n_grp == 0) {
    init_struct_surf_grp(local_mesh);
    return RTC_NORMAL;
  }

  rtc = const_surf_n_grp(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_surf_grp_name(global_mesh, local_mesh);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_surf_grp_index(global_mesh, local_mesh, elem_global2local);
  if (rtc != RTC_NORMAL) goto error;

  rtc = const_surf_grp_item(global_mesh, local_mesh, elem_global2local);
  if (rtc != RTC_NORMAL) goto error;

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
 * - - - - - - - - - */

/* Copies the contact-pair count into the local mesh. */
static int const_contact_pair_n_pair(
    const struct hecmwST_local_mesh *global_mesh,
    struct hecmwST_local_mesh *local_mesh) {
  local_mesh->contact_pair->n_pair = global_mesh->contact_pair->n_pair;

  return RTC_NORMAL;
}

/* Shares the global contact-pair name array with the local mesh
 * (shallow pointer copy). */
static int const_contact_pair_name(const struct hecmwST_local_mesh *global_mesh,
                                   struct hecmwST_local_mesh *local_mesh) {
  local_mesh->contact_pair->name = global_mesh->contact_pair->name;

  return RTC_NORMAL;
}

/* Deep-copies the contact-pair type array; contact pairs are replicated,
 * not partitioned, so every domain gets the full table.
 * Returns RTC_NORMAL on success, RTC_ERROR on allocation failure. */
static int const_contact_pair_type(const struct hecmwST_local_mesh *global_mesh,
                                   struct hecmwST_local_mesh *local_mesh) {
  struct hecmwST_contact_pair *cpair_global = global_mesh->contact_pair;
  struct hecmwST_contact_pair *cpair_local = local_mesh->contact_pair;
  int i;

  cpair_local->type = (int *)HECMW_calloc(cpair_local->n_pair, sizeof(int));
  if (cpair_local->type == NULL) {
    HECMW_set_error(errno, "");
    goto error;
  }
  for (i = 0; i < cpair_global->n_pair; i++) {
    cpair_local->type[i] = cpair_global->type[i];
  }

  return RTC_NORMAL;

error:
  return RTC_ERROR;
}

static int const_contact_pair_slave_grp_id(
    const struct
hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh) { struct hecmwST_contact_pair *cpair_global = global_mesh->contact_pair; struct hecmwST_contact_pair *cpair_local = local_mesh->contact_pair; int i; cpair_local->slave_grp_id = (int *)HECMW_calloc(cpair_local->n_pair, sizeof(int)); if (cpair_local->slave_grp_id == NULL) { HECMW_set_error(errno, ""); goto error; } for (i = 0; i < cpair_global->n_pair; i++) { cpair_local->slave_grp_id[i] = cpair_global->slave_grp_id[i]; } return RTC_NORMAL; error: return RTC_ERROR; } static int const_contact_pair_master_grp_id( const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh) { struct hecmwST_contact_pair *cpair_global = global_mesh->contact_pair; struct hecmwST_contact_pair *cpair_local = local_mesh->contact_pair; int i; cpair_local->master_grp_id = (int *)HECMW_calloc(cpair_local->n_pair, sizeof(int)); if (cpair_local->master_grp_id == NULL) { HECMW_set_error(errno, ""); goto error; } for (i = 0; i < cpair_global->n_pair; i++) { cpair_local->master_grp_id[i] = cpair_global->master_grp_id[i]; } return RTC_NORMAL; error: return RTC_ERROR; } static int const_contact_pair_info(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh) { int rtc; HECMW_assert(global_mesh); HECMW_assert(global_mesh->contact_pair); HECMW_assert(local_mesh); HECMW_assert(local_mesh->contact_pair); if (global_mesh->contact_pair->n_pair == 0) { init_struct_contact_pair(local_mesh); return RTC_NORMAL; } rtc = const_contact_pair_n_pair(global_mesh, local_mesh); if (rtc != RTC_NORMAL) goto error; rtc = const_contact_pair_name(global_mesh, local_mesh); if (rtc != RTC_NORMAL) goto error; rtc = const_contact_pair_type(global_mesh, local_mesh); if (rtc != RTC_NORMAL) goto error; rtc = const_contact_pair_slave_grp_id(global_mesh, local_mesh); if (rtc != RTC_NORMAL) goto error; rtc = const_contact_pair_master_grp_id(global_mesh, local_mesh); if (rtc != RTC_NORMAL) goto error; return 
RTC_NORMAL; error: return RTC_ERROR; } /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - * - - - - - - - - - */ static int const_local_data(const struct hecmwST_local_mesh *global_mesh, struct hecmwST_local_mesh *local_mesh, const struct hecmw_part_cont_data *cont_data, const char *node_flag, const char *elem_flag, int *node_global2local, int *elem_global2local, int current_domain) { int *node_local2global = NULL; int *elem_local2global = NULL; int rtc, i; HECMW_log(HECMW_LOG_DEBUG, "Starting creation of local mesh data...\n"); rtc = set_node_global2local(global_mesh, local_mesh, node_global2local, node_flag, current_domain); if (rtc != RTC_NORMAL) goto error; node_local2global = (int *)HECMW_calloc(local_mesh->n_node, sizeof(int)); if (node_local2global == NULL) { HECMW_set_error(errno, ""); goto error; } if (is_spdup_available(global_mesh)) { rtc = set_node_local2global_mod(global_mesh, local_mesh, node_global2local, node_local2global, current_domain); } else { rtc = set_node_local2global(global_mesh, local_mesh, node_global2local, node_local2global); } if (rtc != RTC_NORMAL) goto error; rtc = set_elem_global2local(global_mesh, local_mesh, elem_global2local, elem_flag, current_domain); if (rtc != RTC_NORMAL) goto error; elem_local2global = (int *)HECMW_calloc(local_mesh->n_elem, sizeof(int)); if (elem_local2global == NULL) { HECMW_set_error(errno, ""); goto error; } if (is_spdup_available(global_mesh)) { rtc = set_elem_local2global_mod(global_mesh, local_mesh, elem_global2local, elem_local2global, current_domain); } else { rtc = set_elem_local2global(global_mesh, local_mesh, elem_global2local, elem_local2global); } if (rtc != RTC_NORMAL) goto error; rtc = const_global_info(global_mesh, local_mesh); if (rtc != RTC_NORMAL) goto error; rtc = const_node_info(global_mesh, local_mesh, node_local2global, node_flag, current_domain); if (rtc != RTC_NORMAL) goto error; rtc = const_elem_info(global_mesh, local_mesh, node_global2local, 
elem_global2local, elem_local2global, current_domain); if (rtc != RTC_NORMAL) goto error; rtc = const_comm_info(global_mesh, local_mesh, node_global2local, elem_global2local, current_domain); if (rtc != RTC_NORMAL) goto error; rtc = const_adapt_info(global_mesh, local_mesh); if (rtc != RTC_NORMAL) goto error; rtc = const_sect_info(global_mesh, local_mesh); if (rtc != RTC_NORMAL) goto error; rtc = const_mat_info(global_mesh, local_mesh); if (rtc != RTC_NORMAL) goto error; rtc = const_mpc_info(global_mesh, local_mesh, node_global2local); if (rtc != RTC_NORMAL) goto error; rtc = const_amp_info(global_mesh, local_mesh); if (rtc != RTC_NORMAL) goto error; rtc = const_node_grp_info(global_mesh, local_mesh, node_global2local, current_domain); if (rtc != RTC_NORMAL) goto error; rtc = const_elem_grp_info(global_mesh, local_mesh, elem_global2local, current_domain); if (rtc != RTC_NORMAL) goto error; rtc = const_surf_grp_info(global_mesh, local_mesh, elem_global2local); if (rtc != RTC_NORMAL) goto error; rtc = const_contact_pair_info(global_mesh, local_mesh); if (rtc != RTC_NORMAL) goto error; rtc = clear_node_global2local(global_mesh, local_mesh, node_global2local, current_domain); rtc = clear_elem_global2local(global_mesh, local_mesh, elem_global2local, current_domain); HECMW_free(node_local2global); HECMW_free(elem_local2global); HECMW_log(HECMW_LOG_DEBUG, "Creation of local mesh data done\n"); return RTC_NORMAL; error: HECMW_free(node_local2global); HECMW_free(elem_local2global); clean_struct_local_mesh(local_mesh); return RTC_ERROR; } /*================================================================================================== print UCD format data ==================================================================================================*/ static int print_ucd_entire_set_node_data( const struct hecmwST_local_mesh *global_mesh, struct hecmwST_result_data *result_data, const char *node_flag) { int size; int nn_item; int i; result_data->nn_component = 1; 
result_data->nn_dof = (int *)HECMW_malloc(sizeof(int) * result_data->nn_component); if (result_data->nn_dof == NULL) { HECMW_set_error(errno, ""); goto error; } result_data->nn_dof[0] = 1; result_data->node_label = (char **)HECMW_malloc(sizeof(char *) * result_data->nn_component); if (result_data->node_label == NULL) { HECMW_set_error(errno, ""); goto error; } else { for (i = 0; i < result_data->nn_component; i++) { result_data->node_label[i] = NULL; } } for (i = 0; i < result_data->nn_component; i++) { result_data->node_label[i] = (char *)HECMW_malloc(sizeof(char) * (HECMW_NAME_LEN + 1)); if (result_data->node_label[i] == NULL) { HECMW_set_error(errno, ""); goto error; } } strcpy(result_data->node_label[0], "rank_of_node"); for (nn_item = 0, i = 0; i < result_data->nn_component; i++) { nn_item += result_data->nn_dof[i]; } size = sizeof(double) * nn_item * global_mesh->n_node; result_data->node_val_item = (double *)HECMW_malloc(size); if (result_data->node_val_item == NULL) { HECMW_set_error(errno, ""); goto error; } switch (global_mesh->hecmw_flag_parttype) { case HECMW_FLAG_PARTTYPE_NODEBASED: for (i = 0; i < global_mesh->n_node; i++) { result_data->node_val_item[i] = (double)global_mesh->node_ID[2 * i + 1]; } break; case HECMW_FLAG_PARTTYPE_ELEMBASED: for (i = 0; i < global_mesh->n_node; i++) { if (EVAL_BIT(node_flag[i], OVERLAP)) { result_data->node_val_item[i] = (double)global_mesh->n_subdomain + 2.0; } else { result_data->node_val_item[i] = (double)global_mesh->node_ID[2 * i + 1]; } } break; default: HECMW_set_error(HECMW_PART_E_INVALID_PTYPE, "%d", global_mesh->hecmw_flag_parttype); goto error; } return RTC_NORMAL; error: free_struct_result_data(result_data); return RTC_ERROR; } /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - * - - - - - - - - - */ static int print_ucd_entire_set_elem_data( const struct hecmwST_local_mesh *global_mesh, struct hecmwST_result_data *result_data, const char *elem_flag) { int size; int ne_item; int 
i; result_data->ne_component = 1; result_data->ne_dof = (int *)HECMW_malloc(sizeof(int) * result_data->ne_component); if (result_data->ne_dof == NULL) { HECMW_set_error(errno, ""); goto error; } result_data->ne_dof[0] = 1; result_data->elem_label = (char **)HECMW_malloc(sizeof(char *) * result_data->ne_component); if (result_data->elem_label == NULL) { HECMW_set_error(errno, ""); goto error; } else { for (i = 0; i < result_data->ne_component; i++) { result_data->elem_label[i] = NULL; } } for (i = 0; i < result_data->ne_component; i++) { result_data->elem_label[i] = (char *)HECMW_malloc(sizeof(char) * (HECMW_NAME_LEN + 1)); if (result_data->elem_label[i] == NULL) { HECMW_set_error(errno, ""); goto error; } } strcpy(result_data->elem_label[0], "partitioning_image"); /* modify element information*/ for (i = 0; i < global_mesh->n_elem; i++) { switch (global_mesh->elem_type[i]) { case HECMW_ETYPE_SHT6: global_mesh->elem_type[i] = HECMW_ETYPE_SHT1; break; case HECMW_ETYPE_SHQ8: global_mesh->elem_type[i] = HECMW_ETYPE_SHQ1; break; case HECMW_ETYPE_BEM3: global_mesh->elem_type[i] = HECMW_ETYPE_ROD1; break; case HECMW_ETYPE_ROD31: global_mesh->elem_type[i] = HECMW_ETYPE_ROD1; break; } } for (ne_item = 0, i = 0; i < result_data->ne_component; i++) { ne_item += result_data->ne_dof[i]; } size = sizeof(double) * ne_item * global_mesh->n_elem; result_data->elem_val_item = (double *)HECMW_malloc(size); if (result_data->elem_val_item == NULL) { HECMW_set_error(errno, ""); goto error; } switch (global_mesh->hecmw_flag_parttype) { case HECMW_FLAG_PARTTYPE_NODEBASED: for (i = 0; i < global_mesh->n_elem; i++) { if (EVAL_BIT(elem_flag[i], OVERLAP)) { result_data->elem_val_item[i] = (double)global_mesh->n_subdomain + 2.0; } else { result_data->elem_val_item[i] = (double)global_mesh->elem_ID[2 * i + 1]; } } break; case HECMW_FLAG_PARTTYPE_ELEMBASED: for (i = 0; i < global_mesh->n_elem; i++) { result_data->elem_val_item[i] = (double)global_mesh->elem_ID[2 * i + 1]; } break; default: 
HECMW_set_error(HECMW_PART_E_INVALID_PTYPE, "%d", global_mesh->hecmw_flag_parttype); goto error; } return RTC_NORMAL; error: free_struct_result_data(result_data); return RTC_ERROR; } /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ static int print_ucd_entire(const struct hecmwST_local_mesh *global_mesh, const char *node_flag, const char *elem_flag, const char *ofname) { struct hecmwST_result_data *result_data; result_data = (struct hecmwST_result_data *)HECMW_malloc( sizeof(struct hecmwST_result_data)); if (result_data == NULL) { HECMW_set_error(errno, ""); goto error; } else { init_struct_result_data(result_data); } if (print_ucd_entire_set_node_data(global_mesh, result_data, node_flag)) { goto error; } if (print_ucd_entire_set_elem_data(global_mesh, result_data, elem_flag)) { goto error; } if (HECMW_ucd_legacy_print(global_mesh, result_data, ofname)) { goto error; } free_struct_result_data(result_data); return RTC_NORMAL; error: free_struct_result_data(result_data); return RTC_ERROR; } static int init_partition(struct hecmwST_local_mesh *global_mesh, struct hecmw_part_cont_data *cont_data) { HECMW_log(HECMW_LOG_DEBUG, "Starting initialization for partitioner..."); /* global_mesh->n_subdomain */ global_mesh->n_subdomain = cont_data->n_domain; /* global_mesh->hecmw_flag_parttype */ switch (cont_data->type) { case HECMW_PART_TYPE_NODE_BASED: /* for node-based partitioning */ global_mesh->hecmw_flag_parttype = HECMW_FLAG_PARTTYPE_NODEBASED; break; case HECMW_PART_TYPE_ELEMENT_BASED: /* for element-based partitioning */ global_mesh->hecmw_flag_parttype = HECMW_FLAG_PARTTYPE_ELEMBASED; break; default: HECMW_set_error(HECMW_PART_E_INVALID_PTYPE, "%d", cont_data->type); goto error; } /* global_mesh->hecmw_flag_partdepth */ global_mesh->hecmw_flag_partdepth = cont_data->depth; /* global_mesh->hecmw_flag_partcontact */ if (global_mesh->contact_pair->n_pair > 0) { switch (cont_data->contact) { case HECMW_PART_CONTACT_AGGREGATE: 
global_mesh->hecmw_flag_partcontact = HECMW_FLAG_PARTCONTACT_AGGREGATE; break; case HECMW_PART_CONTACT_DISTRIBUTE: global_mesh->hecmw_flag_partcontact = HECMW_FLAG_PARTCONTACT_DISTRIBUTE; break; case HECMW_PART_CONTACT_SIMPLE: global_mesh->hecmw_flag_partcontact = HECMW_FLAG_PARTCONTACT_SIMPLE; break; case HECMW_PART_CONTACT_DEFAULT: default: cont_data->contact = HECMW_PART_CONTACT_SIMPLE; global_mesh->hecmw_flag_partcontact = HECMW_FLAG_PARTCONTACT_SIMPLE; break; } } HECMW_log(HECMW_LOG_DEBUG, "Initialization for partitioner done"); return RTC_NORMAL; error: return RTC_ERROR; ; } /*================================================================================================== main function ==================================================================================================*/ extern struct hecmwST_local_mesh *HECMW_partition_inner( struct hecmwST_local_mesh *global_mesh, struct hecmw_part_cont_data *cont_data) { struct hecmwST_local_mesh *local_mesh = NULL; struct hecmw_ctrl_meshfiles *ofheader = NULL; char *node_flag = NULL; char *elem_flag = NULL; char *node_flag_neighbor = NULL; char *elem_flag_neighbor = NULL; int *node_global2local = NULL; int *elem_global2local = NULL; char ofname[HECMW_FILENAME_LEN + 1]; int *num_elem, *num_node, *num_ielem, *num_inode, *num_nbpe; int *sum_elem, *sum_node, *sum_ielem, *sum_inode, *sum_nbpe; int current_domain, nrank, iS, iE; int rtc; int i; int error_in_ompsection = 0; if (global_mesh == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'global_mesh\' is NULL"); goto error; } if (cont_data == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'cont_data\' is NULL"); goto error; } rtc = init_partition(global_mesh, cont_data); if (rtc != RTC_NORMAL) goto error; rtc = HECMW_part_init_log(global_mesh->n_subdomain); if (rtc != RTC_NORMAL) goto error; if (global_mesh->my_rank == 0) { rtc = HECMW_part_set_log_part_type(cont_data->type); if (rtc != RTC_NORMAL) goto error; rtc = 
HECMW_part_set_log_part_method(cont_data->method); if (rtc != RTC_NORMAL) goto error; rtc = HECMW_part_set_log_part_depth(cont_data->depth); if (rtc != RTC_NORMAL) goto error; rtc = HECMW_part_set_log_part_contact(cont_data->contact); if (rtc != RTC_NORMAL) goto error; rtc = HECMW_part_set_log_n_node_g(global_mesh->n_node); if (rtc != RTC_NORMAL) goto error; rtc = HECMW_part_set_log_n_elem_g(global_mesh->n_elem); if (rtc != RTC_NORMAL) goto error; } if (global_mesh->n_subdomain == 1) { current_domain = 0; if (global_mesh->my_rank == 0) { HECMW_log(HECMW_LOG_INFO, "Creating local mesh for domain #%d ...", current_domain); ofheader = HECMW_ctrl_get_meshfiles_header_sub( "part_out", global_mesh->n_subdomain, current_domain); if (ofheader == NULL) { HECMW_log(HECMW_LOG_ERROR, "not set output file header"); error_in_ompsection = 1; goto error; } if (ofheader->n_mesh == 0) { HECMW_log(HECMW_LOG_ERROR, "output file name is not set"); error_in_ompsection = 1; goto error; } get_dist_file_name(ofheader->meshfiles[0].filename, current_domain, ofname); HECMW_assert(ofname != NULL); HECMW_log(HECMW_LOG_DEBUG, "Starting writing local mesh for domain #%d...", current_domain); HECMW_put_dist_mesh(global_mesh, ofname); HECMW_log(HECMW_LOG_DEBUG, "Writing local mesh for domain #%d done", current_domain); rtc = HECMW_part_set_log_n_elem(0, global_mesh->n_elem); if (rtc != 0) goto error; rtc = HECMW_part_set_log_n_node(0, global_mesh->n_node); if (rtc != 0) goto error; rtc = HECMW_part_set_log_ne_internal(0, global_mesh->ne_internal); if (rtc != 0) goto error; rtc = HECMW_part_set_log_nn_internal(0, global_mesh->nn_internal); if (rtc != 0) goto error; rtc = HECMW_part_print_log(); if (rtc) goto error; } HECMW_part_finalize_log(); return global_mesh; } num_elem = (int *)HECMW_calloc(global_mesh->n_subdomain, sizeof(int)); if (num_elem == NULL) { HECMW_set_error(errno, ""); goto error; } num_node = (int *)HECMW_calloc(global_mesh->n_subdomain, sizeof(int)); if (num_node == NULL) { 
HECMW_set_error(errno, ""); goto error; } num_ielem = (int *)HECMW_calloc(global_mesh->n_subdomain, sizeof(int)); if (num_ielem == NULL) { HECMW_set_error(errno, ""); goto error; } num_inode = (int *)HECMW_calloc(global_mesh->n_subdomain, sizeof(int)); if (num_inode == NULL) { HECMW_set_error(errno, ""); goto error; } num_nbpe = (int *)HECMW_calloc(global_mesh->n_subdomain, sizeof(int)); if (num_nbpe == NULL) { HECMW_set_error(errno, ""); goto error; } sum_elem = (int *)HECMW_calloc(global_mesh->n_subdomain, sizeof(int)); if (sum_elem == NULL) { HECMW_set_error(errno, ""); goto error; } sum_node = (int *)HECMW_calloc(global_mesh->n_subdomain, sizeof(int)); if (sum_node == NULL) { HECMW_set_error(errno, ""); goto error; } sum_ielem = (int *)HECMW_calloc(global_mesh->n_subdomain, sizeof(int)); if (sum_ielem == NULL) { HECMW_set_error(errno, ""); goto error; } sum_inode = (int *)HECMW_calloc(global_mesh->n_subdomain, sizeof(int)); if (sum_inode == NULL) { HECMW_set_error(errno, ""); goto error; } sum_nbpe = (int *)HECMW_calloc(global_mesh->n_subdomain, sizeof(int)); if (sum_nbpe == NULL) { HECMW_set_error(errno, ""); goto error; } rtc = wnumbering(global_mesh, cont_data); if (rtc != RTC_NORMAL) goto error; /*K. 
Inagaki */ rtc = spdup_makelist_main(global_mesh); if (rtc != RTC_NORMAL) goto error; #ifdef _OPENMP #pragma omp parallel default(none), \ private(node_flag, elem_flag, local_mesh, nrank, iS, iE, i, \ current_domain, rtc, ofheader, ofname), \ private(node_global2local, elem_global2local, \ node_flag_neighbor, elem_flag_neighbor), \ shared(global_mesh, cont_data, num_elem, num_node, \ num_ielem, num_inode, num_nbpe, error_in_ompsection) { #endif /* _OPENMP */ node_flag = (char *)HECMW_calloc(global_mesh->n_node, sizeof(char)); if (node_flag == NULL) { HECMW_set_error(errno, ""); error_in_ompsection = 1; goto error_omp; } elem_flag = (char *)HECMW_calloc(global_mesh->n_elem, sizeof(char)); if (elem_flag == NULL) { HECMW_set_error(errno, ""); error_in_ompsection = 1; goto error_omp; } /*K. Inagaki */ node_global2local = (int *)HECMW_calloc(global_mesh->n_node, sizeof(int)); if (node_global2local == NULL) { HECMW_set_error(errno, ""); error_in_ompsection = 1; goto error_omp; } elem_global2local = (int *)HECMW_calloc(global_mesh->n_elem, sizeof(int)); if (elem_global2local == NULL) { HECMW_set_error(errno, ""); error_in_ompsection = 1; goto error_omp; } node_flag_neighbor = (char *)HECMW_malloc(sizeof(char) * global_mesh->n_node); if (node_flag_neighbor == NULL) { HECMW_set_error(errno, ""); error_in_ompsection = 1; goto error_omp; } elem_flag_neighbor = (char *)HECMW_malloc(sizeof(char) * global_mesh->n_elem); if (elem_flag_neighbor == NULL) { HECMW_set_error(errno, ""); error_in_ompsection = 1; goto error_omp; } memset(node_flag_neighbor, 0, sizeof(char) * global_mesh->n_node); memset(elem_flag_neighbor, 0, sizeof(char) * global_mesh->n_elem); local_mesh = HECMW_dist_alloc(); if (local_mesh == NULL) { error_in_ompsection = 1; goto error_omp; } nrank = global_mesh->n_subdomain / HECMW_comm_get_size(); iS = HECMW_comm_get_rank() * nrank; iE = iS + nrank; if (HECMW_comm_get_rank() == HECMW_comm_get_size() - 1) iE = global_mesh->n_subdomain; #ifdef _OPENMP #pragma omp for 
schedule(dynamic, 1), reduction(+ : error_in_ompsection) #endif for (i = iS; i < iE; i++) { if (error_in_ompsection) continue; current_domain = i; HECMW_log(HECMW_LOG_INFO, "Creating local mesh for domain #%d ...", current_domain); rtc = create_neighbor_info(global_mesh, local_mesh, node_flag, elem_flag, current_domain); if (rtc != RTC_NORMAL) { error_in_ompsection = 1; continue; } if (global_mesh->n_subdomain > 1) { rtc = create_comm_info(global_mesh, local_mesh, node_flag, elem_flag, node_flag_neighbor, elem_flag_neighbor, current_domain); if (rtc != RTC_NORMAL) { error_in_ompsection = 1; continue; } } rtc = const_local_data(global_mesh, local_mesh, cont_data, node_flag, elem_flag, node_global2local, elem_global2local, current_domain); if (rtc != RTC_NORMAL) { error_in_ompsection = 1; continue; } num_elem[i] = local_mesh->n_elem; num_node[i] = local_mesh->n_node; num_ielem[i] = local_mesh->ne_internal; num_inode[i] = local_mesh->nn_internal; num_nbpe[i] = local_mesh->n_neighbor_pe; ofheader = HECMW_ctrl_get_meshfiles_header_sub( "part_out", global_mesh->n_subdomain, current_domain); if (ofheader == NULL) { HECMW_log(HECMW_LOG_ERROR, "not set output file header"); error_in_ompsection = 1; continue; } if (ofheader->n_mesh == 0) { HECMW_log(HECMW_LOG_ERROR, "output file name is not set"); error_in_ompsection = 1; continue; } get_dist_file_name(ofheader->meshfiles[0].filename, current_domain, ofname); HECMW_assert(ofname != NULL); HECMW_log(HECMW_LOG_DEBUG, "Starting writing local mesh for domain #%d...", current_domain); HECMW_put_dist_mesh(local_mesh, ofname); HECMW_log(HECMW_LOG_DEBUG, "Writing local mesh for domain #%d done", current_domain); clean_struct_local_mesh(local_mesh); HECMW_ctrl_free_meshfiles(ofheader); ofheader = NULL; if (is_spdup_available(global_mesh)) { /*K. 
Inagaki */ spdup_clear_IEB(node_flag, elem_flag, current_domain); } else { int j; for (j = 0; j < global_mesh->n_node; j++) { CLEAR_IEB(node_flag[j]); } for (j = 0; j < global_mesh->n_elem; j++) { CLEAR_IEB(elem_flag[j]); } } } #ifdef _OPENMP if (error_in_ompsection) goto error_omp; #pragma omp single #endif if (cont_data->is_print_ucd == 1) { if (global_mesh->my_rank == 0) { print_ucd_entire(global_mesh, node_flag, elem_flag, cont_data->ucd_file_name); } } error_omp: HECMW_dist_free(local_mesh); HECMW_free(node_flag); HECMW_free(elem_flag); /*K. Inagaki */ HECMW_free(node_global2local); HECMW_free(elem_global2local); HECMW_free(node_flag_neighbor); HECMW_free(elem_flag_neighbor); #ifdef _OPENMP } /* omp end parallel */ if (error_in_ompsection) goto error; #endif rtc = HECMW_Allreduce(num_elem, sum_elem, global_mesh->n_subdomain, HECMW_INT, HECMW_SUM, HECMW_comm_get_comm()); if (rtc != 0) goto error; rtc = HECMW_Allreduce(num_node, sum_node, global_mesh->n_subdomain, HECMW_INT, HECMW_SUM, HECMW_comm_get_comm()); if (rtc != 0) goto error; rtc = HECMW_Allreduce(num_ielem, sum_ielem, global_mesh->n_subdomain, HECMW_INT, HECMW_SUM, HECMW_comm_get_comm()); if (rtc != 0) goto error; rtc = HECMW_Allreduce(num_inode, sum_inode, global_mesh->n_subdomain, HECMW_INT, HECMW_SUM, HECMW_comm_get_comm()); if (rtc != 0) goto error; rtc = HECMW_Allreduce(num_nbpe, sum_nbpe, global_mesh->n_subdomain, HECMW_INT, HECMW_SUM, HECMW_comm_get_comm()); if (rtc != 0) goto error; if (global_mesh->my_rank == 0) { for (i = 0; i < global_mesh->n_subdomain; i++) { rtc = HECMW_part_set_log_n_elem(i, sum_elem[i]); if (rtc != 0) goto error; rtc = HECMW_part_set_log_n_node(i, sum_node[i]); if (rtc != 0) goto error; rtc = HECMW_part_set_log_ne_internal(i, sum_ielem[i]); if (rtc != 0) goto error; rtc = HECMW_part_set_log_nn_internal(i, sum_inode[i]); if (rtc != 0) goto error; rtc = HECMW_part_set_log_n_neighbor_pe(i, sum_nbpe[i]); if (rtc != 0) goto error; } rtc = HECMW_part_print_log(); if (rtc) goto 
error; } HECMW_part_finalize_log(); HECMW_free(num_elem); HECMW_free(num_node); HECMW_free(num_ielem); HECMW_free(num_inode); HECMW_free(num_nbpe); HECMW_free(sum_elem); HECMW_free(sum_node); HECMW_free(sum_ielem); HECMW_free(sum_inode); HECMW_free(sum_nbpe); /*K. Inagaki */ spdup_freelist(global_mesh); return global_mesh; error: HECMW_free(node_flag); HECMW_free(elem_flag); HECMW_free(num_elem); HECMW_free(num_node); HECMW_free(num_ielem); HECMW_free(num_inode); HECMW_free(num_nbpe); HECMW_free(sum_elem); HECMW_free(sum_node); HECMW_free(sum_ielem); HECMW_free(sum_inode); HECMW_free(sum_nbpe); HECMW_dist_free(local_mesh); if (ofheader) { HECMW_ctrl_free_meshfiles(ofheader); } HECMW_part_finalize_log(); return NULL; } extern struct hecmwST_local_mesh *HECMW_partition( struct hecmwST_local_mesh *global_mesh) { struct hecmwST_local_mesh *local_mesh; struct hecmw_part_cont_data *cont_data; HECMW_log(HECMW_LOG_INFO, "Starting domain decomposition...\n"); if (global_mesh == NULL) { HECMW_set_error(HECMW_PART_E_INV_ARG, "\'global_mesh\' is NULL"); goto error; } cont_data = HECMW_part_get_control(global_mesh); if (cont_data == NULL) goto error; local_mesh = HECMW_partition_inner(global_mesh, cont_data); if (local_mesh == NULL) goto error; HECMW_part_free_control(cont_data); HECMW_log(HECMW_LOG_INFO, "Domain decomposition done\n"); return local_mesh; error: return NULL; }
/* ===================== nbody_mkl.c ===================== */
#include <stdlib.h>
#include <stdio.h>
#include <mkl.h>
#include <mkl_extensions.h>
#include <string.h>
#include <vec.h>
#include "nbody.h"
#include "nbody_mkl.h"

/** Computes Sum(G * pm / r ** 2 * (dx / r)).
 *
 * Diagonal elements are not counted in the sum.
 *
 * dx, pm, r are n*n pairwise matrices; tmp1 is n*n scratch space.
 * output is first used as n*n scratch, then overwritten so that its
 * first n entries hold the per-body force components (row sums).
 *
 * NOTE(review): vdMuli appears to be a scalar-multiply helper from
 * mkl_extensions.h (non-standard MKL) -- confirm its semantics there.
 */
void compute_force(MKL_INT n, double *dx, double *pm, double *r, double *tmp1,
                   double *output) {
  MKL_INT size = n * n;
  /* tmp1 = G * pm */
  vdMuli(size, pm, G, tmp1);
  /* output = r ** 2 (scratch use of output) */
  vdPowx(size, r, 2.0, output);
  /* tmp1 = G * pm / r ** 2 */
  vdDiv(size, tmp1, output, tmp1);
  /* output = dx / r (direction component, scratch again) */
  vdDiv(size, dx, r, output);
  /* tmp1 = G * pm / r ** 2 * (dx / r) -- pairwise force contributions */
  vdMul(size, tmp1, output, tmp1);
  /* Reuse the first n slots of output as the reduction result. */
  memset(output, 0, sizeof(double) * n);
#pragma omp parallel for
  for (MKL_INT i = 0; i < n; i++) {
    double sum = 0.0;
    for (MKL_INT j = 0; j < n; j++) {
      // Ignore diagonal elements.
      if (i != j) {
        // Causes some imprecision compared to reference?
        sum += tmp1[i*n + j];
      }
    }
    output[i] += sum;
  }
}

/** Advances the simulation by one time step (dt from nbody.h, presumably).
 *
 * For each axis: v += (F / m) * dt, then position += v * dt.
 * set_delta/set_pm (declared elsewhere) fill the pairwise n*n matrices
 * dx/dy/dz and pm from the current positions and masses -- TODO confirm.
 * tmp1 and tmp2 are clobbered; dx/dy/dz, pm and r stay valid across the
 * three per-axis compute_force calls, which is why r is built only once.
 */
void move(MKL_INT n, double *m, double *x, double *y, double *z,
    double *vx, double *vy, double *vz,
    // Temporaries that have n * n space.
    double *dx, double *dy, double *dz,
    double *pm, double *r, double *tmp1, double *tmp2) {

  set_delta(n, x, dx);
  set_delta(n, y, dy);
  set_delta(n, z, dz);
  set_pm(n, m, pm);

  MKL_INT size = n * n;

  // r = sqrt(dx**2 + dy**2 + dz**2)
  vdPowx(size, dx, 2.0, tmp1);
  vdPowx(size, dy, 2.0, tmp2);
  vdAdd(size, tmp1, tmp2, tmp1);
  vdPowx(size, dz, 2.0, tmp2);
  vdAdd(size, tmp1, tmp2, tmp1);
  vdSqrt(size, tmp1, r);

  /* X axis: tmp2 <- force, tmp1 <- acceleration * dt, integrate v then x. */
  compute_force(n, dx, pm, r, tmp1, tmp2);
  vdDiv(n, tmp2, m, tmp1);
  vdMuli(n, tmp1, dt, tmp1);
  vdAdd(n, vx, tmp1, vx);
  vdMuli(n, vx, dt, tmp1);
  vdAdd(n, x, tmp1, x);

  /* Y axis. */
  compute_force(n, dy, pm, r, tmp1, tmp2);
  vdDiv(n, tmp2, m, tmp1);
  vdMuli(n, tmp1, dt, tmp1);
  vdAdd(n, vy, tmp1, vy);
  vdMuli(n, vy, dt, tmp1);
  vdAdd(n, y, tmp1, y);

  /* Z axis. */
  compute_force(n, dz, pm, r, tmp1, tmp2);
  vdDiv(n, tmp2, m, tmp1);
  vdMuli(n, tmp1, dt, tmp1);
  vdAdd(n, vz, tmp1, vz);
  vdMuli(n, vz, dt, tmp1);
  vdAdd(n, z, tmp1, z);
}

/** Driver: runs `iterations` steps of the MKL n-body simulation.
 *
 * Allocates the seven n*n temporaries once up front and reuses them for
 * every step. NOTE(review): the vec_t buffers are never freed here --
 * acceptable if the process exits afterwards, but verify against vec.h.
 */
void run_mkl(int iterations, MKL_INT n, double *m,
    double *x, double *y, double *z,
    double *vx, double *vy, double *vz) {

  vec_t dx = new_vec(n * n, 0);
  vec_t dy = new_vec(n * n, 0);
  vec_t dz = new_vec(n * n, 0);
  vec_t pm = new_vec(n * n, 0);
  vec_t r = new_vec(n * n, 0);
  vec_t tmp1 = new_vec(n * n, 0);
  vec_t tmp2 = new_vec(n * n, 0);

  for (int i = 0; i < iterations; i++) {
    printf("iteration %d\n", i);
    move(n, m, x, y, z, vx, vy, vz,
        dx.data, dy.data, dz.data, pm.data, r.data, tmp1.data, tmp2.data);
  }
}
morphology.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y % % MM MM O O R R P P H H O O L O O G Y Y % % M M M O O RRRR PPPP HHHHH O O L O O G GGG Y % % M M O O R R P H H O O L O O G G Y % % M M OOO R R P H H OOO LLLLL OOO GGG Y % % % % % % MagickCore Morphology Methods % % % % Software Design % % Anthony Thyssen % % January 2010 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Morphology is the application of various kernels, of any size or shape, to an % image in various ways (typically binary, but not always). % % Convolution (weighted sum or average) is just one specific type of % morphology. Just one that is very common for image bluring and sharpening % effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring. % % This module provides not only a general morphology function, and the ability % to apply more advanced or iterative morphologies, but also functions for the % generation of many different types of kernel arrays from user supplied % arguments. Prehaps even the generation of a kernel from a small image. */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/linked-list.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor-private.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/prepress.h" #include "MagickCore/quantize.h" #include "MagickCore/resource_.h" #include "MagickCore/registry.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" /* Other global definitions used by module. 
*/

/* In-place update helpers: keep the running minimum/maximum of kernel
   values while scanning.  Both arguments are evaluated more than once. */
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)

/* Integer Factorial Function - for a Binomial kernel */
#if 1
/* Iterative n! -- the loop body is intentionally empty; all the work is
   done in the for-clauses (f accumulates the product 2*3*...*n). */
static inline size_t fact(size_t n)
{
  size_t f,l;
  for(f=1, l=2; l <= n; f=f*l, l++);
  return(f);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif


/* Currently these are only internal to this module */
static void
  CalcKernelMetaData(KernelInfo *),
  ExpandMirrorKernelInfo(KernelInfo *),
  ExpandRotateKernelInfo(KernelInfo *, const double),
  RotateKernelInfo(KernelInfo *, double);

/* Quick function to find last kernel in a kernel list.
   Walks the singly linked 'next' chain; kernel must be non-NULL. */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  while (kernel->next != (KernelInfo *) NULL)
    kernel=kernel->next;
  return(kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A c q u i r e   K e r n e l   I n f o                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireKernelInfo() takes the given string (generally supplied by the
%  user) and converts it into a Morphology/Convolution Kernel.  This allows
%  users to specify a kernel from a number of pre-defined kernels, or to fully
%  specify their own kernel for a specific Convolution or Morphology
%  Operation.
%
%  The kernel so generated can be any rectangular array of floating point
%  values (doubles) with the 'control point' or 'pixel being affected'
%  anywhere within that array of values.
%
%  Previously IM was restricted to a square of odd size using the exact
%  center as origin, this is no longer the case, and any rectangular kernel
%  with any value being declared the origin. This in turn allows the use of
%  highly asymmetrical kernels.
% % The floating point values in the kernel can also include a special value % known as 'nan' or 'not a number' to indicate that this value is not part % of the kernel array. This allows you to shaped the kernel within its % rectangular area. That is 'nan' values provide a 'mask' for the kernel % shape. However at least one non-nan value must be provided for correct % working of a kernel. % % The returned kernel should be freed using the DestroyKernelInfo() when you % are finished with it. Do not free this memory yourself. % % Input kernel defintion strings can consist of any of three types. % % "name:args[[@><]" % Select from one of the built in kernels, using the name and % geometry arguments supplied. See AcquireKernelBuiltIn() % % "WxH[+X+Y][@><]:num, num, num ..." % a kernel of size W by H, with W*H floating point numbers following. % the 'center' can be optionally be defined at +X+Y (such that +0+0 % is top left corner). If not defined the pixel in the center, for % odd sizes, or to the immediate top or left of center for even sizes % is automatically selected. % % "num, num, num, num, ..." % list of floating point numbers defining an 'old style' odd sized % square kernel. At least 9 values should be provided for a 3x3 % square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc. % Values can be space or comma separated. This is not recommended. % % You can define a 'list of kernels' which can be used by some morphology % operators A list is defined as a semi-colon separated list kernels. % % " kernel ; kernel ; kernel ; " % % Any extra ';' characters, at start, end or between kernel defintions are % simply ignored. % % The special flags will expand a single kernel, into a list of rotated % kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree % cyclic rotations, while a '>' will generate a list of 90-degree rotations. 
% The '<' also exands using 90-degree rotates, but giving a 180-degree % reflected kernel before the +/- 90-degree rotations, which can be important % for Thinning operations. % % Note that 'name' kernels will start with an alphabetic character while the % new kernel specification has a ':' character in its specification string. % If neither is the case, it is assumed an old style of a simple list of % numbers generating a odd-sized square kernel has been given. % % The format of the AcquireKernal method is: % % KernelInfo *AcquireKernelInfo(const char *kernel_string) % % A description of each parameter follows: % % o kernel_string: the Morphology/Convolution kernel wanted. % */ /* This was separated so that it could be used as a separate ** array input handling function, such as for -color-matrix */ static KernelInfo *ParseKernelArray(const char *kernel_string) { KernelInfo *kernel; char token[MagickPathExtent]; const char *p, *end; register ssize_t i; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ MagickStatusType flags; GeometryInfo args; kernel=(KernelInfo *) AcquireQuantumMemory(1,sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) memset(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = kernel->positive_range = 0.0; kernel->type = UserDefinedKernel; kernel->next = (KernelInfo *) NULL; kernel->signature=MagickCoreSignature; if (kernel_string == (const char *) NULL) return(kernel); /* find end of this specific kernel definition string */ end = strchr(kernel_string, ';'); if ( end == (char *) NULL ) end = strchr(kernel_string, '\0'); /* clear flags - for Expanding kernel lists thorugh rotations */ flags = NoValue; /* Has a ':' in argument - New user kernel specification FUTURE: this split on ':' could be done by StringToken() */ p = strchr(kernel_string, ':'); if ( p != (char *) NULL && p < end) { /* ParseGeometry() needs the geometry separated! 
-- Arrgghh */ memcpy(token, kernel_string, (size_t) (p-kernel_string)); token[p-kernel_string] = '\0'; SetGeometryInfo(&args); flags = ParseGeometry(token, &args); /* Size handling and checks of geometry settings */ if ( (flags & WidthValue) == 0 ) /* if no width then */ args.rho = args.sigma; /* then width = height */ if ( args.rho < 1.0 ) /* if width too small */ args.rho = 1.0; /* then width = 1 */ if ( args.sigma < 1.0 ) /* if height too small */ args.sigma = args.rho; /* then height = width */ kernel->width = (size_t)args.rho; kernel->height = (size_t)args.sigma; /* Offset Handling and Checks */ if ( args.xi < 0.0 || args.psi < 0.0 ) return(DestroyKernelInfo(kernel)); kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi : (ssize_t) (kernel->width-1)/2; kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi : (ssize_t) (kernel->height-1)/2; if ( kernel->x >= (ssize_t) kernel->width || kernel->y >= (ssize_t) kernel->height ) return(DestroyKernelInfo(kernel)); p++; /* advance beyond the ':' */ } else { /* ELSE - Old old specification, forming odd-square kernel */ /* count up number of values given */ p=(const char *) kernel_string; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\'')) p++; /* ignore "'" chars for convolve filter usage - Cristy */ for (i=0; p < end; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); } /* set the size of the kernel - old sized square */ kernel->width = kernel->height= (size_t) sqrt((double) i+1.0); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; p=(const char *) kernel_string; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\'')) p++; /* ignore "'" chars for convolve filter usage - Cristy */ } /* Read in the kernel values from rest of input string argument */ kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory( kernel->width,kernel->height*sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) 
return(DestroyKernelInfo(kernel)); kernel->minimum=MagickMaximumValue; kernel->maximum=(-MagickMaximumValue); kernel->negative_range = kernel->positive_range = 0.0; for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); if ( LocaleCompare("nan",token) == 0 || LocaleCompare("-",token) == 0 ) { kernel->values[i] = nan; /* this value is not part of neighbourhood */ } else { kernel->values[i] = StringToDouble(token,(char **) NULL); ( kernel->values[i] < 0) ? ( kernel->negative_range += kernel->values[i] ) : ( kernel->positive_range += kernel->values[i] ); Minimize(kernel->minimum, kernel->values[i]); Maximize(kernel->maximum, kernel->values[i]); } } /* sanity check -- no more values in kernel definition */ GetNextToken(p,&p,MagickPathExtent,token); if ( *token != '\0' && *token != ';' && *token != '\'' ) return(DestroyKernelInfo(kernel)); #if 0 /* this was the old method of handling a incomplete kernel */ if ( i < (ssize_t) (kernel->width*kernel->height) ) { Minimize(kernel->minimum, kernel->values[i]); Maximize(kernel->maximum, kernel->values[i]); for ( ; i < (ssize_t) (kernel->width*kernel->height); i++) kernel->values[i]=0.0; } #else /* Number of values for kernel was not enough - Report Error */ if ( i < (ssize_t) (kernel->width*kernel->height) ) return(DestroyKernelInfo(kernel)); #endif /* check that we recieved at least one real (non-nan) value! 
*/ if (kernel->minimum == MagickMaximumValue) return(DestroyKernelInfo(kernel)); if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */ ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */ else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */ else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */ ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */ return(kernel); } static KernelInfo *ParseKernelName(const char *kernel_string, ExceptionInfo *exception) { char token[MagickPathExtent]; const char *p, *end; GeometryInfo args; KernelInfo *kernel; MagickStatusType flags; ssize_t type; /* Parse special 'named' kernel */ GetNextToken(kernel_string,&p,MagickPathExtent,token); type=ParseCommandOption(MagickKernelOptions,MagickFalse,token); if ( type < 0 || type == UserDefinedKernel ) return((KernelInfo *) NULL); /* not a valid named kernel */ while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';')) p++; end = strchr(p, ';'); /* end of this kernel defintion */ if ( end == (char *) NULL ) end = strchr(p, '\0'); /* ParseGeometry() needs the geometry separated! 
-- Arrgghh */ memcpy(token, p, (size_t) (end-p)); token[end-p] = '\0'; SetGeometryInfo(&args); flags = ParseGeometry(token, &args); #if 0 /* For Debugging Geometry Input */ (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n", flags, args.rho, args.sigma, args.xi, args.psi ); #endif /* special handling of missing values in input string */ switch( type ) { /* Shape Kernel Defaults */ case UnityKernel: if ( (flags & WidthValue) == 0 ) args.rho = 1.0; /* Default scale = 1.0, zero is valid */ break; case SquareKernel: case DiamondKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: if ( (flags & HeightValue) == 0 ) args.sigma = 1.0; /* Default scale = 1.0, zero is valid */ break; case RingKernel: if ( (flags & XValue) == 0 ) args.xi = 1.0; /* Default scale = 1.0, zero is valid */ break; case RectangleKernel: /* Rectangle - set size defaults */ if ( (flags & WidthValue) == 0 ) /* if no width then */ args.rho = args.sigma; /* then width = height */ if ( args.rho < 1.0 ) /* if width too small */ args.rho = 3; /* then width = 3 */ if ( args.sigma < 1.0 ) /* if height too small */ args.sigma = args.rho; /* then height = width */ if ( (flags & XValue) == 0 ) /* center offset if not defined */ args.xi = (double)(((ssize_t)args.rho-1)/2); if ( (flags & YValue) == 0 ) args.psi = (double)(((ssize_t)args.sigma-1)/2); break; /* Distance Kernel Defaults */ case ChebyshevKernel: case ManhattanKernel: case OctagonalKernel: case EuclideanKernel: if ( (flags & HeightValue) == 0 ) /* no distance scale */ args.sigma = 100.0; /* default distance scaling */ else if ( (flags & AspectValue ) != 0 ) /* '!' 
flag */ args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */ else if ( (flags & PercentValue ) != 0 ) /* '%' flag */ args.sigma *= QuantumRange/100.0; /* percentage of color range */ break; default: break; } kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception); if ( kernel == (KernelInfo *) NULL ) return(kernel); /* global expand to rotated kernel list - only for single kernels */ if ( kernel->next == (KernelInfo *) NULL ) { if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 45.0); else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 90.0); else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */ ExpandMirrorKernelInfo(kernel); } return(kernel); } MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string, ExceptionInfo *exception) { KernelInfo *kernel, *new_kernel; char *kernel_cache, token[MagickPathExtent]; const char *p; if (kernel_string == (const char *) NULL) return(ParseKernelArray(kernel_string)); p=kernel_string; kernel_cache=(char *) NULL; if (*kernel_string == '@') { kernel_cache=FileToString(kernel_string+1,~0UL,exception); if (kernel_cache == (char *) NULL) return((KernelInfo *) NULL); p=(const char *) kernel_cache; } kernel=NULL; while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0') { /* ignore extra or multiple ';' kernel separators */ if (*token != ';') { /* tokens starting with alpha is a Named kernel */ if (isalpha((int) ((unsigned char) *token)) != 0) new_kernel=ParseKernelName(p,exception); else /* otherwise a user defined kernel array */ new_kernel=ParseKernelArray(p); /* Error handling -- this is not proper error handling! 
*/ if (new_kernel == (KernelInfo *) NULL) { if (kernel != (KernelInfo *) NULL) kernel=DestroyKernelInfo(kernel); return((KernelInfo *) NULL); } /* initialise or append the kernel list */ if (kernel == (KernelInfo *) NULL) kernel=new_kernel; else LastKernelInfo(kernel)->next=new_kernel; } /* look for the next kernel in list */ p=strchr(p,';'); if (p == (char *) NULL) break; p++; } if (kernel_cache != (char *) NULL) kernel_cache=DestroyString(kernel_cache); return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e K e r n e l B u i l t I n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireKernelBuiltIn() returned one of the 'named' built-in types of % kernels used for special purposes such as gaussian blurring, skeleton % pruning, and edge distance determination. % % They take a KernelType, and a set of geometry style arguments, which were % typically decoded from a user supplied string, or from a more complex % Morphology Method that was requested. % % The format of the AcquireKernalBuiltIn method is: % % KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, % const GeometryInfo args) % % A description of each parameter follows: % % o type: the pre-defined type of kernel wanted % % o args: arguments defining or modifying the kernel % % Convolution Kernels % % Unity % The a No-Op or Scaling single element kernel. % % Gaussian:{radius},{sigma} % Generate a two-dimensional gaussian kernel, as used by -gaussian. % The sigma for the curve is required. The resulting kernel is % normalized, % % If 'sigma' is zero, you get a single pixel on a field of zeros. % % NOTE: that the 'radius' is optional, but if provided can limit (clip) % the final size of the resulting kernel to a square 2*radius+1 in size. % The radius should be at least 2 times that of the sigma value, or % sever clipping and aliasing may result. 
If not given or set to 0 the % radius will be determined so as to produce the best minimal error % result, which is usally much larger than is normally needed. % % LoG:{radius},{sigma} % "Laplacian of a Gaussian" or "Mexician Hat" Kernel. % The supposed ideal edge detection, zero-summing kernel. % % An alturnative to this kernel is to use a "DoG" with a sigma ratio of % approx 1.6 (according to wikipedia). % % DoG:{radius},{sigma1},{sigma2} % "Difference of Gaussians" Kernel. % As "Gaussian" but with a gaussian produced by 'sigma2' subtracted % from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1. % The result is a zero-summing kernel. % % Blur:{radius},{sigma}[,{angle}] % Generates a 1 dimensional or linear gaussian blur, at the angle given % (current restricted to orthogonal angles). If a 'radius' is given the % kernel is clipped to a width of 2*radius+1. Kernel can be rotated % by a 90 degree angle. % % If 'sigma' is zero, you get a single pixel on a field of zeros. % % Note that two convolutions with two "Blur" kernels perpendicular to % each other, is equivalent to a far larger "Gaussian" kernel with the % same sigma value, However it is much faster to apply. This is how the % "-blur" operator actually works. % % Comet:{width},{sigma},{angle} % Blur in one direction only, much like how a bright object leaves % a comet like trail. The Kernel is actually half a gaussian curve, % Adding two such blurs in opposite directions produces a Blur Kernel. % Angle can be rotated in multiples of 90 degrees. % % Note that the first argument is the width of the kernel and not the % radius of the kernel. % % Binomial:[{radius}] % Generate a discrete kernel using a 2 dimentional Pascel's Triangle % of values. Used for special forma of image filters. % % # Still to be implemented... % # % # Filter2D % # Filter1D % # Set kernel values using a resize filter, and given scale (sigma) % # Cylindrical or Linear. Is this possible with an image? 
% # % % Named Constant Convolution Kernels % % All these are unscaled, zero-summing kernels by default. As such for % non-HDRI version of ImageMagick some form of normalization, user scaling, % and biasing the results is recommended, to prevent the resulting image % being 'clipped'. % % The 3x3 kernels (most of these) can be circularly rotated in multiples of % 45 degrees to generate the 8 angled varients of each of the kernels. % % Laplacian:{type} % Discrete Lapacian Kernels, (without normalization) % Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood) % Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood) % Type 2 : 3x3 with center:4 edge:1 corner:-2 % Type 3 : 3x3 with center:4 edge:-2 corner:1 % Type 5 : 5x5 laplacian % Type 7 : 7x7 laplacian % Type 15 : 5x5 LoG (sigma approx 1.4) % Type 19 : 9x9 LoG (sigma approx 1.4) % % Sobel:{angle} % Sobel 'Edge' convolution kernel (3x3) % | -1, 0, 1 | % | -2, 0,-2 | % | -1, 0, 1 | % % Roberts:{angle} % Roberts convolution kernel (3x3) % | 0, 0, 0 | % | -1, 1, 0 | % | 0, 0, 0 | % % Prewitt:{angle} % Prewitt Edge convolution kernel (3x3) % | -1, 0, 1 | % | -1, 0, 1 | % | -1, 0, 1 | % % Compass:{angle} % Prewitt's "Compass" convolution kernel (3x3) % | -1, 1, 1 | % | -1,-2, 1 | % | -1, 1, 1 | % % Kirsch:{angle} % Kirsch's "Compass" convolution kernel (3x3) % | -3,-3, 5 | % | -3, 0, 5 | % | -3,-3, 5 | % % FreiChen:{angle} % Frei-Chen Edge Detector is based on a kernel that is similar to % the Sobel Kernel, but is designed to be isotropic. That is it takes % into account the distance of the diagonal in the kernel. % % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | % | 1, 0, -1 | % % FreiChen:{type},{angle} % % Frei-Chen Pre-weighted kernels... % % Type 0: default un-nomalized version shown above. % % Type 1: Orthogonal Kernel (same as type 11 below) % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 2: Diagonal form of Kernel... 
% | 1, sqrt(2), 0 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 0, -sqrt(2) -1 | % % However this kernel is als at the heart of the FreiChen Edge Detection % Process which uses a set of 9 specially weighted kernel. These 9 % kernels not be normalized, but directly applied to the image. The % results is then added together, to produce the intensity of an edge in % a specific direction. The square root of the pixel value can then be % taken as the cosine of the edge, and at least 2 such runs at 90 degrees % from each other, both the direction and the strength of the edge can be % determined. % % Type 10: All 9 of the following pre-weighted kernels... % % Type 11: | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 12: | 1, sqrt(2), 1 | % | 0, 0, 0 | / 2*sqrt(2) % | 1, sqrt(2), 1 | % % Type 13: | sqrt(2), -1, 0 | % | -1, 0, 1 | / 2*sqrt(2) % | 0, 1, -sqrt(2) | % % Type 14: | 0, 1, -sqrt(2) | % | -1, 0, 1 | / 2*sqrt(2) % | sqrt(2), -1, 0 | % % Type 15: | 0, -1, 0 | % | 1, 0, 1 | / 2 % | 0, -1, 0 | % % Type 16: | 1, 0, -1 | % | 0, 0, 0 | / 2 % | -1, 0, 1 | % % Type 17: | 1, -2, 1 | % | -2, 4, -2 | / 6 % | -1, -2, 1 | % % Type 18: | -2, 1, -2 | % | 1, 4, 1 | / 6 % | -2, 1, -2 | % % Type 19: | 1, 1, 1 | % | 1, 1, 1 | / 3 % | 1, 1, 1 | % % The first 4 are for edge detection, the next 4 are for line detection % and the last is to add a average component to the results. % % Using a special type of '-1' will return all 9 pre-weighted kernels % as a multi-kernel list, so that you can use them directly (without % normalization) with the special "-set option:morphology:compose Plus" % setting to apply the full FreiChen Edge Detection Technique. % % If 'type' is large it will be taken to be an actual rotation angle for % the default FreiChen (type 0) kernel. As such FreiChen:45 will look % like a Sobel:45 but with 'sqrt(2)' instead of '2' values. 
% % WARNING: The above was layed out as per % http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf % But rotated 90 degrees so direction is from left rather than the top. % I have yet to find any secondary confirmation of the above. The only % other source found was actual source code at % http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf % Neigher paper defineds the kernels in a way that looks locical or % correct when taken as a whole. % % Boolean Kernels % % Diamond:[{radius}[,{scale}]] % Generate a diamond shaped kernel with given radius to the points. % Kernel size will again be radius*2+1 square and defaults to radius 1, % generating a 3x3 kernel that is slightly larger than a square. % % Square:[{radius}[,{scale}]] % Generate a square shaped kernel of size radius*2+1, and defaulting % to a 3x3 (radius 1). % % Octagon:[{radius}[,{scale}]] % Generate octagonal shaped kernel of given radius and constant scale. % Default radius is 3 producing a 7x7 kernel. A radius of 1 will result % in "Diamond" kernel. % % Disk:[{radius}[,{scale}]] % Generate a binary disk, thresholded at the radius given, the radius % may be a float-point value. Final Kernel size is floor(radius)*2+1 % square. A radius of 5.3 is the default. % % NOTE: That a low radii Disk kernels produce the same results as % many of the previously defined kernels, but differ greatly at larger % radii. Here is a table of equivalences... % "Disk:1" => "Diamond", "Octagon:1", or "Cross:1" % "Disk:1.5" => "Square" % "Disk:2" => "Diamond:2" % "Disk:2.5" => "Octagon" % "Disk:2.9" => "Square:2" % "Disk:3.5" => "Octagon:3" % "Disk:4.5" => "Octagon:4" % "Disk:5.4" => "Octagon:5" % "Disk:6.4" => "Octagon:6" % All other Disk shapes are unique to this kernel, but because a "Disk" % is more circular when using a larger radius, using a larger radius is % preferred over iterating the morphological operation. % % Rectangle:{geometry} % Simply generate a rectangle of 1's with the size given. 
You can also % specify the location of the 'control point', otherwise the closest % pixel to the center of the rectangle is selected. % % Properly centered and odd sized rectangles work the best. % % Symbol Dilation Kernels % % These kernel is not a good general morphological kernel, but is used % more for highlighting and marking any single pixels in an image using, % a "Dilate" method as appropriate. % % For the same reasons iterating these kernels does not produce the % same result as using a larger radius for the symbol. % % Plus:[{radius}[,{scale}]] % Cross:[{radius}[,{scale}]] % Generate a kernel in the shape of a 'plus' or a 'cross' with % a each arm the length of the given radius (default 2). % % NOTE: "plus:1" is equivalent to a "Diamond" kernel. % % Ring:{radius1},{radius2}[,{scale}] % A ring of the values given that falls between the two radii. % Defaults to a ring of approximataly 3 radius in a 7x7 kernel. % This is the 'edge' pixels of the default "Disk" kernel, % More specifically, "Ring" -> "Ring:2.5,3.5,1.0" % % Hit and Miss Kernels % % Peak:radius1,radius2 % Find any peak larger than the pixels the fall between the two radii. % The default ring of pixels is as per "Ring". 
% Edges % Find flat orthogonal edges of a binary shape % Corners % Find 90 degree corners of a binary shape % Diagonals:type % A special kernel to thin the 'outside' of diagonals % LineEnds:type % Find end points of lines (for pruning a skeletion) % Two types of lines ends (default to both) can be searched for % Type 0: All line ends % Type 1: single kernel for 4-conneected line ends % Type 2: single kernel for simple line ends % LineJunctions % Find three line junctions (within a skeletion) % Type 0: all line junctions % Type 1: Y Junction kernel % Type 2: Diagonal T Junction kernel % Type 3: Orthogonal T Junction kernel % Type 4: Diagonal X Junction kernel % Type 5: Orthogonal + Junction kernel % Ridges:type % Find single pixel ridges or thin lines % Type 1: Fine single pixel thick lines and ridges % Type 2: Find two pixel thick lines and ridges % ConvexHull % Octagonal Thickening Kernel, to generate convex hulls of 45 degrees % Skeleton:type % Traditional skeleton generating kernels. % Type 1: Tradional Skeleton kernel (4 connected skeleton) % Type 2: HIPR2 Skeleton kernel (8 connected skeleton) % Type 3: Thinning skeleton based on a ressearch paper by % Dan S. Bloomberg (Default Type) % ThinSE:type % A huge variety of Thinning Kernels designed to preserve conectivity. % many other kernel sets use these kernels as source definitions. % Type numbers are 41-49, 81-89, 481, and 482 which are based on % the super and sub notations used in the source research paper. % % Distance Measuring Kernels % % Different types of distance measuring methods, which are used with the % a 'Distance' morphology method for generating a gradient based on % distance from an edge of a binary shape, though there is a technique % for handling a anti-aliased shape. % % See the 'Distance' Morphological Method, for information of how it is % applied. 
% % Chebyshev:[{radius}][x{scale}[%!]] % Chebyshev Distance (also known as Tchebychev or Chessboard distance) % is a value of one to any neighbour, orthogonal or diagonal. One way % of thinking of it is the number of squares a 'King' or 'Queen' in % chess needs to traverse to reach any other position on a chess board. % It results in a 'square' like distance function, but one where % diagonals are given a value that is closer than expected. % % Manhattan:[{radius}][x{scale}[%!]] % Manhattan Distance (also known as Rectilinear, City Block, or the Taxi % Cab distance metric), it is the distance needed when you can only % travel in horizontal or vertical directions. It is the % distance a 'Rook' in chess would have to travel, and results in a % diamond like distances, where diagonals are further than expected. % % Octagonal:[{radius}][x{scale}[%!]] % An interleaving of Manhattan and Chebyshev metrics producing an % increasing octagonally shaped distance. Distances match those of % the "Octagon" shaped kernel of the same radius. The minimum radius % and default is 2, producing a 5x5 kernel. % % Euclidean:[{radius}][x{scale}[%!]] % Euclidean distance is the 'direct' or 'as the crow flies' distance. % However by default the kernel size only has a radius of 1, which % limits the distance to 'Knight' like moves, with only orthogonal and % diagonal measurements being correct. As such for the default kernel % you will get an octagonal like distance function. % % However using a larger radius such as "Euclidean:4" you will get a % much smoother distance gradient from the edge of the shape. Especially % if the image is pre-processed to include any anti-aliasing pixels. % Of course a larger kernel is slower to use, and not always needed. % % The first three Distance Measuring Kernels will only generate distances % of exact multiples of {scale} in binary images. As such you can use a % scale of 1 without losing any information. 
However you also need some % scaling when handling non-binary anti-aliased shapes. % % The "Euclidean" Distance Kernel however does generate a non-integer % fractional results, and as such scaling is vital even for binary shapes. % */ MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, const GeometryInfo *args,ExceptionInfo *exception) { KernelInfo *kernel; register ssize_t i; register ssize_t u, v; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ /* Generate a new empty kernel if needed */ kernel=(KernelInfo *) NULL; switch(type) { case UndefinedKernel: /* These should not call this function */ case UserDefinedKernel: assert("Should not call this function" != (char *) NULL); break; case LaplacianKernel: /* Named Descrete Convolution Kernels */ case SobelKernel: /* these are defined using other kernels */ case RobertsKernel: case PrewittKernel: case CompassKernel: case KirschKernel: case FreiChenKernel: case EdgesKernel: /* Hit and Miss kernels */ case CornersKernel: case DiagonalsKernel: case LineEndsKernel: case LineJunctionsKernel: case RidgesKernel: case ConvexHullKernel: case SkeletonKernel: case ThinSEKernel: break; /* A pre-generated kernel is not needed */ #if 0 /* set to 1 to do a compile-time check that we haven't missed anything */ case UnityKernel: case GaussianKernel: case DoGKernel: case LoGKernel: case BlurKernel: case CometKernel: case BinomialKernel: case DiamondKernel: case SquareKernel: case RectangleKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: case RingKernel: case PeaksKernel: case ChebyshevKernel: case ManhattanKernel: case OctangonalKernel: case EuclideanKernel: #else default: #endif /* Generate the base Kernel Structure */ kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) memset(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = 
kernel->positive_range = 0.0; kernel->type = type; kernel->next = (KernelInfo *) NULL; kernel->signature=MagickCoreSignature; break; } switch(type) { /* Convolution Kernels */ case UnityKernel: { kernel->height = kernel->width = (size_t) 1; kernel->x = kernel->y = (ssize_t) 0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(1,sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); kernel->maximum = kernel->values[0] = args->rho; break; } break; case GaussianKernel: case DoGKernel: case LoGKernel: { double sigma = fabs(args->sigma), sigma2 = fabs(args->xi), A, B, R; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else if ( (type != DoGKernel) || (sigma >= sigma2) ) kernel->width = GetOptimalKernelWidth2D(args->rho,sigma); else kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2); kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* WARNING: The following generates a 'sampled gaussian' kernel. * What we really want is a 'discrete gaussian' kernel. 
* * How to do this is I don't know, but appears to be basied on the * Error Function 'erf()' (intergral of a gaussian) */ if ( type == GaussianKernel || type == DoGKernel ) { /* Calculate a Gaussian, OR positive half of a DoG */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } if ( type == DoGKernel ) { /* Subtract a Negative Gaussian for "Difference of Gaussian" */ if ( sigma2 > MagickEpsilon ) { sigma = sigma2; /* simplify loop expressions */ A = 1.0/(2.0*sigma*sigma); B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0; } if ( type == LoGKernel ) { /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { R = ((double)(u*u+v*v))*A; kernel->values[i] = (1-R)*exp(-R)*B; } } else /* special case - generate a unity kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } /* Note the above kernels may have been 'clipped' by a user defined ** radius, producing a smaller 
(darker) kernel. Also for very small ** sigma's (> 0.1) the central value becomes larger than one, and thus ** producing a very bright kernel. ** ** Normalization will still be needed. */ /* Normalize the 2D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. */ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); break; } case BlurKernel: { double sigma = fabs(args->sigma), alpha, beta; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else kernel->width = GetOptimalKernelWidth1D(args->rho,sigma); kernel->height = 1; kernel->x = (ssize_t) (kernel->width-1)/2; kernel->y = 0; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); #if 1 #define KernelRank 3 /* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix). ** It generates a gaussian 3 times the width, and compresses it into ** the expected range. This produces a closer normalization of the ** resulting kernel, especially for very low sigma values. ** As such while wierd it is prefered. ** ** I am told this method originally came from Photoshop. ** ** A properly normalized curve is generated (apart from edge clipping) ** even though we later normalize the result (for edge clipping) ** to allow the correct generation of a "Difference of Blurs". 
*/ /* initialize */ v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */ (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); /* Calculate a Positive 1D Gaussian */ if ( sigma > MagickEpsilon ) { sigma *= KernelRank; /* simplify loop expressions */ alpha = 1.0/(2.0*sigma*sigma); beta= (double) (1.0/(MagickSQ2PI*sigma )); for ( u=-v; u <= v; u++) { kernel->values[(u+v)/KernelRank] += exp(-((double)(u*u))*alpha)*beta; } } else /* special case - generate a unity kernel */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; #else /* Direct calculation without curve averaging This is equivelent to a KernelRank of 1 */ /* Calculate a Positive Gaussian */ if ( sigma > MagickEpsilon ) { alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ beta = 1.0/(MagickSQ2PI*sigma); for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u))*alpha)*beta; } else /* special case - generate a unity kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } #endif /* Note the above kernel may have been 'clipped' by a user defined ** radius, producing a smaller (darker) kernel. Also for very small ** sigma's (> 0.1) the central value becomes larger than one, as a ** result of not generating a actual 'discrete' kernel, and thus ** producing a very bright 'impulse'. ** ** Becuase of these two factors Normalization is required! */ /* Normalize the 1D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. 
*/ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); /* rotate the 1D kernel by given angle */ RotateKernelInfo(kernel, args->xi ); break; } case CometKernel: { double sigma = fabs(args->sigma), A; if ( args->rho < 1.0 ) kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1; else kernel->width = (size_t)args->rho; kernel->x = kernel->y = 0; kernel->height = 1; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* A comet blur is half a 1D gaussian curve, so that the object is ** blurred in one direction only. This may not be quite the right ** curve to use so may change in the future. The function must be ** normalised after generation, which also resolves any clipping. ** ** As we are normalizing and not subtracting gaussians, ** there is no need for a divisor in the gaussian formula ** ** It is less comples */ if ( sigma > MagickEpsilon ) { #if 1 #define KernelRank 3 v = (ssize_t) kernel->width*KernelRank; /* start/end points */ (void) memset(kernel->values,0, (size_t) kernel->width*sizeof(*kernel->values)); sigma *= KernelRank; /* simplify the loop expression */ A = 1.0/(2.0*sigma*sigma); /* B = 1.0/(MagickSQ2PI*sigma); */ for ( u=0; u < v; u++) { kernel->values[u/KernelRank] += exp(-((double)(u*u))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ } for (i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i]; #else A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */ /* B = 1.0/(MagickSQ2PI*sigma); */ for ( i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i] = exp(-((double)(i*i))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ #endif } else /* special case - 
generate a unity kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; } kernel->minimum = 0.0; kernel->maximum = kernel->values[0]; kernel->negative_range = 0.0; ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */ RotateKernelInfo(kernel, args->xi); /* Rotate by angle */ break; } case BinomialKernel: { size_t order_f; if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; order_f = fact(kernel->width-1); kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=0; v < (ssize_t)kernel->height; v++) { size_t alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) ); for ( u=0; u < (ssize_t)kernel->width; u++, i++) kernel->positive_range += kernel->values[i] = (double) (alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) )); } kernel->minimum = 1.0; kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width]; kernel->negative_range = 0.0; break; } /* Convolution Kernels - Well Known Named Constant Kernels */ case LaplacianKernel: { switch ( (int) args->rho ) { case 0: default: /* laplacian square filter -- default */ kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1"); break; case 1: /* laplacian diamond filter */ kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0"); break; case 2: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); break; case 3: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1"); break; case 5: /* a 5x5 laplacian */ kernel=ParseKernelArray( "5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 
-4,-1,0,-1,-4"); break; case 7: /* a 7x7 laplacian */ kernel=ParseKernelArray( "7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" ); break; case 15: /* a 5x5 LoG (sigma approx 1.4) */ kernel=ParseKernelArray( "5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0"); break; case 19: /* a 9x9 LoG (sigma approx 1.4) */ /* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */ kernel=ParseKernelArray( "9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; break; } case SobelKernel: { /* Simple Sobel Kernel */ kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case RobertsKernel: { kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case PrewittKernel: { kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case CompassKernel: { kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case KirschKernel: { kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case FreiChenKernel: /* Direction is set to be left to right positive */ /* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? 
*/ /* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */ { switch ( (int) args->rho ) { default: case 0: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +(MagickRealType) MagickSQ2; kernel->values[5] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ break; case 2: kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2; kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 10: { kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception); if (kernel == (KernelInfo *) NULL) return(kernel); break; } case 1: case 11: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +(MagickRealType) MagickSQ2; kernel->values[5] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 12: kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = +(MagickRealType) MagickSQ2; kernel->values[7] = +(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 13: kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[0] = +(MagickRealType) MagickSQ2; kernel->values[8] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, 
(double) (1.0/2.0*MagickSQ2), NoValue); break; case 14: kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[2] = -(MagickRealType) MagickSQ2; kernel->values[6] = +(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 15: kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 16: kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 17: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 18: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 19: kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/3.0, NoValue); break; } if ( fabs(args->sigma) >= MagickEpsilon ) /* Rotate by correctly supplied 'angle' */ RotateKernelInfo(kernel, args->sigma); else if ( args->rho > 30.0 || args->rho < -30.0 ) /* Rotate by out of bounds 'type' */ RotateKernelInfo(kernel, args->rho); break; } /* Boolean or Shaped Kernels */ case DiamondKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) 
return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case SquareKernel: case RectangleKernel: { double scale; if ( type == SquareKernel ) { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = (size_t) (2*args->rho+1); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; scale = args->sigma; } else { /* NOTE: user defaults set in "AcquireKernelInfo()" */ if ( args->rho < 1.0 || args->sigma < 1.0 ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->width = (size_t)args->rho; kernel->height = (size_t)args->sigma; if ( args->xi < 0.0 || args->xi > (double)kernel->width || args->psi < 0.0 || args->psi > (double)kernel->height ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->x = (ssize_t) args->xi; kernel->y = (ssize_t) args->psi; scale = 1.0; } kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values to scale given */ u=(ssize_t) (kernel->width*kernel->height); for ( i=0; i < u; i++) kernel->values[i] = scale; kernel->minimum = kernel->maximum = scale; /* a flat shape */ kernel->positive_range = scale*u; break; } case OctagonKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius = 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( 
AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= ((long)kernel->x + (long)(kernel->x/2)) ) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case DiskKernel: { ssize_t limit = (ssize_t)(args->rho*args->rho); if (args->rho < 0.4) /* default radius approx 4.3 */ kernel->width = kernel->height = 9L, limit = 18L; else kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ((u*u+v*v) <= limit) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case PlusKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == 0 || v == 0) ? 
args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } case CrossKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == v || u == -v) ? args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } /* HitAndMiss Kernels */ case RingKernel: case PeaksKernel: { ssize_t limit1, limit2, scale; if (args->rho < args->sigma) { kernel->width = ((size_t)args->sigma)*2+1; limit1 = (ssize_t)(args->rho*args->rho); limit2 = (ssize_t)(args->sigma*args->sigma); } else { kernel->width = ((size_t)args->rho)*2+1; limit1 = (ssize_t)(args->sigma*args->sigma); limit2 = (ssize_t)(args->rho*args->rho); } if ( limit2 <= 0 ) kernel->width = 7L, limit1 = 7L, limit2 = 11L; kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */ scale = (ssize_t) (( type == PeaksKernel) ? 
0.0 : args->xi); for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { ssize_t radius=u*u+v*v; if (limit1 < radius && radius <= limit2) kernel->positive_range += kernel->values[i] = (double) scale; else kernel->values[i] = nan; } kernel->minimum = kernel->maximum = (double) scale; if ( type == PeaksKernel ) { /* set the central point in the middle */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; kernel->maximum = 1.0; } break; } case EdgesKernel: { kernel=AcquireKernelInfo("ThinSE:482",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */ break; } case CornersKernel: { kernel=AcquireKernelInfo("ThinSE:87",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */ break; } case DiagonalsKernel: { switch ( (int) args->rho ) { case 0: default: { KernelInfo *new_kernel; kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; ExpandMirrorKernelInfo(kernel); return(kernel); } case 1: kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); break; case 2: kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineEndsKernel: { /* Kernels for finding the end of thin lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all end of lines */ return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception)); case 1: /* kernel for 4-connected line ends - no rotation */ 
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-"); break; case 2: /* kernel to add for 8-connected lines - no rotation */ kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1"); break; case 3: /* kernel to add for orthogonal line ends - does not find corners */ kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0"); break; case 4: /* traditional line end - fails on last T end */ kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineJunctionsKernel: { /* kernels for finding the junctions of multiple lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all line junctions */ return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception)); case 1: /* Y Junction */ kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-"); break; case 2: /* Diagonal T Junctions */ kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1"); break; case 3: /* Orthogonal T Junctions */ kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-"); break; case 4: /* Diagonal X Junctions */ kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1"); break; case 5: /* Orthogonal X Junctions - minimal diamond kernel */ kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case RidgesKernel: { /* Ridges - Ridge finding kernels */ KernelInfo *new_kernel; switch ( (int) args->rho ) { case 1: default: kernel=ParseKernelArray("3x1:0,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */ break; case 2: kernel=ParseKernelArray("4x1:0,1,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */ /* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */ /* Unfortunatally we can not yet 
rotate a non-square kernel */ /* But then we can't flip a non-symetrical kernel either */ new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; break; } break; } case ConvexHullKernel: { KernelInfo *new_kernel; /* first set of 8 kernels */ kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 
append the mirror versions too - no flip function yet */ new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; ExpandRotateKernelInfo(new_kernel, 90.0); LastKernelInfo(kernel)->next = new_kernel; break; } case SkeletonKernel: { switch ( (int) args->rho ) { case 1: default: /* Traditional Skeleton... ** A cyclically rotated single kernel */ kernel=AcquireKernelInfo("ThinSE:482",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */ break; case 2: /* HIPR Variation of the cyclic skeleton ** Corners of the traditional method made more forgiving, ** but the retain the same cyclic order. */ kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception); if (kernel == (KernelInfo *) NULL) return(kernel); if (kernel->next == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); kernel->type = type; kernel->next->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */ break; case 3: /* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf */ kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43", exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->next->type = type; kernel->next->next->type = type; ExpandMirrorKernelInfo(kernel); /* 12 kernels total */ break; } break; } case ThinSEKernel: { /* Special kernels for general thinning, while preserving connections ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. 
Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf ** And ** http://tpgit.github.com/Leptonica/ccthin_8c_source.html ** ** Note kernels do not specify the origin pixel, allowing them ** to be used for both thickening and thinning operations. */ switch ( (int) args->rho ) { /* SE for 4-connected thinning */ case 41: /* SE_4_1 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1"); break; case 42: /* SE_4_2 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-"); break; case 43: /* SE_4_3 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1"); break; case 44: /* SE_4_4 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-"); break; case 45: /* SE_4_5 */ kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-"); break; case 46: /* SE_4_6 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1"); break; case 47: /* SE_4_7 */ kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-"); break; case 48: /* SE_4_8 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1"); break; case 49: /* SE_4_9 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1"); break; /* SE for 8-connected thinning - negatives of the above */ case 81: /* SE_8_0 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-"); break; case 82: /* SE_8_2 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-"); break; case 83: /* SE_8_3 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-"); break; case 84: /* SE_8_4 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-"); break; case 85: /* SE_8_5 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-"); break; case 86: /* SE_8_6 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1"); break; case 87: /* SE_8_7 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-"); break; case 88: /* SE_8_8 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-"); break; case 89: /* SE_8_9 */ kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-"); break; /* Special combined SE kernels */ case 423: /* SE_4_2 , SE_4_3 Combined Kernel */ kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-"); break; case 823: /* SE_8_2 , SE_8_3 Combined Kernel 
*/ kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-"); break; case 481: /* SE_48_1 - General Connected Corner Kernel */ kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-"); break; default: case 482: /* SE_48_2 - General Edge Kernel */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } /* Distance Measuring Kernels */ case ChebyshevKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*MagickMax(fabs((double)u),fabs((double)v)) ); kernel->maximum = kernel->values[0]; break; } case ManhattanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*(labs((long) u)+labs((long) v)) ); kernel->maximum = kernel->values[0]; break; } case OctagonalKernel: { if (args->rho < 2.0) kernel->width = kernel->height = 5; /* default/minimum radius = 2 */ else kernel->width = kernel->height = 
((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { double r1 = MagickMax(fabs((double)u),fabs((double)v)), r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5); kernel->positive_range += kernel->values[i] = args->sigma*MagickMax(r1,r2); } kernel->maximum = kernel->values[0]; break; } case EuclideanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*sqrt((double)(u*u+v*v)) ); kernel->maximum = kernel->values[0]; break; } default: { /* No-Op Kernel - Basically just a single pixel on its own */ kernel=ParseKernelArray("1:1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = UndefinedKernel; break; } break; } return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneKernelInfo() creates a new clone of the given Kernel List so that its % can be modified without effecting the original. The cloned kernel should % be destroyed using DestoryKernelInfo() when no longer needed. 
%
%  The format of the CloneKernelInfo method is:
%
%      KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  KernelInfo
    *clone;

  ssize_t
    n;

  assert(kernel != (KernelInfo *) NULL);
  /* Shallow-copy the structure first, then replace the shared pointers. */
  clone=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (clone == (KernelInfo *) NULL)
    return(clone);
  *clone=(*kernel);
  /* Deep-copy the kernel value array. */
  clone->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (clone->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(clone));
  for (n=0; n < (ssize_t) (kernel->width*kernel->height); n++)
    clone->values[n]=kernel->values[n];
  /* Recursively clone any remaining kernels in the list. */
  if (kernel->next != (KernelInfo *) NULL)
    {
      clone->next=CloneKernelInfo(kernel->next);
      if (clone->next == (KernelInfo *) NULL)
        return(DestroyKernelInfo(clone));
    }
  return(clone);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y K e r n e l I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyKernelInfo() frees the memory used by a Convolution/Morphology
%  kernel.
% % The format of the DestroyKernelInfo method is: % % KernelInfo *DestroyKernelInfo(KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel to be destroyed % */ MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel) { assert(kernel != (KernelInfo *) NULL); if (kernel->next != (KernelInfo *) NULL) kernel->next=DestroyKernelInfo(kernel->next); kernel->values=(MagickRealType *) RelinquishAlignedMemory(kernel->values); kernel=(KernelInfo *) RelinquishMagickMemory(kernel); return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + E x p a n d M i r r o r K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExpandMirrorKernelInfo() takes a single kernel, and expands it into a % sequence of 90-degree rotated kernels but providing a reflected 180 % rotatation, before the -/+ 90-degree rotations. % % This special rotation order produces a better, more symetrical thinning of % objects. % % The format of the ExpandMirrorKernelInfo method is: % % void ExpandMirrorKernelInfo(KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % This function is only internel to this module, as it is not finalized, % especially with regard to non-orthogonal angles, and rotation of larger % 2D kernels. */ #if 0 static void FlopKernelInfo(KernelInfo *kernel) { /* Do a Flop by reversing each row. 
*/ size_t y; register ssize_t x,r; register double *k,t; for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width) for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--) t=k[x], k[x]=k[r], k[r]=t; kernel->x = kernel->width - kernel->x - 1; angle = fmod(angle+180.0, 360.0); } #endif static void ExpandMirrorKernelInfo(KernelInfo *kernel) { KernelInfo *clone, *last; last = kernel; clone = CloneKernelInfo(last); if (clone == (KernelInfo *) NULL) return; RotateKernelInfo(clone, 180); /* flip */ LastKernelInfo(last)->next = clone; last = clone; clone = CloneKernelInfo(last); if (clone == (KernelInfo *) NULL) return; RotateKernelInfo(clone, 90); /* transpose */ LastKernelInfo(last)->next = clone; last = clone; clone = CloneKernelInfo(last); if (clone == (KernelInfo *) NULL) return; RotateKernelInfo(clone, 180); /* flop */ LastKernelInfo(last)->next = clone; return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + E x p a n d R o t a t e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating % incrementally by the angle given, until the kernel repeats. % % WARNING: 45 degree rotations only works for 3x3 kernels. % While 90 degree roatations only works for linear and square kernels % % The format of the ExpandRotateKernelInfo method is: % % void ExpandRotateKernelInfo(KernelInfo *kernel, double angle) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o angle: angle to rotate in degrees % % This function is only internel to this module, as it is not finalized, % especially with regard to non-orthogonal angles, and rotation of larger % 2D kernels. 
*/

/* Internal routine: return MagickTrue if the two kernels are identical. */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  size_t
    n;

  /* Geometry and origin must match before any values are compared. */
  if ((kernel1->width != kernel2->width) ||
      (kernel1->height != kernel2->height) ||
      (kernel1->x != kernel2->x) || (kernel1->y != kernel2->y))
    return(MagickFalse);
  for (n=0; n < (kernel1->width*kernel1->height); n++)
  {
    /* A NaN ("don't care") entry only ever matches another NaN entry. */
    if ( IsNaN(kernel1->values[n]) && !IsNaN(kernel2->values[n]) )
      return(MagickFalse);
    if ( IsNaN(kernel2->values[n]) && !IsNaN(kernel1->values[n]) )
      return(MagickFalse);
    /* Otherwise the values must agree to within epsilon. */
    if ( fabs(kernel1->values[n] - kernel2->values[n]) >= MagickEpsilon )
      return(MagickFalse);
  }
  return(MagickTrue);
}

static void ExpandRotateKernelInfo(KernelInfo *kernel,const double angle)
{
  KernelInfo
    *rotated,
    *tail;

  /* Keep appending rotated clones until the rotation cycle repeats. */
  tail=kernel;
  for ( ; ; )
  {
    rotated=CloneKernelInfo(tail);
    if (rotated == (KernelInfo *) NULL)
      return;  /* allocation failed: leave the list as it stands */
    RotateKernelInfo(rotated,angle);
    if (SameKernelInfo(kernel,rotated) != MagickFalse)
      break;  /* back to the starting kernel: cycle complete */
    LastKernelInfo(tail)->next=rotated;
    tail=rotated;
  }
  rotated=DestroyKernelInfo(rotated);  /* discard the duplicate kernel */
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     C a l c K e r n e l M e t a D a t a                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
%  only, using the kernel values.  This should only be used if it is not
%  possible to calculate that meta-data in some easier way.
%
%  It is important that the meta-data is correct before ScaleKernelInfo() is
%  used to perform kernel normalization.
%
%  The format of the CalcKernelMetaData method is:
%
%      void CalcKernelMetaData(KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%  WARNING: Minimum and Maximum values are assumed to include zero, even if
%  zero is not part of the kernel (as in Gaussian Derived kernels).  This
%  however is not true for flat-shaped morphological kernels.
%
%  WARNING: Only the specific kernel pointed to is modified, not a list of
%  multiple kernels.
%
%  This is an internal function and not expected to be useful outside this
%  module.  This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  size_t
    n;

  /* Reset all ranges, then accumulate them from the kernel values. */
  kernel->minimum=0.0;
  kernel->maximum=0.0;
  kernel->negative_range=0.0;
  kernel->positive_range=0.0;
  for (n=0; n < (kernel->width*kernel->height); n++)
  {
    /* Snap near-zero values to exactly zero. */
    if ( fabs(kernel->values[n]) < MagickEpsilon )
      kernel->values[n] = 0.0;
    if ( kernel->values[n] < 0 )
      kernel->negative_range += kernel->values[n];
    else
      kernel->positive_range += kernel->values[n];
    Minimize(kernel->minimum, kernel->values[n]);
    Maximize(kernel->maximum, kernel->values[n]);
  }
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h o l o g y A p p l y                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MorphologyApply() applies a morphological method, multiple times using
%  a list of multiple kernels.  This is the method that should be called by
%  other 'operators' that internally use morphology operations as part of
%  their processing.
%
%  It is basically equivalent to MorphologyImage() (see below) but without
%  any user controls.  This allows internal programs to use this method to
%  perform a specific task without possible interference by any API user
%  supplied settings.
%
%  It is MorphologyImage()'s task to extract any such user controls, and
%  pass them to this function for processing.
%
%  More specifically all given kernels should already be scaled, normalised,
%  and blended appropriately before being passed to this routine.  The
%  appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
%  The format of the MorphologyApply method is:
%
%      Image *MorphologyApply(const Image *image,MorphologyMethod method,
%        const ssize_t iterations,const KernelInfo *kernel,
%        const CompositeMethod compose,const double bias,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the source image
%
%    o method: the morphology method to be applied.
%
%    o iterations: apply the operation this many times (or no change).
%                  A value of -1 means loop until no change found.
%                  How this is applied may depend on the morphology method.
%                  Typically this is a value of 1.
%
%    o channel: the channel type.
%
%    o kernel: An array of double representing the morphology kernel.
%
%    o compose: How to handle or merge multi-kernel results.
%          If 'UndefinedCompositeOp' use default for the Morphology method.
%          If 'NoCompositeOp' force image to be re-iterated by each kernel.
%          Otherwise merge the results using the compose method given.
%
%    o bias: Convolution Output Bias.
%
%    o exception: return any errors or warnings in this structure.
% */ static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image, const MorphologyMethod method,const KernelInfo *kernel,const double bias, ExceptionInfo *exception) { #define MorphologyTag "Morphology/Image" CacheView *image_view, *morphology_view; OffsetInfo offset; register ssize_t j, y; size_t *changes, changed, width; MagickBooleanType status; MagickOffsetType progress; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(morphology_image != (Image *) NULL); assert(morphology_image->signature == MagickCoreSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); morphology_view=AcquireAuthenticCacheView(morphology_image,exception); width=image->columns+kernel->width-1; offset.x=0; offset.y=0; switch (method) { case ConvolveMorphology: case DilateMorphology: case DilateIntensityMorphology: case IterativeDistanceMorphology: { /* Kernel needs to used with reflection about origin. */ offset.x=(ssize_t) kernel->width-kernel->x-1; offset.y=(ssize_t) kernel->height-kernel->y-1; break; } case ErodeMorphology: case ErodeIntensityMorphology: case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: { offset.x=kernel->x; offset.y=kernel->y; break; } default: { assert("Not a Primitive Morphology Method" != (char *) NULL); break; } } changed=0; changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(), sizeof(*changes)); if (changes == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changes[j]=0; if ((method == ConvolveMorphology) && (kernel->width == 1)) { register ssize_t x; /* Special handling (for speed) of vertical (blur) kernels. 
This performs its handling in columns rather than in rows. This is only done for convolve as it is the only method that generates very large 1-D vertical kernels (such as a 'BlurKernel') */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,morphology_image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t r; ssize_t center; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+ kernel->height-1,exception); q=GetCacheViewAuthenticPixels(morphology_view,x,0,1, morphology_image->rows,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*offset.y; for (r=0; r < (ssize_t) image->rows; r++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait morphology_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t v; size_t count; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); morphology_traits=GetPixelChannelTraits(morphology_image,channel); if ((traits == UndefinedPixelTrait) || (morphology_traits == UndefinedPixelTrait)) continue; if ((traits & CopyPixelTrait) != 0) { SetPixelChannel(morphology_image,channel,p[center+i],q); continue; } k=(&kernel->values[kernel->height-1]); pixels=p; pixel=bias; gamma=0.0; count=0; if ((morphology_traits & BlendPixelTrait) == 0) for (v=0; v < (ssize_t) kernel->height; v++) { if (!IsNaN(*k)) { pixel+=(*k)*pixels[i]; gamma+=(*k); count++; } k--; pixels+=GetPixelChannels(image); } else for (v=0; v < (ssize_t) kernel->height; v++) { if (!IsNaN(*k)) { alpha=(double) 
(QuantumScale*GetPixelAlpha(image,pixels)); pixel+=alpha*(*k)*pixels[i]; gamma+=alpha*(*k); count++; } k--; pixels+=GetPixelChannels(image); } if (fabs(pixel-p[center+i]) > MagickEpsilon) changes[id]++; gamma=PerceptibleReciprocal(gamma); if (count != 0) gamma*=(double) kernel->height/count; SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma* pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(morphology_image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,MorphologyTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_image->type=image->type; morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changed+=changes[j]; changes=(size_t *) RelinquishMagickMemory(changes); return(status ? (ssize_t) changed : 0); } /* Normal handling of horizontal or rectangular kernels (row by row). 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,morphology_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; ssize_t center; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width, kernel->height,exception); q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) (GetPixelChannels(image)*width*offset.y+ GetPixelChannels(image)*offset.x); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, intensity, maximum, minimum, pixel; PixelChannel channel; PixelTrait morphology_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; size_t count; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); morphology_traits=GetPixelChannelTraits(morphology_image,channel); if ((traits == UndefinedPixelTrait) || (morphology_traits == UndefinedPixelTrait)) continue; if ((traits & CopyPixelTrait) != 0) { SetPixelChannel(morphology_image,channel,p[center+i],q); continue; } pixels=p; maximum=0.0; minimum=(double) QuantumRange; switch (method) { case ConvolveMorphology: { pixel=bias; break; } case DilateMorphology: case ErodeIntensityMorphology: { pixel=0.0; break; } case HitAndMissMorphology: case ErodeMorphology: { pixel=QuantumRange; break; } default: { pixel=(double) p[center+i]; break; } } count=0; gamma=1.0; switch (method) { case ConvolveMorphology: { /* Weighted Average of pixels using reflected kernel For correct working of this 
operation for asymetrical kernels, the kernel needs to be applied in its reflected form. That is its values needs to be reversed. Correlation is actually the same as this but without reflecting the kernel, and thus 'lower-level' that Convolution. However as Convolution is the more common method used, and it does not really cost us much in terms of processing to use a reflected kernel, so it is Convolution that is implemented. Correlation will have its kernel reflected before calling this function to do a Convolve. For more details of Correlation vs Convolution see http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf */ k=(&kernel->values[kernel->width*kernel->height-1]); if ((morphology_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { pixel+=(*k)*pixels[i]; count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } /* Alpha blending. */ gamma=0.0; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=alpha*(*k)*pixels[i]; gamma+=alpha*(*k); count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case ErodeMorphology: { /* Minimum value within kernel neighbourhood. The kernel is not reflected for this operation. In normal Greyscale Morphology, the kernel value should be added to the real value, this is currently not done, due to the nature of the boolean kernels being used. 
*/ k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { if ((double) pixels[i] < pixel) pixel=(double) pixels[i]; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case DilateMorphology: { /* Maximum value within kernel neighbourhood. For correct working of this operation for asymetrical kernels, the kernel needs to be applied in its reflected form. That is its values needs to be reversed. In normal Greyscale Morphology, the kernel value should be added to the real value, this is currently not done, due to the nature of the boolean kernels being used. */ k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k > 0.5)) { if ((double) pixels[i] > pixel) pixel=(double) pixels[i]; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: { /* Minimum of foreground pixel minus maxumum of background pixels. The kernel is not reflected for this operation, and consists of both foreground and background pixel neighbourhoods, 0.0 for background, and 1.0 for foreground with either Nan or 0.5 values for don't care. This never produces a meaningless negative result. Such results cause Thinning/Thicken to not work correctly when used against a greyscale image. 
*/ k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if (*k > 0.7) { if ((double) pixels[i] < pixel) pixel=(double) pixels[i]; } else if (*k < 0.3) { if ((double) pixels[i] > maximum) maximum=(double) pixels[i]; } count++; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } pixel-=maximum; if (pixel < 0.0) pixel=0.0; if (method == ThinningMorphology) pixel=(double) p[center+i]-pixel; else if (method == ThickenMorphology) pixel+=(double) p[center+i]+pixel; break; } case ErodeIntensityMorphology: { /* Select pixel with minimum intensity within kernel neighbourhood. The kernel is not reflected for this operation. */ k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { intensity=(double) GetPixelIntensity(image,pixels); if (intensity < minimum) { pixel=(double) pixels[i]; minimum=intensity; } count++; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case DilateIntensityMorphology: { /* Select pixel with maximum intensity within kernel neighbourhood. The kernel is not reflected for this operation. */ k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { intensity=(double) GetPixelIntensity(image,pixels); if (intensity > maximum) { pixel=(double) pixels[i]; maximum=intensity; } count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case IterativeDistanceMorphology: { /* Compute th iterative distance from black edge of a white image shape. Essentually white values are decreased to the smallest 'distance from edge' it can find. It works by adding kernel values to the neighbourhood, and and select the minimum value found. 
The kernel is rotated before use, so kernel distances match resulting distances, when a user provided asymmetric kernel is applied. This code is nearly identical to True GrayScale Morphology but not quite. GreyDilate Kernel values added, maximum value found Kernel is rotated before use. GrayErode: Kernel values subtracted and minimum value found No kernel rotation used. Note the the Iterative Distance method is essentially a GrayErode, but with negative kernel values, and kernel rotation applied. */ k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case UndefinedMorphology: default: break; } if (fabs(pixel-p[center+i]) > MagickEpsilon) changes[id]++; gamma=PerceptibleReciprocal(gamma); if (count != 0) gamma*=(double) kernel->height*kernel->width/count; SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(morphology_image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,MorphologyTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changed+=changes[j]; changes=(size_t *) RelinquishMagickMemory(changes); return(status ? 
    (ssize_t) changed : -1);
}

/* This is almost identical to the MorphologyPrimitive() function above, but
** applies the primitive directly to the actual image using two passes, once
** in each direction, with the results of the previous (and current) row
** being re-used.
**
** That is after each row is 'Sync'ed' into the image, the next row makes
** use of those values as part of the calculation of the next row.  It
** repeats, but going in the opposite (bottom-up) direction.
**
** Because of this 're-use of results' this function can not make use of
** multi-threaded, parallel processing.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  CacheView
    *morphology_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  size_t
    width,
    changed;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  changed=0;
  progress=0;
  switch(method)
  {
    case DistanceMorphology:
    case VoronoiMorphology:
    {
      /*
        Kernel reflected about origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    default:
    {
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
  }
  /*
    Two views into same image, do not thread.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  width=image->columns+kernel->width-1;
  /* First pass: top-down, left-to-right, iterating the distance function. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Read virtual pixels, and authentic pixels, from the same image!  We
      read using virtual to get virtual pixel handling, but write back
      into the same image.

      Only top half of kernel is processed as we do a single pass downward
      through the image iterating the distance function as we go.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t)
      offset.y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* Kernel rows above and including the origin row (virtual). */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v <= offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* Origin-row pixels to the left, already updated this pass. */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          case VoronoiMorphology:
          {
            /* Kernel rows strictly above the origin row (virtual). */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* Origin-row pixels to the left, already updated this pass. */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  /*
    Do the reverse pass through the image.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  for (y=(ssize_t) image->rows-1; y >= 0; y--)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Read virtual pixels, and authentic pixels, from the same image.  We
      read using virtual to get virtual pixel handling, but write back
      into the same image.

      Only the bottom half of the kernel is processed as we move up the
      image.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t)
      kernel->y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Walk this row right-to-left. */
    p+=(image->columns-1)*GetPixelChannels(image);
    q+=(image->columns-1)*GetPixelChannels(image);
    for (x=(ssize_t) image->columns-1; x >= 0; x--)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* Kernel rows at and below the origin row (virtual). */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* Origin-row pixels to the right, already updated this pass. */
            k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          case VoronoiMorphology:
          {
            /* Kernel rows at and below the origin row (virtual). */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* Origin-row pixels to the right, already updated this pass. */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p-=GetPixelChannels(image);
      q-=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  return(status ? (ssize_t) changed : -1);
}

/* Apply a Morphology by calling one of the above low level primitive
** application functions.  This function handles any iteration loops,
** composition or re-iteration of results, and compound morphology methods
** that is based on multiple low-level (staged) morphology methods.
**
** Basically this provides the complex glue between the requested morphology
** method and raw low-level implementation (above).
*/
MagickPrivate Image *MorphologyApply(const Image *image,
  const MorphologyMethod method, const ssize_t iterations,
  const KernelInfo *kernel, const CompositeOperator compose,
  const double bias, ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function? */
    verbose;        /* verbose output of results */

  size_t
    method_loop,    /* Loop 1: number of compound method iterations (norm 1) */
    method_limit,   /*         maximum number of compound method iterations */
    kernel_number,  /* Loop 2: the kernel number being applied */
    stage_loop,     /* Loop 3: primitive loop for compound morphology */
    stage_limit,    /*         how many primitives are in this compound */
    kernel_loop,    /* Loop 4: iterate the kernel over image */
    kernel_limit,   /*         number of times to iterate kernel */
    count,          /* total count of primitive steps applied */
    kernel_changed, /* total count of changed using iterated kernel */
    method_changed; /* total count of changed over method iteration */

  ssize_t
    changed;        /* number pixels changed by last primitive operation */

  char
    v_info[MagickPathExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  count = 0;      /* number of low-level morphology primitives performed */
  if ( iterations == 0 )
    return((Image *) NULL);   /* null operation - nothing to do! */

  kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative iterations = infinite (well almost) */
    kernel_limit = image->columns>image->rows ? image->columns : image->rows;

  verbose = IsStringTrue(GetImageArtifact(image,"debug"));

  /* initialise for cleanup */
  curr_image = (Image *) image;
  curr_compose = image->compose;
  (void) curr_compose;
  work_image = save_image = rslt_image = (Image *) NULL;
  reflected_kernel = (KernelInfo *) NULL;

  /* Initialize specific methods
   * + which loop should use the given iterations
   * + how many primitives make up the compound morphology
   * + multi-kernel compose method to use (by default)
   */
  method_limit = 1;       /* just do method once, unless otherwise set */
  stage_limit = 1;        /* assume method is not a compound */
  special = MagickFalse;  /* assume it is NOT a direct modify primitive */
  rslt_compose = compose; /* and we are composing multi-kernels as given */
  switch( method ) {
    case SmoothMorphology:  /* 4 primitive compound morphology */
      stage_limit = 4;
      break;
    case OpenMorphology:    /* 2 primitive compound morphology */
    case OpenIntensityMorphology:
    case TopHatMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case EdgeMorphology:
      stage_limit = 2;
      break;
    case HitAndMissMorphology:
      rslt_compose = LightenCompositeOp;  /* Union of multi-kernel results */
      /* FALL THRU */
    case ThinningMorphology:
    case ThickenMorphology:
      method_limit = kernel_limit;  /* iterate the whole method */
      kernel_limit = 1;             /* do not do kernel iteration  */
      break;
    case DistanceMorphology:
    case VoronoiMorphology:
      special = MagickTrue;         /* use special direct primitive */
      break;
    default:
      break;
  }

  /* Apply special methods with special requirements
  ** For example, single run only, or post-processing requirements
  */
  if ( special != MagickFalse )
    {
      rslt_image=CloneImage(image,0,0,MagickTrue,exception);
      if (rslt_image == (Image *) NULL)
        goto error_cleanup;
      if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
        goto error_cleanup;

      changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);

      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr,
          "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
          CommandOptionToMnemonic(MagickMorphologyOptions, method),
          1.0,0.0,1.0, (double) changed);

      if ( changed < 0 )
        goto error_cleanup;

      if ( method == VoronoiMorphology ) {
        /* Preserve the alpha channel of input image - but turned it off */
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
        (void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
          MagickTrue,0,0,exception);
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
      }
      goto exit_cleanup;
    }

  /* Handle user (caller) specified multi-kernel composition method */
  if ( compose != UndefinedCompositeOp )
    rslt_compose = compose;  /* override default composition for method */
  if ( rslt_compose == UndefinedCompositeOp )
    rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */

  /* Some methods require a reflected kernel to use with primitives.
   * Create the reflected kernel for those methods. */
  switch ( method ) {
    case CorrelateMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case SmoothMorphology:
      reflected_kernel = CloneKernelInfo(kernel);
      if (reflected_kernel == (KernelInfo *) NULL)
        goto error_cleanup;
      RotateKernelInfo(reflected_kernel,180);
      break;
    default:
      break;
  }

  /* Loops around more primitive morphology methods
  **  erode, dilate, open, close, smooth, edge, etc...
  */
  /* Loop 1:  iterate the compound method */
  method_loop = 0;
  method_changed = 1;
  while ( method_loop < method_limit && method_changed > 0 ) {
    method_loop++;
    method_changed = 0;

    /* Loop 2:  iterate over each kernel in a multi-kernel list */
    norm_kernel = (KernelInfo *) kernel;
    this_kernel = (KernelInfo *) kernel;
    rflt_kernel = reflected_kernel;

    kernel_number = 0;
    while ( norm_kernel != NULL ) {

      /* Loop 3: Compound Morphology Staging - Select Primitive to apply */
      stage_loop = 0;          /* the compound morphology stage number */
      while ( stage_loop < stage_limit ) {
        stage_loop++;   /* The stage of the compound morphology */

        /* Select primitive morphology for this stage of compound method */
        this_kernel = norm_kernel; /* default use unreflected kernel */
        primitive = method;        /* Assume method is a primitive */
        switch( method ) {
          case ErodeMorphology:      /* just erode */
          case EdgeInMorphology:     /* erode and image difference */
            primitive = ErodeMorphology;
            break;
          case DilateMorphology:     /* just dilate */
          case EdgeOutMorphology:    /* dilate and image difference */
            primitive = DilateMorphology;
            break;
          case OpenMorphology:       /* erode then dilate */
          case TopHatMorphology:     /* open and image difference */
            primitive = ErodeMorphology;
            if ( stage_loop == 2 )
              primitive = DilateMorphology;
            break;
          case OpenIntensityMorphology:
            primitive = ErodeIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = DilateIntensityMorphology;
            break;
          case CloseMorphology:      /* dilate, then erode */
          case BottomHatMorphology:  /* close and image difference */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeMorphology;
            break;
          case CloseIntensityMorphology:
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeIntensityMorphology;
            break;
          case SmoothMorphology:     /* open, close */
            switch ( stage_loop ) {
              case 1: /* start an open method, which starts with Erode */
                primitive = ErodeMorphology;
                break;
              case 2: /* now Dilate the Erode */
                primitive = DilateMorphology;
                break;
              case 3: /* Reflect kernel a close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = DilateMorphology;
                break;
              case 4: /* Finish the Close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = ErodeMorphology;
                break;
            }
            break;
          case EdgeMorphology:       /* dilate and erode difference */
            primitive = DilateMorphology;
            if ( stage_loop == 2 ) {
              save_image = curr_image;  /* save the image difference */
              curr_image = (Image *) image;
              primitive = ErodeMorphology;
            }
            break;
          case CorrelateMorphology:
            /* A Correlation is a Convolution with a reflected kernel.
            ** However a Convolution is a weighted sum using a reflected
            ** kernel.  It may seem strange to convert a Correlation into a
            ** Convolution as the Correlation is the simpler method, but
            ** Convolution is much more commonly used, and it makes sense to
            ** implement it directly so as to avoid the need to duplicate the
            ** kernel when it is not required (which is typically the
            ** default).
            */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = ConvolveMorphology;
            break;
          default:
            break;
        }
        assert( this_kernel != (KernelInfo *) NULL );

        /* Extra information for debugging compound operations */
        if (verbose != MagickFalse) {
          if ( stage_limit > 1 )
            (void) FormatLocaleString(v_info,MagickPathExtent,
              "%s:%.20g.%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
              method_loop,(double) stage_loop);
          else if ( primitive != method )
            (void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
              method_loop);
          else
            v_info[0] = '\0';
        }

        /* Loop 4: Iterate the kernel with primitive */
        kernel_loop = 0;
        kernel_changed = 0;
        changed = 1;
        while ( kernel_loop < kernel_limit && changed > 0 ) {
          kernel_loop++;     /* the iteration of this kernel */

          /* Create a clone as the destination image, if not yet defined */
          if ( work_image == (Image *) NULL )
            {
              work_image=CloneImage(image,0,0,MagickTrue,exception);
              if (work_image == (Image *) NULL)
                goto error_cleanup;
              if (SetImageStorageClass(work_image,DirectClass,exception) ==
                  MagickFalse)
                goto error_cleanup;
            }

          /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
          count++;
          changed = MorphologyPrimitive(curr_image, work_image, primitive,
                       this_kernel, bias, exception);
          if (verbose != MagickFalse) {
            if ( kernel_loop > 1 )
              (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
            (void) FormatLocaleFile(stderr,
              "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
              v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
              primitive),(this_kernel == rflt_kernel ) ? "*" : "",
              (double) (method_loop+kernel_loop-1),(double) kernel_number,
              (double) count,(double) changed);
          }
          if ( changed < 0 )
            goto error_cleanup;
          kernel_changed += changed;
          method_changed += changed;

          /* prepare next loop */
          { Image *tmp = work_image;   /* swap images for iteration */
            work_image = curr_image;
            curr_image = tmp;
          }
          if ( work_image == image )
            work_image = (Image *) NULL; /* replace input 'image' */

        } /* End Loop 4: Iterate the kernel with primitive */

        if (verbose != MagickFalse && kernel_changed != (size_t)changed)
          (void) FormatLocaleFile(stderr,
            "   Total %.20g",(double) kernel_changed);
        if (verbose != MagickFalse && stage_loop < stage_limit)
          (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */

#if 0
    (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
    (void) FormatLocaleFile(stderr, "      curr =0x%lx\n", (unsigned long)curr_image);
    (void) FormatLocaleFile(stderr, "      work =0x%lx\n", (unsigned long)work_image);
    (void) FormatLocaleFile(stderr, "      save =0x%lx\n", (unsigned long)save_image);
    (void) FormatLocaleFile(stderr, "      union=0x%lx\n", (unsigned long)rslt_image);
#endif

      } /* End Loop 3: Primitive (staging) Loop for Compound Methods */

      /*  Final Post-processing for some Compound Methods
      **
      ** The removal of any 'Sync' channel flag in the Image Composition
      ** below ensures the mathematical compose method is applied in a
      ** purely mathematical way, and only to the selected channels.
      ** Turn off SVG composition 'alpha blending'.
      */
      switch( method ) {
        case EdgeOutMorphology:
        case EdgeInMorphology:
        case TopHatMorphology:
        case BottomHatMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference with original image",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          break;
        case EdgeMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          save_image = DestroyImage(save_image); /* finished with save image */
          break;
        default:
          break;
      }

      /* multi-kernel handling:  re-iterate, or compose results */
      if ( kernel->next == (KernelInfo *) NULL )
        rslt_image = curr_image;   /* just return the resulting image */
      else if ( rslt_compose == NoCompositeOp )
        { if (verbose != MagickFalse) {
            if ( this_kernel->next != (KernelInfo *) NULL )
              (void) FormatLocaleFile(stderr, " (re-iterate)");
            else
              (void) FormatLocaleFile(stderr, " (done)");
          }
          rslt_image = curr_image; /* return result, and re-iterate */
        }
      else if ( rslt_image == (Image *) NULL)
        { if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (save for compose)");
          rslt_image = curr_image;
          curr_image = (Image *) image;  /* continue with original image */
        }
      else
        { /* Add the new 'current' result to the composition
          **
          ** The removal of any 'Sync' channel flag in the Image Composition
          ** below ensures the mathematical compose method is applied in a
          ** purely mathematical way, and only to the selected channels.
          ** IE: Turn off SVG composition 'alpha blending'.
          */
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (compose \"%s\")",
              CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
          (void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
            0,0,exception);
          curr_image = DestroyImage(curr_image);
          curr_image = (Image *) image;  /* continue with original image */
        }
      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr, "\n");

      /* loop to the next kernel in a multi-kernel list */
      norm_kernel = norm_kernel->next;
      if ( rflt_kernel != (KernelInfo *) NULL )
        rflt_kernel = rflt_kernel->next;
      kernel_number++;
    } /* End Loop 2: Loop over each kernel */

  } /* End Loop 1: compound method iteration */

  goto exit_cleanup;

  /* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
  if ( curr_image == rslt_image )
    curr_image = (Image *) NULL;
  if ( rslt_image != (Image *) NULL )
    rslt_image = DestroyImage(rslt_image);
exit_cleanup:
  if ( curr_image == rslt_image || curr_image == image )
    curr_image = (Image *) NULL;
  if ( curr_image != (Image *) NULL )
    curr_image = DestroyImage(curr_image);
  if ( work_image != (Image *) NULL )
    work_image = DestroyImage(work_image);
  if ( save_image != (Image *) NULL )
    save_image = DestroyImage(save_image);
  if ( reflected_kernel != (KernelInfo *) NULL )
    reflected_kernel = DestroyKernelInfo(reflected_kernel);
  return(rslt_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h o l o g y I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MorphologyImage() applies a user supplied kernel to the image according to
%  the given morphology method.
%
%  This function applies any and all user defined settings before calling
%  the above internal function MorphologyApply().
%
%  User defined settings include...
%    * Output Bias for Convolution and correlation ("-define convolve:bias=??")
%    * Kernel Scale/normalize settings ("-define convolve:scale=??")
%      This can also includes the addition of a scaled unity kernel.
%    * Show Kernel being applied ("-define morphology:showKernel=1")
%
%  Other operators that do not want user supplied options interfering,
%  especially "convolve:bias" and "morphology:showKernel" should use
%  MorphologyApply() directly.
%
%  The format of the MorphologyImage method is:
%
%      Image *MorphologyImage(const Image *image,MorphologyMethod method,
%        const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: the morphology method to be applied.
%
%    o iterations: apply the operation this many times (or no change).
%      A value of -1 means loop until no change found.
%      How this is applied may depend on the morphology method.
%      Typically this is a value of 1.
%
%    o kernel: An array of double representing the morphology kernel.
%      Warning: kernel may be normalized for the Convolve method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  const char
    *artifact;

  CompositeOperator
    compose;

  double
    bias;

  Image
    *morphology_image;

  KernelInfo
    *curr_kernel;

  curr_kernel = (KernelInfo *) kernel;
  bias=0.0;
  compose = UndefinedCompositeOp;  /* use default for method */

  /* Apply Convolve/Correlate Normalization and Scaling Factors.
   * This is done BEFORE the ShowKernelInfo() function is called so that
   * users can see the results of the 'option:convolve:scale' option.
   */
  if ( method == ConvolveMorphology || method == CorrelateMorphology ) {
      /* Get the bias value as it will be needed */
      artifact = GetImageArtifact(image,"convolve:bias");
      if ( artifact != (const char *) NULL) {
        if (IsGeometry(artifact) == MagickFalse)
          (void) ThrowMagickException(exception,GetMagickModule(),
               OptionWarning,"InvalidSetting","'%s' '%s'",
               "convolve:bias",artifact);
        else
          bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);
      }

      /* Scale kernel according to user wishes */
      artifact = GetImageArtifact(image,"convolve:scale");
      if ( artifact != (const char *) NULL ) {
        if (IsGeometry(artifact) == MagickFalse)
          (void) ThrowMagickException(exception,GetMagickModule(),
               OptionWarning,"InvalidSetting","'%s' '%s'",
               "convolve:scale",artifact);
        else {
          /* Clone the kernel before modifying it; the caller's kernel is
             const and must be left untouched. */
          if ( curr_kernel == kernel )
            curr_kernel = CloneKernelInfo(kernel);
          if (curr_kernel == (KernelInfo *) NULL)
            return((Image *) NULL);
          ScaleGeometryKernelInfo(curr_kernel, artifact);
        }
      }
    }

  /* display the (normalized) kernel via stderr */
  artifact=GetImageArtifact(image,"morphology:showKernel");
  if (IsStringTrue(artifact) != MagickFalse)
    ShowKernelInfo(curr_kernel);

  /* Override the default handling of multi-kernel morphology results
   * If 'Undefined' use the default method
   * If 'None' (default for 'Convolve') re-iterate previous result
   * Otherwise merge resulting images using compose method given.
   * Default for 'HitAndMiss' is 'Lighten'.
   */
  {
    ssize_t
      parse;

    artifact = GetImageArtifact(image,"morphology:compose");
    if ( artifact != (const char *) NULL) {
      parse=ParseCommandOption(MagickComposeOptions,
        MagickFalse,artifact);
      if ( parse < 0 )
        (void) ThrowMagickException(exception,GetMagickModule(),
             OptionWarning,"UnrecognizedComposeOperator","'%s' '%s'",
             "morphology:compose",artifact);
      else
        compose=(CompositeOperator)parse;
    }
  }
  /* Apply the Morphology */
  morphology_image = MorphologyApply(image,method,iterations,
    curr_kernel,compose,bias,exception);

  /* Cleanup and Exit */
  if ( curr_kernel != kernel )
    curr_kernel=DestroyKernelInfo(curr_kernel);
  return(morphology_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     R o t a t e K e r n e l I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotateKernelInfo() rotates the kernel by the angle given.
%
%  Currently it is restricted to 90 degree angles, of either 1D kernels
%  or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
%  It will ignore useless rotations for specific 'named' built-in kernels.
%
%  The format of the RotateKernelInfo method is:
%
%      void RotateKernelInfo(KernelInfo *kernel, double angle)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o angle: angle to rotate in degrees
%
%  This function is currently internal to this module only, but can be exported
%  to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* angle the lower kernels first */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Modulus the angle */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allows a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }
  /* Attempt rotations by 45 degrees  -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle */
          double t  = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
                 if ( x == y  ) x = 0;
            else if ( x == 0  ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0  ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }
  if ( 45.0 < fmod(angle, 180.0)  && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);  /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);   /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees */
          { register ssize_t
              i,j,x,y;

            register MagickRealType
              *k,t;

            k=kernel->values;
            /* four-way cyclic swap of the ring of elements at each depth */
            for( i=0, x=(ssize_t) kernel->width-1;  i<=x;   i++, x--)
              for( j=0, y=(ssize_t) kernel->height-1;  j<y;   j++, y--)
                { t                    = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);  /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also known as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origin
       */
      MagickRealType
        t;

      register MagickRealType
        *k;

      ssize_t
        i,
        j;

      k=kernel->values;
      j=(ssize_t) (kernel->width*kernel->height-1);
      for (i=0; i < j; i++, j--)
        t=k[i],  k[i]=k[j],  k[j]=t;

      kernel->x = (ssize_t) kernel->width  - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);  /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }
  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, possibly with a linear kernel restriction.
   */

  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S c a l e G e o m e t r y K e r n e l I n f o                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleGeometryKernelInfo() takes a geometry argument string, typically
%  provided as a  "-set option:convolve:scale {geometry}" user setting,
%  and modifies the kernel according to the parsed arguments of that setting.
%
%  The first argument (and any normalization flags) are passed to
%  ScaleKernelInfo() to scale/normalize the kernel.  The second argument
%  is then passed to UnityAddKernelInfo() to add a scaled unity kernel
%  into the scaled/normalized kernel.
%
%  The format of the ScaleGeometryKernelInfo method is:
%
%      void ScaleGeometryKernelInfo(KernelInfo *kernel,
%        const double scaling_factor,const MagickStatusType normalize_flags)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%    o geometry:
%         The geometry string to parse, typically from the user provided
%         "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  GeometryInfo
    args;

  MagickStatusType
    flags;

  /* Parse the user supplied geometry string into numeric arguments. */
  SetGeometryInfo(&args);
  flags = ParseGeometry(geometry, &args);

#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  /* Percentage flag: both arguments were given as percentages. */
  if ( (flags & PercentValue) != 0 )
    {
      args.rho *= 0.01;
      args.sigma *= 0.01;
    }

  /* Supply defaults for any missing arguments. */
  if ( (flags & RhoValue) == 0 )
    args.rho = 1.0;
  if ( (flags & SigmaValue) == 0 )
    args.sigma = 0.0;

  /* First argument: scale/normalize the kernel as a whole. */
  ScaleKernelInfo(kernel, args.rho, (GeometryFlags) flags);

  /* Second argument (when present): blend in a scaled unity kernel,
     mixing the original image into the convolution result. */
  if ( (flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, args.sigma);

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S c a l e K e r n e l I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleKernelInfo() scales the given kernel list by the given amount, with or
%  without normalization of the sum of the kernel values (as per given flags).
%
%  By default (no flags given) the values within the kernel are scaled
%  directly using the given scaling factor without change.
%
%  If either of the two 'normalize_flags' are given the kernel will first be
%  normalized and then further scaled by the scaling factor value given.
%
%  Kernel normalization ('normalize_flags' given) is designed to ensure that
%  any use of the kernel scaling factor with 'Convolve' or 'Correlate'
%  morphology methods will fall into -1.0 to +1.0 range.  Note that for
%  non-HDRI versions of IM this may cause images to have any negative results
%  clipped, unless some 'bias' is used.
%
%  More specifically.
Kernels which only contain positive values (such as a
%  'Gaussian' kernel) will be scaled so that those values sum to +1.0,
%  ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
%  For Kernels that contain some negative values, (such as 'Sharpen' kernels)
%  the kernel will be scaled by the absolute of the sum of kernel values, so
%  that it will generally fall within the +/- 1.0 range.
%
%  For kernels whose values sum to zero, (such as 'Laplacian' kernels) the
%  kernel will be scaled by just the sum of the positive values, so that its
%  output range will again fall into the +/- 1.0 range.
%
%  For special kernels designed for locating shapes using 'Correlate', (often
%  only containing +1 and -1 values, representing foreground/background
%  matching) a special normalization method is provided to scale the positive
%  values separately to those of the negative values, so the kernel will be
%  forced to become a zero-sum kernel better suited to such searches.
%
%  WARNING: Correct normalization of the kernel assumes that the '*_range'
%  attributes within the kernel structure have been correctly set during the
%  kernels creation.
%
%  NOTE: The values used for 'normalize_flags' have been selected specifically
%  to match the use of geometry options, so that '!' means NormalizeValue, '^'
%  means CorrelateNormalizeValue.  All other GeometryFlags values are ignored.
%
%  The format of the ScaleKernelInfo method is:
%
%      void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
%               const GeometryFlags normalize_flags )
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o scaling_factor:
%             multiply all values (after normalization) by this factor if not
%             zero.  If zero the kernel is normalized but otherwise unscaled.
%
%    o normalize_flags:
%             GeometryFlags defining the normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue, % and/or PercentValue % */ MagickExport void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,const GeometryFlags normalize_flags) { register double pos_scale, neg_scale; register ssize_t i; /* do the other kernels in a multi-kernel list first */ if ( kernel->next != (KernelInfo *) NULL) ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags); /* Normalization of Kernel */ pos_scale = 1.0; if ( (normalize_flags&NormalizeValue) != 0 ) { if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon ) /* non-zero-summing kernel (generally positive) */ pos_scale = fabs(kernel->positive_range + kernel->negative_range); else /* zero-summing kernel */ pos_scale = kernel->positive_range; } /* Force kernel into a normalized zero-summing kernel */ if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) { pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon ) ? kernel->positive_range : 1.0; neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon ) ? -kernel->negative_range : 1.0; } else neg_scale = pos_scale; /* finialize scaling_factor for positive and negative components */ pos_scale = scaling_factor/pos_scale; neg_scale = scaling_factor/neg_scale; for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++) if (!IsNaN(kernel->values[i])) kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale; /* convolution output range */ kernel->positive_range *= pos_scale; kernel->negative_range *= neg_scale; /* maximum and minimum values in kernel */ kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale; kernel->minimum *= (kernel->minimum >= 0.0) ? 
pos_scale : neg_scale; /* swap kernel settings if user's scaling factor is negative */ if ( scaling_factor < MagickEpsilon ) { double t; t = kernel->positive_range; kernel->positive_range = kernel->negative_range; kernel->negative_range = t; t = kernel->maximum; kernel->maximum = kernel->minimum; kernel->minimum = 1; } return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h o w K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShowKernelInfo() outputs the details of the given kernel defination to % standard error, generally due to a users 'morphology:showKernel' option % request. % % The format of the ShowKernel method is: % % void ShowKernelInfo(const KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % */ MagickPrivate void ShowKernelInfo(const KernelInfo *kernel) { const KernelInfo *k; size_t c, i, u, v; for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) { (void) FormatLocaleFile(stderr, "Kernel"); if ( kernel->next != (KernelInfo *) NULL ) (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c ); (void) FormatLocaleFile(stderr, " \"%s", CommandOptionToMnemonic(MagickKernelOptions, k->type) ); if ( fabs(k->angle) >= MagickEpsilon ) (void) FormatLocaleFile(stderr, "@%lg", k->angle); (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long) k->width,(unsigned long) k->height,(long) k->x,(long) k->y); (void) FormatLocaleFile(stderr, " with values from %.*lg to %.*lg\n", GetMagickPrecision(), k->minimum, GetMagickPrecision(), k->maximum); (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg", GetMagickPrecision(), k->negative_range, GetMagickPrecision(), k->positive_range); if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Zero-Summing)\n"); else if ( 
fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Normalized)\n"); else (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n", GetMagickPrecision(), k->positive_range+k->negative_range); for (i=v=0; v < k->height; v++) { (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v ); for (u=0; u < k->width; u++, i++) if (IsNaN(k->values[i])) (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan"); else (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3, GetMagickPrecision(), (double) k->values[i]); (void) FormatLocaleFile(stderr,"\n"); } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n i t y A d d K e r n a l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel % to the given pre-scaled and normalized Kernel. This in effect adds that % amount of the original image into the resulting convolution kernel. This % value is usually provided by the user as a percentage value in the % 'convolve:scale' setting. % % The resulting effect is to convert the defined kernels into blended % soft-blurs, unsharp kernels or into sharpening kernels. % % The format of the UnityAdditionKernelInfo method is: % % void UnityAdditionKernelInfo(KernelInfo *kernel, const double scale ) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o scale: % scaling factor for the unity kernel to be added to % the given kernel. 
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  /* First recurse down any multi-kernel list. */
  if ( kernel->next != (KernelInfo *) NULL)
    UnityAddKernelInfo(kernel->next, scale);

  /* Blend the scaled unity (identity) kernel into this kernel: the unity
     kernel is a single weight of 1.0 at the kernel origin, so only the
     origin value changes. */
  kernel->values[kernel->x+kernel->y*kernel->width] += scale;
  CalcKernelMetaData(kernel);  /* recalculate the meta-data */

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     Z e r o K e r n e l N a n s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroKernelNans() replaces any special 'nan' value that may be present in
%  the kernel with a zero value.  This is typically done when the kernel will
%  be used in special hardware (GPU) convolution processors, to simplify
%  matters.
%
%  The format of the ZeroKernelNans method is:
%
%      void ZeroKernelNans (KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
  register size_t
    n;

  /* Process the rest of the multi-kernel list first. */
  if (kernel->next != (KernelInfo *) NULL)
    ZeroKernelNans(kernel->next);

  /* Overwrite every NaN ("don't care") entry with 0.0. */
  for (n=0; n < (kernel->width*kernel->height); n++)
    if (IsNaN(kernel->values[n]))
      kernel->values[n]=0.0;

  return;
}
GB_binop__bxnor_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxnor_int16) // A.*B function (eWiseMult): GB (_AemultB_08__bxnor_int16) // A.*B function (eWiseMult): GB (_AemultB_02__bxnor_int16) // A.*B function (eWiseMult): GB (_AemultB_04__bxnor_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_int16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bxnor_int16) // C+=b function (dense accum): GB (_Cdense_accumb__bxnor_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_int16) // C=scalar+B GB (_bind1st__bxnor_int16) // C=scalar+B' GB (_bind1st_tran__bxnor_int16) // C=A+scalar GB (_bind2nd__bxnor_int16) // C=A'+scalar GB (_bind2nd_tran__bxnor_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 
0 // BinaryOp: cij = ~((aij) ^ (bij)) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ~((x) ^ (y)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXNOR || GxB_NO_INT16 || GxB_NO_BXNOR_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bxnor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bxnor_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bxnor_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t 
*restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bxnor_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bxnor_int16) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bxnor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bxnor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bxnor_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bxnor_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = ~((x) ^ (bij)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bxnor_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = ~((aij) ^ (y)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ~((x) ^ (aij)) ; \ } GrB_Info GB (_bind1st_tran__bxnor_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ~((aij) ^ (y)) ; \ } GrB_Info GB (_bind2nd_tran__bxnor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
dotProduct_critical.c
/* OpenMP example program which computes the dot product of two arrays a
   and b (that is sum(a[i]*b[i]) ) using explicit synchronization with
   a critical region.
   Compile with gcc -O3 -fopenmp omp_critical.c -o omp_critical
*/
// Online source: http://users.abo.fi/mats/PP2012/examples/OpenMP/omp_critical.c
// permission obtained

#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#ifdef _CIVL
#define N 10
#else
#define N 100
#endif

int main (int argc, char *argv[]) {

  double a[N], b[N];
  double localsum, sum = 0.0;
  int i, tid, nthreads;

  /* BUG FIX: 'tid' was previously shared, so every thread wrote to the same
   * variable inside the parallel region — a data race.  It must be private
   * (like 'i' and 'localsum').  'nthreads' is written only by the master
   * thread, so it may stay shared. */
#pragma omp parallel shared(a,b,sum) private(i, tid, localsum)
  {
    /* Get thread number */
    tid = omp_get_thread_num();

    /* Only master thread does this */
#pragma omp master
    {
      nthreads = omp_get_num_threads();
      printf("Number of threads = %d\n", nthreads);
    }

    /* Initialization: a[i] = b[i] = i, split across the threads. */
#pragma omp for
    for (i=0; i < N; i++)
      a[i] = b[i] = (double)i;

    localsum = 0.0;

    /* Compute the local sums of all products */
#pragma omp for
    for (i=0; i < N; i++)
      localsum = localsum + (a[i] * b[i]);

    /* Combine the per-thread partial sums one thread at a time. */
#pragma omp critical
    sum = sum+localsum;

  }  /* End of parallel region */

  printf(" Sum = %2.1f\n",sum);
  return 0;
}
reduction_minus_1.c
// PASS: *
// RUN: ${CATO_ROOT}/src/scripts/cexecute_pass.py %s -o %t
// RUN: diff <(mpirun -np 4 %t) %s.reference_output

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Exercises an OpenMP '-' reduction.  Each thread subtracts its own thread
 * number from its private copy of 'result'; per the OpenMP spec the '-'
 * reduction combines partial results by addition, so the final value is
 * -(0 + 1 + ... + nthreads-1). */
int main() {
  int result = 0;

#pragma omp parallel reduction(-:result)
  {
    result -= omp_get_thread_num();
  }

  printf("Result: %d\n", result);
  /* FIX: return explicitly rather than relying on C99's implicit
   * "return 0" when control falls off the end of main. */
  return 0;
}
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 16; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
BatchNormalization.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/BatchNormalization.c"
#else

/*
 * Forward pass of batch normalization, applied independently per feature
 * plane (dimension 1 of `input`).
 *
 * train == true:  mean and variance are computed from `input`; the
 *                 per-feature mean and 1/std are stored into
 *                 save_mean/save_std, and running_mean/running_var are
 *                 updated as an exponential moving average with weight
 *                 `momentum` (running_var tracks the unbiased sum/(n-1)
 *                 estimate).
 * train == false: the stored running_mean/running_var are used instead.
 *
 * output = ((input - mean) * invstd) * weight + bias, where weight/bias
 * act as 1/0 when the corresponding tensor pointer is NULL.
 * `eps` is added to the variance before the square root for stability.
 */
void THNN_(BatchNormalization_updateOutput)(
  THNNState *state, THTensor *input, THTensor *output,
  THTensor *weight, THTensor *bias,
  THTensor *running_mean, THTensor *running_var,
  THTensor *save_mean, THTensor *save_std,
  bool train, double momentum, double eps)
{
  THTensor_(resizeAs)(output, input);
  int64_t nInput = THTensor_(size)(input, 1);
  int64_t f;
  /* n = elements per feature plane (batch size * spatial size) */
  ptrdiff_t n = THTensor_(nElement)(input) / nInput;

  /* Feature planes are independent, so parallelize over them. */
  #pragma omp parallel for
  for (f = 0; f < nInput; ++f) {
    THTensor *in = THTensor_(newSelect)(input, 1, f);
    THTensor *out = THTensor_(newSelect)(output, 1, f);

    real mean, invstd;

    if (train) {
      // compute mean per input
      accreal sum = 0;
      TH_TENSOR_APPLY(real, in, sum += *in_data;);

      mean = (real) sum / n;
      THTensor_(set1d)(save_mean, f, (real) mean);

      // compute variance per input
      sum = 0;
      TH_TENSOR_APPLY(real, in,
        sum += (*in_data - mean) * (*in_data - mean););

      /* Guard the degenerate all-equal-and-eps==0 case to avoid 1/sqrt(0). */
      if (sum == 0 && eps == 0.0) {
        invstd = 0;
      } else {
        invstd = (real) (1 / sqrt(sum/n + eps));
      }
      THTensor_(set1d)(save_std, f, (real) invstd);

      // update running averages
      THTensor_(set1d)(running_mean, f,
        (real) (momentum * mean + (1 - momentum) * THTensor_(get1d)(running_mean, f)));

      /* Running variance tracks the unbiased (n-1) estimate, while the
       * normalization itself uses the biased sum/n variance. */
      accreal unbiased_var = sum / (n - 1);
      THTensor_(set1d)(running_var, f,
        (real) (momentum * unbiased_var + (1 - momentum) * THTensor_(get1d)(running_var, f)));
    } else {
      mean = THTensor_(get1d)(running_mean, f);
      invstd = 1 / sqrt(THTensor_(get1d)(running_var, f) + eps);
    }

    // compute output: normalize, then scale by weight and shift by bias
    real w = weight ? THTensor_(get1d)(weight, f) : 1;
    real b = bias ? THTensor_(get1d)(bias, f) : 0;

    TH_TENSOR_APPLY2(real, in, real, out,
      *out_data = (real) (((*in_data - mean) * invstd) * w + b););

    THTensor_(free)(out);
    THTensor_(free)(in);
  }
}

/*
 * Backward pass of batch normalization over feature planes (dimension 1).
 *
 * Computes gradInput when non-NULL, and accumulates `scale`-weighted
 * gradients into gradWeight/gradBias (each only when non-NULL; note these
 * are accumulated onto the existing values, not overwritten).
 *
 * train == true uses the batch statistics captured in save_mean/save_std
 * by the forward pass; eval mode recomputes invstd from running_var + eps.
 */
void THNN_(BatchNormalization_backward)(
  THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput,
  THTensor *gradWeight, THTensor *gradBias, THTensor *weight,
  THTensor *running_mean, THTensor *running_var,
  THTensor *save_mean, THTensor *save_std,
  bool train, double scale, double eps)
{
  THNN_CHECK_SHAPE(input, gradOutput);
  int64_t nInput = THTensor_(size)(input, 1);
  int64_t f;
  /* n = elements per feature plane (batch size * spatial size) */
  ptrdiff_t n = THTensor_(nElement)(input) / nInput;

  #pragma omp parallel for
  for (f = 0; f < nInput; ++f) {
    THTensor *in = THTensor_(newSelect)(input, 1, f);
    THTensor *gradOut = THTensor_(newSelect)(gradOutput, 1, f);
    real w = weight ? THTensor_(get1d)(weight, f) : 1;
    real mean, invstd;
    if (train) {
      mean = THTensor_(get1d)(save_mean, f);
      invstd = THTensor_(get1d)(save_std, f);
    } else {
      mean = THTensor_(get1d)(running_mean, f);
      invstd = 1 / sqrt(THTensor_(get1d)(running_var, f) + eps);
    }

    // sum over all gradOutput in feature plane
    accreal sum = 0;
    TH_TENSOR_APPLY(real, gradOut, sum += *gradOut_data;);

    // dot product of Q(X) (the centered input) and gradOutput
    accreal dotp = 0;
    TH_TENSOR_APPLY2(real, in, real, gradOut,
      dotp += (*in_data - mean) * (*gradOut_data););

    if (gradInput) {
      THTensor_(resizeAs)(gradInput, input);
      THTensor *gradIn = THTensor_(newSelect)(gradInput, 1, f);

      if (train) {
        // when in training mode
        // Q(X) = X - E[x] ; i.e. input centered to zero mean
        // Y = Q(X) / sigma ; i.e. BN output before weight and bias
        // dL/dX = (Q(dL/dY) - dot(Y, dL/dY) * Y) / sigma * w

        // projection of gradOutput on to output scaled by std
        real k = (real) dotp * invstd * invstd / n;
        TH_TENSOR_APPLY2(real, gradIn, real, in,
          *gradIn_data = (*in_data - mean) * k;);

        accreal gradMean = sum / n;
        /* gradIn currently holds the projection term; fold in the
         * centered gradOutput and the 1/sigma * w scaling. */
        TH_TENSOR_APPLY2(real, gradIn, real, gradOut,
          *gradIn_data = (*gradOut_data - gradMean - *gradIn_data) * invstd * w;);

      } else {
        // when in evaluation mode
        // Q(X) = X - running_mean ; i.e. input centered to zero mean
        // Y = Q(X) / running_std ; i.e. BN output before weight and bias
        // dL/dX = w / running_std
        TH_TENSOR_APPLY2(real, gradIn, real, gradOut,
          *gradIn_data = *gradOut_data * invstd * w;);
      }

      THTensor_(free)(gradIn);
    }

    if (gradWeight) {
      real val = THTensor_(get1d)(gradWeight, f);
      THTensor_(set1d)(gradWeight, f, val + scale * dotp * invstd);
    }

    if (gradBias) {
      real val = THTensor_(get1d)(gradBias, f);
      THTensor_(set1d)(gradBias, f, val + scale * sum);
    }

    THTensor_(free)(gradOut);
    THTensor_(free)(in);
  }
}

#endif
dds.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/profile.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/resource_.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/module.h" #include "magick/transform.h" /* Definitions */ #define DDSD_CAPS 0x00000001 #define DDSD_HEIGHT 0x00000002 #define DDSD_WIDTH 0x00000004 #define DDSD_PITCH 0x00000008 #define DDSD_PIXELFORMAT 0x00001000 #define DDSD_MIPMAPCOUNT 0x00020000 #define DDSD_LINEARSIZE 0x00080000 #define DDSD_DEPTH 0x00800000 #define DDPF_ALPHAPIXELS 0x00000001 #define DDPF_FOURCC 0x00000004 #define DDPF_RGB 0x00000040 #define DDPF_LUMINANCE 0x00020000 #define FOURCC_DXT1 0x31545844 #define FOURCC_DXT3 0x33545844 #define FOURCC_DXT5 0x35545844 #define DDSCAPS_COMPLEX 0x00000008 #define DDSCAPS_TEXTURE 0x00001000 #define DDSCAPS_MIPMAP 0x00400000 #define DDSCAPS2_CUBEMAP 0x00000200 #define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400 #define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800 #define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000 #define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000 #define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000 #define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000 #define DDSCAPS2_VOLUME 0x00200000 #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif /* Structure declarations. 
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColourLookup { DDSSourceBlock sources[2]; } DDSSingleColourLookup; typedef MagickBooleanType DDSDecoder(Image *, DDSInfo *, ExceptionInfo *); static const DDSSingleColourLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 } } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 
4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 29, 0 } } }, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { { { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } 
}, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } } }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { 
{ { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 }, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 }, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } }, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { 
{ 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 }, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 }, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } }, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 
31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColourLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, { 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 
0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } }, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { { { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } 
}, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } }, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } 
}, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, { { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } }, { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } 
}, { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } }, { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } }, { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } }, { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } }, { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } }, { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } }, { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } }, { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } }, { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } }, { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } }, { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } }, { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } }, { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } }, { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } }, { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } }, { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } }, { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } }, { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } }, { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } } }; static const DDSSingleColourLookup* DDS_LOOKUP[] = { DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4 }; /* Macros */ #define C565_r(x) (((x) & 0xF800) >> 11) #define C565_g(x) (((x) & 0x07E0) >> 5) #define C565_b(x) ((x) & 0x001F) #define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2)) #define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4)) #define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2)) #define DIV2(x) ((x) > 1 ? 
((x) >> 1) : 1)

/* NOTE(review): FixRange expands to three unbraced if-statements -- it is
 * only safe where the call site is a full statement, never as the body of
 * an un-braced if/else.  All three macros below follow the original
 * ImageMagick coder's conventions. */
#define FixRange(min, max, steps) \
if (min > max) \
  min = max; \
if ((ssize_t) max - min < steps) \
  max = MagickMin(min + steps, 255); \
if ((ssize_t) max - min < steps) \
  min = MagickMax(0, (ssize_t) max - steps)

/* NOTE(review): expansion has no outer parentheses; callers must not embed
 * Dot(...) in a higher-precedence context. */
#define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z)

#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
  = value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value

#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
  g && mask.b_bitmask == b && mask.alpha_bitmask == a)

/*
  Forward declarations
*/
static MagickBooleanType
  ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3,
    DDSVector4 *,DDSVector4 *,unsigned char *,size_t),
  ReadDDSInfo(Image *,DDSInfo *),
  ReadDXT1(Image *,DDSInfo *,ExceptionInfo *),
  ReadDXT3(Image *,DDSInfo *,ExceptionInfo *),
  ReadDXT5(Image *,DDSInfo *,ExceptionInfo *),
  ReadUncompressedRGB(Image *,DDSInfo *,ExceptionInfo *),
  ReadUncompressedRGBA(Image *,DDSInfo *,ExceptionInfo *),
  SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
  SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
  WriteDDSImage(const ImageInfo *,Image *),
  WriteMipmaps(Image *,const size_t,const size_t,const size_t,
    const MagickBooleanType,const MagickBooleanType,ExceptionInfo *);

static void
  RemapIndices(const ssize_t *,const unsigned char *,unsigned char *),
  WriteDDSInfo(Image *,const size_t,const size_t,const size_t),
  WriteFourCC(Image *,const size_t,const MagickBooleanType,
    const MagickBooleanType,ExceptionInfo *),
  WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType,
    const MagickBooleanType,ExceptionInfo *),
  WriteIndices(Image *,const DDSVector3,const DDSVector3,
    unsigned char *),
  WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
  WriteUncompressed(Image *,ExceptionInfo *);

/*
  Small component-wise helpers over DDSVector3/DDSVector4.  Each writes its
  result through `destination` (or mutates `value` in place) and has no
  other side effects.
*/

/* destination = left + right (4 components). */
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
  DDSVector4 *destination)
{
  destination->x = left.x + right.x;
  destination->y = left.y + right.y;
  destination->z = left.z + right.z;
  destination->w = left.w + right.w;
}

/* Clamp every component into [0, 1]. */
static inline void VectorClamp(DDSVector4 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
  value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
}

/* Clamp every component into [0, 1] (3-component variant). */
static inline void VectorClamp3(DDSVector3 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
}

/* Copy x/y/z of a 4-vector into a 3-vector (w is dropped). */
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
}

/* Copy all four components. */
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
  destination->w = source.w;
}

/* destination = c - a * b (component-wise fused negative multiply-add). */
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = c.x - (a.x * b.x);
  destination->y = c.y - (a.y * b.y);
  destination->z = c.z - (a.z * b.z);
  destination->w = c.w - (a.w * b.w);
}

/* destination = left * right (component-wise, 4 components). */
static inline void VectorMultiply(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
  destination->w = left.w * right.w;
}

/* destination = left * right (component-wise, 3 components). */
static inline void VectorMultiply3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
}

/* destination = a * b + c (component-wise multiply-add, 4 components). */
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
  destination->w = (a.w * b.w) + c.w;
}

/* destination = a * b + c (component-wise multiply-add, 3 components). */
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
}

/* destination = 1 / value, component-wise (no zero guard: callers must
 * ensure non-zero components). */
static inline void VectorReciprocal(const DDSVector4 value,
  DDSVector4 *destination)
{
  destination->x = 1.0f / value.x;
  destination->y = 1.0f / value.y;
  destination->z = 1.0f / value.z;
  destination->w = 1.0f / value.w;
}

/* destination = left - right (4 components). */
static inline void VectorSubtract(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
  destination->w = left.w - right.w;
}

/* destination = left - right (3 components). */
static inline void VectorSubtract3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
}

/* Truncate each component toward zero (floor for positive, ceil for
 * negative/zero values). */
static inline void VectorTruncate(DDSVector4 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
  value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w);
}

/* Truncate each component toward zero (3-component variant). */
static inline void VectorTruncate3(DDSVector3 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ?
floor(value->z) : ceil(value->z); }

/*
  CalculateColors(): expand the two 16-bit 5:6:5 endpoint colors (c0, c1) of
  a DXT block into the full 4-entry palette `c`.

  When ignoreAlpha is set (DXT3/DXT5, which carry alpha separately) or
  c0 > c1, entries 2 and 3 are the 1/3 and 2/3 interpolants of the two
  endpoints.  Otherwise (DXT1 punch-through mode) entry 2 is the midpoint
  and entry 3 is black with a[3] = 255 -- ReadDXT1 feeds a[] into
  SetPixelOpacity and flips image->matte on when it sees a nonzero value,
  so a == 0 entries are opaque and a[3] marks the transparent entry.
*/
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;

  /* Endpoint 0 and 1, widened from 5:6:5 to 8-bit channels. */
  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);

  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);

  if (ignoreAlpha != MagickFalse || c0 > c1)
    {
      /* Four-color mode: 2/3-1/3 and 1/3-2/3 blends of the endpoints. */
      c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
      c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
      c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);

      c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
      c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
      c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
    }
  else
    {
      /* Three-color mode: midpoint plus a transparent black entry. */
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);

      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
    }
}

/*
  CompressAlpha(): quantize 16 alpha samples against the DXT5 alpha codebook
  built from endpoints min/max with `steps` interpolated levels (5 or 7).
  codes[6] = 0 and codes[7] = 255 provide the two fixed levels of the 5-step
  mode; in 7-step mode the interpolation loop (i up to steps-1, writing
  codes[i+1]) overwrites them.  alphas[i] == -1 marks a pixel outside the
  image; it gets index 0 and contributes no error.  Returns the accumulated
  squared quantization error.  Note dist is computed in size_t: the
  wraparound of value - codes[j] squares to the correct (a-b)^2 mod 2^N.
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  unsigned char
    codes[8];

  register ssize_t
    i;

  size_t
    error,
    index,
    j,
    least,
    value;

  codes[0] = (unsigned char) min;
  codes[1] = (unsigned char) max;
  codes[6] = 0;
  codes[7] = 255;

  for (i=1; i <  (ssize_t) steps; i++)
    codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);

  error = 0;
  for (i=0; i<16; i++)
  {
    if (alphas[i] == -1)
      {
        indices[i] = 0;
        continue;
      }

    value = alphas[i];
    least = SIZE_MAX;
    index = 0;
    for (j=0; j<8; j++)
    {
      size_t
        dist;

      dist = value - (size_t)codes[j];
      dist *= dist;

      if (dist < least)
        {
          least = dist;
          index = j;
        }
    }

    indices[i] = (unsigned char)index;
    error += least;
  }

  return error;
}

static void CompressClusterFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
  unsigned char *indices)
{
DDSVector3 axis; DDSVector4 grid, gridrcp, half, onethird_onethird2, pointsWeights[16], two, twonineths, twothirds_twothirds2, xSumwSum; float bestError = 1e+37f; size_t bestIteration = 0, besti = 0, bestj = 0, bestk = 0, iterationIndex; ssize_t i; unsigned char *o, order[128], unordered[16]; VectorInit(half,0.5f); VectorInit(two,2.0f); VectorInit(onethird_onethird2,1.0f/3.0f); onethird_onethird2.w = 1.0f/9.0f; VectorInit(twothirds_twothirds2,2.0f/3.0f); twothirds_twothirds2.w = 4.0f/9.0f; VectorInit(twonineths,2.0f/9.0f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; grid.w = 0.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; gridrcp.w = 0.0f; xSumwSum.x = 0.0f; xSumwSum.y = 0.0f; xSumwSum.z = 0.0f; xSumwSum.w = 0.0f; ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0); for (iterationIndex = 0;;) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,1) \ num_threads(GetMagickResourceLimit(ThreadResource)) #endif for (i=0; i < (ssize_t) count; i++) { DDSVector4 part0, part1, part2; size_t ii, j, k, kmin; VectorInit(part0,0.0f); for(ii=0; ii < (size_t) i; ii++) VectorAdd(pointsWeights[ii],part0,&part0); VectorInit(part1,0.0f); for (j=(size_t) i;;) { if (j == 0) { VectorCopy44(pointsWeights[0],&part2); kmin = 1; } else { VectorInit(part2,0.0f); kmin = j; } for (k=kmin;;) { DDSVector4 a, alpha2_sum, alphax_sum, alphabeta_sum, b, beta2_sum, betax_sum, e1, e2, factor, part3; float error; VectorSubtract(xSumwSum,part2,&part3); VectorSubtract(part3,part1,&part3); VectorSubtract(part3,part0,&part3); VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum); VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum); VectorInit(alpha2_sum,alphax_sum.w); VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum); VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum); VectorInit(beta2_sum,betax_sum.w); VectorAdd(part1,part2,&alphabeta_sum); 
VectorInit(alphabeta_sum,alphabeta_sum.w); VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum); VectorMultiply(alpha2_sum,beta2_sum,&factor); VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor, &factor); VectorReciprocal(factor,&factor); VectorMultiply(alphax_sum,beta2_sum,&a); VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a); VectorMultiply(a,factor,&a); VectorMultiply(betax_sum,alpha2_sum,&b); VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b); VectorMultiply(b,factor,&b); VectorClamp(&a); VectorMultiplyAdd(grid,a,half,&a); VectorTruncate(&a); VectorMultiply(a,gridrcp,&a); VectorClamp(&b); VectorMultiplyAdd(grid,b,half,&b); VectorTruncate(&b); VectorMultiply(b,gridrcp,&b); VectorMultiply(b,b,&e1); VectorMultiply(e1,beta2_sum,&e1); VectorMultiply(a,a,&e2); VectorMultiplyAdd(e2,alpha2_sum,e1,&e1); VectorMultiply(a,b,&e2); VectorMultiply(e2,alphabeta_sum,&e2); VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2); VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2); VectorMultiplyAdd(two,e2,e1,&e2); VectorMultiply(e2,metric,&e2); error = e2.x + e2.y + e2.z; if (error < bestError) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (DDS_CompressClusterFit) #endif { if (error < bestError) { VectorCopy43(a,start); VectorCopy43(b,end); bestError = error; besti = i; bestj = j; bestk = k; bestIteration = iterationIndex; } } } if (k == count) break; VectorAdd(pointsWeights[k],part2,&part2); k++; } if (j == count) break; VectorAdd(pointsWeights[j],part1,&part1); j++; } } if (bestIteration != iterationIndex) break; iterationIndex++; if (iterationIndex == 8) break; VectorSubtract3(*end,*start,&axis); if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order, iterationIndex) == MagickFalse) break; } o = order + (16*bestIteration); for (i=0; i < (ssize_t) besti; i++) unordered[o[i]] = 0; for (i=besti; i < (ssize_t) bestj; i++) unordered[o[i]] = 2; for (i=bestj; i < (ssize_t) bestk; i++) unordered[o[i]] = 3; for 
(i=bestk; i < (ssize_t) count; i++) unordered[o[i]] = 1; RemapIndices(map,unordered,indices); } static void CompressRangeFit(const size_t count, const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end, unsigned char *indices) { float d, bestDist, max, min, val; DDSVector3 codes[4], grid, gridrcp, half, dist; register ssize_t i; size_t bestj, j; unsigned char closest[16]; VectorInit3(half,0.5f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; if (count > 0) { VectorCopy43(points[0],start); VectorCopy43(points[0],end); min = max = Dot(points[0],principle); for (i=1; i < (ssize_t) count; i++) { val = Dot(points[i],principle); if (val < min) { VectorCopy43(points[i],start); min = val; } else if (val > max) { VectorCopy43(points[i],end); max = val; } } } VectorClamp3(start); VectorMultiplyAdd3(grid,*start,half,start); VectorTruncate3(start); VectorMultiply3(*start,gridrcp,start); VectorClamp3(end); VectorMultiplyAdd3(grid,*end,half,end); VectorTruncate3(end); VectorMultiply3(*end,gridrcp,end); codes[0] = *start; codes[1] = *end; codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f)); codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f)); codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f)); codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f)); codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f)); codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f)); for (i=0; i < (ssize_t) count; i++) { bestDist = 1e+37f; bestj = 0; for (j=0; j < 4; j++) { dist.x = (points[i].x - codes[j].x) * metric.x; dist.y = (points[i].y - codes[j].y) * metric.y; dist.z = (points[i].z - codes[j].z) * metric.z; d = Dot(dist,dist); if (d < bestDist) { bestDist = d; bestj = j; } } closest[i] = (unsigned char) bestj; } RemapIndices(map, closest, indices); } static void ComputeEndPoints(const 
DDSSingleColourLookup *lookup[], const unsigned char *color, DDSVector3 *start, DDSVector3 *end, unsigned char *index) { register ssize_t i; size_t c, maxError = SIZE_MAX; for (i=0; i < 2; i++) { const DDSSourceBlock* sources[3]; size_t error = 0; for (c=0; c < 3; c++) { sources[c] = &lookup[c][color[c]].sources[i]; error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error); } if (error > maxError) continue; start->x = (float) sources[0]->start / 31.0f; start->y = (float) sources[1]->start / 63.0f; start->z = (float) sources[2]->start / 31.0f; end->x = (float) sources[0]->end / 31.0f; end->y = (float) sources[1]->end / 63.0f; end->z = (float) sources[2]->end / 31.0f; *index = (unsigned char) (2*i); maxError = error; } } static void ComputePrincipleComponent(const float *covariance, DDSVector3 *principle) { DDSVector4 row0, row1, row2, v; register ssize_t i; row0.x = covariance[0]; row0.y = covariance[1]; row0.z = covariance[2]; row0.w = 0.0f; row1.x = covariance[1]; row1.y = covariance[3]; row1.z = covariance[4]; row1.w = 0.0f; row2.x = covariance[2]; row2.y = covariance[4]; row2.z = covariance[5]; row2.w = 0.0f; VectorInit(v,1.0f); for (i=0; i < 8; i++) { DDSVector4 w; float a; w.x = row0.x * v.x; w.y = row0.y * v.x; w.z = row0.z * v.x; w.w = row0.w * v.x; w.x = (row1.x * v.y) + w.x; w.y = (row1.y * v.y) + w.y; w.z = (row1.z * v.y) + w.z; w.w = (row1.w * v.y) + w.w; w.x = (row2.x * v.z) + w.x; w.y = (row2.y * v.z) + w.y; w.z = (row2.z * v.z) + w.z; w.w = (row2.w * v.z) + w.w; a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z))); v.x = w.x * a; v.y = w.y * a; v.z = w.z * a; v.w = w.w * a; } VectorCopy43(v,principle); } static void ComputeWeightedCovariance(const size_t count, const DDSVector4 *points, float *covariance) { DDSVector3 centroid; float total; size_t i; total = 0.0f; VectorInit3(centroid,0.0f); for (i=0; i < count; i++) { total += points[i].w; centroid.x += (points[i].x * points[i].w); centroid.y += (points[i].y * points[i].w); 
centroid.z += (points[i].z * points[i].w); } if( total > 1.192092896e-07F) { centroid.x /= total; centroid.y /= total; centroid.z /= total; } for (i=0; i < 6; i++) covariance[i] = 0.0f; for (i = 0; i < count; i++) { DDSVector3 a, b; a.x = points[i].x - centroid.x; a.y = points[i].y - centroid.y; a.z = points[i].z - centroid.z; b.x = points[i].w * a.x; b.y = points[i].w * a.y; b.z = points[i].w * a.z; covariance[0] += a.x*b.x; covariance[1] += a.x*b.y; covariance[2] += a.x*b.z; covariance[3] += a.y*b.y; covariance[4] += a.y*b.z; covariance[5] += a.z*b.z; } } static MagickBooleanType ConstructOrdering(const size_t count, const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights, DDSVector4 *xSumwSum, unsigned char *order, size_t iteration) { float dps[16], f; register ssize_t i; size_t j; unsigned char c, *o, *p; o = order + (16*iteration); for (i=0; i < (ssize_t) count; i++) { dps[i] = Dot(points[i],axis); o[i] = (unsigned char)i; } for (i=0; i < (ssize_t) count; i++) { for (j=i; j > 0 && dps[j] < dps[j - 1]; j--) { f = dps[j]; dps[j] = dps[j - 1]; dps[j - 1] = f; c = o[j]; o[j] = o[j - 1]; o[j - 1] = c; } } for (i=0; i < (ssize_t) iteration; i++) { MagickBooleanType same; p = order + (16*i); same = MagickTrue; for (j=0; j < count; j++) { if (o[j] != p[j]) { same = MagickFalse; break; } } if (same != MagickFalse) return MagickFalse; } xSumwSum->x = 0; xSumwSum->y = 0; xSumwSum->z = 0; xSumwSum->w = 0; for (i=0; i < (ssize_t) count; i++) { DDSVector4 v; j = (size_t) o[i]; v.x = points[j].w * points[j].x; v.y = points[j].w * points[j].y; v.z = points[j].w * points[j].z; v.w = points[j].w * 1.0f; VectorCopy44(v,&pointsWeights[i]); VectorAdd(*xSumwSum,v,xSumwSum); } return MagickTrue; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s D D S % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsDDS() returns MagickTrue if the image format type, identified by 
the % magick string, is DDS. % % The format of the IsDDS method is: % % MagickBooleanType IsDDS(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((char *) magick,"DDS ", 4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadDDSImage() reads a DirectDraw Surface image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadDDSImage method is: % % Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: The image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType status, cubemap = MagickFalse, volume = MagickFalse, matte; CompressionType compression; DDSInfo dds_info; DDSDecoder *decoder; size_t n, num_images; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Initialize image structure. */ if (ReadDDSInfo(image, &dds_info) != MagickTrue) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP) cubemap = MagickTrue; if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0) volume = MagickTrue; (void) SeekBlob(image, 128, SEEK_SET); /* Determine pixel format */ if (dds_info.pixelformat.flags & DDPF_RGB) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { matte = MagickTrue; decoder = ReadUncompressedRGBA; } else { matte = MagickTrue; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_LUMINANCE) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { /* Not sure how to handle this */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } else { matte = MagickFalse; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_FOURCC) { switch (dds_info.pixelformat.fourcc) { case FOURCC_DXT1: { matte = MagickFalse; compression = DXT1Compression; decoder = ReadDXT1; break; } case FOURCC_DXT3: { matte = MagickTrue; compression = DXT3Compression; decoder = ReadDXT3; break; } case FOURCC_DXT5: { matte = MagickTrue; compression = DXT5Compression; decoder = ReadDXT5; break; } default: { /* Unknown FOURCC */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } } } else { /* Neither compressed nor uncompressed... 
thus unsupported */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } num_images = 1; if (cubemap) { /* Determine number of faces defined in the cubemap */ num_images = 0; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++; } if (volume) num_images = dds_info.depth; if ((num_images == 0) || (num_images > GetBlobSize(image))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse) ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit"); for (n = 0; n < num_images; n++) { if (n != 0) { if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); /* Start a new image */ AcquireNextImage(image_info,image); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } image->matte = matte; image->compression = compression; image->columns = dds_info.width; image->rows = dds_info.height; image->storage_class = DirectClass; image->endian = LSBEndian; image->depth = 8; if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } (void) SetImageBackgroundColor(image); if ((decoder)(image, &dds_info, exception) != MagickTrue) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } } (void) CloseBlob(image); return(GetFirstImageInList(image)); } static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info) { size_t 
    hdr_size,
    required;

  /*
    Seek to start of header (just past the 4-byte "DDS " magic).
  */
  (void) SeekBlob(image, 4, SEEK_SET);
  /*
    Check header field: the DDS_HEADER dwSize field must be 124.
  */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;
  /*
    Fill in DDS info struct
  */
  dds_info->flags = ReadBlobLSBLong(image);
  /*
    Check required flags: width, height and pixel format must be declared
    valid by the writer of this file.
  */
  required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;
  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */
  /*
    Read pixel format structure (DDS_PIXELFORMAT); its dwSize must be 32.
  */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;
  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);
  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */
  return MagickTrue;
}

/*
  ReadDXT1(): decode DXT1 (BC1) compressed pixel data into `image`.
  Each 8-byte block holds two 5:6:5 endpoint colors followed by 32 bits of
  2-bit palette indices covering a 4x4 pixel tile; tiles are processed in
  row-major order, clipped at the right/bottom edges.
*/
static MagickBooleanType ReadDXT1(Image *image,DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  DDSColors
    colors;

  PixelPacket
    *q;

  register ssize_t
    i,
    x;

  size_t
    bits;

  ssize_t
    j,
    y;

  unsigned char
    code;

  unsigned short
    c0,
    c1;

  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /*
        Get 4x4 patch of pixels to write on
      */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (PixelPacket *) NULL)
        return MagickFalse;
      /*
        Read 8 bytes of data from the image
      */
      c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickFalse); if (EOFBlob(image) != MagickFalse) break; /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if (((x + i) < (ssize_t) image->columns) && ((y + j) < (ssize_t) image->rows)) { code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3); SetPixelRed(q,ScaleCharToQuantum(colors.r[code])); SetPixelGreen(q,ScaleCharToQuantum(colors.g[code])); SetPixelBlue(q,ScaleCharToQuantum(colors.b[code])); SetPixelOpacity(q,ScaleCharToQuantum(colors.a[code])); if ((colors.a[code] != 0) && (image->matte == MagickFalse)) image->matte=MagickTrue; /* Correct matte */ q++; } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } if (EOFBlob(image) != MagickFalse) break; } return(SkipDXTMipmaps(image,dds_info,8,exception)); } static MagickBooleanType ReadDXT3(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { DDSColors colors; ssize_t j, y; PixelPacket *q; register ssize_t i, x; unsigned char alpha; size_t a0, a1, bits, code; unsigned short c0, c1; for (y = 0; y < (ssize_t) dds_info->height; y += 4) { for (x = 0; x < (ssize_t) dds_info->width; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x), MagickMin(4, dds_info->height - y),exception); if (q == (PixelPacket *) NULL) return MagickFalse; /* Read alpha values (8 bytes) */ a0 = ReadBlobLSBLong(image); a1 = ReadBlobLSBLong(image); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); if (EOFBlob(image) != MagickFalse) break; /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(q,ScaleCharToQuantum(colors.r[code])); 
SetPixelGreen(q,ScaleCharToQuantum(colors.g[code])); SetPixelBlue(q,ScaleCharToQuantum(colors.b[code])); /* Extract alpha value: multiply 0..15 by 17 to get range 0..255 */ if (j < 2) alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf); else alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf); SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) alpha)); q++; } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } if (EOFBlob(image) != MagickFalse) break; } return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadDXT5(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { DDSColors colors; ssize_t j, y; MagickSizeType alpha_bits; PixelPacket *q; register ssize_t i, x; unsigned char a0, a1; size_t alpha, bits, code, alpha_code; unsigned short c0, c1; for (y = 0; y < (ssize_t) dds_info->height; y += 4) { for (x = 0; x < (ssize_t) dds_info->width; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x), MagickMin(4, dds_info->height - y),exception); if (q == (PixelPacket *) NULL) return MagickFalse; /* Read alpha values (8 bytes) */ a0 = (unsigned char) ReadBlobByte(image); a1 = (unsigned char) ReadBlobByte(image); alpha_bits = (MagickSizeType)ReadBlobLSBLong(image); alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); if (EOFBlob(image) != MagickFalse) break; /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(q,ScaleCharToQuantum(colors.r[code])); SetPixelGreen(q,ScaleCharToQuantum(colors.g[code])); SetPixelBlue(q,ScaleCharToQuantum(colors.b[code])); /* Extract alpha value */ 
alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7; if (alpha_code == 0) alpha = a0; else if (alpha_code == 1) alpha = a1; else if (a0 > a1) alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7; else if (alpha_code == 6) alpha = 0; else if (alpha_code == 7) alpha = 255; else alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5); SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) alpha)); q++; } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } if (EOFBlob(image) != MagickFalse) break; } return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadUncompressedRGB(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { PixelPacket *q; ssize_t x, y; unsigned short color; if (dds_info->pixelformat.rgb_bitcount == 8) (void) SetImageType(image,GrayscaleType); else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask( dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000)) ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); for (y = 0; y < (ssize_t) dds_info->height; y++) { q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception); if (q == (PixelPacket *) NULL) return MagickFalse; for (x = 0; x < (ssize_t) dds_info->width; x++) { if (dds_info->pixelformat.rgb_bitcount == 8) SetPixelGray(q,ScaleCharToQuantum(ReadBlobByte(image))); else if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); SetPixelRed(q,ScaleCharToQuantum((unsigned char) (((color >> 11)/31.0)*255))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 5) >> 10)/63.0)*255))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255))); } else { SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); if (dds_info->pixelformat.rgb_bitcount == 
32) (void) ReadBlobByte(image); } SetPixelAlpha(q,QuantumRange); q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } return(SkipRGBMipmaps(image,dds_info,3,exception)); } static MagickBooleanType ReadUncompressedRGBA(Image *image, DDSInfo *dds_info, ExceptionInfo *exception) { PixelPacket *q; ssize_t alphaBits, x, y; unsigned short color; alphaBits=0; if (dds_info->pixelformat.rgb_bitcount == 16) { if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000)) alphaBits=1; else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00)) { alphaBits=2; (void) SetImageType(image,GrayscaleMatteType); } else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000)) alphaBits=4; else ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported", image->filename); } for (y = 0; y < (ssize_t) dds_info->height; y++) { q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception); if (q == (PixelPacket *) NULL) return MagickFalse; for (x = 0; x < (ssize_t) dds_info->width; x++) { if (dds_info->pixelformat.rgb_bitcount == 16) { color=ReadBlobShort(image); if (alphaBits == 1) { SetPixelAlpha(q,(color & (1 << 15)) ? 
QuantumRange : 0); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 1) >> 11)/31.0)*255))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 6) >> 11)/31.0)*255))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 11) >> 11)/31.0)*255))); } else if (alphaBits == 2) { SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) (color >> 8))); SetPixelGray(q,ScaleCharToQuantum((unsigned char)color)); } else { SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) (((color >> 12)/15.0)*255))); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 4) >> 12)/15.0)*255))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 8) >> 12)/15.0)*255))); SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ((((unsigned short)(color << 12) >> 12)/15.0)*255))); } } else { SetPixelBlue(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelGreen(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelRed(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); SetPixelAlpha(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image))); } q++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) return MagickFalse; } return(SkipRGBMipmaps(image,dds_info,4,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterDDSImage() adds attributes for the DDS image format to % the list of supported formats. The attributes include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
% % The format of the RegisterDDSImage method is: % % RegisterDDSImage(void) % */ ModuleExport size_t RegisterDDSImage(void) { MagickInfo *entry; entry = SetMagickInfo("DDS"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->seekable_stream=MagickTrue; entry->description = ConstantString("Microsoft DirectDraw Surface"); entry->magick_module = ConstantString("DDS"); (void) RegisterMagickInfo(entry); entry = SetMagickInfo("DXT1"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->seekable_stream=MagickTrue; entry->description = ConstantString("Microsoft DirectDraw Surface"); entry->magick_module = ConstantString("DDS"); (void) RegisterMagickInfo(entry); entry = SetMagickInfo("DXT5"); entry->decoder = (DecodeImageHandler *) ReadDDSImage; entry->encoder = (EncodeImageHandler *) WriteDDSImage; entry->magick = (IsImageFormatHandler *) IsDDS; entry->seekable_stream=MagickTrue; entry->description = ConstantString("Microsoft DirectDraw Surface"); entry->magick_module = ConstantString("DDS"); (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } static void RemapIndices(const ssize_t *map, const unsigned char *source, unsigned char *target) { register ssize_t i; for (i = 0; i < 16; i++) { if (map[i] == -1) target[i] = 3; else target[i] = source[map[i]]; } } /* Skip the mipmap images for compressed (DXTn) dds files */ static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info, int texel_size,ExceptionInfo *exception) { register ssize_t i; MagickOffsetType offset; size_t h, w; /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && 
(dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { w = DIV2(dds_info->width); h = DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset = (MagickOffsetType) ((w + 3) / 4) * ((h + 3) / 4) * texel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; if ((w == 1) && (h == 1)) break; w = DIV2(w); h = DIV2(h); } } return(MagickTrue); } /* Skip the mipmap images for uncompressed (RGB or RGBA) dds files */ static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info, int pixel_size,ExceptionInfo *exception) { MagickOffsetType offset; register ssize_t i; size_t h, w; /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { w = DIV2(dds_info->width); h = DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset = (MagickOffsetType) w * h * pixel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w = DIV2(w); h = DIV2(h); if ((w == 1) && (h == 1)) break; } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterDDSImage() removes format registrations made by the % DDS module from the list of supported formats. 
% % The format of the UnregisterDDSImage method is: % % UnregisterDDSImage(void) % */ ModuleExport void UnregisterDDSImage(void) { (void) UnregisterMagickInfo("DDS"); (void) UnregisterMagickInfo("DXT1"); (void) UnregisterMagickInfo("DXT5"); } static void WriteAlphas(Image *image, const ssize_t* alphas, size_t min5, size_t max5, size_t min7, size_t max7) { register ssize_t i; size_t err5, err7, j; unsigned char indices5[16], indices7[16]; FixRange(min5,max5,5); err5 = CompressAlpha(min5,max5,5,alphas,indices5); FixRange(min7,max7,7); err7 = CompressAlpha(min7,max7,7,alphas,indices7); if (err7 < err5) { for (i=0; i < 16; i++) { unsigned char index; index = indices7[i]; if( index == 0 ) indices5[i] = 1; else if (index == 1) indices5[i] = 0; else indices5[i] = 9 - index; } min5 = max7; max5 = min7; } (void) WriteBlobByte(image,(unsigned char) min5); (void) WriteBlobByte(image,(unsigned char) max5); for(i=0; i < 2; i++) { size_t value = 0; for (j=0; j < 8; j++) { size_t index = (size_t) indices5[j + i*8]; value |= ( index << 3*j ); } for (j=0; j < 3; j++) { size_t byte = (value >> 8*j) & 0xff; (void) WriteBlobByte(image,(unsigned char) byte); } } } static void WriteCompressed(Image *image, const size_t count, DDSVector4* points, const ssize_t* map, const MagickBooleanType clusterFit) { float covariance[16]; DDSVector3 end, principle, start; DDSVector4 metric; unsigned char indices[16]; VectorInit(metric,1.0f); VectorInit3(start,0.0f); VectorInit3(end,0.0f); ComputeWeightedCovariance(count,points,covariance); ComputePrincipleComponent(covariance,&principle); if (clusterFit == MagickFalse || count == 0) CompressRangeFit(count,points,map,principle,metric,&start,&end,indices); else CompressClusterFit(count,points,map,principle,metric,&start,&end,indices); WriteIndices(image,start,end,indices); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e D D S I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format. % % The format of the WriteBMPImage method is: % % MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % */ static MagickBooleanType WriteDDSImage(const ImageInfo *image_info, Image *image) { const char *option; size_t compression, columns, maxMipmaps, mipmaps, pixelFormat, rows; MagickBooleanType clusterFit, status, weightByAlpha; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); (void) TransformImageColorspace(image,sRGBColorspace); pixelFormat=DDPF_FOURCC; compression=FOURCC_DXT5; if (!image->matte) compression=FOURCC_DXT1; if (LocaleCompare(image_info->magick,"dxt1") == 0) compression=FOURCC_DXT1; option=GetImageOption(image_info,"dds:compression"); if (option != (char *) NULL) { if (LocaleCompare(option,"dxt1") == 0) compression=FOURCC_DXT1; if (LocaleCompare(option,"none") == 0) pixelFormat=DDPF_RGB; } clusterFit=MagickFalse; weightByAlpha=MagickFalse; if (pixelFormat == DDPF_FOURCC) { option=GetImageOption(image_info,"dds:cluster-fit"); if (IsStringTrue(option) != MagickFalse) { clusterFit=MagickTrue; if (compression != FOURCC_DXT1) { option=GetImageOption(image_info,"dds:weight-by-alpha"); if (IsStringTrue(option) != MagickFalse) weightByAlpha=MagickTrue; } } } maxMipmaps=SIZE_MAX; mipmaps=0; if ((image->columns & (image->columns - 1)) == 0 && (image->rows & (image->rows - 1)) == 0) { 
option=GetImageOption(image_info,"dds:mipmaps"); if (option != (char *) NULL) maxMipmaps=StringToUnsignedLong(option); if (maxMipmaps != 0) { columns=image->columns; rows=image->rows; while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps) { columns=DIV2(columns); rows=DIV2(rows); mipmaps++; } } } WriteDDSInfo(image,pixelFormat,compression,mipmaps); WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha, &image->exception); if (mipmaps > 0 && WriteMipmaps(image,pixelFormat,compression,mipmaps, clusterFit,weightByAlpha,&image->exception) == MagickFalse) return(MagickFalse); (void) CloseBlob(image); return(MagickTrue); } static void WriteDDSInfo(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps) { char software[MaxTextExtent]; register ssize_t i; unsigned int format, caps, flags; flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); caps=(unsigned int) DDSCAPS_TEXTURE; format=(unsigned int) pixelFormat; if (format == DDPF_FOURCC) flags=flags | DDSD_LINEARSIZE; else flags=flags | DDSD_PITCH; if (mipmaps > 0) { flags=flags | (unsigned int) DDSD_MIPMAPCOUNT; caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX); } if (format != DDPF_FOURCC && image->matte) format=format | DDPF_ALPHAPIXELS; (void) WriteBlob(image,4,(unsigned char *) "DDS "); (void) WriteBlobLSBLong(image,124); (void) WriteBlobLSBLong(image,flags); (void) WriteBlobLSBLong(image,(unsigned int) image->rows); (void) WriteBlobLSBLong(image,(unsigned int) image->columns); if (pixelFormat == DDPF_FOURCC) { /* Compressed DDS requires linear compressed size of first image */ if (compression == FOURCC_DXT1) (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8)); else /* DXT5 */ (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16)); } else { /* Uncompressed DDS requires byte pitch of first image */ if 
(image->matte != MagickFalse) (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4)); else (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3)); } (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1); (void) memset(software,0,sizeof(software)); (void) CopyMagickString(software,"IMAGEMAGICK",MaxTextExtent); (void) WriteBlob(image,44,(unsigned char *) software); (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,format); if (pixelFormat == DDPF_FOURCC) { (void) WriteBlobLSBLong(image,(unsigned int) compression); for(i=0;i < 5;i++) /* bitcount / masks */ (void) WriteBlobLSBLong(image,0x00); } else { (void) WriteBlobLSBLong(image,0x00); if (image->matte != MagickFalse) { (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0xff000000); } else { (void) WriteBlobLSBLong(image,24); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0x00); } } (void) WriteBlobLSBLong(image,caps); for(i=0;i < 4;i++) /* ddscaps2 + reserved region */ (void) WriteBlobLSBLong(image,0x00); } static void WriteFourCC(Image *image, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { register const PixelPacket *p; register ssize_t x; ssize_t i, y, bx, by; for (y=0; y < (ssize_t) image->rows; y+=4) { for (x=0; x < (ssize_t) image->columns; x+=4) { MagickBooleanType match; DDSVector4 point, points[16]; size_t count = 0, max5 = 0, max7 = 0, min5 = 255, min7 = 255, columns = 4, rows = 4; ssize_t alphas[16], map[16]; unsigned char alpha; if (x + columns >= image->columns) columns = image->columns - x; if (y + rows >= image->rows) rows = image->rows - y; p=GetVirtualPixels(image,x,y,columns,rows,exception); if (p == (const 
PixelPacket *) NULL) break; for (i=0; i<16; i++) { map[i] = -1; alphas[i] = -1; } for (by=0; by < (ssize_t) rows; by++) { for (bx=0; bx < (ssize_t) columns; bx++) { if (compression == FOURCC_DXT5) alpha = ScaleQuantumToChar(GetPixelAlpha(p)); else alpha = 255; if (compression == FOURCC_DXT5) { if (alpha < min7) min7 = alpha; if (alpha > max7) max7 = alpha; if (alpha != 0 && alpha < min5) min5 = alpha; if (alpha != 255 && alpha > max5) max5 = alpha; } alphas[4*by + bx] = (size_t)alpha; point.x = (float)ScaleQuantumToChar(GetPixelRed(p)) / 255.0f; point.y = (float)ScaleQuantumToChar(GetPixelGreen(p)) / 255.0f; point.z = (float)ScaleQuantumToChar(GetPixelBlue(p)) / 255.0f; point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f; p++; match = MagickFalse; for (i=0; i < (ssize_t) count; i++) { if ((points[i].x == point.x) && (points[i].y == point.y) && (points[i].z == point.z) && (alpha >= 128 || compression == FOURCC_DXT5)) { points[i].w += point.w; map[4*by + bx] = i; match = MagickTrue; break; } } if (match != MagickFalse) continue; points[count].x = point.x; points[count].y = point.y; points[count].z = point.z; points[count].w = point.w; map[4*by + bx] = count; count++; } } for (i=0; i < (ssize_t) count; i++) points[i].w = sqrt(points[i].w); if (compression == FOURCC_DXT5) WriteAlphas(image,alphas,min5,max5,min7,max7); if (count == 1) WriteSingleColorFit(image,points,map); else WriteCompressed(image,count,points,map,clusterFit); } } } static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); } static inline size_t ClampToLimit(const float value, const size_t limit) { size_t result = (int) (value + 0.5f); if (result < 0.0f) return(0); if (result > limit) return(limit); return result; } 
static inline size_t ColorTo565(const DDSVector3 point) { size_t r = ClampToLimit(31.0f*point.x,31); size_t g = ClampToLimit(63.0f*point.y,63); size_t b = ClampToLimit(31.0f*point.z,31); return (r << 11) | (g << 5) | b; } static void WriteIndices(Image *image, const DDSVector3 start, const DDSVector3 end, unsigned char* indices) { register ssize_t i; size_t a, b; unsigned char remapped[16]; const unsigned char *ind; a = ColorTo565(start); b = ColorTo565(end); for (i=0; i<16; i++) { if( a < b ) remapped[i] = (indices[i] ^ 0x1) & 0x3; else if( a == b ) remapped[i] = 0; else remapped[i] = indices[i]; } if( a < b ) Swap(a,b); (void) WriteBlobByte(image,(unsigned char) (a & 0xff)); (void) WriteBlobByte(image,(unsigned char) (a >> 8)); (void) WriteBlobByte(image,(unsigned char) (b & 0xff)); (void) WriteBlobByte(image,(unsigned char) (b >> 8)); for (i=0; i<4; i++) { ind = remapped + 4*i; (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) | (ind[3] << 6)); } } static MagickBooleanType WriteMipmaps(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { Image* resize_image; register ssize_t i; size_t columns, rows; columns = image->columns; rows = image->rows; for (i=0; i< (ssize_t) mipmaps; i++) { resize_image = ResizeImage(image,DIV2(columns),DIV2(rows),TriangleFilter,1.0, exception); if (resize_image == (Image *) NULL) return(MagickFalse); DestroyBlob(resize_image); resize_image->blob=ReferenceBlob(image->blob); WriteImageData(resize_image,pixelFormat,compression,weightByAlpha, clusterFit,exception); resize_image=DestroyImage(resize_image); columns = DIV2(columns); rows = DIV2(rows); } return(MagickTrue); } static void WriteSingleColorFit(Image *image, const DDSVector4* points, const ssize_t* map) { DDSVector3 start, end; register ssize_t i; unsigned char color[3], index, indexes[16], indices[16]; color[0] = (unsigned char) 
ClampToLimit(255.0f*points->x,255); color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255); color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255); index=0; ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index); for (i=0; i< 16; i++) indexes[i]=index; RemapIndices(map,indexes,indices); WriteIndices(image,start,end,indices); } static void WriteUncompressed(Image *image, ExceptionInfo *exception) { register const PixelPacket *p; register ssize_t x; ssize_t y; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(p))); if (image->matte) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(p))); p++; } } }
declare_variant_messages.c
// RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp -x c -std=c99 -fms-extensions -Wno-pragma-pack %s // RUN: %clang_cc1 -triple=x86_64-pc-win32 -verify -fopenmp-simd -x c -std=c99 -fms-extensions -Wno-pragma-pack %s // expected-error@+1 {{expected an OpenMP directive}} #pragma omp declare int foo(void); #pragma omp declare variant // expected-error {{expected '(' after 'declare variant'}} #pragma omp declare variant( // expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}} #pragma omp declare variant(foo // expected-error {{expected ')'}} expected-error {{expected 'match' clause on 'omp declare variant' directive}} expected-note {{to match this '('}} #pragma omp declare variant(x) // expected-error {{use of undeclared identifier 'x'}} #pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}} #pragma omp declare variant(foo) // expected-error {{expected 'match' clause on 'omp declare variant' directive}} #pragma omp declare variant(foo) xxx // expected-error {{expected 'match' clause on 'omp declare variant' directive}} #pragma omp declare variant(foo) match // expected-error {{expected '(' after 'match'}} #pragma omp declare variant(foo) match( // expected-error {{expected context selector in 'match' clause on 'omp declare variant' directive}} #pragma omp declare variant(foo) match() // expected-error {{expected context selector in 'match' clause on 'omp declare variant' directive}} #pragma omp declare variant(foo) match(xxx) // expected-error {{expected '=' after 'xxx' context selector set name on 'omp declare variant' directive}} #pragma omp declare variant(foo) match(xxx=) // expected-error {{expected '{' after '='}} #pragma omp declare variant(foo) match(xxx=yyy) // expected-error {{expected '{' after '='}} #pragma omp declare variant(foo) match(xxx=yyy}) // expected-error {{expected '{' after '='}} #pragma omp declare variant(foo) match(xxx={) 
// expected-error {{expected '}'}} expected-note {{to match this '{'}} #pragma omp declare variant(foo) match(xxx={}) #pragma omp declare variant(foo) match(xxx={vvv}) #pragma omp declare variant(foo) match(xxx={vvv} xxx) // expected-error {{expected ','}} expected-error {{expected '=' after 'xxx' context selector set name on 'omp declare variant' directive}} #pragma omp declare variant(foo) match(xxx={vvv}) xxx // expected-warning {{extra tokens at the end of '#pragma omp declare variant' are ignored}} int bar(void); // expected-error@+2 {{'#pragma omp declare variant' can only be applied to functions}} #pragma omp declare variant(foo) match(xxx={}) int a; // expected-error@+2 {{'#pragma omp declare variant' can only be applied to functions}} #pragma omp declare variant(foo) match(xxx={}) #pragma omp threadprivate(a) int var; #pragma omp threadprivate(var) // expected-error@+2 {{expected an OpenMP directive}} expected-error@+1 {{function declaration is expected after 'declare variant' directive}} #pragma omp declare variant(foo) match(xxx={}) #pragma omp declare // expected-error@+3 {{function declaration is expected after 'declare variant' directive}} // expected-error@+1 {{function declaration is expected after 'declare variant' directive}} #pragma omp declare variant(foo) match(xxx={}) #pragma omp declare variant(foo) match(xxx={}) #pragma options align=packed int main(); // expected-error@+3 {{function declaration is expected after 'declare variant' directive}} // expected-error@+1 {{function declaration is expected after 'declare variant' directive}} #pragma omp declare variant(foo) match(xxx={}) #pragma omp declare variant(foo) match(xxx={}) #pragma init_seg(compiler) int main(); // expected-error@+1 {{single declaration is expected after 'declare variant' directive}} #pragma omp declare variant(foo) match(xxx={}) int b, c; int no_proto(); // expected-error@+3 {{function with '#pragma omp declare variant' must have a prototype}} // expected-note@+1 
{{'#pragma omp declare variant' for function specified here}} #pragma omp declare variant(no_proto) match(xxx={}) int no_proto_too(); int after_use_variant(void); int after_use(); int bar() { return after_use(); } // expected-error@+1 {{'#pragma omp declare variant' cannot be applied for function after first usage}} #pragma omp declare variant(after_use_variant) match(xxx={}) int after_use(void); int diff_cc_variant(void); // expected-error@+1 {{function with '#pragma omp declare variant' has a different calling convention}} #pragma omp declare variant(diff_cc_variant) match(xxx={}) __vectorcall int diff_cc(void); int diff_ret_variant(void); // expected-error@+1 {{function with '#pragma omp declare variant' has a different return type}} #pragma omp declare variant(diff_ret_variant) match(xxx={}) void diff_ret(void); void marked(void); void not_marked(void); // expected-note@+1 {{marked as 'declare variant' here}} #pragma omp declare variant(not_marked) match(xxx={}) void marked_variant(void); // expected-warning@+1 {{variant function in '#pragma omp declare variant' is itself marked as '#pragma omp declare variant'}} #pragma omp declare variant(marked_variant) match(xxx={}) void marked(void); // expected-error@+1 {{function declaration is expected after 'declare variant' directive}} #pragma omp declare variant // expected-error@+1 {{function declaration is expected after 'declare variant' directive}} #pragma omp declare variant
_Atomic-1.c
/* PR c/65467 */
/* { dg-do compile } */
/* { dg-additional-options "-std=c11" } */

/* Compile-only regression test: _Atomic-qualified variables must be
   accepted in the OpenMP directives and clauses exercised below.  */

_Atomic int t;
#pragma omp threadprivate (t)

/* _Atomic variables in parallel-sections data-sharing clauses and in
   copyin/copyprivate.  */
void
foo (void)
{
  _Atomic int a = 4, b = 0, c, d = 3, e;
  a++;
  #pragma omp parallel sections num_threads (a) shared (b) private (c) firstprivate (d) lastprivate (e)
  {
    #pragma omp section
    {
      a++;
      b++;
      c = 5;
      c++;
      d++;
      e = 9;
      e++;
    }
    #pragma omp section
    {
      a++;
      b++;
      c = 5;
      c++;
      d++;
      e = 3;
      e++;
    }
  }
  e++;
  t++;
  #pragma omp parallel copyin (t) private (e)
  {
    t++;
    e = t;
    #pragma omp single copyprivate (e)
    {
      e++;
    }
    e++;
  }
}

/* _Atomic variables in array-section bounds, clause expressions
   (num_teams, thread_limit, device, priority, num_tasks, grainsize,
   schedule, linear step) and in the various atomic constructs.  */
void
bar (void)
{
  int a[4];
  _Atomic int b = 1, c = 2, f = 8, g = 8, h = 0;
  _Atomic int d, e[3];
  int *_Atomic p;
  _Atomic int *_Atomic q;
  int i, j;
  p = a;
  q = e;
  #pragma omp target teams map (tofrom: a[b:c]) num_teams (b) thread_limit (c)
  a[1]++;
  #pragma omp target device(h)
  ;
  #pragma omp task depend (inout: a[b:c])
  ;
  #pragma omp task depend (out: d, e[b:c]) priority (b)
  ;
  #pragma omp task depend (out: p[b:c])
  ;
  #pragma omp task depend (out: q[b:c])
  ;
  #pragma omp taskloop num_tasks (c)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp taskloop grainsize (c)
  for (i = 0; i < 16; i++)
    ;
  #pragma omp parallel for schedule (dynamic, b)
  for (i = 0; i < 16; i++)
    ;
  j = 0;
  #pragma omp simd linear(j:b)
  for (i = 0; i < 16; i++)
    j += b;
  j = 4;
  #pragma omp atomic read
  b = j;
  #pragma omp atomic write
  j = c;
  #pragma omp atomic
  j += c;
  #pragma omp atomic capture
  b = j += c;
  #pragma omp atomic capture
  b = ++j;
  #pragma omp atomic capture
  {
    b = j;
    j = c;
  }
  #pragma omp atomic capture
  {
    b = j;
    j++;
  }
  #pragma omp atomic capture
  {
    j *= c;
    b = j;
  }
}
MatrixMN.h
///////////////////////////////////////////////////////////////////////////// // Authored by Jeong-Mo Hong for CSE4060 course at Dongguk University CSE // // jeongmo.hong@gmail.com // // Do whatever you want license. // ///////////////////////////////////////////////////////////////////////////// #pragma once #include "VectorND.h" #include <assert.h> #include <fstream> template<class T> class MatrixMN { public: int num_rows_; // m_ int num_cols_; // n_ T *values_; MatrixMN() : values_(nullptr), num_rows_(0), num_cols_(0) {} void initialize(const int& _m, const int& _n, const bool init = true) { const int num_all_old = num_rows_ * num_cols_; num_rows_ = _m; num_cols_ = _n; SAFE_DELETE_ARRAY(values_); const int num_all = num_rows_ * num_cols_; if (num_all_old != num_all) // allocate memory if num_all is changed { // check if the matrix is too large assert((double)num_rows_ * (double)num_cols_ <= (double)INT_MAX); values_ = new T[num_all]; if (init == true) for (int i = 0; i < num_all; i++) values_[i] = (T)0; } } void assignRandom(const T& scale, const T& min) { const int num_all = num_rows_ * num_cols_; for (int i = 0; i < num_all; i++) values_[i] = (T)rand() / (T)RAND_MAX * scale + min; } void assignAll(const T& v) { const int num_all = num_rows_ * num_cols_; for (int i = 0; i < num_all; i++) values_[i] = v; } void multiply(const VectorND<T>& vector, VectorND<T>& result) const; void multiplyTransposed(const VectorND<T>& vector, VectorND<T>& result) const; int get1DIndex(const int& row, const int& column) const { assert(row >= 0); assert(column >= 0); assert(row < num_rows_); assert(row < num_cols_); // column = i, row = j return column + row * num_cols_; // data structure is for faster dot product of a row vector and VectorND input. 
} T& getValue(const int& row, const int& column) const { return values_[get1DIndex(row, column)]; } //T multiplyRowAndVectorWithBias(const int& row, const VectorND<T>& vector, const T& bias) // (v0, v1, ..., vn-1, bias) //{ // assert(num_cols_ == vector.num_dimension_ + 1); // +1 is for bias // // T dot = (T)0; // for (int col = 0; col < num_cols_ - 1; col++) // num_cols_ - 1 : don't operate on bias now. // { // dot += getValue(row, col) * vector[col]; // } // dot += getValue(row, num_cols_ - 1) * bias; // last column value is the weight of bias // return dot; //} //void multiplyVectorWithBias(const VectorND<T>& vector, const T& bias, VectorND<T>& result) //{ // assert(num_cols_ == (vector.num_dimension_ + 1)); // assert(num_rows_ == result.num_dimension_); // for (int row = 0; row < num_rows_; row++) // { // result[row] = multiplyRowAndVectorWithBias(row, vector, bias); // } //} //void multiplyTransWithBias(const VectorND<T>& vector, VectorND<T>& result) //{ // assert(num_rows_ <= vector.num_dimension_); // don't multiply last bias component // assert(num_cols_ == result.num_dimension_); // for (int col = 0; col < num_cols_; col++) // { // result[col] = (T)0; // for (int row = 0; row < num_rows_; row++) // { // result[col] += getValue(row, col) * vector[row]; // } // } //} void getTransposed(MatrixMN<T>& m_tr) { m_tr.initialize(num_cols_, num_rows_); #pragma omp parallel for for (int row = 0; row < num_rows_; row++) { int ix_from = row * num_cols_; int ix_to = row; for (int col = 0; col < num_cols_; col++, ix_from++, ix_to += num_rows_) { m_tr.values_[ix_to] = values_[ix_from]; } } // test codes /*MatrixMN<double> mat; mat.initialize(2, 3); mat.getValue(0, 0) = 1; mat.getValue(0, 1) = 2; mat.getValue(0, 2) = 3; mat.getValue(1, 0) = 4; mat.getValue(1, 1) = 5; mat.getValue(1, 2) = 6; mat.cout(); MatrixMN<double> mat2; mat.getTransposed(mat2); mat2.cout();*/ } void setDiagonal() { const int num = MIN2(num_cols_, num_rows_); for (int i = 0; i < num_cols_ * num_rows_; 
i++) values_[i] = 0.0; for (int i = 0; i < num; i++) getValue(i, i) = 1.0; } void cout() { for (int row = 0; row < num_rows_; row++) { for (int col = 0; col < num_cols_; col++) { std::cout << getValue(row, col) << " "; } std::cout << std::endl; } } void normalizeAllRows(const T& row_sum_min) { for (int row = 0; row < num_rows_; row++) normalizeRow(row, row_sum_min); } void normalizeRow(const int& row, const T& row_sum_min) { T row_sum = (T)0; for (int col = 0; col < num_cols_-1; col++) // TODO normalize bias option { row_sum += getValue(row, col); } if (row_sum > row_sum_min) { for (int col = 0; col < num_cols_-1; col++)// TODO normalize bias option { getValue(row, col) /= row_sum; } } } void writeTXT(std::ofstream& of) const { of << num_rows_ << " " << num_cols_ << std::endl; for (int i = 0; i < num_rows_ * num_cols_; i++) { if (i != 0 && i % num_cols_ == 0) of << std::endl; of << values_[i]; if (i != num_rows_ * num_cols_ - 1) of << " "; } of << std::endl; } void check() const { for (int i = 0; i < num_rows_ * num_cols_; i++) { if (std::isnan(values_[i])) { std::cout << "Nan " << values_[i] << std::endl; assert(false); exit(1); } if (std::isinf(values_[i])) { std::cout << "inf " << values_[i] << std::endl; assert(false); exit(1); } } } };
openmp_worksharing.c
#include <stdio.h>
#include <omp.h>

#define ITERATIONS 100

/*
 * Minimal OpenMP worksharing demo: the iterations of a single loop are
 * divided among a team of four threads, and each iteration reports which
 * thread executed it.  The interleaving of the output lines depends on
 * the scheduler, so it differs from run to run.
 */
int main(int argc, char const *argv[])
{
    /* Ask for a team of four threads in subsequent parallel regions. */
    omp_set_num_threads(4);

    /* Distribute the loop's iteration space across the team. */
    #pragma omp parallel for
    for (int iter = 0; iter < ITERATIONS; ++iter)
        printf("Iteration # %d from thread # %d\n", iter,
               omp_get_thread_num());

    return 0;
}
lu.pluto_orio.seq.large.c
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) double L[N][N]; double U[N][N]; double A[N][N+13]; void print_array() { int i, j; for (i=0; i<N; i++) { for (j=0; j<N; j++) { fprintf(stderr, "%lf ", round(A[i][j])); if (j%80 == 79) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } void init_arrays() { int i, j, k; /* have to initialize this matrix properly to prevent * division by zero */ for (i=0; i<N; i++) { for (j=0; j<N; j++) { L[i][j] = 0.0; U[i][j] = 0.0; } } for (i=0; i<N; i++) { for (j=0; j<=i; j++) { L[i][j] = i+j+1; U[j][i] = i+j+1; } } for (i=0; i<N; i++) { for (j=0; j<N; j++) { for (k=0; k<N; k++) { A[i][j] += L[i][k]*U[k][j]; } } } } double rtclock() { struct timezone tzp; struct timeval tp; int stat; gettimeofday (&tp, &tzp); return (tp.tv_sec + tp.tv_usec*1.0e-6); } int main() { init_arrays(); double annot_t_start=0, annot_t_end=0, annot_t_total=0; int annot_i; for (annot_i=0; annot_i<REPS; annot_i++) { annot_t_start = rtclock(); /*@ begin PerfTuning ( def build { arg build_command = 'icc -O3 -openmp -I/usr/local/icc/include -lm'; } def performance_counter { arg repetitions = 1; } def performance_params { # param T1_1[] = [1,16,32,64,128]; # param T1_2[] = [1,16,32,64,128]; # param T1_3[] = [1,16,32,64,128]; # param T2_1[] = [1,4,8,16,32]; # param T2_2[] = [1,4,8,16,32]; # param T2_3[] = [1,4,8,16,32]; param T1_1[] = [16]; param T1_2[] = [128]; param T1_3[] = [16]; param T2_1[] = [16]; param T2_2[] = [2]; param T2_3[] = [16]; constraint c1 = (T1_1*T2_1<=1024 and T1_1*T2_1<=1024 and T1_1*T2_1<=1024); constraint c2 = ((T1_1 == T1_3) and (T2_1 == T2_3)); param U1[] = [1]; param U2[] = [1]; param U3[] = [8]; constraint c3 = (U1*U2*U3<=512); param PERM[] = [ #[0,1,2], #[0,2,1], #[1,0,2], #[1,2,0], [2,0,1], #[2,1,0], 
]; param PAR[] = [True]; param SCREP[] = [False]; param IVEC[] = [True]; } def search { arg algorithm = 'Exhaustive'; # arg algorithm = 'Simplex'; # arg time_limit = 5; # arg total_runs = 1; } def input_params { param N[] = [1024]; } def input_vars { arg decl_file = 'decl_code.h'; arg init_file = 'init_code.c'; } ) @*/ /**-- (Generated by Orio) Best performance cost: 0.365882 Tuned for specific problem sizes: N = 1024 Best performance parameters: IVEC = True PAR = True PERM = [2, 0, 1] SCREP = False T1_1 = 16 T1_2 = 128 T1_3 = 16 T2_1 = 16 T2_2 = 2 T2_3 = 16 U1 = 1 U2 = 1 U3 = 8 --**/ register int i,j,k; register int c1t, c2t, c3t, c4t, c5t, c6t, c7t, c8t, c9t, c10t, c11t, c12t; register int newlb_c1, newlb_c2, newlb_c3, newlb_c4, newlb_c5, newlb_c6, newlb_c7, newlb_c8, newlb_c9, newlb_c10, newlb_c11, newlb_c12; register int newub_c1, newub_c2, newub_c3, newub_c4, newub_c5, newub_c6, newub_c7, newub_c8, newub_c9, newub_c10, newub_c11, newub_c12; /*@ begin PolySyn( parallel = PAR; tiles = [T1_1,T1_2,T1_3,T2_1,T2_2,T2_3]; permut = PERM; unroll_factors = [U1,U2,U3]; scalar_replace = SCREP; vectorize = IVEC; profiling_code = 'lu_profiling.c'; compile_cmd = 'gcc'; compile_opts = '-lm'; ) @*/ #include <math.h> #include <assert.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) int c1, c2, c3, c4, c5, c6, c7, c8, c9; register int lb, ub, lb1, ub1, lb2, ub2; /* Generated from PLuTo-produced CLooG file by CLooG v0.14.1 64 bits in 2.02s. 
*/ for (c1=-1;c1<=floord(2*N-3,256);c1++) { lb1=max(max(ceild(128*c1-127,256),ceild(256*c1-N+2,256)),0); ub1=min(floord(256*c1+255,256),floord(N-1,256)); #pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6,c7,c8,c9) for (c2=lb1; c2<=ub1; c2++) { for (c3=max(ceild(128*c1-128*c2-32385,32640),ceild(128*c1-128*c2-127,128));c3<=floord(N-1,256);c3++) { for (c4=max(max(16*c1-16*c2,0),16*c1-16*c2-3840*c3-3810);c4<=min(min(min(min(floord(1920*c3+1905,8),16*c1-16*c2+15),floord(N-2,16)),floord(128*c2+127,8)),floord(128*c3+127,8));c4++) { for (c5=max(max(ceild(8*c4-63,64),0),2*c2);c5<=min(2*c2+1,floord(N-1,128));c5++) { for (c6=max(max(max(max(ceild(8*c4-105,120),ceild(-16*c1+16*c2+16*c3+c4-225,241)),ceild(16*c1-16*c2-16*c3-c4-225,239)),ceild(8*c4-7,8)),16*c3);c6<=min(16*c3+15,floord(N-1,16));c6++) { if ((c1 == c2+c3) && (c4 == c6)) { for (c7=max(0,16*c6);c7<=min(min(16*c6+14,128*c5+126),N-2);c7++) { for (c8=max(128*c5,c7+1);c8<=min(N-1,128*c5+127);c8++) { A[c7][c8]=A[c7][c8]/A[c7][c7] ; for (c9=c7+1;c9<=min(16*c6+15,N-1);c9++) { A[c9][c8]=A[c9][c8]-A[c9][c7]*A[c7][c8] ; } } } } /*@ begin Loop( transform Composite( permut = [['c9', 'c7', 'c8']], regtile = (['c7', 'c8', 'c9'],[1, 1, 8]), scalarreplace = (False, 'double'), vector = (True, ['ivdep','vector always'])) for (c7=max(0,16*c4);c7<=min(min(16*c6-1,16*c4+15),128*c5+126);c7++) { for (c8=max(128*c5,c7+1);c8<=min(N-1,128*c5+127);c8++) { for (c9=16*c6;c9<=min(16*c6+15,N-1);c9++) { A[c9][c8]=A[c9][c8]-A[c9][c7]*A[c7][c8] ; } } } ) @*/{ for (c9t=16*c6; c9t<=min(16*c6+15,N-1)-7; c9t=c9t+8) { for (c7=max(0,16*c4); c7<=min(min(16*c6-1,16*c4+15),128*c5+126); c7++ ) { register int cbv_1, cbv_2; cbv_1=max(128*c5,c7+1); cbv_2=min(N-1,128*c5+127); #pragma ivdep #pragma vector always for (c8=cbv_1; c8<=cbv_2; c8++ ) { A[c9t][c8]=A[c9t][c8]-A[c9t][c7]*A[c7][c8]; A[(c9t+1)][c8]=A[(c9t+1)][c8]-A[(c9t+1)][c7]*A[c7][c8]; A[(c9t+2)][c8]=A[(c9t+2)][c8]-A[(c9t+2)][c7]*A[c7][c8]; 
A[(c9t+3)][c8]=A[(c9t+3)][c8]-A[(c9t+3)][c7]*A[c7][c8]; A[(c9t+4)][c8]=A[(c9t+4)][c8]-A[(c9t+4)][c7]*A[c7][c8]; A[(c9t+5)][c8]=A[(c9t+5)][c8]-A[(c9t+5)][c7]*A[c7][c8]; A[(c9t+6)][c8]=A[(c9t+6)][c8]-A[(c9t+6)][c7]*A[c7][c8]; A[(c9t+7)][c8]=A[(c9t+7)][c8]-A[(c9t+7)][c7]*A[c7][c8]; } } } for (c9=c9t; c9<=min(16*c6+15,N-1); c9=c9+1) { for (c7=max(0,16*c4); c7<=min(min(16*c6-1,16*c4+15),128*c5+126); c7++ ) { register int cbv_3, cbv_4; cbv_3=max(128*c5,c7+1); cbv_4=min(N-1,128*c5+127); #pragma ivdep #pragma vector always for (c8=cbv_3; c8<=cbv_4; c8++ ) { A[c9][c8]=A[c9][c8]-A[c9][c7]*A[c7][c8]; } } } } /*@ end @*/ if ((c1 == c2+c3) && (-c4 == -c6) && (c4 <= min(floord(N-17,16),floord(128*c5+111,16)))) { for (c8=max(128*c5,16*c4+16);c8<=min(N-1,128*c5+127);c8++) { A[16*c4+15][c8]=A[16*c4+15][c8]/A[16*c4+15][16*c4+15] ; } } } } } } } } /* End of CLooG code */ /*@ end @*/ /*@ end @*/ annot_t_end = rtclock(); annot_t_total += annot_t_end - annot_t_start; } annot_t_total = annot_t_total / REPS; #ifndef TEST printf("%f\n", annot_t_total); #else { int i, j; for (i=0; i<N; i++) { for (j=0; j<N; j++) { if (j%100==0) printf("\n"); printf("%f ",A[i][j]); } printf("\n"); } } #endif return ((int) A[0][0]); }
traversal.h
#ifndef TRAVERSAL_H_
#define TRAVERSAL_H_

#include "types.h"
#include "adListShared.h"
#include "stinger.h"
#include "darhh.h"
#include "adListChunked.h"
#include "GraphTango.h"
#include "Vertex.h"
#include "topDataStruc.h"

// Generic neighbor-traversal layer.  For each dynamic-graph representation a
// neighborhood_iter<DS> specialization walks one vertex's in- or out-edge set,
// and neighborhood<DS> supplies begin()/end() so algorithms can range-iterate
// without knowing which data structure backs the graph.

template<typename T> class neighborhood;

// Primary template: interface contract only; every supported data structure
// provides a full specialization below.
template<typename T>
class neighborhood_iter {
public:
	neighborhood_iter(T *ds, NodeID n, bool in_neigh);
	bool operator!=(neighborhood_iter &it);
	neighborhood_iter& operator++();
	neighborhood_iter& operator++(int);
	NodeID operator*();
	Weight extractWeight();
};

// GraphTango: neighbors sit in one contiguous array, so the iterator is a
// plain pointer bump over [start, start + size).
template<typename U>
class neighborhood_iter<GraphTango<U>> {
	friend class neighborhood<GraphTango<U>> ;
private:
	U* cursor;
public:
	neighborhood_iter(U* _cursor) {
		cursor = _cursor;
	}
	bool operator!=(const neighborhood_iter<GraphTango<U>> &it) {
		return cursor != it.cursor;
	}
	neighborhood_iter& operator++() {
#ifdef CALC_EDGE_TOUCHED
		// optional instrumentation: globally count every edge visited
#pragma omp atomic
		g_edge_touched++;
#endif
		cursor++;
		return *this;
	}
	neighborhood_iter& operator++(int) {
#ifdef CALC_EDGE_TOUCHED
#pragma omp atomic
		g_edge_touched++;
#endif
		cursor++;
		return *this;
	}
	NodeID operator*() {
		return cursor->getNodeID();
	}
	Weight extractWeight() {
		return cursor->getWeight();
	}
};

// adList: neighbors live in a per-vertex vector; end-of-range is signalled by
// cursor == nullptr (begin() on an empty list yields 0, matching end()).
template<typename U>
class neighborhood_iter<adList<U>> {
	friend class neighborhood<adList<U>> ;
private:
	adList<U> *ds;
	NodeID node;
	bool in_neigh;  // true: iterate in-neighbors; false: out-neighbors
	U *cursor;
public:
	neighborhood_iter(adList<U> *_ds, NodeID _n, bool _in_neigh) :
			ds(_ds), node(_n), in_neigh(_in_neigh) {
		if (in_neigh) {
			bool empty = ds->in_neighbors[node].empty();
			cursor = empty ? 0 : &(ds->in_neighbors[node][0]);
		} else {
			bool empty = ds->out_neighbors[node].empty();
			cursor = empty ? 0 : &(ds->out_neighbors[node][0]);
		}
	}
	bool operator!=(const neighborhood_iter<adList<U>> &it) {
		return cursor != it.cursor;
	}
	neighborhood_iter& operator++() {
		// advance; reaching the last element turns the cursor into nullptr
		if (in_neigh) {
			int size_in_neigh = ds->in_neighbors[node].size();
			if (cursor == &(ds->in_neighbors[node][size_in_neigh - 1])) {
				cursor = nullptr;
			} else
				cursor = cursor + 1;
		} else {
			int size_out_neigh = ds->out_neighbors[node].size();
			if (cursor == &(ds->out_neighbors[node][size_out_neigh - 1])) {
				cursor = nullptr;
			} else
				cursor = cursor + 1;
		}
		return *this;
	}
	neighborhood_iter& operator++(int) {
		if (in_neigh) {
			int size_in_neigh = ds->in_neighbors[node].size();
			if (cursor == &(ds->in_neighbors[node][size_in_neigh - 1])) {
				cursor = nullptr;
			} else
				cursor = cursor + 1;
		} else {
			int size_out_neigh = ds->out_neighbors[node].size();
			if (cursor == &(ds->out_neighbors[node][size_out_neigh - 1])) {
				cursor = nullptr;
			} else
				cursor = cursor + 1;
		}
		return *this;
	}
	NodeID operator*() {
		return cursor->getNodeID();
	}
	Weight extractWeight() {
		return cursor->getWeight();
	}
};

// adListShared: like adList but entries with node < 0 mark deleted edges and
// are skipped; [cursor, endPtr) caches the range so size() isn't re-queried.
template<typename U>
class neighborhood_iter<adListShared<U>> {
	friend class neighborhood<adListShared<U>> ;
private:
	U *cursor;
	U *endPtr;
public:
	neighborhood_iter(adListShared<U> *ds, NodeID node, bool in_neigh) {
		cursor = nullptr;
		endPtr = nullptr;
		if(in_neigh){
			const int sz = ds->in_neighbors[node].size();
			if(sz){
				cursor = &(ds->in_neighbors[node][0]);
				endPtr = cursor + sz;
			}
		}
		else{
			const int sz = ds->out_neighbors[node].size();
			if(sz){
				cursor = &(ds->out_neighbors[node][0]);
				endPtr = cursor + sz;
			}
		}
		// skip leading deleted entries; if none valid, degrade to end()
		while(cursor < endPtr){
			if(cursor->node >= 0){
				//found a valid node
				return;
			}
			cursor++;
		}
		cursor = nullptr;
		endPtr = nullptr;
	}
	bool operator!=(const neighborhood_iter<adListShared<U>> &it) {
		return cursor != it.cursor;
	}
	neighborhood_iter& operator++() {
		cursor++;
		while(cursor < endPtr){
			if(cursor->node >= 0){
				//found a valid node
				return *this;
			}
			cursor++;
		}
		cursor = nullptr;
		return *this;
	}
	neighborhood_iter& operator++(int) {
		cursor++;
		while(cursor < endPtr){
			if(cursor->node >= 0){
				//found a valid node
				return *this;
			}
			cursor++;
		}
		cursor = nullptr;
		return *this;
	}
	NodeID operator*() {
		return cursor->getNodeID();
	}
	Weight extractWeight() {
		return cursor->getWeight();
	}
};

// (A stale commented-out OLD stinger iterator that duplicated the version
// below was removed here.)

//specialization for stinger ---- NEW
// stinger: edges are stored in a linked list of fixed-size edge blocks
// (stinger_eb); entries with neighbor < 0 are deleted and skipped.
template<>
class neighborhood_iter<stinger> {
	friend class neighborhood<stinger> ;
private:
	stinger_edge *cursor;
	stinger_eb *curr_eb;      // current edge block in the chain
	int cursor_index;         // slot within curr_eb
public:
	neighborhood_iter(stinger *_ds, NodeID _n, bool _in_neigh) {
		stinger_vertex* sv = &(_ds->vertices[_n]);
		if (_in_neigh) {
			curr_eb = sv->in_neighbors;
		} else{
			curr_eb = sv->out_neighbors;
		}
		cursor_index = 0;
		//find first edge
		while(curr_eb){
			while(cursor_index < curr_eb->high){
				if(curr_eb->edges[cursor_index].neighbor >= 0){
					cursor = curr_eb->edges + cursor_index;
					return;
				}
				cursor_index++;
			}
			cursor_index = 0;
			curr_eb = curr_eb->next;
		}
		cursor = nullptr;
	}
	bool operator!=(const neighborhood_iter<stinger> &it) {
		return cursor != it.cursor;
	}
	neighborhood_iter& operator++() {
		// NOTE(review): after stepping to curr_eb->next with cursor_index = 0,
		// the outer loop re-runs cursor_index++ before scanning, so slot 0 of
		// every subsequent edge block appears to be skipped — verify intended.
		while(curr_eb){
			cursor_index++;
			while(cursor_index < curr_eb->high){
				if(curr_eb->edges[cursor_index].neighbor >= 0){
					//valid edge (otherwise deleted)
					cursor = curr_eb->edges + cursor_index;
					return *this;
				}
				cursor_index++;
			}
			cursor_index = 0;
			curr_eb = curr_eb->next;
		}
		cursor = nullptr;
		return *this;
	}
	neighborhood_iter& operator++(int) {
		// NOTE(review): same potential slot-0 skip as the pre-increment form.
		while(curr_eb){
			cursor_index++;
			while(cursor_index < curr_eb->high){
				if(curr_eb->edges[cursor_index].neighbor >= 0){
					//valid edge (otherwise deleted)
					cursor = curr_eb->edges + cursor_index;
					return *this;
				}
				cursor_index++;
			}
			cursor_index = 0;
			curr_eb = curr_eb->next;
		}
		cursor = nullptr;
		return *this;
	}
	NodeID operator*() {
		//assert(cursor->neighbor != -1);
		return cursor->neighbor;
	}
	Weight extractWeight() {
		return cursor->weight;
	}
};

// (A stale commented-out OLD adListChunked iterator that duplicated the
// version below was removed here.)

// ------------------------------adList_chunk---------------------------------- NEW
// adListChunked: vertex id is hashed to a partition and a slot within it;
// otherwise identical skip-deleted scan over [cursor, endPtr).
template<typename U>
class neighborhood_iter<adListChunked<U>> {
	friend class neighborhood<adListChunked<U>> ;
private:
	U *cursor;
	U *endPtr;
public:
	neighborhood_iter(adListChunked<U> *ds, NodeID node, bool in_neigh) {
		cursor = nullptr;
		endPtr = nullptr;
		int64_t part_idx = ds->pt_hash(node);
		int64_t sub_idx = ds->hash_within_chunk(node);
		if(in_neigh){
			const int sz = ds->in[part_idx]->partAdList->neighbors[sub_idx].size();
			if(sz){
				cursor = &(ds->in[part_idx]->partAdList->neighbors[sub_idx][0]);
				endPtr = cursor + sz;
			}
		}
		else{
			const int sz = ds->out[part_idx]->partAdList->neighbors[sub_idx].size();
			if(sz){
				cursor = &( ds->out[part_idx]->partAdList->neighbors[sub_idx][0]);
				endPtr = cursor + sz;
			}
		}
		// skip leading deleted entries (node < 0)
		while(cursor < endPtr){
			if(cursor->node >= 0){
				//found a valid node
				return;
			}
			cursor++;
		}
		cursor = nullptr;
		endPtr = nullptr;
	}
	bool operator!=(const neighborhood_iter<adListChunked<U>> &it) {
		return cursor != it.cursor;
	}
	neighborhood_iter& operator++() {
		cursor++;
		while(cursor < endPtr){
			if(cursor->node >= 0){
				//found a valid node
				return *this;
			}
			cursor++;
		}
		cursor = nullptr;
		return *this;
	}
	neighborhood_iter& operator++(int) {
		cursor++;
		while(cursor < endPtr){
			if(cursor->node >= 0){
				//found a valid node
				return *this;
			}
			cursor++;
		}
		cursor = nullptr;
		return *this;
	}
	NodeID operator*() {
		return cursor->getNodeID();
	}
	Weight extractWeight() {
		return cursor->getWeight();
	}
};

// darhh: each vertex is served either by a low-degree (ld_rhh) or a
// high-degree (hd_rhh) hash table; the flag chosen at set_begin() routes
// every later operation to the matching sub-iterator.
template<typename U>
class neighborhood_iter<darhh<U>> {
	friend class neighborhood<darhh<U>> ;
private:
	hd_rhh<U> *hd;
	ld_rhh<U> *ld;
	typename hd_rhh<U>::iter hd_iter;
	typename ld_rhh<U>::iter ld_iter;
	bool low_degree;
public:
	inline neighborhood_iter& operator=(neighborhood_iter const &it);
	inline bool operator!=(neighborhood_iter const &it);
	neighborhood_iter& operator++();
	neighborhood_iter& operator++(int);
	inline NodeID operator*();
	inline Weight extractWeight();
	void set_begin(darhh<U> *ds, NodeID n, bool in);
	void set_end();
};

template<typename U>
void neighborhood_iter<darhh<U>>::set_begin(darhh<U> *ds, NodeID src, bool in) {
	if (in) {
		ld = ds->in[ds->pt_hash(src)]->ld;
		hd = ds->in[ds->pt_hash(src)]->hd;
	} else {
		ld = ds->out[ds->pt_hash(src)]->ld;
		hd = ds->out[ds->pt_hash(src)]->hd;
	}
	// nonzero degree in the low-degree table selects the ld path
	low_degree = ld->get_degree(src);
	if (low_degree)
		ld_iter = ld->begin(src);
	else
		hd_iter = hd->begin(src);
}

template<typename U>
void neighborhood_iter<darhh<U>>::set_end() {
	// null both cursors so operator!= terminates regardless of which
	// sub-iterator the begin-iterator is using
	ld_iter.cursor = nullptr;
	hd_iter.cursor = nullptr;
}

template<typename U>
neighborhood_iter<darhh<U>>&
neighborhood_iter<darhh<U>>::operator=(neighborhood_iter const &other) {
	hd = other.hd;
	ld = other.ld;
	hd_iter = other.hd_iter;
	ld_iter = other.ld_iter;
	low_degree = other.low_degree;
	return *this;
}

template<typename U>
bool neighborhood_iter<darhh<U>>::operator!=(neighborhood_iter const &it) {
	if (low_degree)
		return ld_iter != it.ld_iter;
	else
		return hd_iter != it.hd_iter;
}

template<typename U>
neighborhood_iter<darhh<U>>& neighborhood_iter<darhh<U>>::operator++() {
	if (low_degree)
		++ld_iter;
	else
		++hd_iter;
	return *this;
}

template<typename U>
neighborhood_iter<darhh<U>>& neighborhood_iter<darhh<U>>::operator++(int) {
	if (low_degree)
		++ld_iter;
	else
		++hd_iter;
	return *this;
}

template<typename U>
NodeID neighborhood_iter<darhh<U>>::operator*() {
	if (low_degree)
		return ld_iter.cursor->getNodeID();
	else
		return hd_iter.cursor->getNodeID();
}

template<typename U>
Weight neighborhood_iter<darhh<U>>::extractWeight() {
	if (low_degree)
		return ld_iter.cursor->getWeight();
	else
		return hd_iter.cursor->getWeight();
}

// Generic neighborhood: end() builds a second iterator and nulls its cursor,
// which is the sentinel every pointer-based specialization compares against.
template<typename T>
class neighborhood {
private:
	T *ds;
	NodeID node;
	bool in_neigh;
public:
	neighborhood(NodeID _node, T *_ds, bool _in_neigh) :
			ds(_ds), node(_node), in_neigh(_in_neigh) {
	}
	neighborhood_iter<T> begin() {
		return neighborhood_iter<T>(ds, node, in_neigh);
	}
	neighborhood_iter<T> end() {
		neighborhood_iter<T> n = neighborhood_iter<T>(ds, node, in_neigh);
		n.cursor = nullptr;
		return n;
	}
};

//#if defined(USE_HYBRID_HASHMAP_WITH_GROUPING) \
//	|| defined(USE_HYBRID_HASHMAP_WITH_GROUPING_AND_EDGE_ARR_LOCKING) \
//	|| defined(USE_HYBRID_HASHMAP_WITH_GROUPING_TIGHTER)
// NOTE(review): macro name spells "CAHCE" — kept as-is since build flags
// elsewhere must match it; confirm before renaming.
#if defined(USE_CAHCE_FRIENDLY_HASH_ONLY)
// GraphTango (cache-friendly hash variant): lazily materializes a dense
// adjacency array from the hash table on first traversal, then iterates it.
template<typename U>
class neighborhood<GraphTango<U>> {
private:
	U* _start;
	uint64_t _size;
public:
	neighborhood(NodeID _node, GraphTango<U> *_ds, bool _in_neigh) {
		Vertex<U> &v = _ds->vArray[_node];
		if(_in_neigh){
			GraphTangoHash<U> &edges = v.inEdges;
			_size = edges.degree;
			if(edges.adjList == nullptr){
				//create adjacency list from hash table
				edges.adjList = (U*)globalAllocator.allocate(_size * sizeof(U));
				u32 idx = 0;
				const u32 cap = edges.capacity;
				for(u32 j = 0; j < cap; j++){
					if(edges.neighArr[j].node < FLAG_TOMB_STONE){
						edges.adjList[idx++] = edges.neighArr[j];
					}
				}
				//assert(idx == _size);
			}
			_start = edges.adjList;
		}
		else{
			GraphTangoHash<U> &edges = v.outEdges;
			_size = edges.degree;
			if(edges.adjList == nullptr){
				//create adjacency list from hash table
				edges.adjList = (U*)globalAllocator.allocate(_size * sizeof(U));
				u32 idx = 0;
				const u32 cap = edges.capacity;
				for(u32 j = 0; j < cap; j++){
					if(edges.neighArr[j].node < FLAG_TOMB_STONE){
						edges.adjList[idx++] = edges.neighArr[j];
					}
				}
				//assert(idx == _size);
			}
			_start = edges.adjList;
		}
	}
	neighborhood_iter<GraphTango<U>> begin() {
		return neighborhood_iter<GraphTango<U>>(_start);
	}
	neighborhood_iter<GraphTango<U>> end() {
		return neighborhood_iter<GraphTango<U>>(_start + _size);
	}
};
#elif defined(USE_HYBRID_HASHMAP_WITH_GROUPING) \
	|| defined(USE_HYBRID_HASHMAP_WITH_GROUPING_AND_EDGE_ARR_LOCKING) \
	|| defined(USE_HYBRID_HASHMAP_WITH_GROUPING_TIGHTER) \
	|| defined(USE_GT_BALANCED_TYPE3_ONLY)
// GraphTango (grouped-hashmap variants): neighArr is already dense.
template<typename U>
class neighborhood<GraphTango<U>> {
private:
	U* _start;
	uint64_t _size;
public:
	neighborhood(NodeID _node, GraphTango<U> *_ds, bool _in_neigh) {
		if(_in_neigh){
			_start = _ds->vArray[_node].inEdges.neighArr;
			_size = _ds->vArray[_node].inEdges.degree;
		}
		else{
			_start = _ds->vArray[_node].outEdges.neighArr;
			_size = _ds->vArray[_node].outEdges.degree;
		}
	}
	neighborhood_iter<GraphTango<U>> begin() {
		return neighborhood_iter<GraphTango<U>>(_start);
	}
	neighborhood_iter<GraphTango<U>> end() {
		return neighborhood_iter<GraphTango<U>>(_start + _size);
	}
};
#elif defined(USE_GT_BALANCED) \
	|| defined(USE_GT_BALANCED_MALLOC) \
	|| defined(USE_GT_BALANCED_STDMAP) \
	|| defined(USE_GT_BALANCED_MALLOC_STDMAP) \
	|| defined(USE_GT_BALANCED_DYN_PARTITION) \
	|| defined(USE_GT_BALANCED_ABSEIL) \
	|| defined(USE_GT_BALANCED_RHH) \
	|| defined(USE_GT_BALANCED_TSL_RHH)
// GraphTango (balanced variants): small edge sets (capacity <= TH0) live in
// an inline type1 array, larger ones in the type2_3 heap array.
template<typename U>
class neighborhood<GraphTango<U>> {
private:
	U* _start;
	uint64_t _size;
public:
	neighborhood(NodeID _node, GraphTango<U> *_ds, bool _in_neigh) {
		if(_in_neigh){
			if(_ds->vArray[_node].inEdges.capacity <= EdgeArray<U>::TH0){
				_start = _ds->vArray[_node].inEdges.etype.type1.neigh;
			}
			else{
				_start = _ds->vArray[_node].inEdges.etype.type2_3.neighArr;
			}
			_size = _ds->vArray[_node].inEdges.degree;
		}
		else{
			if(_ds->vArray[_node].outEdges.capacity <= EdgeArray<U>::TH0){
				_start = _ds->vArray[_node].outEdges.etype.type1.neigh;
			}
			else{
				_start = _ds->vArray[_node].outEdges.etype.type2_3.neighArr;
			}
			_size = _ds->vArray[_node].outEdges.degree;
		}
	}
	neighborhood_iter<GraphTango<U>> begin() {
		return neighborhood_iter<GraphTango<U>>(_start);
	}
	neighborhood_iter<GraphTango<U>> end() {
		return neighborhood_iter<GraphTango<U>>(_start + _size);
	}
};
#elif defined(USE_GT_UPDATE)
#else
// GraphTango (default layout): struct-of-arrays vArray with per-direction
// neighbor-array and degree vectors.
template<typename U>
class neighborhood<GraphTango<U>> {
private:
	U* _start;
	uint64_t _size;
public:
	neighborhood(NodeID _node, GraphTango<U> *_ds, bool _in_neigh) {
		if(_in_neigh){
			_start = _ds->vArray.inNeighArr[_node];
			_size = _ds->vArray.inDegree[_node];
		}
		else{
			_start = _ds->vArray.outNeighArr[_node];
			_size = _ds->vArray.outDegree[_node];
		}
	}
	neighborhood_iter<GraphTango<U>> begin() {
//		#pragma omp atomic
//		g_totalEdges += _size;
		return neighborhood_iter<GraphTango<U>>(_start);
	}
	neighborhood_iter<GraphTango<U>> end() {
		return neighborhood_iter<GraphTango<U>>(_start + _size);
	}
};
#endif

// darhh needs its own neighborhood: its iterator is initialized via
// set_begin()/set_end() rather than the (ds, node, in) constructor.
template<typename U>
class neighborhood<darhh<U>> {
private:
	using iter = neighborhood_iter<darhh<U>>;
	NodeID src;
	darhh<U> *ds;
	bool in;
public:
	neighborhood(NodeID src, darhh<U> *ds, bool in) :
			src(src), ds(ds), in(in) {
	}
	iter begin() {
		iter it;
		it.set_begin(ds, src, in);
		return it;
	}
	iter end() {
		iter it;
		it.set_end();
		return it;
	}
};

// In-neighborhood of n; undirected graphs reuse the out-edge set.
template<typename T>
neighborhood<T> in_neigh(NodeID n, T *ds) {
	if (ds->directed)
		return neighborhood<T>(n, ds, true);
	else
		return neighborhood<T>(n, ds, false);
}

// Out-neighborhood of n.
template<typename T>
neighborhood<T> out_neigh(NodeID n, T *ds) {
	return neighborhood<T>(n, ds, false);
}

#endif  // TRAVERSAL_H_
convolution_4x4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 4x4 convolution with stride 4 (non-overlapping windows): every output pixel
// reduces a distinct 4x4 input patch, so one output row consumes 4 input rows.
// Kernel layout: 16 floats per (out-channel, in-channel) pair, at
// kernel[p * inch * 16 + q * 16].
static void conv4x4s4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // after one output row the input cursor is at the end of input row 4*i:
    // skip the remainder of that row plus the 3 rows the window already covered
    const int tailstep = w - 4 * outw + w * 3;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // output channels are independent -> parallelize over p
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        // seed the whole channel with its bias (0 when no bias tensor)
        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        // accumulate each input channel's contribution into `out`
        for (int q = 0; q < inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* kernel0 = kernel + p * inch * 16 + q * 16;

            // the four consecutive input rows feeding the current window row
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w * 2;
            const float* r3 = img0 + w * 3;

#if __ARM_NEON
            // one 4-float kernel row per vector register
            float32x4_t _k0123 = vld1q_f32(kernel0);
            float32x4_t _k4567 = vld1q_f32(kernel0 + 4);
            float32x4_t _k891011 = vld1q_f32(kernel0 + 8);
            float32x4_t _k12131415 = vld1q_f32(kernel0 + 12);
#else
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 4;
            const float* k2 = kernel0 + 8;
            const float* k3 = kernel0 + 12;
#endif // __ARM_NEON

            for (int i = 0; i < outh; i++)
            {
#if __ARM_NEON
                // asm loop emits 4 output pixels per iteration; scalar tail below
                int nn = outw >> 2;
                int remain = outw - (nn << 2);
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                    // Per iteration: four 4x4 patches are reduced. Each patch is
                    // row-wise fmul/fmla against the kernel rows, then the four
                    // per-patch vectors are pairwise-added (faddp) down to one
                    // 4-lane result that is added into outptr.
                    asm volatile(
                        "prfm pldl1keep, [%1, #128] \n"
                        "0: \n"
                        "prfm pldl1keep, [%2, #512] \n"
                        "prfm pldl1keep, [%3, #512] \n"
                        "ld1 {v7.4s}, [%1] \n" // v7 = outptr
                        "ld1 {v8.4s}, [%2], #16 \n" // v8 = r0
                        "ld1 {v9.4s}, [%3], #16 \n" // v9 = r1
                        "prfm pldl1keep, [%4, #512] \n"
                        "prfm pldl1keep, [%5, #512] \n"
                        "fmul v12.4s, v8.4s, %12.4s \n"
                        "fmul v13.4s, v9.4s, %13.4s \n"
                        "ld1 {v10.4s}, [%4], #16 \n" // v10 = r2
                        "ld1 {v11.4s}, [%5], #16 \n" // v11 = r3
                        "fmla v12.4s, v10.4s, %14.4s \n"
                        "fmla v13.4s, v11.4s, %15.4s \n"
                        "fadd v5.4s, v12.4s, v13.4s \n"
                        "ld1 {v8.4s}, [%2], #16 \n" // v8 = r0
                        "ld1 {v9.4s}, [%3], #16 \n" // v9 = r1
                        "fmul v12.4s, v8.4s, %12.4s \n"
                        "fmul v13.4s, v9.4s, %13.4s \n"
                        "ld1 {v10.4s}, [%4], #16 \n" // v10 = r2
                        "ld1 {v11.4s}, [%5], #16 \n" // v11 = r3
                        "fmla v12.4s, v10.4s, %14.4s \n"
                        "fmla v13.4s, v11.4s, %15.4s \n"
                        "fadd v6.4s, v12.4s, v13.4s \n"
                        "ld1 {v8.4s}, [%2], #16 \n" // v8 = r0
                        "ld1 {v9.4s}, [%3], #16 \n" // v9 = r1
                        "fmul v12.4s, v8.4s, %12.4s \n"
                        "fmul v13.4s, v9.4s, %13.4s \n"
                        "ld1 {v10.4s}, [%4], #16 \n" // v10 = r2
                        "ld1 {v11.4s}, [%5], #16 \n" // v11 = r3
                        "fmla v12.4s, v10.4s, %14.4s \n"
                        "fmla v13.4s, v11.4s, %15.4s \n"
                        "fadd v14.4s, v12.4s, v13.4s \n"
                        "faddp v5.4s, v5.4s, v6.4s \n" // Move to here to enhance ILP
                        "ld1 {v8.4s}, [%2], #16 \n" // v8 = r0
                        "ld1 {v9.4s}, [%3], #16 \n" // v9 = r1
                        "fmul v12.4s, v8.4s, %12.4s \n"
                        "fmul v13.4s, v9.4s, %13.4s \n"
                        "ld1 {v10.4s}, [%4], #16 \n" // v10 = r2
                        "ld1 {v11.4s}, [%5], #16 \n" // v11 = r3
                        "fmla v12.4s, v10.4s, %14.4s \n"
                        "fmla v13.4s, v11.4s, %15.4s \n"
                        "fadd v15.4s, v12.4s, v13.4s \n"
                        // "faddp v5.4s , v5.4s, v6.4s \n" // Move this line upward.
                        "faddp v14.4s, v14.4s, v15.4s \n"
                        "faddp v5.4s , v5.4s, v14.4s \n"
                        "fadd v7.4s, v7.4s, v5.4s \n"
                        "st1 {v7.4s}, [%1], #16 \n"
                        "prfm pldl1keep, [%1, #128] \n"
                        "subs %w0, %w0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn), // %0
                        "=r"(outptr), // %1
                        "=r"(r0), // %2
                        "=r"(r1), // %3
                        "=r"(r2), // %4
                        "=r"(r3) // %5
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "w"(_k0123), // %12
                        "w"(_k4567), // %13
                        "w"(_k891011), // %14
                        "w"(_k12131415) // %15
                        : "cc", "memory", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
                }
#else
                if (nn > 0)
                {
                    // armv7 twin of the aarch64 loop: q5/q6/q14/q15 hold the four
                    // per-patch sums; the d-register vadd/vpadd ladder performs the
                    // same pairwise reduction before accumulating into outptr.
                    asm volatile(
                        "pld [%1, #128] \n"
                        "0: \n"
                        "pld [%2, #512] \n"
                        "pld [%3, #512] \n"
                        "vld1.f32 {d14-d15}, [%1] \n" // q7 = outptr
                        "vld1.f32 {d16-d17}, [%2]! \n" // q8 = r0
                        "vld1.f32 {d18-d19}, [%3]! \n" // q9 = r1
                        "pld [%4, #512] \n"
                        "pld [%5, #512] \n"
                        "vmul.f32 q12, q8, %q12 \n"
                        "vmul.f32 q13, q9, %q13 \n"
                        "vld1.f32 {d20-d21}, [%4]! \n" // q10 = r2
                        "vld1.f32 {d22-d23}, [%5]! \n" // q11 = r3
                        "vmla.f32 q12, q10, %q14 \n"
                        "vmla.f32 q13, q11, %q15 \n"
                        "vadd.f32 q5, q12, q13 \n"
                        "vld1.f32 {d16-d17}, [%2]! \n" // q8 = r0
                        "vld1.f32 {d18-d19}, [%3]! \n" // q9 = r1
                        "vmul.f32 q12, q8, %q12 \n"
                        "vmul.f32 q13, q9, %q13 \n"
                        "vld1.f32 {d20-d21}, [%4]! \n" // q10 = r2
                        "vld1.f32 {d22-d23}, [%5]! \n" // q11 = r3
                        "vmla.f32 q12, q10, %q14 \n"
                        "vmla.f32 q13, q11, %q15 \n"
                        "vadd.f32 q6, q12, q13 \n"
                        "vld1.f32 {d16-d17}, [%2]! \n" // q8 = r0
                        "vld1.f32 {d18-d19}, [%3]! \n" // q9 = r1
                        "vmul.f32 q12, q8, %q12 \n"
                        "vmul.f32 q13, q9, %q13 \n"
                        "vld1.f32 {d20-d21}, [%4]! \n" // q10 = r2
                        "vld1.f32 {d22-d23}, [%5]! \n" // q11 = r3
                        "vmla.f32 q12, q10, %q14 \n"
                        "vmla.f32 q13, q11, %q15 \n"
                        "vadd.f32 q14, q12, q13 \n"
                        "vld1.f32 {d16-d17}, [%2]! \n" // q8 = r0
                        "vld1.f32 {d18-d19}, [%3]! \n" // q9 = r1
                        "vmul.f32 q12, q8, %q12 \n"
                        "vmul.f32 q13, q9, %q13 \n"
                        "vld1.f32 {d20-d21}, [%4]! \n" // q10 = r2
                        "vld1.f32 {d22-d23}, [%5]! \n" // q11 = r3
                        "vmla.f32 q12, q10, %q14 \n"
                        "vmla.f32 q13, q11, %q15 \n"
                        "vadd.f32 q15, q12, q13 \n"
                        "vadd.f32 d10, d10, d11 \n"
                        "vadd.f32 d28, d28, d29 \n"
                        "vadd.f32 d11, d12, d13 \n"
                        "vadd.f32 d29, d30, d31 \n"
                        "vpadd.f32 d10, d10, d11 \n"
                        "vpadd.f32 d11, d28, d29 \n"
                        "vadd.f32 q7, q7, q5 \n"
                        "vst1.f32 {d14-d15}, [%1]! \n"
                        "pld [%1, #128] \n"
                        "subs %0, #1 \n"
                        "bne 0b \n"
                        : "=r"(nn), // %0
                        "=r"(outptr), // %1
                        "=r"(r0), // %2
                        "=r"(r1), // %3
                        "=r"(r2), // %4
                        "=r"(r3) // %5
                        : "0"(nn),
                        "1"(outptr),
                        "2"(r0),
                        "3"(r1),
                        "4"(r2),
                        "5"(r3),
                        "w"(_k0123), // %12
                        "w"(_k4567), // %13
                        "w"(_k891011), // %14
                        "w"(_k12131415) // %15
                        : "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // scalar tail: one 4x4 patch -> one output value per iteration;
                // the NEON variants advance r0..r3 by 4 floats inside the asm
                for (; remain > 0; remain--)
                {
#if __ARM_NEON
#if __aarch64__
                    float sum = 0.f;
                    asm volatile(
                        "ld1 {v8.4s}, [%0], #16 \n" // v8 = r0
                        "ld1 {v9.4s}, [%1], #16 \n" // v9 = r1
                        "fmul v12.4s, v8.4s, %9.4s \n"
                        "fmul v13.4s, v9.4s, %10.4s \n"
                        "ld1 {v10.4s}, [%2], #16 \n" // v10 = r2
                        "ld1 {v11.4s}, [%3], #16 \n" // v11 = r3
                        "fmla v12.4s, v10.4s, %11.4s \n"
                        "fmla v13.4s, v11.4s, %12.4s \n"
                        "fadd v5.4s, v12.4s, v13.4s \n"
                        "faddp v5.4s, v5.4s, v5.4s \n"
                        "faddp s5, v5.2s \n"
                        "fmov %w4, s5 \n"
                        : "=r"(r0), // %0
                        "=r"(r1), // %1
                        "=r"(r2), // %2
                        "=r"(r3), // %3
                        "=r"(sum) // %4
                        : "0"(r0),
                        "1"(r1),
                        "2"(r2),
                        "3"(r3),
                        "w"(_k0123), // %9
                        "w"(_k4567), // %10
                        "w"(_k891011), // %11
                        "w"(_k12131415) // %12
                        : "cc", "memory", "v5", "v6", "v8", "v9", "v10", "v11", "v12", "v13");
                    *outptr += sum;
#else
                    float sum = 0.f;
                    asm volatile(
                        "vld1.f32 {d16-d17}, [%0]! \n" // q8 = r0
                        "vld1.f32 {d18-d19}, [%1]! \n" // q9 = r1
                        "vmul.f32 q12, q8, %q9 \n"
                        "vmul.f32 q13, q9, %q10 \n"
                        "vld1.f32 {d20-d21}, [%2]! \n" // q10 = r2
                        "vld1.f32 {d22-d23}, [%3]! \n" // q11 = r3
                        "vmla.f32 q12, q10, %q11 \n"
                        "vmla.f32 q13, q11, %q12 \n"
                        "vadd.f32 q5, q12, q13 \n"
                        "vadd.f32 d10, d10, d11 \n"
                        "vpadd.f32 d10, d10, d10 \n"
                        "vmov.f32 %4, d10[0] \n"
                        : "=r"(r0), // %0
                        "=r"(r1), // %1
                        "=r"(r2), // %2
                        "=r"(r3), // %3
                        "=r"(sum) // %4
                        : "0"(r0),
                        "1"(r1),
                        "2"(r2),
                        "3"(r3),
                        "w"(_k0123), // %9
                        "w"(_k4567), // %10
                        "w"(_k891011), // %11
                        "w"(_k12131415) // %12
                        : "cc", "memory", "q5", "q6", "q8", "q9", "q10", "q11", "q12", "q13");
                    *outptr += sum;
#endif // __aarch64__
#else
                    // plain C fallback: full 16-term dot product
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r0[3] * k0[3];

                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r1[3] * k1[3];

                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];
                    sum += r2[3] * k2[3];

                    sum += r3[0] * k3[0];
                    sum += r3[1] * k3[1];
                    sum += r3[2] * k3[2];
                    sum += r3[3] * k3[3];

                    *outptr += sum;

                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                    r3 += 4;
#endif // __ARM_NEON

                    outptr++;
                }

                // hop all four row pointers to the next 4-row input band
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
                r3 += tailstep;
            }
        }
    }
}
7633.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "atax.h" /* Array initialization. */ static void init_array (int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny)) { int i, j; for (i = 0; i < ny; i++) x[i] = i * M_PI; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) A[i][j] = ((DATA_TYPE) i*(j+1)) / nx; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx)) { int i; for (i = 0; i < nx; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_atax(int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny), DATA_TYPE POLYBENCH_1D(y,NY,ny), DATA_TYPE POLYBENCH_1D(tmp,NX,nx)) { int i, j; #pragma scop #pragma omp parallel { #pragma omp parallel for schedule(static, 8) num_threads(4) for (i = 0; i < _PB_NY; i++) y[i] = 0; #pragma omp parallel for private (j) schedule(static, 8) num_threads(4) for (i = 0; i < _PB_NX; i++) { tmp[i] = 0; for (j = 0; j < _PB_NY; j++) tmp[i] = tmp[i] + A[i][j] * x[j]; for (j = 0; j < _PB_NY; j++) y[j] = y[j] + A[i][j] * tmp[i]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int nx = NX; int ny = NY; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny); POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx); /* Initialize array(s). */ init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_atax (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(tmp); return 0; }
GB_binop__bshift_uint64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): any change to these kernels belongs in the Generator/ sources;
// this file only documents what the generated code does.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bshift_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_08__bshift_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_02__bshift_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_04__bshift_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bshift_uint64)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bshift_uint64)
// C+=b function (dense accum):     GB (_Cdense_accumb__bshift_uint64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bshift_uint64)
// C=scalar+B                       GB (_bind1st__bshift_uint64)
// C=scalar+B'                      GB (_bind1st_tran__bshift_uint64)
// C=A+scalar                       GB (_bind2nd__bshift_uint64)
// C=A'+scalar                      GB (_bind2nd_tran__bshift_uint64)

// C type:   uint64_t
// A type:   uint64_t
// A pattern? 0
// B type:   int8_t
// B pattern? 0

// BinaryOp: cij = GB_bitshift_uint64 (aij, bij)

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    0

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    uint64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// (the trailing backslash below splices the following blank line into the
// macro, as generated; the macro value is still 0)
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
// (same stray line-splice backslash as GB_A_IS_PATTERN above; value is 0)
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_bitshift_uint64 (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSHIFT || GxB_NO_UINT64 || GxB_NO_BSHIFT_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (dead code: BSHIFT is not one of those ops, so this variant is compiled out)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bshift_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns); kept as generated
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0
// (dead code: no colscale kernel is generated for this operator)

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0
// (dead code: no rowscale kernel is generated for this operator)

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bshift_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint64_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion substitutes alpha/beta where A or B has no entry
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bshift_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bshift_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bshift_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_bitshift_uint64 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bshift_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_bitshift_uint64 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_bitshift_uint64 (x, aij) ;     \
}

GrB_Info GB (_bind1st_tran__bshift_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code after this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = GB_bitshift_uint64 (aij, y) ;     \
}

GrB_Info GB (_bind2nd_tran__bshift_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
lensing.c
/** @file lensing.c Documented lensing module
 *
 * Simon Prunet and Julien Lesgourgues, 6.12.2010
 *
 * This module computes the lensed temperature and polarization
 * anisotropy power spectra \f$ C_l^{X}, P(k), ... \f$'s given the
 * unlensed temperature, polarization and lensing potential spectra.
 *
 * Follows Challinor and Lewis full-sky method, astro-ph/0502425
 *
 * The following functions can be called from other modules:
 *
 * -# lensing_init() at the beginning (but after spectra_init())
 * -# lensing_cl_at_l() at any time for computing Cl_lensed at any l
 * -# lensing_free() at the end
 */

#include "lensing.h"
#include <time.h>

/**
 * Anisotropy power spectra \f$ C_l\f$'s for all types, modes and initial conditions.
 * SO FAR: ONLY SCALAR
 *
 * This routine evaluates all the lensed \f$ C_l\f$'s at a given value of l by
 * picking it in the pre-computed table. When relevant, it also
 * sums over all initial conditions for each mode, and over all modes.
 *
 * This function can be called from whatever module at whatever time,
 * provided that lensing_init() has been called before, and
 * lensing_free() has not been called yet.
 *
 * @param ple        Input: pointer to lensing structure
 * @param l          Input: multipole number
 * @param cl_lensed  Output: lensed \f$ C_l\f$'s for all types (TT, TE, EE, etc..)
 * @return the error status
 */

int lensing_cl_at_l(
                    struct lensing * ple,
                    int l,
                    double * cl_lensed    /* array with argument cl_lensed[index_ct] (must be already allocated) */
                    ) {

  int last_index;
  int index_lt;

  /* refuse multipoles beyond the range for which lensed Cl's were tabulated */
  class_test(l > ple->l_lensed_max,
             ple->error_message,
             "you asked for lensed Cls at l=%d, they were computed only up to l=%d, you should increase l_max_scalars or decrease the precision parameter delta_l_max",l,ple->l_lensed_max);

  /* spline interpolation in the pre-computed (l, cl_lens) table; fills all
     lt_size spectrum types at once */
  class_call(array_interpolate_spline(ple->l,
                                      ple->l_size,
                                      ple->cl_lens,
                                      ple->ddcl_lens,
                                      ple->lt_size,
                                      l,
                                      &last_index,
                                      cl_lensed,
                                      ple->lt_size,
                                      ple->error_message),
             ple->error_message,
             ple->error_message);

  /* zero out any type whose spectrum was only computed up to a smaller l_max
     than the requested l (l is already an int; the cast below is redundant
     but harmless) */
  for (index_lt=0; index_lt<ple->lt_size; index_lt++)
    if ((int)l > ple->l_max_lt[index_lt])
      cl_lensed[index_lt]=0.;

  return _SUCCESS_;
}

/**
 * This routine initializes the lensing structure (in particular,
 * computes table of lensed anisotropy spectra \f$ C_l^{X} \f$)
 *
 * @param ppr Input: pointer to precision structure
 * @param ppt Input: pointer to perturbation structure (just in case, not used in current version...)
 * @param psp Input: pointer to spectra structure
 * @param pnl Input: pointer to nonlinear structure
 * @param ple Output: pointer to initialized lensing structure
 * @return the error status
 */

int lensing_init(
                 struct precision * ppr,
                 struct perturbs * ppt,
                 struct spectra * psp,
                 struct nonlinear * pnl,
                 struct lensing * ple
                 ) {

  /** Summary: */

  /** - Define local variables */

  double * mu; /* mu[index_mu]: discretized values of mu between -1 and 1, roots of Legendre polynomial */
  double * w8; /* Corresponding Gauss-Legendre quadrature weights */
  double theta,delta_theta;

  /* Wigner d-functions, one row per mu value: dmn[index_mu][index_l] */
  double ** d00;  /* dmn[index_mu][index_l] */
  double ** d11;
  double ** d2m2;
  double ** d22 = NULL;
  double ** d20 = NULL;
  double ** d1m1;
  double ** d31 = NULL;
  double ** d40 = NULL;
  double ** d3m1 = NULL;
  double ** d3m3 = NULL;
  double ** d4m2 = NULL;
  double ** d4m4 = NULL;
  double * buf_dxx; /* buffer: single contiguous allocation backing all d-function tables and the sqrt tables */

  double * Cgl;    /* Cgl[index_mu] */
  double * Cgl2;   /* Cgl2[index_mu] */
  double * sigma2; /* sigma[index_mu] */

  /* lensed correlation functions, one per spectrum combination */
  double * ksi  = NULL;  /* ksi[index_mu] (TT) */
  double * ksiX = NULL;  /* ksiX[index_mu] (TE) */
  double * ksip = NULL;  /* ksip[index_mu] (EE+BB) */
  double * ksim = NULL;  /* ksim[index_mu] (EE-BB) */

  double fac,fac1;
  /* X_mnp factors of the Challinor & Lewis correlation-function expansion */
  double X_000;
  double X_p000;
  double X_220;
  double X_022;
  double X_p022;
  double X_121;
  double X_132;
  double X_242;

  int num_mu,index_mu,icount;
  int l;
  double ll;

  double * cl_unlensed;  /* cl_unlensed[index_ct] */
  double * cl_tt; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */
  double * cl_te = NULL; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */
  double * cl_ee = NULL; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */
  double * cl_bb = NULL; /* unlensed cl, to be filled to avoid repeated calls to spectra_cl_at_l */
  double * cl_pp; /* potential cl, to be filled to avoid repeated calls to spectra_cl_at_l */

  double res,resX,lens;
  double resp, resm, lensp, lensm;

  /* pre-tabulated square roots used in the X_mnp factors, indexed by l */
  double * sqrt1;
  double * sqrt2;
  double * sqrt3;
  double * sqrt4;
  double * sqrt5;

  double ** cl_md_ic; /* array with argument cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] */
  double ** cl_md;    /* array with argument cl_md[index_md][index_ct] */
  int index_md;

  /* Timing */
  //double debut, fin;
  //double cpu_time;

  /** - check that we really want to compute at least one spectrum */

  if (ple->has_lensed_cls == _FALSE_) {
    if (ple->lensing_verbose > 0)
      printf("No lensing requested. Lensing module skipped.\n");
    return _SUCCESS_;
  }
  else {
    if (ple->lensing_verbose > 0) {
      printf("Computing lensed spectra ");
      if (ppr->accurate_lensing==_TRUE_)
        printf("(accurate mode)\n");
      else
        printf("(fast mode)\n");
    }
  }

  /** - initialize indices and allocate some of the arrays in the lensing structure */

  class_call(lensing_indices(ppr,psp,ple),
             ple->error_message,
             ple->error_message);

  /** - put all precision variables here; will be stored later in precision structure */
  /** - Last element in \f$ \mu \f$ will be for \f$ \mu=1 \f$, needed for sigma2.
      The rest will be chosen as roots of a Gauss-Legendre quadrature **/

  if (ppr->accurate_lensing == _TRUE_) {
    num_mu=(ple->l_unlensed_max+ppr->num_mu_minus_lmax); /* Must be even ?? CHECK */
    num_mu += num_mu%2; /* Force it to be even */
  } else {
    /* Integrate correlation function difference on [0,pi/16] */
    num_mu = (ple->l_unlensed_max * 2 )/16;
  }

  /** - allocate array of \f$ \mu \f$ values, as well as quadrature weights */

  class_alloc(mu,
              num_mu*sizeof(double),
              ple->error_message);
  /* Reserve last element of mu for mu=1, needed for sigma2 */
  mu[num_mu-1] = 1.0;

  /* only the first num_mu-1 points carry quadrature weights (mu=1 excluded) */
  class_alloc(w8,
              (num_mu-1)*sizeof(double),
              ple->error_message);

  if (ppr->accurate_lensing == _TRUE_) {

    //debut = omp_get_wtime();
    class_call(quadrature_gauss_legendre(mu,
                                         w8,
                                         num_mu-1,
                                         ppr->tol_gauss_legendre,
                                         ple->error_message),
               ple->error_message,
               ple->error_message);
    //fin = omp_get_wtime();
    //cpu_time = (fin-debut);
    //printf("time in quadrature_gauss_legendre=%4.3f s\n",cpu_time);

  } else {

    /* Crude integration on [0,pi/16]: Riemann sum on theta */
    delta_theta = _PI_/16. / (double)(num_mu-1);
    for (index_mu=0;index_mu<num_mu-1;index_mu++) {
      theta = (index_mu+1)*delta_theta;
      mu[index_mu] = cos(theta);
      w8[index_mu] = sin(theta)*delta_theta; /* We integrate on mu */
    }
  }

  /** - Compute \f$ d^l_{mm'} (\mu) \f$*/

  /* icount first tallies the total number of doubles needed, so that all
     d-function rows and sqrt tables can live in one contiguous buffer */
  icount = 0;

  class_alloc(d00,
              num_mu*sizeof(double*),
              ple->error_message);

  class_alloc(d11,
              num_mu*sizeof(double*),
              ple->error_message);

  class_alloc(d1m1,
              num_mu*sizeof(double*),
              ple->error_message);

  class_alloc(d2m2,
              num_mu*sizeof(double*),
              ple->error_message);

  icount += 4*num_mu*(ple->l_unlensed_max+1);

  if(ple->has_te==_TRUE_) {

    class_alloc(d20,
                num_mu*sizeof(double*),
                ple->error_message);

    class_alloc(d3m1,
                num_mu*sizeof(double*),
                ple->error_message);

    class_alloc(d4m2,
                num_mu*sizeof(double*),
                ple->error_message);

    icount += 3*num_mu*(ple->l_unlensed_max+1);
  }

  if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {

    class_alloc(d22,
                num_mu*sizeof(double*),
                ple->error_message);

    class_alloc(d31,
                num_mu*sizeof(double*),
                ple->error_message);

    class_alloc(d3m3,
                num_mu*sizeof(double*),
                ple->error_message);

    class_alloc(d40,
                num_mu*sizeof(double*),
                ple->error_message);

    class_alloc(d4m4,
                num_mu*sizeof(double*),
                ple->error_message);

    icount += 5*num_mu*(ple->l_unlensed_max+1);
  }

  icount += 5*(ple->l_unlensed_max+1); /* for arrays sqrt1[l] to sqrt5[l] */

  /** - Allocate main contiguous buffer **/

  class_alloc(buf_dxx,
              icount * sizeof(double),
              ple->error_message);

  /* now carve the buffer up: each dxx[index_mu] points at its row inside
     buf_dxx; the offset bookkeeping below must mirror the counting above */
  icount = 0;

  for (index_mu=0; index_mu<num_mu; index_mu++) {
    d00[index_mu] = &(buf_dxx[icount+index_mu * (ple->l_unlensed_max+1)]);
    d11[index_mu] = &(buf_dxx[icount+(index_mu+num_mu) * (ple->l_unlensed_max+1)]);
    d1m1[index_mu]= &(buf_dxx[icount+(index_mu+2*num_mu) * (ple->l_unlensed_max+1)]);
    d2m2[index_mu]= &(buf_dxx[icount+(index_mu+3*num_mu) * (ple->l_unlensed_max+1)]);
  }
  icount += 4*num_mu*(ple->l_unlensed_max+1);

  if (ple->has_te==_TRUE_) {
    for (index_mu=0; index_mu<num_mu; index_mu++) {
      d20[index_mu] = &(buf_dxx[icount+index_mu * (ple->l_unlensed_max+1)]);
      d3m1[index_mu]= &(buf_dxx[icount+(index_mu+num_mu) * (ple->l_unlensed_max+1)]);
      d4m2[index_mu]= &(buf_dxx[icount+(index_mu+2*num_mu) * (ple->l_unlensed_max+1)]);
    }
    icount += 3*num_mu*(ple->l_unlensed_max+1);
  }

  if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
    for (index_mu=0; index_mu<num_mu; index_mu++) {
      d22[index_mu] = &(buf_dxx[icount+index_mu * (ple->l_unlensed_max+1)]);
      d31[index_mu] = &(buf_dxx[icount+(index_mu+num_mu) * (ple->l_unlensed_max+1)]);
      d3m3[index_mu]= &(buf_dxx[icount+(index_mu+2*num_mu) * (ple->l_unlensed_max+1)]);
      d40[index_mu] = &(buf_dxx[icount+(index_mu+3*num_mu) * (ple->l_unlensed_max+1)]);
      d4m4[index_mu]= &(buf_dxx[icount+(index_mu+4*num_mu) * (ple->l_unlensed_max+1)]);
    }
    icount += 5*num_mu*(ple->l_unlensed_max+1);
  }

  sqrt1 = &(buf_dxx[icount]);
  icount += ple->l_unlensed_max+1;
  sqrt2 = &(buf_dxx[icount]);
  icount += ple->l_unlensed_max+1;
  sqrt3 = &(buf_dxx[icount]);
  icount += ple->l_unlensed_max+1;
  sqrt4 = &(buf_dxx[icount]);
  icount += ple->l_unlensed_max+1;
  sqrt5 = &(buf_dxx[icount]);
  icount += ple->l_unlensed_max+1;

  //debut = omp_get_wtime();
  class_call(lensing_d00(mu,num_mu,ple->l_unlensed_max,d00),
             ple->error_message,
             ple->error_message);

  class_call(lensing_d11(mu,num_mu,ple->l_unlensed_max,d11),
             ple->error_message,
             ple->error_message);

  class_call(lensing_d1m1(mu,num_mu,ple->l_unlensed_max,d1m1),
             ple->error_message,
             ple->error_message);

  class_call(lensing_d2m2(mu,num_mu,ple->l_unlensed_max,d2m2),
             ple->error_message,
             ple->error_message);
  //fin = omp_get_wtime();
  //cpu_time = (fin-debut);
  //printf("time in lensing_dxx=%4.3f s\n",cpu_time);

  if (ple->has_te==_TRUE_) {

    class_call(lensing_d20(mu,num_mu,ple->l_unlensed_max,d20),
               ple->error_message,
               ple->error_message);

    class_call(lensing_d3m1(mu,num_mu,ple->l_unlensed_max,d3m1),
               ple->error_message,
               ple->error_message);

    class_call(lensing_d4m2(mu,num_mu,ple->l_unlensed_max,d4m2),
               ple->error_message,
               ple->error_message);
  }

  if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {

    class_call(lensing_d22(mu,num_mu,ple->l_unlensed_max,d22),
               ple->error_message,
               ple->error_message);

    class_call(lensing_d31(mu,num_mu,ple->l_unlensed_max,d31),
               ple->error_message,
               ple->error_message);

    class_call(lensing_d3m3(mu,num_mu,ple->l_unlensed_max,d3m3),
               ple->error_message,
               ple->error_message);

    class_call(lensing_d40(mu,num_mu,ple->l_unlensed_max,d40),
               ple->error_message,
               ple->error_message);

    class_call(lensing_d4m4(mu,num_mu,ple->l_unlensed_max,d4m4),
               ple->error_message,
               ple->error_message);
  }

  /** - compute \f$ Cgl(\mu)\f$, \f$ Cgl2(\mu) \f$ and sigma2(\f$\mu\f$) */

  class_alloc(Cgl,
              num_mu*sizeof(double),
              ple->error_message);

  class_alloc(Cgl2,
              num_mu*sizeof(double),
              ple->error_message);

  class_alloc(sigma2,
              (num_mu-1)*sizeof(double), /* Zero separation is omitted */
              ple->error_message);

  class_alloc(cl_unlensed,
              psp->ct_size*sizeof(double),
              ple->error_message);

  /** - Locally store unlensed temperature \f$ cl_{tt}\f$ and potential \f$ cl_{pp}\f$ spectra **/

  class_alloc(cl_tt,
              (ple->l_unlensed_max+1)*sizeof(double),
              ple->error_message);
  if (ple->has_te==_TRUE_) {
    class_alloc(cl_te,
                (ple->l_unlensed_max+1)*sizeof(double),
                ple->error_message);
  }
  if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
    class_alloc(cl_ee,
                (ple->l_unlensed_max+1)*sizeof(double),
                ple->error_message);
    class_alloc(cl_bb,
                (ple->l_unlensed_max+1)*sizeof(double),
                ple->error_message);
  }
  class_alloc(cl_pp,
              (ple->l_unlensed_max+1)*sizeof(double),
              ple->error_message);

  class_alloc(cl_md_ic,
              psp->md_size*sizeof(double *),
              ple->error_message);

  class_alloc(cl_md,
              psp->md_size*sizeof(double *),
              ple->error_message);

  for (index_md = 0; index_md < psp->md_size; index_md++) {

    if (psp->md_size > 1)

      class_alloc(cl_md[index_md],
                  psp->ct_size*sizeof(double),
                  ple->error_message);

    if (psp->ic_size[index_md] > 1)

      class_alloc(cl_md_ic[index_md],
                  psp->ic_ic_size[index_md]*psp->ct_size*sizeof(double),
                  ple->error_message);
  }

  /* cache all unlensed spectra once, instead of calling spectra_cl_at_l in
     every quadrature loop below; entries l=0,1 are left uninitialized but
     every consumer below starts at l=2 */
  for (l=2; l<=ple->l_unlensed_max; l++) {
    class_call(spectra_cl_at_l(psp,l,cl_unlensed,cl_md,cl_md_ic),
               psp->error_message,
               ple->error_message);
    cl_tt[l] = cl_unlensed[ple->index_lt_tt];
    cl_pp[l] = cl_unlensed[ple->index_lt_pp];
    if (ple->has_te==_TRUE_) {
      cl_te[l] = cl_unlensed[ple->index_lt_te];
    }
    if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
      cl_ee[l] = cl_unlensed[ple->index_lt_ee];
      cl_bb[l] = cl_unlensed[ple->index_lt_bb];
    }
  }

  for (index_md = 0; index_md < psp->md_size; index_md++) {

    if (psp->md_size > 1)
      free(cl_md[index_md]);

    if (psp->ic_size[index_md] > 1)
      free(cl_md_ic[index_md]);
  }

  free(cl_md_ic);
  free(cl_md);

  /** - Compute sigma2\f$(\mu)\f$ and Cgl2(\f$\mu\f$) **/

  //debut = omp_get_wtime();
#pragma omp parallel for                        \
  private (index_mu,l)                          \
  schedule (static)

  for (index_mu=0; index_mu<num_mu; index_mu++) {

    Cgl[index_mu]=0;
    Cgl2[index_mu]=0;

    for (l=2; l<=ple->l_unlensed_max; l++) {

      Cgl[index_mu] += (2.*l+1.)*l*(l+1.)*
        cl_pp[l]*d11[index_mu][l];

      Cgl2[index_mu] += (2.*l+1.)*l*(l+1.)*
        cl_pp[l]*d1m1[index_mu][l];

    }

    Cgl[index_mu] /= 4.*_PI_;
    Cgl2[index_mu] /= 4.*_PI_;

  }

  for (index_mu=0; index_mu<num_mu-1; index_mu++) {
    /* Cgl(1.0) - Cgl(mu) */
    sigma2[index_mu] = Cgl[num_mu-1] - Cgl[index_mu];
  }
  //fin = omp_get_wtime();
  //cpu_time = (fin-debut);
  //printf("time in Cgl,Cgl2,sigma2=%4.3f s\n",cpu_time);

  /** - compute ksi, ksi+, ksi-, ksiX */

  /** - --> ksi is for TT **/
  if (ple->has_tt==_TRUE_) {
    class_calloc(ksi,
                 (num_mu-1),
                 sizeof(double),
                 ple->error_message);
  }

  /** - --> ksiX is for TE **/
  if (ple->has_te==_TRUE_) {
    class_calloc(ksiX,
                 (num_mu-1),
                 sizeof(double),
                 ple->error_message);
  }

  /** - --> ksip, ksim for EE, BB **/
  if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
    class_calloc(ksip,
                 (num_mu-1),
                 sizeof(double),
                 ple->error_message);
    class_calloc(ksim,
                 (num_mu-1),
                 sizeof(double),
                 ple->error_message);
  }

  /* pre-tabulate the l-dependent square roots entering the X_mnp factors */
  for (l=2;l<=ple->l_unlensed_max;l++) {
    ll = (double)l;
    sqrt1[l]=sqrt((ll+2)*(ll+1)*ll*(ll-1));
    sqrt2[l]=sqrt((ll+2)*(ll-1));
    sqrt3[l]=sqrt((ll+3)*(ll-2));
    sqrt4[l]=sqrt((ll+4)*(ll+3)*(ll-2.)*(ll-3));
    sqrt5[l]=sqrt(ll*(ll+1));
  }

  //debut = omp_get_wtime();
#pragma omp parallel for                                                \
  private (index_mu,l,ll,res,resX,resp,resm,lens,lensp,lensm,           \
           fac,fac1,X_000,X_p000,X_220,X_022,X_p022,X_121,X_132,X_242)  \
  schedule (static)

  for (index_mu=0;index_mu<num_mu-1;index_mu++) {

    for (l=2;l<=ple->l_unlensed_max;l++) {

      ll = (double)l;

      fac = ll*(ll+1)/4.;
      fac1 = (2*ll+1)/(4.*_PI_);

      /* In the following we will keep terms of the form
         (sigma2)^k*(Cgl2)^m with k+m <= 2 */

      X_000 = exp(-fac*sigma2[index_mu]);
      X_p000 = -fac*X_000;
      /* X_220 = 0.25*sqrt1[l] * exp(-(fac-0.5)*sigma2[index_mu]); */
      X_220 = 0.25*sqrt1[l] * X_000;  /* Order 0 */

      /* next 5 lines useless, but avoid compiler warning 'may be used uninitialized' */
      X_242=0.;
      X_132=0.;
      X_121=0.;
      X_p022=0.;
      X_022=0.;

      if (ple->has_te==_TRUE_ || ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {

        /* X_022 = exp(-(fac-1.)*sigma2[index_mu]); */
        X_022 = X_000 * (1+sigma2[index_mu]*(1+0.5*sigma2[index_mu])); /* Order 2 */
        X_p022 = (fac-1.)*X_022;
        /* X_242 = 0.25*sqrt4[l] * exp(-(fac-5./2.)*sigma2[index_mu]); */
        X_242 = 0.25*sqrt4[l] * X_000; /* Order 0 */

        if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {

          /* X_121 = - 0.5*sqrt2[l] * exp(-(fac-2./3.)*sigma2[index_mu]);
             X_132 = - 0.5*sqrt3[l] * exp(-(fac-5./3.)*sigma2[index_mu]); */
          X_121 = -0.5*sqrt2[l] * X_000 * (1+2./3.*sigma2[index_mu]); /* Order 1 */
          X_132 = -0.5*sqrt3[l] * X_000 * (1+5./3.*sigma2[index_mu]); /* Order 1 */
        }
      }

      if (ple->has_tt==_TRUE_) {

        res = fac1*cl_tt[l];

        lens = (X_000*X_000*d00[index_mu][l] +
                X_p000*X_p000*d1m1[index_mu][l]
                *Cgl2[index_mu]*8./(ll*(ll+1)) +
                (X_p000*X_p000*d00[index_mu][l] +
                 X_220*X_220*d2m2[index_mu][l])
                *Cgl2[index_mu]*Cgl2[index_mu]);

        if (ppr->accurate_lensing == _FALSE_) {
          /* Remove unlensed correlation function */
          lens -= d00[index_mu][l];
        }

        res *= lens;
        ksi[index_mu] += res;
      }

      if (ple->has_te==_TRUE_) {

        resX = fac1*cl_te[l];

        lens = ( X_022*X_000*d20[index_mu][l] +
                 Cgl2[index_mu]*2.*X_p000/sqrt5[l] *
                 (X_121*d11[index_mu][l] + X_132*d3m1[index_mu][l]) +
                 0.5 * Cgl2[index_mu] * Cgl2[index_mu] *
                 ( ( 2.*X_p022*X_p000+X_220*X_220 ) * d20[index_mu][l] +
                   X_220*X_242*d4m2[index_mu][l] ) );

        if (ppr->accurate_lensing == _FALSE_) {
          lens -= d20[index_mu][l];
        }

        resX *= lens;
        ksiX[index_mu] += resX;
      }

      if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {

        resp = fac1*(cl_ee[l]+cl_bb[l]);
        resm = fac1*(cl_ee[l]-cl_bb[l]);

        lensp = ( X_022*X_022*d22[index_mu][l] +
                  2.*Cgl2[index_mu]*X_132*X_121*d31[index_mu][l] +
                  Cgl2[index_mu]*Cgl2[index_mu] *
                  ( X_p022*X_p022*d22[index_mu][l] +
                    X_242*X_220*d40[index_mu][l] ) );

        lensm = ( X_022*X_022*d2m2[index_mu][l] +
                  Cgl2[index_mu] *
                  ( X_121*X_121*d1m1[index_mu][l] +
                    X_132*X_132*d3m3[index_mu][l] ) +
                  0.5 * Cgl2[index_mu] * Cgl2[index_mu] *
                  ( 2.*X_p022*X_p022*d2m2[index_mu][l] +
                    X_220*X_220*d00[index_mu][l] +
                    X_242*X_242*d4m4[index_mu][l] ) );

        if (ppr->accurate_lensing == _FALSE_) {
          lensp -= d22[index_mu][l];
          lensm -= d2m2[index_mu][l];
        }

        resp *= lensp;
        resm *= lensm;

        ksip[index_mu] += resp;
        ksim[index_mu] += resm;
      }
    }
  }
  //fin = omp_get_wtime();
  //cpu_time = (fin-debut);
  //printf("time in ksi=%4.3f s\n",cpu_time);

  /** - compute lensed \f$ C_l\f$'s by integration */
  //debut = omp_get_wtime();
  if (ple->has_tt==_TRUE_) {
    class_call(lensing_lensed_cl_tt(ksi,d00,w8,num_mu-1,ple),
               ple->error_message,
               ple->error_message);
    if (ppr->accurate_lensing == _FALSE_) {
      /* fast mode integrated only the lensed-minus-unlensed difference;
         add the unlensed spectrum back */
      class_call(lensing_addback_cl_tt(ple,cl_tt),
                 ple->error_message,
                 ple->error_message);
    }
  }

  if (ple->has_te==_TRUE_) {
    class_call(lensing_lensed_cl_te(ksiX,d20,w8,num_mu-1,ple),
               ple->error_message,
               ple->error_message);
    if (ppr->accurate_lensing == _FALSE_) {
      class_call(lensing_addback_cl_te(ple,cl_te),
                 ple->error_message,
                 ple->error_message);
    }
  }

  if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
    class_call(lensing_lensed_cl_ee_bb(ksip,ksim,d22,d2m2,w8,num_mu-1,ple),
               ple->error_message,
               ple->error_message);
    if (ppr->accurate_lensing == _FALSE_) {
      class_call(lensing_addback_cl_ee_bb(ple,cl_ee,cl_bb),
                 ple->error_message,
                 ple->error_message);
    }
  }
  //fin=omp_get_wtime();
  //cpu_time = (fin-debut);
  //printf("time in final lensing computation=%4.3f s\n",cpu_time);

  /** - spline computed \f$ C_l\f$'s in view of interpolation */

  class_call(array_spline_table_lines(ple->l,
                                      ple->l_size,
                                      ple->cl_lens,
                                      ple->lt_size,
                                      ple->ddcl_lens,
                                      _SPLINE_EST_DERIV_,
                                      ple->error_message),
             ple->error_message,
             ple->error_message);

  /** - Free lots of stuff **/

  free(buf_dxx);

  free(d00);
  free(d11);
  free(d1m1);
  free(d2m2);
  if (ple->has_te==_TRUE_) {
    free(d20);
    free(d3m1);
    free(d4m2);
  }
  if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
    free(d22);
    free(d31);
    free(d3m3);
    free(d40);
    free(d4m4);
  }
  if (ple->has_tt==_TRUE_)
    free(ksi);
  if (ple->has_te==_TRUE_)
    free(ksiX);
  if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
    free(ksip);
    free(ksim);
  }
  free(Cgl);
  free(Cgl2);
  free(sigma2);

  free(mu);
  free(w8);

  free(cl_unlensed);
  free(cl_tt);
  if (ple->has_te==_TRUE_)
    free(cl_te);
  if (ple->has_ee==_TRUE_ || ple->has_bb==_TRUE_) {
    free(cl_ee);
    free(cl_bb);
  }
  free(cl_pp);

  /** - Exit **/

  return _SUCCESS_;

}

/**
 * This routine frees all the memory space allocated by lensing_init().
 *
 * To be called at the end of each run, only when no further calls to
 * lensing_cl_at_l() are needed.
 *
 * @param ple Input: pointer to lensing structure (which fields must be freed)
 * @return the error status
 */

int lensing_free(
                 struct lensing * ple
                 ) {

  /* arrays below are only allocated by lensing_indices() when lensed Cl's
     were requested */
  if (ple->has_lensed_cls == _TRUE_) {

    free(ple->l);
    free(ple->cl_lens);
    free(ple->ddcl_lens);
    free(ple->l_max_lt);

  }

  return _SUCCESS_;

}

/**
 * This routine defines indices and allocates tables in the lensing structure
 *
 * @param ppr  Input: pointer to precision structure
 * @param psp  Input: pointer to spectra structure
 * @param ple  Input/output: pointer to lensing structure
 * @return the error status
 */

int lensing_indices(
                    struct precision * ppr,
                    struct spectra * psp,
                    struct lensing * ple
                    ){

  int index_l;

  double ** cl_md_ic; /* array with argument cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] */

  double ** cl_md;    /* array with argument cl_md[index_md][index_ct] */

  int index_md;

  int index_lt;

  /* indices of all Cl types (lensed and unlensed) — lensed types simply
     mirror the unlensed spectra module indices */

  if (psp->has_tt == _TRUE_) {
    ple->has_tt = _TRUE_;
    ple->index_lt_tt=psp->index_ct_tt;
  }
  else {
    ple->has_tt = _FALSE_;
  }

  if (psp->has_ee == _TRUE_) {
    ple->has_ee = _TRUE_;
    ple->index_lt_ee=psp->index_ct_ee;
  }
  else {
    ple->has_ee = _FALSE_;
  }

  if (psp->has_te == _TRUE_) {
    ple->has_te = _TRUE_;
    ple->index_lt_te=psp->index_ct_te;
  }
  else {
    ple->has_te = _FALSE_;
  }

  if (psp->has_bb == _TRUE_) {
    ple->has_bb = _TRUE_;
    ple->index_lt_bb=psp->index_ct_bb;
  }
  else {
    ple->has_bb = _FALSE_;
  }

  if (psp->has_pp == _TRUE_) {
    ple->has_pp = _TRUE_;
    ple->index_lt_pp=psp->index_ct_pp;
  }
  else {
    ple->has_pp = _FALSE_;
  }

  if (psp->has_tp == _TRUE_) {
    ple->has_tp = _TRUE_;
    ple->index_lt_tp=psp->index_ct_tp;
  }
  else {
    ple->has_tp = _FALSE_;
  }

  if (psp->has_dd == _TRUE_) {
    ple->has_dd = _TRUE_;
    ple->index_lt_dd=psp->index_ct_dd;
  }
  else {
    ple->has_dd = _FALSE_;
  }

  if (psp->has_td == _TRUE_) {
    ple->has_td = _TRUE_;
    ple->index_lt_td=psp->index_ct_td;
  }
  else {
    ple->has_td = _FALSE_;
  }

  if (psp->has_ll == _TRUE_) {
    ple->has_ll = _TRUE_;
    ple->index_lt_ll=psp->index_ct_ll;
  }
  else {
    ple->has_ll = _FALSE_;
  }

  if (psp->has_tl == _TRUE_) {
    ple->has_tl = _TRUE_;
    ple->index_lt_tl=psp->index_ct_tl;
  }
  else {
    ple->has_tl = _FALSE_;
  }

  ple->lt_size = psp->ct_size;

  /* number of multipoles */

  ple->l_unlensed_max = psp->l_max_tot;

  /* lensed Cl's are reliable only up to l_max_tot minus a safety margin */
  ple->l_lensed_max = ple->l_unlensed_max - ppr->delta_l_max;

  /* count the tabulated l values needed to cover [2, l_lensed_max] */
  for (index_l=0; (index_l < psp->l_size_max) && (psp->l[index_l] <= ple->l_lensed_max); index_l++);

  if (index_l < psp->l_size_max) index_l++; /* one more point in order to be able to interpolate till ple->l_lensed_max */

  ple->l_size = index_l+1;

  class_alloc(ple->l,ple->l_size*sizeof(double),ple->error_message);

  for (index_l=0; index_l < ple->l_size; index_l++) {

    ple->l[index_l] = psp->l[index_l];

  }

  /* allocate table where results will be stored */

  class_alloc(ple->cl_lens,
              ple->l_size*ple->lt_size*sizeof(double),
              ple->error_message);

  class_alloc(ple->ddcl_lens,
              ple->l_size*ple->lt_size*sizeof(double),
              ple->error_message);

  /* fill with unlensed cls */

  class_alloc(cl_md_ic,
              psp->md_size*sizeof(double *),
              ple->error_message);

  class_alloc(cl_md,
              psp->md_size*sizeof(double *),
              ple->error_message);

  for (index_md = 0; index_md < psp->md_size; index_md++) {

    if (psp->md_size > 1)

      class_alloc(cl_md[index_md],
                  psp->ct_size*sizeof(double),
                  ple->error_message);

    if (psp->ic_size[index_md] > 1)

      class_alloc(cl_md_ic[index_md],
                  psp->ic_ic_size[index_md]*psp->ct_size*sizeof(double),
                  ple->error_message);
  }

  /* pre-fill the lensed table with the unlensed spectra; lensing_init()
     later overwrites the entries it actually lenses */
  for (index_l=0; index_l<ple->l_size; index_l++) {

    class_call(spectra_cl_at_l(psp,ple->l[index_l],&(ple->cl_lens[index_l*ple->lt_size]),cl_md,cl_md_ic),
               psp->error_message,
               ple->error_message);

  }

  for (index_md = 0; index_md < psp->md_size; index_md++) {

    if (psp->md_size > 1)
      free(cl_md[index_md]);

    if (psp->ic_size[index_md] > 1)
      free(cl_md_ic[index_md]);
  }

  free(cl_md_ic);
  free(cl_md);

  /* we want to output Cl_lensed up to the same l_max as Cl_unlensed
     (even if a number delta_l_max of extra values of l have been used
     internally for more accurate results). Notable exception to the
     above rule: ClBB_lensed(scalars) must be outputed at least up to the
     same l_max as ClEE_unlensed(scalars) (since ClBB_unlensed is null
     for scalars) */

  /* NOTE(review): allocated with sizeof(double) and assigned with "0." —
     confirm against lensing.h that l_max_lt elements are doubles; if they
     are ints this over-allocates (harmless) but the types should agree */
  class_alloc(ple->l_max_lt,ple->lt_size*sizeof(double),ple->error_message);
  for (index_lt = 0; index_lt < ple->lt_size; index_lt++) {
    ple->l_max_lt[index_lt]=0.;
    for (index_md = 0; index_md < psp->md_size; index_md++) {
      ple->l_max_lt[index_lt]=MAX(ple->l_max_lt[index_lt],psp->l_max_ct[index_md][index_lt]);

      if ((ple->has_bb == _TRUE_) && (ple->has_ee == _TRUE_) && (index_lt == ple->index_lt_bb)) {
        ple->l_max_lt[index_lt]=MAX(ple->l_max_lt[index_lt],psp->l_max_ct[index_md][ple->index_lt_ee]);
      }

    }
  }

  return _SUCCESS_;

}

/**
 * This routine computes the lensed power spectra by Gaussian quadrature
 *
 * @param ksi  Input: Lensed correlation function (ksi[index_mu])
 * @param d00  Input: Legendre polynomials (\f$ d^l_{00}\f$[l][index_mu])
 * @param w8   Input: Legendre quadrature weights (w8[index_mu])
 * @param nmu  Input: Number of quadrature points (0<=index_mu<=nmu)
 * @param ple  Input/output: Pointer to the lensing structure
 * @return the error status
 */

int lensing_lensed_cl_tt(
                         double *ksi,
                         double **d00,
                         double *w8,
                         int nmu,
                         struct lensing * ple) {

  double cle;
  int imu;
  int index_l;

  /** Integration by Gauss-Legendre quadrature. **/

#pragma omp parallel for                        \
  private (imu,index_l,cle)                     \
  schedule (static)

  for(index_l=0; index_l<ple->l_size; index_l++){
    cle=0;
    for (imu=0;imu<nmu;imu++) {
      cle += ksi[imu]*d00[imu][(int)ple->l[index_l]]*w8[imu]; /* loop could be optimized */
    }
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_tt]=cle*2.0*_PI_;
  }

  return _SUCCESS_;
}

/**
 * This routine adds back the unlensed \f$ cl_{tt}\f$ power spectrum
 * Used in case of fast (and BB inaccurate) integration of
 * correlation functions.
* * @param ple Input/output: Pointer to the lensing structure * @param cl_tt Input: Array of unlensed power spectrum * @return the error status */ int lensing_addback_cl_tt( struct lensing * ple, double *cl_tt) { int index_l, l; for (index_l=0; index_l<ple->l_size; index_l++) { l = (int)ple->l[index_l]; ple->cl_lens[index_l*ple->lt_size+ple->index_lt_tt] += cl_tt[l]; } return _SUCCESS_; } /** * This routine computes the lensed power spectra by Gaussian quadrature * * @param ksiX Input: Lensed correlation function (ksiX[index_mu]) * @param d20 Input: Wigner d-function (\f$ d^l_{20}\f$[l][index_mu]) * @param w8 Input: Legendre quadrature weights (w8[index_mu]) * @param nmu Input: Number of quadrature points (0<=index_mu<=nmu) * @param ple Input/output: Pointer to the lensing structure * @return the error status */ int lensing_lensed_cl_te( double *ksiX, double **d20, double *w8, int nmu, struct lensing * ple ) { double clte; int imu; int index_l; /** Integration by Gauss-Legendre quadrature. **/ #pragma omp parallel for \ private (imu,index_l,clte) \ schedule (static) for(index_l=0; index_l < ple->l_size; index_l++){ clte=0; for (imu=0;imu<nmu;imu++) { clte += ksiX[imu]*d20[imu][(int)ple->l[index_l]]*w8[imu]; /* loop could be optimized */ } ple->cl_lens[index_l*ple->lt_size+ple->index_lt_te]=clte*2.0*_PI_; } return _SUCCESS_; } /** * This routine adds back the unlensed \f$ cl_{te}\f$ power spectrum * Used in case of fast (and BB inaccurate) integration of * correlation functions. 
* * @param ple Input/output: Pointer to the lensing structure * @param cl_te Input: Array of unlensed power spectrum * @return the error status */ int lensing_addback_cl_te( struct lensing * ple, double *cl_te) { int index_l, l; for (index_l=0; index_l<ple->l_size; index_l++) { l = (int)ple->l[index_l]; ple->cl_lens[index_l*ple->lt_size+ple->index_lt_te] += cl_te[l]; } return _SUCCESS_; } /** * This routine computes the lensed power spectra by Gaussian quadrature * * @param ksip Input: Lensed correlation function (ksi+[index_mu]) * @param ksim Input: Lensed correlation function (ksi-[index_mu]) * @param d22 Input: Wigner d-function (\f$ d^l_{22}\f$[l][index_mu]) * @param d2m2 Input: Wigner d-function (\f$ d^l_{2-2}\f$[l][index_mu]) * @param w8 Input: Legendre quadrature weights (w8[index_mu]) * @param nmu Input: Number of quadrature points (0<=index_mu<=nmu) * @param ple Input/output: Pointer to the lensing structure * @return the error status */ int lensing_lensed_cl_ee_bb( double *ksip, double *ksim, double **d22, double **d2m2, double *w8, int nmu, struct lensing * ple ) { double clp, clm; int imu; int index_l; /** Integration by Gauss-Legendre quadrature. **/ #pragma omp parallel for \ private (imu,index_l,clp,clm) \ schedule (static) for(index_l=0; index_l < ple->l_size; index_l++){ clp=0; clm=0; for (imu=0;imu<nmu;imu++) { clp += ksip[imu]*d22[imu][(int)ple->l[index_l]]*w8[imu]; /* loop could be optimized */ clm += ksim[imu]*d2m2[imu][(int)ple->l[index_l]]*w8[imu]; /* loop could be optimized */ } ple->cl_lens[index_l*ple->lt_size+ple->index_lt_ee]=(clp+clm)*_PI_; ple->cl_lens[index_l*ple->lt_size+ple->index_lt_bb]=(clp-clm)*_PI_; } return _SUCCESS_; } /** * This routine adds back the unlensed \f$ cl_{ee}\f$, \f$ cl_{bb}\f$ power spectra * Used in case of fast (and BB inaccurate) integration of * correlation functions. 
 *
 * @param ple Input/output: Pointer to the lensing structure
 * @param cl_ee Input: Array of unlensed power spectrum
 * @param cl_bb Input: Array of unlensed power spectrum
 * @return the error status
 */

int lensing_addback_cl_ee_bb(
                             struct lensing * ple,
                             double * cl_ee,
                             double * cl_bb) {

  int index_l, l;

  /* restore the unlensed EE and BB contributions multipole by multipole */
  for (index_l=0; index_l<ple->l_size; index_l++) {
    l = (int)ple->l[index_l];
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_ee] += cl_ee[l];
    ple->cl_lens[index_l*ple->lt_size+ple->index_lt_bb] += cl_bb[l];
  }

  return _SUCCESS_;
}

/**
 * This routine computes the d00 term
 *
 * @param mu Input: Vector of cos(beta) values
 * @param num_mu Input: Number of cos(beta) values
 * @param lmax Input: maximum multipole
 * @param d00 Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d00(
                double * mu,
                int num_mu,
                int lmax,
                double ** d00
                ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3;
  ErrorMsg erreur;

  /* precompute l-dependent recurrence coefficients (valid for l>=1) */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);

  for (l=1; l<lmax; l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(2*ll+1)/(ll+1);
    fac2[l] = sqrt((2*ll+3)/(2*ll-1))*ll/(ll+1);
    fac3[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {

    /* recurrence runs on sqrt((2l+1)/2)*d00; fac3 undoes the rescaling
       when storing the plain d00 value */
    dlm1=1.0/sqrt(2.); /* l=0 */
    d00[index_mu][0]=dlm1*sqrt(2.);
    dl=mu[index_mu] * sqrt(3./2.); /*l=1*/
    d00[index_mu][1]=dl*sqrt(2./3.);

    for(l=1;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d00 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*mu[index_mu]*dl - fac2[l]*dlm1;
      d00[index_mu][l+1] = dlp1 * fac3[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);

  return _SUCCESS_;
}

/**
 * This routine computes the d11 term
 *
 * @param mu Input: Vector of cos(beta)
 values
 * @param num_mu Input: Number of cos(beta) values
 * @param lmax Input: maximum multipole
 * @param d11 Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d11(
                double * mu,
                int num_mu,
                int lmax,
                double ** d11
                ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur;

  /* precompute l-dependent recurrence coefficients (valid for l>=2) */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=2;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/(ll*(ll+2));
    fac2[l] = 1.0/(ll*(ll+1.));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-1)*(ll+1)/(ll*(ll+2))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {

    d11[index_mu][0]=0;
    dlm1=(1.0+mu[index_mu])/2. * sqrt(3./2.); /*l=1*/
    d11[index_mu][1]=dlm1 * sqrt(2./3.);
    dl=(1.0+mu[index_mu])/2.*(2.0*mu[index_mu]-1.0) * sqrt(5./2.); /*l=2*/
    d11[index_mu][2] = dl * sqrt(2./5.);

    for(l=2;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d11 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]-fac2[l])*dl - fac3[l]*dlm1;
      d11[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d1m1 term
 *
 * @param mu Input: Vector of cos(beta) values
 * @param num_mu Input: Number of cos(beta) values
 * @param lmax Input: maximum multipole
 * @param d1m1 Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d1m1(
                 double * mu,
                 int num_mu,
                 int lmax,
                 double ** d1m1
                 ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur;

  /* same coefficients as d11; only the seeds and the sign of fac2
     in the recurrence differ (m' = -1 instead of +1) */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=2;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/(ll*(ll+2));
    fac2[l] = 1.0/(ll*(ll+1.));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-1)*(ll+1)/(ll*(ll+2))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {

    d1m1[index_mu][0]=0;
    dlm1=(1.0-mu[index_mu])/2. * sqrt(3./2.); /*l=1*/
    d1m1[index_mu][1]=dlm1 * sqrt(2./3.);
    dl=(1.0-mu[index_mu])/2.*(2.0*mu[index_mu]+1.0) * sqrt(5./2.); /*l=2*/
    d1m1[index_mu][2] = dl * sqrt(2./5.);

    for(l=2;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d1m1 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
      d1m1[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d2m2 term
 *
 * @param mu Input: Vector of cos(beta) values
 * @param num_mu Input: Number of cos(beta) values
 * @param lmax Input: maximum multipole
 * @param d2m2 Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d2m2(
                 double * mu,
                 int num_mu,
                 int lmax,
                 double ** d2m2
                 ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur;

  /* precompute l-dependent recurrence coefficients (valid for l>=2) */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=2;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/((ll-1)*(ll+3));
    fac2[l] = 4.0/(ll*(ll+1));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-2)*(ll+2)/((ll-1)*(ll+3))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {

    /* d2m2 vanishes for l<2 */
    d2m2[index_mu][0]=0;
    dlm1=0.; /*l=1*/
    d2m2[index_mu][1]=0;
    dl=(1.0-mu[index_mu])*(1.0-mu[index_mu])/4. * sqrt(5./2.); /*l=2*/
    d2m2[index_mu][2] = dl * sqrt(2./5.);

    for(l=2;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d2m2 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
      d2m2[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d22 term
 *
 * @param mu Input: Vector of cos(beta) values
 * @param num_mu Input: Number of cos(beta) values
 * @param lmax Input: maximum multipole
 * @param d22 Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d22(
                double * mu,
                int num_mu,
                int lmax,
                double ** d22
                ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur;

  /* same coefficients as d2m2; seeds and the sign of fac2 differ */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=2;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)/(2*ll+1))*(ll+1)*(2*ll+1)/((ll-1)*(ll+3));
    fac2[l] = 4.0/(ll*(ll+1));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-2)*(ll+2)/((ll-1)*(ll+3))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {

    /* d22 vanishes for l<2 */
    d22[index_mu][0]=0;
    dlm1=0.; /*l=1*/
    d22[index_mu][1]=0;
    dl=(1.0+mu[index_mu])*(1.0+mu[index_mu])/4. * sqrt(5./2.); /*l=2*/
    d22[index_mu][2] = dl * sqrt(2./5.);

    for(l=2;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d22 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]-fac2[l])*dl - fac3[l]*dlm1;
      d22[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d20 term
 *
 * @param mu Input: Vector of cos(beta) values
 * @param num_mu Input: Number of cos(beta) values
 * @param lmax Input: maximum multipole
 * @param d20 Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d20(
                double * mu,
                int num_mu,
                int lmax,
                double ** d20
                ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac3, *fac4;
  ErrorMsg erreur;

  /* m'=0, so the fac2 (mu-offset) coefficient drops out of the recurrence */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=2;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-1)*(ll+3)));
    fac3[l] = sqrt((2*ll+3)*(ll-2)*(ll+2)/((2*ll-1)*(ll-1)*(ll+3)));
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {

    /* d20 vanishes for l<2 */
    d20[index_mu][0]=0;
    dlm1=0.; /*l=1*/
    d20[index_mu][1]=0;
    dl=sqrt(15.)/4.*(1-mu[index_mu]*mu[index_mu]); /*l=2*/
    d20[index_mu][2] = dl * sqrt(2./5.);

    for(l=2;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d20 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*mu[index_mu]*dl - fac3[l]*dlm1;
      d20[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d31 term
 *
 * @param mu Input: Vector of cos(beta) values
 * @param num_mu Input: Number of cos(beta) values
 * @param lmax Input: maximum multipole
 * @param d31 Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d31(
                double * mu,
                int num_mu,
                int lmax,
                double ** d31
                ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur;

  /* precompute l-dependent recurrence coefficients (valid for l>=3) */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=3;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-2)*(ll+4)*ll*(ll+2))) * (ll+1);
    fac2[l] = 3.0/(ll*(ll+1));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1)*(ll-3)*(ll+3)*(ll-1)*(ll+1)/((ll-2)*(ll+4)*ll*(ll+2)))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {

    /* d31 vanishes for l<3 */
    d31[index_mu][0]=0;
    d31[index_mu][1]=0;
    dlm1=0.; /*l=2*/
    d31[index_mu][2]=0;
    dl=sqrt(105./2.)*(1+mu[index_mu])*(1+mu[index_mu])*(1-mu[index_mu])/8.; /*l=3*/
    d31[index_mu][3] = dl * sqrt(2./7.);

    for(l=3;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d31 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]-fac2[l])*dl - fac3[l]*dlm1;
      d31[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d3m1 term
 *
 * @param mu Input: Vector of cos(beta) values
 * @param num_mu Input: Number of cos(beta) values
 * @param lmax Input: maximum multipole
 * @param d3m1 Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d3m1(
                 double * mu,
                 int num_mu,
                 int lmax,
                 double ** d3m1
                 ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur;

  /* same coefficients as d31; seeds and the sign of fac2 differ */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=3;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-2)*(ll+4)*ll*(ll+2))) * (ll+1);
    fac2[l] = 3.0/(ll*(ll+1));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1)*(ll-3)*(ll+3)*(ll-1)*(ll+1)/((ll-2)*(ll+4)*ll*(ll+2)))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {

    /* d3m1 vanishes for l<3 */
    d3m1[index_mu][0]=0;
    d3m1[index_mu][1]=0;
    dlm1=0.; /*l=2*/
    d3m1[index_mu][2]=0;
    dl=sqrt(105./2.)*(1+mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/8.; /*l=3*/
    d3m1[index_mu][3] = dl * sqrt(2./7.);

    for(l=3;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d3m1 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
      d3m1[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d3m3 term
 *
 * @param mu Input: Vector of cos(beta) values
 * @param num_mu Input: Number of cos(beta) values
 * @param lmax Input: maximum multipole
 * @param d3m3 Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d3m3(
                 double * mu,
                 int num_mu,
                 int lmax,
                 double ** d3m3
                 ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur;

  /* precompute l-dependent recurrence coefficients (valid for l>=3) */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=3;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)*(2*ll+1))*(ll+1)/((ll-2)*(ll+4));
    fac2[l] = 9.0/(ll*(ll+1));
    /* NOTE(review): (l+1) below uses the int loop index instead of ll;
       the value is identical after promotion, only the style differs */
    fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-3)*(ll+3)*(l+1)/((ll-2)*(ll+4)*ll);
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {

    /* d3m3 vanishes for l<3 */
    d3m3[index_mu][0]=0;
    d3m3[index_mu][1]=0;
    dlm1=0.; /*l=2*/
    d3m3[index_mu][2]=0;
    dl=sqrt(7./2.)*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/8.; /*l=3*/
    d3m3[index_mu][3] = dl * sqrt(2./7.);

    for(l=3;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d3m3 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
      d3m3[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d40 term
 *
 * @param mu Input: Vector of cos(beta) values
 * @param num_mu Input: Number of cos(beta) values
 * @param lmax Input: maximum multipole
 * @param d40 Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d40(
                double * mu,
                int num_mu,
                int lmax,
                double ** d40
                ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac3, *fac4;
  ErrorMsg erreur;

  /* m'=0, so the fac2 (mu-offset) coefficient drops out of the recurrence */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=4;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-3)*(ll+5)));
    fac3[l] = sqrt((2*ll+3)*(ll-4)*(ll+4)/((2*ll-1)*(ll-3)*(ll+5)));
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {

    /* d40 vanishes for l<4 */
    d40[index_mu][0]=0;
    d40[index_mu][1]=0;
    d40[index_mu][2]=0;
    dlm1=0.; /*l=3*/
    d40[index_mu][3]=0;
    dl=sqrt(315.)*(1+mu[index_mu])*(1+mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/16.; /*l=4*/
    d40[index_mu][4] = dl * sqrt(2./9.);

    for(l=4;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d40 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*mu[index_mu]*dl - fac3[l]*dlm1;
      d40[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d4m2 term
 *
 * @param mu Input: Vector of cos(beta) values
 * @param num_mu Input: Number of cos(beta) values
 * @param lmax Input: maximum multipole
 * @param d4m2 Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d4m2(
                 double * mu,
                 int num_mu,
                 int lmax,
                 double ** d4m2
                 ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur;

  /* precompute l-dependent recurrence coefficients (valid for l>=4) */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=4;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)*(2*ll+1)/((ll-3)*(ll+5)*(ll-1)*(ll+3))) * (ll+1.);
    fac2[l] = 8./(ll*(ll+1));
    fac3[l] = sqrt((2*ll+3)*(ll-4)*(ll+4)*(ll-2)*(ll+2)/((2*ll-1)*(ll-3)*(ll+5)*(ll-1)*(ll+3)))*(ll+1)/ll;
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {

    /* d4m2 vanishes for l<4 */
    d4m2[index_mu][0]=0;
    d4m2[index_mu][1]=0;
    d4m2[index_mu][2]=0;
    dlm1=0.; /*l=3*/
    d4m2[index_mu][3]=0;
    dl=sqrt(126.)*(1+mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/16.; /*l=4*/
    d4m2[index_mu][4] = dl * sqrt(2./9.);

    for(l=4;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d4m2 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
      d4m2[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}

/**
 * This routine computes the d4m4 term
 *
 * @param mu Input: Vector of cos(beta) values
 * @param num_mu Input: Number of cos(beta) values
 * @param lmax Input: maximum multipole
 * @param d4m4 Input/output: Result is stored here
 *
 * Wigner d-functions, computed by recurrence
 * actual recurrence on \f$ \sqrt{(2l+1)/2} d^l_{mm'} \f$ for stability
 * Formulae from Kostelec & Rockmore 2003
 **/

int lensing_d4m4(
                 double * mu,
                 int num_mu,
                 int lmax,
                 double ** d4m4
                 ) {

  double ll, dlm1, dl, dlp1;
  int index_mu, l;
  double *fac1, *fac2, *fac3, *fac4;
  ErrorMsg erreur;

  /* precompute l-dependent recurrence coefficients (valid for l>=4) */
  class_alloc(fac1,lmax*sizeof(double),erreur);
  class_alloc(fac2,lmax*sizeof(double),erreur);
  class_alloc(fac3,lmax*sizeof(double),erreur);
  class_alloc(fac4,lmax*sizeof(double),erreur);

  for (l=4;l<lmax;l++) {
    ll = (double) l;
    fac1[l] = sqrt((2*ll+3)*(2*ll+1))*(ll+1)/((ll-3)*(ll+5));
    fac2[l] = 16./(ll*(ll+1));
    fac3[l] = sqrt((2*ll+3)/(2*ll-1))*(ll-4)*(ll+4)*(ll+1)/((ll-3)*(ll+5)*ll);
    fac4[l] = sqrt(2./(2*ll+3));
  }

#pragma omp parallel for                        \
  private (index_mu,dlm1,dl,dlp1,l,ll)          \
  schedule (static)

  for (index_mu=0;index_mu<num_mu;index_mu++) {

    /* d4m4 vanishes for l<4 */
    d4m4[index_mu][0]=0;
    d4m4[index_mu][1]=0;
    d4m4[index_mu][2]=0;
    dlm1=0.; /*l=3*/
    d4m4[index_mu][3]=0;
    dl=sqrt(9./2.)*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])*(1-mu[index_mu])/16.; /*l=4*/
    d4m4[index_mu][4] = dl * sqrt(2./9.);

    for(l=4;l<lmax;l++){
      ll=(double) l;
      /* sqrt((2l+1)/2)*d4m4 recurrence, supposed to be more stable */
      dlp1 = fac1[l]*(mu[index_mu]+fac2[l])*dl - fac3[l]*dlm1;
      d4m4[index_mu][l+1] = dlp1 * fac4[l];
      dlm1 = dl;
      dl = dlp1;
    }
  }

  free(fac1);
  free(fac2);
  free(fac3);
  free(fac4);

  return _SUCCESS_;
}
/* ===== dataset.h (LightGBM) — stray filename marker from file concatenation, commented out ===== */
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_

#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>

#include <string>
#include <functional>
#include <memory>
#include <mutex>
#include <unordered_set>
#include <utility>
#include <vector>

namespace LightGBM {

/*! \brief forward declaration */
class DatasetLoader;

/*!
 * \brief This class is used to store some meta(non-feature) data for training data,
 *        e.g. labels, weights, initial scores, query level information.
 *
 * Some details:
 * 1. Label, used for training.
 * 2. Weights, weights of records, optional.
 * 3. Query Boundaries, necessary for lambdarank.
 *    The documents of the i-th query are in [ query_boundaries[i], query_boundaries[i+1] ).
 * 4. Query Weights, automatically calculated from weights and query_boundaries (if both exist).
 *    The weight for the i-th query is the sum of record weights over
 *    [ query_boundaries[i], query_boundaries[i+1] ) divided by
 *    (query_boundaries[i+1] - query_boundaries[i]).
 * 5. Initial score, optional. If it exists, the model will boost from this score,
 *    otherwise it will start from 0.
 */
class Metadata {
 public:
  /*!
  * \brief Null constructor
  */
  Metadata();
  /*!
  * \brief Initialization will load query level information, since it is needed for sampling data
  * \param data_filename Filename of data
  * \param initscore_file Filename of initial score
  */
  void Init(const char* data_filename, const char* initscore_file);
  /*!
  * \brief init as subset
  * \param metadata Metadata to take the subset from
  * \param used_indices Indices of the records kept in the subset
  * \param num_used_indices Number of entries in used_indices
  */
  void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
  /*!
  * \brief Initialize with binary memory
  * \param memory Pointer to memory
  */
  void LoadFromMemory(const void* memory);
  /*! \brief Destructor */
  ~Metadata();

  /*!
  * \brief Initial work, will allocate space for label, weight(if exists) and query(if exists)
  * \param num_data Number of training data
  * \param weight_idx Index of weight column, < 0 means doesn't exist
  * \param query_idx Index of query id column, < 0 means doesn't exist
  */
  void Init(data_size_t num_data, int weight_idx, int query_idx);

  /*!
  * \brief Partition label by used indices
  * \param used_indices Indices of local used
  */
  void PartitionLabel(const std::vector<data_size_t>& used_indices);

  /*!
  * \brief Partition meta data according to local used indices if needed
  * \param num_all_data Number of total training data, including other machines' data on parallel learning
  * \param used_data_indices Indices of local used training data
  */
  void CheckOrPartition(data_size_t num_all_data,
                        const std::vector<data_size_t>& used_data_indices);

  /*! \brief Set label for all records (len must equal the number of data) */
  void SetLabel(const label_t* label, data_size_t len);

  /*! \brief Set per-record weights (len must equal the number of data) */
  void SetWeights(const label_t* weights, data_size_t len);

  /*! \brief Set query ids for all records (len must equal the number of data) */
  void SetQuery(const data_size_t* query, data_size_t len);

  /*!
  * \brief Set initial scores
  * \param init_score Initial scores, this class will manage memory for init_score.
  * \param len Number of initial scores
  */
  void SetInitScore(const double* init_score, data_size_t len);


  /*!
  * \brief Save binary data to file
  * \param writer Binary writer to serialize into
  */
  void SaveBinaryToFile(const VirtualFileWriter* writer) const;

  /*!
  * \brief Get sizes in byte of this object
  */
  size_t SizesInByte() const;

  /*!
  * \brief Get pointer of label
  * \return Pointer of label
  */
  inline const label_t* label() const { return label_.data(); }

  /*!
  * \brief Set label for one record
  * \param idx Index of this record
  * \param value Label value of this record
  */
  inline void SetLabelAt(data_size_t idx, label_t value)
  {
    label_[idx] = value;
  }

  /*!
  * \brief Set Weight for one record
  * \param idx Index of this record
  * \param value Weight value of this record
  */
  inline void SetWeightAt(data_size_t idx, label_t value)
  {
    weights_[idx] = value;
  }

  /*!
  * \brief Set Query Id for one record
  * \param idx Index of this record
  * \param value Query Id value of this record
  */
  inline void SetQueryAt(data_size_t idx, data_size_t value)
  {
    queries_[idx] = static_cast<data_size_t>(value);
  }

  /*!
  * \brief Get weights, if not exists, will return nullptr
  * \return Pointer of weights
  */
  inline const label_t* weights() const {
    if (!weights_.empty()) {
      return weights_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get data boundaries on queries, if not exists, will return nullptr
  *        we assume data will be ordered by query,
  *        the interval of [query_boundaries[i], query_boundaries[i+1])
  *        is the data indices for query i.
  * \return Pointer of data boundaries on queries
  */
  inline const data_size_t* query_boundaries() const {
    if (!query_boundaries_.empty()) {
      return query_boundaries_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get Number of queries
  * \return Number of queries
  */
  inline data_size_t num_queries() const { return num_queries_; }

  /*!
  * \brief Get weights for queries, if not exists, will return nullptr
  * \return Pointer of weights for queries
  */
  inline const label_t* query_weights() const {
    if (!query_weights_.empty()) {
      return query_weights_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get initial scores, if not exists, will return nullptr
  * \return Pointer of initial scores
  */
  inline const double* init_score() const {
    if (!init_score_.empty()) {
      return init_score_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get size of initial scores
  */
  inline int64_t num_init_score() const { return num_init_score_; }

  /*! \brief Disable copy */
  Metadata& operator=(const Metadata&) = delete;
  /*! \brief Disable copy */
  Metadata(const Metadata&) = delete;

 private:
  /*! \brief Load initial scores from file */
  void LoadInitialScore(const char* initscore_file);
  /*! \brief Load weights from file */
  void LoadWeights();
  /*! \brief Load query boundaries from file */
  void LoadQueryBoundaries();
  /*! \brief Load query weights */
  void LoadQueryWeights();
  /*! \brief Filename of current data */
  std::string data_filename_;
  /*! \brief Number of data */
  data_size_t num_data_;
  /*! \brief Number of weights, used to check correct weight file */
  data_size_t num_weights_;
  /*! \brief Label data */
  std::vector<label_t> label_;
  /*! \brief Weights data */
  std::vector<label_t> weights_;
  /*! \brief Query boundaries */
  std::vector<data_size_t> query_boundaries_;
  /*! \brief Query weights */
  std::vector<label_t> query_weights_;
  /*! \brief Number of queries */
  data_size_t num_queries_;
  /*! \brief Number of initial scores, used to check correct initial-score file */
  int64_t num_init_score_;
  /*! \brief Initial score */
  std::vector<double> init_score_;
  /*! \brief Queries data */
  std::vector<data_size_t> queries_;
  /*! \brief mutex for thread-safe calls */
  std::mutex mutex_;
  bool weight_load_from_file_;
  bool query_load_from_file_;
  bool init_score_load_from_file_;
};


/*! \brief Interface for Parser */
class Parser {
 public:
  /*! \brief virtual destructor */
  virtual ~Parser() {}

  /*!
  * \brief Parse one line with label
  * \param str One line record, string format, should end with '\0'
  * \param out_features Output columns, store in (column_idx, values)
  * \param out_label Label will store to this if exists
  */
  virtual void ParseOneLine(const char* str,
                            std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0;

  virtual int NumFeatures() const = 0;

  /*!
* \brief Create an object of parser, will auto choose the format depend on file
* \param filename One Filename of data
* \param header NOTE(review): presumably true if the file has a header line — confirm against callers
* \param num_features Pass num_features of this data file if you know, <=0 means don't know
* \param label_idx index of label column
* \return Object of parser
*/
static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx);
};

/*! \brief The main class of data set,
*   which are used to training or validation */
class Dataset {
 public:
  // DatasetLoader fills the private storage directly during loading.
  friend DatasetLoader;

  LIGHTGBM_EXPORT Dataset();

  LIGHTGBM_EXPORT Dataset(data_size_t num_data);

  /*!
  * \brief Build internal feature-group storage from pre-computed bin mappers
  *        and column samples, honoring the I/O configuration.
  */
  void Construct(
    std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
    int num_total_features,
    const std::vector<std::vector<double>>& forced_bins,
    int** sample_non_zero_indices,
    double** sample_values,
    const int* num_per_col,
    int num_sample_col,
    size_t total_sample_cnt,
    const Config& io_config);

  /*! \brief Destructor */
  LIGHTGBM_EXPORT ~Dataset();

  /*!
  * \brief Check whether another dataset has an identical feature layout.
  * \return true only if used/total feature counts, label index, and every
  *         per-feature BinMapper alignment all match.
  */
  LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
    if (num_features_ != other.num_features_) {
      return false;
    }
    if (num_total_features_ != other.num_total_features_) {
      return false;
    }
    if (label_idx_ != other.label_idx_) {
      return false;
    }
    for (int i = 0; i < num_features_; ++i) {
      if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
        return false;
      }
    }
    return true;
  }

  /*!
  * \brief Complete a sparse row: push an explicit 0.0 for every feature in
  *        feature_need_push_zeros_ that this row did not supply.
  *        No-op after loading has finished.
  */
  inline void FinishOneRow(int tid, data_size_t row_idx, const std::vector<bool>& is_feature_added) {
    if (is_finish_load_) { return; }
    for (auto fidx : feature_need_push_zeros_) {
      if (is_feature_added[fidx]) { continue; }
      const int group = feature2group_[fidx];
      const int sub_feature = feature2subfeature_[fidx];
      feature_groups_[group]->PushData(tid, sub_feature, row_idx, 0.0f);
    }
  }

  /*!
  * \brief Push one dense row of raw column values; columns map through
  *        used_feature_map_ and unused columns (map < 0) are skipped.
  *        No-op after loading has finished.
  */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
    if (is_finish_load_) { return; }
    for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
      int feature_idx = used_feature_map_[i];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
      }
    }
  }

  /*!
  * \brief Push one sparse row of (column, value) pairs, then call
  *        FinishOneRow to zero-fill features the row did not mention.
  *        Out-of-range column indices are ignored.
  */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
    if (is_finish_load_) { return; }
    std::vector<bool> is_feature_added(num_features_, false);
    for (auto& inner_data : feature_values) {
      if (inner_data.first >= num_total_features_) { continue; }
      int feature_idx = used_feature_map_[inner_data.first];
      if (feature_idx >= 0) {
        is_feature_added[feature_idx] = true;
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
      }
    }
    FinishOneRow(tid, row_idx, is_feature_added);
  }

  /*! \brief Push a single value directly by (group, sub_feature) address. */
  inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
    feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
  }

  /*! \brief Map inner (used) feature index back to the real column index. */
  inline int RealFeatureIndex(int fidx) const {
    return real_feature_idx_[fidx];
  }

  /*! \brief Map a raw column index to the inner feature index (< 0 if unused). */
  inline int InnerFeatureIndex(int col_idx) const {
    return used_feature_map_[col_idx];
  }

  /*! \brief Group that an inner feature belongs to. */
  inline int Feature2Group(int feature_idx) const {
    return feature2group_[feature_idx];
  }

  // NOTE(review): "Feture" typo is part of the public API — kept for
  // backward compatibility with external callers.
  inline int Feture2SubFeature(int feature_idx) const {
    return feature2subfeature_[feature_idx];
  }

  /*! \brief Cumulative bin offset at which the given group starts. */
  inline uint64_t GroupBinBoundary(int group_idx) const {
    return group_bin_boundaries_[group_idx];
  }

  /*! \brief Total number of bins over all groups (last cumulative boundary). */
  inline uint64_t NumTotalBin() const {
    return group_bin_boundaries_.back();
  }

  /*! \brief Real column indices of all features actually used. */
  inline std::vector<int> ValidFeatureIndices() const {
    std::vector<int> ret;
    for (int i = 0; i < num_total_features_; ++i) {
      if (used_feature_map_[i] >= 0) {
        ret.push_back(i);
      }
    }
    return ret;
  }

  void ReSize(data_size_t num_data);

  void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);

  LIGHTGBM_EXPORT void FinishLoad();

  LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);

  LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);

  LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);

  LIGHTGBM_EXPORT bool GetInt8Field(const char* field_name, data_size_t* out_len, const int8_t** out_ptr);

  /*!
  * \brief Save current dataset into binary file, will save to "filename.bin"
  */
  LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);

  LIGHTGBM_EXPORT void DumpTextFile(const char* text_filename);

  LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);

  LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);

  /*! \brief Build per-feature gradient/hessian histograms for one leaf. */
  void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
                           const data_size_t* data_indices, data_size_t num_data,
                           int leaf_idx,
                           std::vector<std::unique_ptr<OrderedBin>>* ordered_bins,
                           const score_t* gradients, const score_t* hessians,
                           score_t* ordered_gradients, score_t* ordered_hessians,
                           bool is_constant_hessian,
                           HistogramBinEntry* histogram_data) const;

  void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian,
                    data_size_t num_data, HistogramBinEntry* data) const;

  /*!
  * \brief Partition data_indices by a threshold on one feature; delegates to
  *        the owning feature group's Split.
  * \return Number of indices placed on the <= side (lte_indices).
  */
  inline data_size_t Split(int feature, const uint32_t* threshold,
                           int num_threshold, bool default_left,
                           data_size_t* data_indices, data_size_t num_data,
                           data_size_t* lte_indices, data_size_t* gt_indices) const {
    const int group = feature2group_[feature];
    const int sub_feature = feature2subfeature_[feature];
    return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left, data_indices, num_data, lte_indices, gt_indices);
  }

  /*! \brief Bin offset of a feature inside its group: 1 for the first
  *         sub-feature, 0 otherwise. */
  inline int SubFeatureBinOffset(int i) const {
    const int sub_feature = feature2subfeature_[i];
    if (sub_feature == 0) {
      return 1;
    } else {
      return 0;
    }
  }

  /*! \brief Number of bins of the i-th used feature. */
  inline int FeatureNumBin(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
  }

  /*! \brief Monotone constraint of the i-th feature; 0 when no constraints set. */
  inline int8_t FeatureMonotone(int i) const {
    if (monotone_types_.empty()) {
      return 0;
    } else {
      return monotone_types_[i];
    }
  }

  // NOTE(review): "Penalte" typo is part of the public API — kept for
  // backward compatibility. Returns 1 (no penalty) when not configured.
  inline double FeaturePenalte(int i) const {
    if (feature_penalty_.empty()) {
      return 1;
    } else {
      return feature_penalty_[i];
    }
  }

  /*! \brief Whether any feature has a non-zero monotone constraint. */
  bool HasMonotone() const {
    if (monotone_types_.empty()) {
      return false;
    } else {
      for (size_t i = 0; i < monotone_types_.size(); ++i) {
        if (monotone_types_[i] != 0) {
          return true;
        }
      }
      return false;
    }
  }

  /*! \brief Total number of bins in one feature group. */
  inline int FeatureGroupNumBin(int group) const {
    return feature_groups_[group]->num_total_bin_;
  }

  /*! \brief BinMapper of the i-th used feature. */
  inline const BinMapper* FeatureBinMapper(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature].get();
  }

  /*! \brief Bin data of the group containing the i-th feature
  *         (whole-group storage, not sliced per sub-feature). */
  inline const Bin* FeatureBin(int i) const {
    const int group = feature2group_[i];
    return feature_groups_[group]->bin_data_.get();
  }

  /*! \brief Bin data of one feature group. */
  inline const Bin* FeatureGroupBin(int group) const {
    return feature_groups_[group]->bin_data_.get();
  }

  /*! \brief Whether one feature group uses sparse storage. */
  inline bool FeatureGroupIsSparse(int group) const {
    return feature_groups_[group]->is_sparse_;
  }

  /*! \brief Iterator over the i-th used feature's bins. */
  inline BinIterator* FeatureIterator(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->SubFeatureIterator(sub_feature);
  }

  /*! \brief Iterator over a whole feature group's bins. */
  inline BinIterator* FeatureGroupIterator(int group) const {
    return feature_groups_[group]->FeatureGroupIterator();
  }

  /*! \brief Convert a bin threshold back to a real feature value. */
  inline double RealThreshold(int i, uint32_t threshold) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
  }

  // given a real threshold, find the closest threshold bin
  inline uint32_t BinThreshold(int i, double threshold_double) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
  }

  /*! \brief Create one OrderedBin per feature group, in parallel. */
  inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
    ordered_bins->resize(num_groups_);
    OMP_INIT_EX();
    #pragma omp parallel for schedule(guided)
    for (int i = 0; i < num_groups_; ++i) {
      OMP_LOOP_EX_BEGIN();
      ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
  }

  /*!
  * \brief Get meta data pointer
  * \return Pointer of meta data
  */
  inline const Metadata& metadata() const { return metadata_; }

  /*! \brief Get Number of used features */
  inline int num_features() const { return num_features_; }

  /*! \brief Get Number of feature groups */
  inline int num_feature_groups() const { return num_groups_; }

  /*! \brief Get Number of total features */
  inline int num_total_features() const { return num_total_features_; }

  /*! \brief Get the index of label column */
  inline int label_idx() const { return label_idx_; }

  /*! \brief Get names of current data set */
  inline const std::vector<std::string>& feature_names() const { return feature_names_; }

  /*!
  * \brief Validate and store feature names: size must equal the total feature
  *        count; names must be ASCII and JSON-safe; spaces are replaced with
  *        underscores; duplicates are fatal.
  */
  inline void set_feature_names(const std::vector<std::string>& feature_names) {
    if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
      Log::Fatal("Size of feature_names error, should equal with total number of features");
    }
    feature_names_ = std::vector<std::string>(feature_names);
    std::unordered_set<std::string> feature_name_set;
    // replace ' ' in feature_names with '_'
    bool spaceInFeatureName = false;
    for (auto& feature_name : feature_names_) {
      // check ascii
      if (!Common::CheckASCII(feature_name)) {
        Log::Fatal("Do not support non-ASCII characters in feature name.");
      }
      // check json
      if (!Common::CheckAllowedJSON(feature_name)) {
        Log::Fatal("Do not support special JSON characters in feature name.");
      }
      if (feature_name.find(' ') != std::string::npos) {
        spaceInFeatureName = true;
        std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
      }
      if (feature_name_set.count(feature_name) > 0) {
        Log::Fatal("Feature (%s) appears more than one time.", feature_name.c_str());
      }
      feature_name_set.insert(feature_name);
    }
    if (spaceInFeatureName) {
      Log::Warning("Find whitespaces in feature_names, replace with underlines");
    }
  }

  /*! \brief Per-column bin info strings; "none" for unused columns. */
  inline std::vector<std::string> feature_infos() const {
    std::vector<std::string> bufs;
    for (int i = 0; i < num_total_features_; i++) {
      int fidx = used_feature_map_[i];
      if (fidx == -1) {
        bufs.push_back("none");
      } else {
        const auto bin_mapper = FeatureBinMapper(fidx);
        bufs.push_back(bin_mapper->bin_info());
      }
    }
    return bufs;
  }

  void ResetConfig(const char* parameters);

  /*! \brief Get Number of data */
  inline data_size_t num_data() const { return num_data_; }

  /*! \brief Disable copy */
  Dataset& operator=(const Dataset&) = delete;
  /*! \brief Disable copy */
  Dataset(const Dataset&) = delete;

  void addFeaturesFrom(Dataset* other);

 private:
  std::string data_filename_;
  /*! \brief Store used features */
  std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
  /*! \brief Mapper from real feature index to used index*/
  std::vector<int> used_feature_map_;
  /*! \brief Number of used features*/
  int num_features_;
  /*! \brief Number of total features*/
  int num_total_features_;
  /*! \brief Number of total data*/
  data_size_t num_data_;
  /*! \brief Store some label level data*/
  Metadata metadata_;
  /*! \brief index of label column */
  int label_idx_ = 0;
  /*! \brief Threshold for treating a feature as a sparse feature */
  double sparse_threshold_;
  /*! \brief store feature names */
  std::vector<std::string> feature_names_;
  /*! \brief NOTE(review): presumably a magic token identifying the binary
  *          file format (original comment here duplicated "store feature
  *          names") — confirm against SaveBinaryFile's implementation */
  static const char* binary_file_token;
  /*! \brief Number of feature groups */
  int num_groups_;
  /*! \brief Map from inner feature index to real column index */
  std::vector<int> real_feature_idx_;
  /*! \brief Group index of each inner feature */
  std::vector<int> feature2group_;
  /*! \brief Sub-feature index of each inner feature within its group */
  std::vector<int> feature2subfeature_;
  /*! \brief Cumulative bin boundaries per group; back() is the grand total */
  std::vector<uint64_t> group_bin_boundaries_;
  std::vector<int> group_feature_start_;
  std::vector<int> group_feature_cnt_;
  /*! \brief Per-feature monotone constraints; empty means unconstrained */
  std::vector<int8_t> monotone_types_;
  /*! \brief Per-feature split penalties; empty means no penalty (1.0) */
  std::vector<double> feature_penalty_;
  /*! \brief Set true once FinishLoad completes; push methods become no-ops */
  bool is_finish_load_;
  int max_bin_;
  std::vector<int32_t> max_bin_by_feature_;
  std::vector<std::vector<double>> forced_bin_bounds_;
  int bin_construct_sample_cnt_;
  int min_data_in_bin_;
  bool use_missing_;
  bool zero_as_missing_;
  /*! \brief Features that require explicit zero fill for sparse rows */
  std::vector<int> feature_need_push_zeros_;
};

}  // namespace LightGBM

#endif   // LightGBM_DATA_H_
tabu_search_core.h
/*****************************************************************************/ // Copyright (c) 2020-2021 Yuji KOGUMA // Released under the MIT license // https://opensource.org/licenses/mit-license.php /*****************************************************************************/ #ifndef PRINTEMPS_SOLVER_TABU_SEARCH_CORE_TABU_SEARCH_CORE_H__ #define PRINTEMPS_SOLVER_TABU_SEARCH_CORE_TABU_SEARCH_CORE_H__ #include "../../memory.h" #include "tabu_search_core_move_score.h" #include "tabu_search_core_move_evaluator.h" #include "tabu_search_core_termination_status.h" #include "tabu_search_core_state.h" #include "tabu_search_core_state_manager.h" #include "tabu_search_core_result.h" namespace printemps { namespace solver { namespace tabu_search { namespace core { /*****************************************************************************/ template <class T_Variable, class T_Expression> class TabuSearchCore { private: model::Model<T_Variable, T_Expression>* m_model_ptr; std::vector<multi_array::ValueProxy<T_Variable>> m_initial_variable_value_proxies; solution::IncumbentHolder<T_Variable, T_Expression>* m_incumbent_holder_ptr; Memory<T_Variable, T_Expression>* m_memory_ptr; option::Option m_option; std::vector<solution::SparseSolution<T_Variable, T_Expression>> m_feasible_solutions; TabuSearchCoreStateManager<T_Variable, T_Expression> m_state_manager; TabuSearchCoreResult<T_Variable, T_Expression> m_result; std::mt19937 m_get_rand_mt; /*************************************************************************/ inline void preprocess(void) { /** * Reset the local augmented incumbent. */ m_incumbent_holder_ptr->reset_local_augmented_incumbent(); /** * Reset the feasible solutions storage. */ m_feasible_solutions.clear(); /** * Prepare a random generator, which is used for shuffling moves. */ m_get_rand_mt.seed(m_option.tabu_search.seed); /** * Reset the last update iterations. 
*/ m_memory_ptr->reset_last_update_iterations(); /** * Initialize the solution and update the model. */ m_model_ptr->import_variable_values(m_initial_variable_value_proxies); m_model_ptr->update(); /** * Reset the variable improvability. */ m_model_ptr->reset_variable_objective_improvabilities(); m_model_ptr->reset_variable_feasibility_improvabilities(); m_state_manager.setup(m_model_ptr, // m_incumbent_holder_ptr, // m_memory_ptr, // m_option); } /*************************************************************************/ inline void postprocess(void) { /** * Prepare the result. */ m_result = TabuSearchCoreResult<T_Variable, T_Expression>( m_state_manager.state()); } /*************************************************************************/ inline bool satisfy_time_over_terminate_condition(void) { const auto& STATE = m_state_manager.state(); if (STATE.elapsed_time > m_option.tabu_search.time_max) { m_state_manager.set_termination_status( TabuSearchCoreTerminationStatus::TIME_OVER); return true; } if (STATE.elapsed_time + m_option.tabu_search.time_offset > m_option.time_max) { m_state_manager.set_termination_status( TabuSearchCoreTerminationStatus::TIME_OVER); return true; } return false; } /*************************************************************************/ inline bool satisfy_iteration_over_terminate_condition(void) { const auto& STATE = m_state_manager.state(); if (STATE.iteration >= m_option.tabu_search.iteration_max) { m_state_manager.set_termination_status( TabuSearchCoreTerminationStatus::ITERATION_OVER); return true; } return false; } /*************************************************************************/ inline bool satisfy_reach_target_terminate_condition(void) { if (m_incumbent_holder_ptr->feasible_incumbent_objective() <= m_option.target_objective_value) { m_state_manager.set_termination_status( TabuSearchCoreTerminationStatus::REACH_TARGET); return true; } return false; } 
/*************************************************************************/ inline bool satisfy_early_stop_terminate_condition(void) { const auto& STATE = m_state_manager.state(); if (STATE.local_augmented_incumbent_update_count > m_option.tabu_search.pruning_rate_threshold * m_option.tabu_search.iteration_max) { m_state_manager.set_termination_status( TabuSearchCoreTerminationStatus::EARLY_STOP); return true; } return false; } /*************************************************************************/ inline bool satisfy_optimal_or_no_move_terminate_condition(void) { const auto& STATE = m_state_manager.state(); if (STATE.number_of_moves > 0) { return false; } if (m_model_ptr->is_linear() && m_model_ptr->is_feasible()) { /** * NOTE: If the current solution is feasible and there is no * improvable solution, the solution should be an optimum. It can * happen for decomp2 instance in MIPLIB 2017. */ m_state_manager.set_termination_status( TabuSearchCoreTerminationStatus::OPTIMAL); for (const auto& variable_ptr : m_model_ptr->variable_reference().variable_ptrs) { if (variable_ptr->is_objective_improvable()) { m_state_manager.set_termination_status( TabuSearchCoreTerminationStatus::NO_MOVE); break; } } return true; } else { m_state_manager.set_termination_status( TabuSearchCoreTerminationStatus::NO_MOVE); return true; } return false; } /*************************************************************************/ inline bool satisfy_penalty_coefficient_too_large_terminate_condition( const std::vector<solution::SolutionScore>& a_TRIAL_SOLUTION_SCORES) { const auto& STATE = m_state_manager.state(); constexpr int ITERATION_MIN = 10; constexpr double MARGIN = 100.0; if (STATE.iteration <= ITERATION_MIN) { return false; } if (!STATE.current_solution_score.is_feasible) { return false; } double min_infeasible_local_penalty = HUGE_VALF; bool has_infeasible_trial_solution = false; for (const auto& score : a_TRIAL_SOLUTION_SCORES) { if (!score.is_feasible) { 
min_infeasible_local_penalty = std::min(min_infeasible_local_penalty, score.local_penalty); has_infeasible_trial_solution = true; } } if (!has_infeasible_trial_solution) { return false; } const auto SCORE_PTR_PAIR = std::minmax_element( a_TRIAL_SOLUTION_SCORES.begin(), a_TRIAL_SOLUTION_SCORES.end(), [](const auto& a_FIRST, const auto& a_SECOND) { return a_FIRST.objective_improvement < a_SECOND.objective_improvement; }); const double MAX_OBJECTIVE_SENSITIVITY = std::max(SCORE_PTR_PAIR.second->objective_improvement, -SCORE_PTR_PAIR.first->objective_improvement); if (MAX_OBJECTIVE_SENSITIVITY * MARGIN < min_infeasible_local_penalty) { m_state_manager.set_termination_status( TabuSearchCoreTerminationStatus::PENALTY_COEFFICIENT_TOO_LARGE); return true; } return false; } /*************************************************************************/ inline void update_moves(void) { const auto& STATE = m_state_manager.state(); bool accept_all = true; bool accept_objective_improvable = true; bool accept_feasibility_improvable = true; if (!m_model_ptr->is_linear() || m_option.improvability_screening_mode == option::improvability_screening_mode::Off) { m_model_ptr->neighborhood().update_moves( accept_all, // accept_objective_improvable, // accept_feasibility_improvable, // m_option.is_enabled_parallel_neighborhood_update); m_state_manager.set_number_of_moves( m_model_ptr->neighborhood().move_ptrs().size()); return; } /** * If the option improvability_screening_mode is not Off, only * improvable moves will be generated. 
*/ if (STATE.iteration == 0) { m_model_ptr->update_variable_objective_improvabilities(); } else { m_model_ptr->update_variable_objective_improvabilities( utility::to_vector( neighborhood::related_variable_ptrs(STATE.current_move))); } switch (m_option.improvability_screening_mode) { case option::improvability_screening_mode::Soft: { if (m_model_ptr->is_feasible()) { accept_all = false; accept_objective_improvable = true; accept_feasibility_improvable = false; } else { m_model_ptr->reset_variable_feasibility_improvabilities(); m_model_ptr->update_variable_feasibility_improvabilities( m_model_ptr->violative_constraint_ptrs()); accept_all = false; accept_objective_improvable = true; accept_feasibility_improvable = true; } break; } case option::improvability_screening_mode::Aggressive: { if (m_model_ptr->is_feasible()) { accept_all = false; accept_objective_improvable = true; accept_feasibility_improvable = false; } else { m_model_ptr->reset_variable_feasibility_improvabilities(); m_model_ptr->update_variable_feasibility_improvabilities( m_model_ptr->violative_constraint_ptrs()); accept_all = false; accept_objective_improvable = false; accept_feasibility_improvable = true; } break; } case option::improvability_screening_mode::Intensive: { if (m_model_ptr->is_feasible()) { accept_all = false; accept_objective_improvable = true; accept_feasibility_improvable = false; } else { if (STATE.iteration == 0) { m_model_ptr ->reset_variable_feasibility_improvabilities(); m_model_ptr ->update_variable_feasibility_improvabilities(); } else { const auto CHANGED_CONSTRAINT_PTRS = utility::to_vector( STATE.current_move.related_constraint_ptrs); m_model_ptr->reset_variable_feasibility_improvabilities( CHANGED_CONSTRAINT_PTRS); m_model_ptr ->update_variable_feasibility_improvabilities( CHANGED_CONSTRAINT_PTRS); } accept_all = false; accept_objective_improvable = false; accept_feasibility_improvable = true; } break; } default: { throw std::logic_error(utility::format_error_location( 
__FILE__, __LINE__, __func__, "The specified improvability screening mode is " "invalid.")); } } m_model_ptr->neighborhood().update_moves( accept_all, // accept_objective_improvable, // accept_feasibility_improvable, // m_option.is_enabled_parallel_neighborhood_update); m_state_manager.set_number_of_moves( m_model_ptr->neighborhood().move_ptrs().size()); } /*************************************************************************/ inline void curtail_moves(void) { m_state_manager.set_number_of_moves(static_cast<int>( floor(m_option.tabu_search.move_preserve_rate * m_model_ptr->neighborhood().move_ptrs().size()))); } /*************************************************************************/ inline std::pair<int, bool> select_move( const std::vector<double>& a_TOTAL_SCORES, const std::vector<TabuSearchCoreMoveScore>& a_TRIAL_MOVE_SCORES, const std::vector<solution::SolutionScore>& a_TRIAL_SOLUTION_SCORES) { const auto& STATE = m_state_manager.state(); int selected_index = 0; bool is_aspirated = false; if (STATE.iteration < m_option.tabu_search.number_of_initial_modification) { /** * For diversification, the move for next solution will be * randomly selected for initial several iteration. */ selected_index = m_get_rand_mt() % STATE.number_of_moves; is_aspirated = false; return std::make_pair(selected_index, is_aspirated); } /** * The move for next solution will be determined by evaluations * of solutions and moves after the inital modifications. */ selected_index = utility::argmin(a_TOTAL_SCORES); is_aspirated = false; /** * A move which improves the augmented incumbent solution can be * accepted (optional). 
*/ if (!m_option.tabu_search.ignore_tabu_if_global_incumbent) { return std::make_pair(selected_index, is_aspirated); } const int ARGMIN_GLOBAL_AUGMENTED_OBJECTIVE = solution::argmin_index_global_augmented_objective( a_TRIAL_SOLUTION_SCORES); if (a_TRIAL_SOLUTION_SCORES[ARGMIN_GLOBAL_AUGMENTED_OBJECTIVE] .global_augmented_objective + constant::EPSILON < m_incumbent_holder_ptr->global_augmented_incumbent_objective()) { selected_index = ARGMIN_GLOBAL_AUGMENTED_OBJECTIVE; if (!a_TRIAL_MOVE_SCORES[selected_index].is_permissible) { is_aspirated = true; } } return std::make_pair(selected_index, is_aspirated); } /*************************************************************************/ inline void update_memory( const neighborhood::Move<T_Variable, T_Expression>* a_move_ptr) { const auto& STATE = m_state_manager.state(); const int RANDOM_WIDTH = static_cast<int>(m_option.tabu_search.tabu_tenure_randomize_rate * STATE.tabu_tenure); m_memory_ptr->update(*a_move_ptr, // STATE.iteration, // RANDOM_WIDTH, // &m_get_rand_mt); } /*************************************************************************/ inline void update_chain_moves(void) { auto& STATE = m_state_manager.state(); if ((STATE.previous_move.sense == neighborhood::MoveSense::Binary && STATE.current_move.sense == neighborhood::MoveSense::Binary && STATE.previous_move.alterations.front().second != STATE.current_move.alterations.front().second) || (STATE.previous_move.sense == neighborhood::MoveSense::Chain && STATE.current_move.sense == neighborhood::MoveSense::Chain) || (STATE.previous_move.sense == neighborhood::MoveSense::TwoFlip && STATE.current_move.sense == neighborhood::MoveSense::TwoFlip)) { neighborhood::Move<T_Variable, T_Expression> chain_move; if (STATE.previous_move.alterations.front().first < STATE.current_move.alterations.front().first) chain_move = STATE.previous_move + STATE.current_move; else { chain_move = STATE.current_move + STATE.previous_move; } if (chain_move.overlap_rate > 
m_option.chain_move_overlap_rate_threshold && !neighborhood::has_duplicate_variable(chain_move)) { auto back_chain_move = chain_move; for (auto&& alteration : back_chain_move.alterations) { alteration.second = 1 - alteration.second; } m_model_ptr->neighborhood().chain().register_move(chain_move); m_model_ptr->neighborhood().chain().register_move( back_chain_move); } } } /*****************************************************************************/ inline void print_table_header(const bool a_IS_ENABLED_PRINT) { if (!a_IS_ENABLED_PRINT) { return; } utility::print( "---------+------------------------+----------------------+--------" "--------------", true); utility::print( "Iteration| Number of Neighborhoods| Current Solution |" " Incumbent Solution ", true); utility::print( " | All Feas. Perm. Impr. | Aug.Obj.(Penalty) | " " Aug.Obj. Feas.Obj ", true); utility::print( "---------+------------------------+----------------------+--------" "--------------", true); } /*************************************************************************/ inline void print_table_initial(const bool a_IS_ENABLED_PRINT) { if (!a_IS_ENABLED_PRINT) { return; } const auto& STATE = m_state_manager.state(); const auto SIGN = m_model_ptr->sign(); std::printf( " INITIAL | - - | %9.2e(%9.2e) | %9.2e %9.2e\n", STATE.current_solution_score.local_augmented_objective * SIGN, STATE.current_solution_score.is_feasible ? 
0.0 : STATE.current_solution_score.local_penalty, // m_incumbent_holder_ptr->global_augmented_incumbent_objective() * SIGN, m_incumbent_holder_ptr->feasible_incumbent_objective() * SIGN); } /*************************************************************************/ inline void print_table_body(const bool a_IS_ENABLED_PRINT) { if (!a_IS_ENABLED_PRINT) { return; } const auto& STATE = m_state_manager.state(); const auto SIGN = m_model_ptr->sign(); char mark_special_neighborhood_move = ' '; char mark_current = ' '; char mark_global_augmented_incumbent = ' '; char mark_feasible_incumbent = ' '; if (STATE.current_move.is_special_neighborhood_move) { mark_special_neighborhood_move = 's'; } if (STATE.update_status & // solution::IncumbentHolderConstant:: STATUS_LOCAL_AUGMENTED_INCUMBENT_UPDATE) { mark_current = '!'; } if (STATE.update_status & // solution::IncumbentHolderConstant:: STATUS_GLOBAL_AUGMENTED_INCUMBENT_UPDATE) { mark_current = '#'; mark_global_augmented_incumbent = '#'; if (STATE.is_aspirated) { mark_current = '@'; mark_global_augmented_incumbent = '@'; } } if (STATE.update_status & // solution::IncumbentHolderConstant:: STATUS_FEASIBLE_INCUMBENT_UPDATE) { mark_current = '*'; mark_global_augmented_incumbent = '*'; mark_feasible_incumbent = '*'; if (STATE.is_aspirated) { mark_current = '@'; mark_global_augmented_incumbent = '@'; mark_feasible_incumbent = '@'; } } auto int_format = [](const int a_VALUE) { if (a_VALUE >= 100000) { return utility::to_string(a_VALUE / 1000, "%4dk"); } else { return utility::to_string(a_VALUE, "%5d"); } }; std::printf( // "%8d%c|%s %s %s %s |%c%9.2e(%9.2e) |%c%9.2e %c%9.2e\n", STATE.iteration, mark_special_neighborhood_move, // int_format(STATE.number_of_all_neighborhoods).c_str(), // int_format(STATE.number_of_feasible_neighborhoods).c_str(), // int_format(STATE.number_of_permissible_neighborhoods).c_str(), // int_format(STATE.number_of_improvable_neighborhoods).c_str(), // mark_current, // 
STATE.current_solution_score.local_augmented_objective * SIGN, // STATE.current_solution_score.is_feasible ? 0.0 : STATE.current_solution_score.local_penalty, // mark_global_augmented_incumbent, // m_incumbent_holder_ptr->global_augmented_incumbent_objective() * SIGN, mark_feasible_incumbent, // m_incumbent_holder_ptr->feasible_incumbent_objective() * SIGN); } /*************************************************************************/ inline void print_table_footer(const bool a_IS_ENABLED_PRINT) { if (!a_IS_ENABLED_PRINT) { return; } utility::print( "---------+------------------------+----------------------+--------" "--------------"); } public: /*************************************************************************/ TabuSearchCore(void) { this->initialize(); } /*************************************************************************/ TabuSearchCore(model::Model<T_Variable, T_Expression>* a_model_ptr, // const std::vector<multi_array::ValueProxy<T_Variable>>& // a_INITIAL_VARIABLE_VALUE_PROXIES, // solution::IncumbentHolder<T_Variable, T_Expression>* // a_incumbent_holder_ptr, // Memory<T_Variable, T_Expression>* a_memory_ptr, // const option::Option& a_OPION) { this->initialize(); this->setup(a_model_ptr, // a_INITIAL_VARIABLE_VALUE_PROXIES, // a_incumbent_holder_ptr, // a_memory_ptr, // a_OPION); } /*************************************************************************/ virtual ~TabuSearchCore(void) { /// nothing to do } /*************************************************************************/ inline void initialize(void) { m_model_ptr = nullptr; m_initial_variable_value_proxies.clear(); m_incumbent_holder_ptr = nullptr; m_memory_ptr = nullptr; m_option.initialize(); m_feasible_solutions.clear(); m_state_manager.initialize(); m_result.initialize(); m_get_rand_mt.seed(0); } /*************************************************************************/ inline void setup( // model::Model<T_Variable, T_Expression>* a_model_ptr, // const 
std::vector<multi_array::ValueProxy<T_Variable>>& // a_INITIAL_VARIABLE_VALUE_PROXIES, // solution::IncumbentHolder<T_Variable, T_Expression>* // a_incumbent_holder_ptr, // Memory<T_Variable, T_Expression>* a_memory_ptr, // const option::Option& a_OPTION) { m_model_ptr = a_model_ptr; m_initial_variable_value_proxies = a_INITIAL_VARIABLE_VALUE_PROXIES; m_incumbent_holder_ptr = a_incumbent_holder_ptr; m_memory_ptr = a_memory_ptr; m_option = a_OPTION; m_feasible_solutions.clear(); } /*************************************************************************/ inline void run(void) { /** * Start to measure computational time. */ utility::TimeKeeper time_keeper; time_keeper.set_start_time(); const auto& STATE = m_state_manager.state(); /** * Preprocess. */ this->preprocess(); /** * Prepare the move evaluator. */ TabuSearchCoreMoveEvaluator<T_Variable, T_Expression> move_evaluator( m_model_ptr, m_memory_ptr, m_option); std::vector<solution::SolutionScore> trial_solution_scores; std::vector<TabuSearchCoreMoveScore> trial_move_scores; std::vector<double> total_scores; /** * Print the header of optimization progress table and print the initial * solution status. */ utility::print_single_line(m_option.verbose >= option::verbose::Full); utility::print_message("Tabu Search starts.", m_option.verbose >= option::verbose::Full); print_table_header(m_option.verbose >= option::verbose::Full); print_table_initial(m_option.verbose >= option::verbose::Full); /** * Iterations start. */ m_state_manager.reset_iteration(); while (true) { m_state_manager.set_elapsed_time(time_keeper.clock()); /** * Terminate the loop if the time is over. */ if (this->satisfy_time_over_terminate_condition()) { break; } /** * Terminate the loop if the iteration is over. */ if (this->satisfy_iteration_over_terminate_condition()) { break; } /** * Terminate the loop if the objective value of the feasible * incumbent reaches the target value. 
*/ if (this->satisfy_reach_target_terminate_condition()) { break; } /** * Terminate the loop if "early stop" condition is satisfied. */ if (this->satisfy_early_stop_terminate_condition()) { break; } /** * Update the moves. */ this->update_moves(); /** * Shuffle the moves. */ if (m_option.tabu_search.is_enabled_shuffle) { m_model_ptr->neighborhood().shuffle_moves(&m_get_rand_mt); } /** * Curtail moves (optional). */ if (m_option.tabu_search.is_enabled_move_curtail) { this->curtail_moves(); } /** * Terminate the loop if the optimal solution is found or there are * no improving moves. */ if (this->satisfy_optimal_or_no_move_terminate_condition()) { break; } /** * Reserve elements for vectors by the number of the moves. */ const auto& TRIAL_MOVE_PTRS = m_model_ptr->neighborhood().move_ptrs(); trial_solution_scores.resize(STATE.number_of_moves); trial_move_scores.resize(STATE.number_of_moves); total_scores.resize(STATE.number_of_moves); const auto NUMBER_OF_MOVES = STATE.number_of_moves; const auto CURRENT_SOLUTION_SCORE = STATE.current_solution_score; const auto ITERATION = STATE.iteration; const auto TABU_TENURE = STATE.tabu_tenure; const auto DURATION = ITERATION - TABU_TENURE; #ifdef _OPENMP #pragma omp parallel for if (m_option.is_enabled_parallel_evaluation) \ schedule(static) #endif for (auto i = 0; i < NUMBER_OF_MOVES; i++) { /** * The neighborhood solutions will be evaluated in parallel by * fast or ordinary(slow) evaluation methods. 
*/ #ifndef _MPS_SOLVER if (m_model_ptr->is_enabled_fast_evaluation()) { #endif if (TRIAL_MOVE_PTRS[i]->is_univariable_move) { m_model_ptr->evaluate_single( &trial_solution_scores[i], // *TRIAL_MOVE_PTRS[i], // CURRENT_SOLUTION_SCORE); } else { m_model_ptr->evaluate_multi( // &trial_solution_scores[i], // *TRIAL_MOVE_PTRS[i], // CURRENT_SOLUTION_SCORE); } #ifndef _MPS_SOLVER } else { m_model_ptr->evaluate(&trial_solution_scores[i], // *TRIAL_MOVE_PTRS[i]); } #endif move_evaluator.evaluate(&trial_move_scores[i], // *TRIAL_MOVE_PTRS[i], // ITERATION, // DURATION); total_scores[i] = trial_solution_scores[i].local_augmented_objective + trial_move_scores[i].frequency_penalty + trial_move_scores[i].lagrangian_penalty; /** * If the move is "tabu", it will be set lower priorities in * selecting a move for the next solution. */ if (!trial_move_scores[i].is_permissible) { total_scores[i] += constant::LARGE_VALUE_50; } /** * If the move is special neighborhood moves, it must improves * objective or feasibility. */ if (TRIAL_MOVE_PTRS[i]->is_special_neighborhood_move && !(trial_solution_scores[i].is_objective_improvable || trial_solution_scores[i].is_feasibility_improvable)) { total_scores[i] += constant::LARGE_VALUE_100; } } /** * Select moves for the next solution. */ const auto SELECT_RESULT = this->select_move( total_scores, trial_move_scores, trial_solution_scores); const auto SELECTED_INDEX = SELECT_RESULT.first; const auto IS_ASPIRATED = SELECT_RESULT.second; /** * Update the model by the selected move. */ auto move_ptr = TRIAL_MOVE_PTRS[SELECTED_INDEX]; m_model_ptr->update(*move_ptr); /** * Update the memory. */ this->update_memory(move_ptr); /** * Update the state. */ m_state_manager.update(move_ptr, // SELECTED_INDEX, // IS_ASPIRATED, // trial_move_scores, // trial_solution_scores); /** * To avoid cycling, each special neighborhood can be used only once * in one tabu search loop. 
*/ if (move_ptr->is_special_neighborhood_move) { move_ptr->is_available = false; } /** * Update the stored chain moves. */ if (STATE.iteration > 0 && m_option.is_enabled_chain_move) { this->update_chain_moves(); } /** * Store the current feasible solution. */ if (m_option.is_enabled_store_feasible_solutions && STATE.current_solution_score.is_feasible) { m_feasible_solutions.push_back( m_model_ptr->export_sparse_solution()); } /** * Print the optimization progress. */ if ((STATE.iteration % std::max(m_option.tabu_search.log_interval, 1)) == 0 || STATE.update_status > 0) { print_table_body(m_option.verbose >= option::verbose::Full); } /** * If the local penalty us sufficiently larger than objective * sensitivity, the current loop will be terminated and the local * penalty coefficients will be adjusted. */ if (m_option.tabu_search.is_enabled_automatic_break) { if (this->satisfy_penalty_coefficient_too_large_terminate_condition( trial_solution_scores)) { break; } } m_state_manager.next_iteration(); } /** * Print the footer of the optimization progress table. */ print_table_footer(m_option.verbose >= option::verbose::Full); /** * Postprocess. 
     */
    this->postprocess();
}

/*************************************************************************/
// Accessor: raw pointer to the model under optimization (set in setup()).
inline constexpr model::Model<T_Variable, T_Expression>* model_ptr(void) {
    return m_model_ptr;
}

/*************************************************************************/
// Accessor: holder of the best-known (incumbent) solutions found so far.
inline constexpr solution::IncumbentHolder<T_Variable, T_Expression>*
incumbent_holder_ptr(void) {
    return m_incumbent_holder_ptr;
}

/*************************************************************************/
// Accessor: long-term memory (frequency/recency counters) used by the
// tabu search to penalize recently/often visited moves.
inline constexpr Memory<T_Variable, T_Expression>* memory_ptr(void) {
    return m_memory_ptr;
}

/*************************************************************************/
// Accessor: feasible solutions collected during run() in sparse form.
// Populated only when option is_enabled_store_feasible_solutions is set.
inline constexpr const std::vector<
    solution::SparseSolution<T_Variable, T_Expression>>&
feasible_solutions(void) const {
    return m_feasible_solutions;
}

/*************************************************************************/
// Accessor: result summary of the last run (filled in postprocess()).
inline constexpr const TabuSearchCoreResult<T_Variable, T_Expression>&
result(void) const {
    return m_result;
}
};  // namespace core
}  // namespace core
}  // namespace tabu_search
}  // namespace solver
}  // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/
data.h
/*! * Copyright (c) 2015 by Contributors * \file data.h * \brief The input data structure of xgboost. * \author Tianqi Chen */ #ifndef XGBOOST_DATA_H_ #define XGBOOST_DATA_H_ #include <dmlc/base.h> #include <dmlc/data.h> #include <rabit/rabit.h> #include <xgboost/base.h> #include <xgboost/span.h> #include <xgboost/host_device_vector.h> #include <memory> #include <numeric> #include <algorithm> #include <string> #include <utility> #include <vector> namespace xgboost { // forward declare dmatrix. class DMatrix; /*! \brief data type accepted by xgboost interface */ enum class DataType : uint8_t { kFloat32 = 1, kDouble = 2, kUInt32 = 3, kUInt64 = 4 }; /*! * \brief Meta information about dataset, always sit in memory. */ class MetaInfo { public: /*! \brief number of data fields in MetaInfo */ static constexpr uint64_t kNumField = 9; /*! \brief number of rows in the data */ uint64_t num_row_{0}; /*! \brief number of columns in the data */ uint64_t num_col_{0}; /*! \brief number of nonzero entries in the data */ uint64_t num_nonzero_{0}; /*! \brief label of each instance */ HostDeviceVector<bst_float> labels_; /*! * \brief the index of begin and end of a group * needed when the learning task is ranking. */ std::vector<bst_group_t> group_ptr_; /*! \brief weights of each instance, optional */ HostDeviceVector<bst_float> weights_; /*! * \brief initialized margins, * if specified, xgboost will start from this init margin * can be used to specify initial prediction to boost from. */ HostDeviceVector<bst_float> base_margin_; /*! * \brief lower bound of the label, to be used for survival analysis (censored regression) */ HostDeviceVector<bst_float> labels_lower_bound_; /*! * \brief upper bound of the label, to be used for survival analysis (censored regression) */ HostDeviceVector<bst_float> labels_upper_bound_; /*! 
\brief default constructor */ MetaInfo() = default; MetaInfo& operator=(MetaInfo const& that) { this->num_row_ = that.num_row_; this->num_col_ = that.num_col_; this->num_nonzero_ = that.num_nonzero_; this->labels_.Resize(that.labels_.Size()); this->labels_.Copy(that.labels_); this->group_ptr_ = that.group_ptr_; this->weights_.Resize(that.weights_.Size()); this->weights_.Copy(that.weights_); this->base_margin_.Resize(that.base_margin_.Size()); this->base_margin_.Copy(that.base_margin_); return *this; } /*! * \brief Get weight of each instances. * \param i Instance index. * \return The weight. */ inline bst_float GetWeight(size_t i) const { return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f; } /*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */ inline const std::vector<size_t>& LabelAbsSort() const { if (label_order_cache_.size() == labels_.Size()) { return label_order_cache_; } label_order_cache_.resize(labels_.Size()); std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0); const auto& l = labels_.HostVector(); XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(), [&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);}); return label_order_cache_; } /*! \brief clear all the information */ void Clear(); /*! * \brief Load the Meta info from binary stream. * \param fi The input stream */ void LoadBinary(dmlc::Stream* fi); /*! * \brief Save the Meta info to binary stream * \param fo The output stream. */ void SaveBinary(dmlc::Stream* fo) const; /*! * \brief Set information in the meta info. * \param key The key of the information. * \param dptr The data pointer of the source array. * \param dtype The type of the source data. * \param num Number of elements in the source array. */ void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num); /*! * \brief Set information in the meta info with array interface. * \param key The key of the information. 
* \param interface_str String representation of json format array interface. * * [ column_0, column_1, ... column_n ] * * Right now only 1 column is permitted. */ void SetInfo(const char* key, std::string const& interface_str); private: /*! \brief argsort of labels */ mutable std::vector<size_t> label_order_cache_; }; /*! \brief Element from a sparse vector */ struct Entry { /*! \brief feature index */ bst_feature_t index; /*! \brief feature value */ bst_float fvalue; /*! \brief default constructor */ Entry() = default; /*! * \brief constructor with index and value * \param index The feature or row index. * \param fvalue The feature value. */ XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {} /*! \brief reversely compare feature values */ inline static bool CmpValue(const Entry& a, const Entry& b) { return a.fvalue < b.fvalue; } inline bool operator==(const Entry& other) const { return (this->index == other.index && this->fvalue == other.fvalue); } }; /*! * \brief Parameters for constructing batches. */ struct BatchParam { /*! \brief The GPU device to use. */ int gpu_id; /*! \brief Maximum number of bins per feature for histograms. */ int max_bin{0}; /*! \brief Page size for external memory mode. */ size_t gpu_page_size; BatchParam() = default; BatchParam(int32_t device, int32_t max_bin, size_t gpu_page_size = 0) : gpu_id{device}, max_bin{max_bin}, gpu_page_size{gpu_page_size} {} inline bool operator!=(const BatchParam& other) const { return gpu_id != other.gpu_id || max_bin != other.max_bin || gpu_page_size != other.gpu_page_size; } }; /*! * \brief In-memory storage unit of sparse batch, stored in CSR format. */ class SparsePage { public: // Offset for each row. HostDeviceVector<bst_row_t> offset; /*! \brief the data of the segments */ HostDeviceVector<Entry> data; size_t base_rowid{}; /*! \brief an instance of sparse vector in the batch */ using Inst = common::Span<Entry const>; /*! 
\brief get i-th row from the batch */ inline Inst operator[](size_t i) const { const auto& data_vec = data.HostVector(); const auto& offset_vec = offset.HostVector(); size_t size; // in distributed mode, some partitions may not get any instance for a feature. Therefore // we should set the size as zero if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) { size = 0; } else { size = offset_vec[i + 1] - offset_vec[i]; } return {data_vec.data() + offset_vec[i], static_cast<Inst::index_type>(size)}; } /*! \brief constructor */ SparsePage() { this->Clear(); } /*! \return Number of instances in the page. */ inline size_t Size() const { return offset.Size() == 0 ? 0 : offset.Size() - 1; } /*! \return estimation of memory cost of this page */ inline size_t MemCostBytes() const { return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry); } /*! \brief clear the page */ inline void Clear() { base_rowid = 0; auto& offset_vec = offset.HostVector(); offset_vec.clear(); offset_vec.push_back(0); data.HostVector().clear(); } /*! \brief Set the base row id for this page. */ inline void SetBaseRowId(size_t row_id) { base_rowid = row_id; } SparsePage GetTranspose(int num_columns) const; void SortRows() { auto ncol = static_cast<bst_omp_uint>(this->Size()); #pragma omp parallel for default(none) shared(ncol) schedule(dynamic, 1) for (bst_omp_uint i = 0; i < ncol; ++i) { if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) { std::sort( this->data.HostVector().begin() + this->offset.HostVector()[i], this->data.HostVector().begin() + this->offset.HostVector()[i + 1], Entry::CmpValue); } } } /*! * \brief Push row block into the page. * \param batch the row batch. */ void Push(const dmlc::RowBlock<uint32_t>& batch); /** * \brief Pushes external data batch onto this page * * \tparam AdapterBatchT * \param batch * \param missing * \param nthread * * \return The maximum number of columns encountered in this input batch. 
Useful when pushing many adapter batches to work out the total number of columns. */ template <typename AdapterBatchT> uint64_t Push(const AdapterBatchT& batch, float missing, int nthread); /*! * \brief Push a sparse page * \param batch the row page */ void Push(const SparsePage &batch); /*! * \brief Push a SparsePage stored in CSC format * \param batch The row batch to be pushed */ void PushCSC(const SparsePage& batch); }; class CSCPage: public SparsePage { public: CSCPage() : SparsePage() {} explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {} }; class SortedCSCPage : public SparsePage { public: SortedCSCPage() : SparsePage() {} explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {} }; class EllpackPageImpl; /*! * \brief A page stored in ELLPACK format. * * This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid * including CUDA-specific implementation details in the header. */ class EllpackPage { public: /*! * \brief Default constructor. * * This is used in the external memory case. An empty ELLPACK page is constructed with its content * set later by the reader. */ EllpackPage(); /*! * \brief Constructor from an existing DMatrix. * * This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix * in CSR format. */ explicit EllpackPage(DMatrix* dmat, const BatchParam& param); /*! \brief Destructor. */ ~EllpackPage(); /*! \return Number of instances in the page. */ size_t Size() const; /*! \brief Set the base row id for this page. 
*/ void SetBaseRowId(size_t row_id); const EllpackPageImpl* Impl() const { return impl_.get(); } EllpackPageImpl* Impl() { return impl_.get(); } private: std::unique_ptr<EllpackPageImpl> impl_; }; template<typename T> class BatchIteratorImpl { public: virtual ~BatchIteratorImpl() = default; virtual T& operator*() = 0; virtual const T& operator*() const = 0; virtual void operator++() = 0; virtual bool AtEnd() const = 0; }; template<typename T> class BatchIterator { public: using iterator_category = std::forward_iterator_tag; explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); } void operator++() { CHECK(impl_ != nullptr); ++(*impl_); } T& operator*() { CHECK(impl_ != nullptr); return *(*impl_); } const T& operator*() const { CHECK(impl_ != nullptr); return *(*impl_); } bool operator!=(const BatchIterator& rhs) const { CHECK(impl_ != nullptr); return !impl_->AtEnd(); } bool AtEnd() const { CHECK(impl_ != nullptr); return impl_->AtEnd(); } private: std::shared_ptr<BatchIteratorImpl<T>> impl_; }; template<typename T> class BatchSet { public: explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(begin_iter) {} BatchIterator<T> begin() { return begin_iter_; } BatchIterator<T> end() { return BatchIterator<T>(nullptr); } private: BatchIterator<T> begin_iter_; }; /*! * \brief This is data structure that user can pass to DMatrix::Create * to create a DMatrix for training, user can create this data structure * for customized Data Loading on single machine. * * On distributed setting, usually an customized dmlc::Parser is needed instead. */ template<typename T> class DataSource : public dmlc::DataIter<T> { public: /*! * \brief Meta information about the dataset * The subclass need to be able to load this correctly from data. */ MetaInfo info; }; /*! * \brief Internal data structured used by XGBoost during training. * There are two ways to create a customized DMatrix that reads in user defined-format. 
* * - Provide a dmlc::Parser and pass into the DMatrix::Create * - Alternatively, if data can be represented by an URL, define a new dmlc::Parser and register by * DMLC_REGISTER_DATA_PARSER; * - This works best for user defined data input source, such as data-base, filesystem. * - Provide a DataSource, that can be passed to DMatrix::Create * This can be used to re-use inmemory data structure into DMatrix. */ class DMatrix { public: /*! \brief default constructor */ DMatrix() = default; /*! \brief meta information of the dataset */ virtual MetaInfo& Info() = 0; /*! \brief meta information of the dataset */ virtual const MetaInfo& Info() const = 0; /** * \brief Gets batches. Use range based for loop over BatchSet to access individual batches. */ template<typename T> BatchSet<T> GetBatches(const BatchParam& param = {}); template <typename T> bool PageExists() const; // the following are column meta data, should be able to answer them fast. /*! \return Whether the data columns single column block. */ virtual bool SingleColBlock() const = 0; /*! \brief virtual destructor */ virtual ~DMatrix() = default; /*! \brief Whether the matrix is dense. */ bool IsDense() const { return Info().num_nonzero_ == Info().num_row_ * Info().num_col_; } /*! * \brief Load DMatrix from URI. * \param uri The URI of input. * \param silent Whether print information during loading. * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode. * \param file_format The format type of the file, used for dmlc::Parser::Create. * By default "auto" will be able to load in both local binary file. * \param page_size Page size for external memory. * \return The created DMatrix. */ static DMatrix* Load(const std::string& uri, bool silent, bool load_row_split, const std::string& file_format = "auto", size_t page_size = kPageSize); /** * \brief Creates a new DMatrix from an external data adapter. * * \tparam AdapterT Type of the adapter. 
* \param [in,out] adapter View onto an external data. * \param missing Values to count as missing. * \param nthread Number of threads for construction. * \param cache_prefix (Optional) The cache prefix for external memory. * \param page_size (Optional) Size of the page. * * \return a Created DMatrix. */ template <typename AdapterT> static DMatrix* Create(AdapterT* adapter, float missing, int nthread, const std::string& cache_prefix = "", size_t page_size = kPageSize); /*! \brief page size 32 MB */ static const size_t kPageSize = 32UL << 20UL; protected: virtual BatchSet<SparsePage> GetRowBatches() = 0; virtual BatchSet<CSCPage> GetColumnBatches() = 0; virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0; virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0; virtual bool EllpackExists() const = 0; virtual bool SparsePageExists() const = 0; }; template<> inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) { return GetRowBatches(); } template<> inline bool DMatrix::PageExists<EllpackPage>() const { return this->EllpackExists(); } template<> inline bool DMatrix::PageExists<SparsePage>() const { return this->SparsePageExists(); } template<> inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) { return GetColumnBatches(); } template<> inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) { return GetSortedColumnBatches(); } template<> inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) { return GetEllpackBatches(param); } } // namespace xgboost namespace dmlc { DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true); } #endif // XGBOOST_DATA_H_
reorder.c
// -----------------------------------------------------------------------------
//
//      "00_AccelGraph"
//
// -----------------------------------------------------------------------------
// Copyright (c) 2014-2019 All rights reserved
// -----------------------------------------------------------------------------
// Author : Abdullah Mughrabi
// Email  : atmughra@ncsu.edu||atmughrabi@gmail.com
// File   : reorder.c
// Create : 2019-06-21 17:15:17
// Revise : 2019-09-28 15:35:52
// Editor : Abdullah Mughrabi
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#include <stdint.h>

#include "timer.h"
#include "myMalloc.h"
#include "graphConfig.h"
#include "edgeList.h"
#include "fixedPoint.h"
#include "sortRun.h"
#include "quantization.h"
#include "mt19937.h"
#include "vc_vector.h"
#include "graphCSR.h"

#include "reorder.h"

// Atomically decrement *region if it is non-zero.
// Returns 1 when the decrement happened, 0 when *region was already 0.
// A CAS retry loop guarantees concurrent callers never drive the counter
// below zero (a plain atomic decrement could).
uint32_t RegionAtomicDecrement(uint32_t *region)
{
    uint32_t oldValue;
    uint32_t flag = 0;
    do
    {
        oldValue = *region;
        if(oldValue > 0)
        {
            if(__sync_bool_compare_and_swap(region, oldValue, (oldValue - 1)))
            {
                flag = 1;
            }
        }
        else
        {
            return 0;
        }
    }
    while(!flag);
    return 1;
}

// One stable counting-sort pass of an LSD radix sort over byte `radix`
// (0 = least significant byte) of the keys in *pageRanksFP. Keys and their
// companion labels are permuted into the *Temp arrays, then the array
// pointers are swapped so the caller always reads the latest data through
// *pageRanksFP / *labels. buckets_count must hold P * buckets entries
// (one private histogram per OpenMP thread).
void radixSortCountSortEdgesByRanks (uint32_t **pageRanksFP, uint32_t **pageRanksFPTemp, uint32_t **labels, uint32_t **labelsTemp, uint32_t radix, uint32_t buckets, uint32_t *buckets_count, uint32_t num_vertices)
{
    uint32_t *tempPointer1 = NULL;
    uint32_t *tempPointer2 = NULL;
    uint32_t t = 0;
    uint32_t o = 0;
    uint32_t u = 0;
    uint32_t i = 0;
    uint32_t j = 0;
    uint32_t P = 1;  // number of OpenMP threads (set by thread 0 inside the parallel region)
    uint32_t t_id = 0;
    uint32_t offset_start = 0;
    uint32_t offset_end = 0;
    uint32_t base = 0;

    #pragma omp parallel default(none) shared(P,pageRanksFP, pageRanksFPTemp,radix,labels,labelsTemp,buckets,buckets_count, num_vertices) firstprivate(t_id, offset_end,offset_start,base,i,j,t,u,o)
    {
        t_id = omp_get_thread_num();
        if(t_id == 0)
        {
            P =
                omp_get_num_threads();
        }
        #pragma omp barrier
        // Split [0, num_vertices) into contiguous per-thread ranges; the
        // last thread also takes the division remainder.
        offset_start = t_id * (num_vertices / P);
        if(t_id == (P - 1))
        {
            offset_end = offset_start + (num_vertices / P) + (num_vertices % P) ;
        }
        else
        {
            offset_end = offset_start + (num_vertices / P);
        }
        //HISTOGRAM-KEYS
        for(i = 0; i < buckets; i++)
        {
            buckets_count[(t_id * buckets) + i] = 0;
        }
        for (i = offset_start; i < offset_end; i++)
        {
            u = (*pageRanksFP)[i];
            t = (u >> (radix * 8)) & 0xff;
            buckets_count[(t_id * buckets) + t]++;
        }
        #pragma omp barrier
        // SCAN BUCKETS
        // Thread 0 turns the per-thread histograms into exclusive prefix
        // sums across (bucket, thread) pairs; `base` is its firstprivate
        // accumulator, so this sequential scan is race-free.
        if(t_id == 0)
        {
            for(i = 0; i < buckets; i++)
            {
                for(j = 0 ; j < P; j++)
                {
                    t = buckets_count[(j * buckets) + i];
                    buckets_count[(j * buckets) + i] = base;
                    base += t;
                }
            }
        }
        #pragma omp barrier
        //RANK-AND-PERMUTE
        // Each thread scatters its range in order, so equal keys keep their
        // relative order (the pass is stable, as LSD radix sort requires).
        for (i = offset_start; i < offset_end; i++) /* radix sort */
        {
            u = (*pageRanksFP)[i];
            t = (u >> (radix * 8)) & 0xff;
            o = buckets_count[(t_id * buckets) + t];
            (*pageRanksFPTemp)[o] = (*pageRanksFP)[i];
            (*labelsTemp)[o] = (*labels)[i];
            buckets_count[(t_id * buckets) + t]++;
        }
    }

    // Swap the primary and temp arrays so callers see the sorted data.
    tempPointer1 = *labels;
    *labels = *labelsTemp;
    *labelsTemp = tempPointer1;
    tempPointer2 = *pageRanksFP;
    *pageRanksFP = *pageRanksFPTemp;
    *pageRanksFPTemp = tempPointer2;
}

uint32_t *radixSortEdgesByPageRank (float *pageRanks, uint32_t *labels, uint32_t num_vertices)
{
    // printf("*** START Radix Sort Edges By Source *** \n");
    // struct Graph* graph = graphNew(edgeList->num_vertices, edgeList->num_edges, inverse);
    // Do counting sort for every digit. Note that instead
    // of passing digit number, exp is passed.
exp is 10^i // where i is current digit number uint32_t v; uint32_t radix = 4; // 32/8 8 bit radix needs 4 iterations uint32_t P = omp_get_max_threads(); // 32/8 8 bit radix needs 4 iterations uint32_t buckets = 256; // 2^radix = 256 buckets uint32_t *buckets_count = NULL; // omp_set_num_threads(P); uint32_t j = 0; //1,2,3 iteration uint32_t *pageRanksFP = NULL; uint32_t *pageRanksFPTemp = NULL; uint32_t *labelsTemp = NULL; buckets_count = (uint32_t *) my_malloc(P * buckets * sizeof(uint32_t)); pageRanksFP = (uint32_t *) my_malloc(num_vertices * sizeof(uint32_t)); pageRanksFPTemp = (uint32_t *) my_malloc(num_vertices * sizeof(uint32_t)); labelsTemp = (uint32_t *) my_malloc(num_vertices * sizeof(uint32_t)); #pragma omp parallel for for(v = 0; v < num_vertices; v++) { pageRanksFP[v] = FloatToFixed32SORT(pageRanks[v]); pageRanksFPTemp[v] = 0; labelsTemp[v] = 0; } for(j = 0 ; j < radix ; j++) { radixSortCountSortEdgesByRanks (&pageRanksFP, &pageRanksFPTemp, &labels, &labelsTemp, j, buckets, buckets_count, num_vertices); } free(buckets_count); free(pageRanksFP); free(pageRanksFPTemp); free(labelsTemp); // for(v = 0; v < num_vertices; v++) // { // printf("rank %u label %u pr %.22f \n",v, labelsInternal[v], pageRanks[labelsInternal[v]]); // } return labels; } uint32_t *radixSortEdgesByDegree (uint32_t *degrees, uint32_t *labels, uint32_t num_vertices) { // printf("*** START Radix Sort Edges By Source *** \n"); // struct Graph* graph = graphNew(edgeList->num_vertices, edgeList->num_edges, inverse); // Do counting sort for every digit. Note that instead // of passing digit number, exp is passed. 
exp is 10^i // where i is current digit number uint32_t radix = 4; // 32/8 8 bit radix needs 4 iterations uint32_t P = omp_get_max_threads(); // 32/8 8 bit radix needs 4 iterations uint32_t buckets = 256; // 2^radix = 256 buckets uint32_t *buckets_count = NULL; // omp_set_num_threads(P); uint32_t j = 0; //1,2,3 iteration uint32_t *degreesTemp = NULL; uint32_t *labelsTemp = NULL; buckets_count = (uint32_t *) my_malloc(P * buckets * sizeof(uint32_t)); degreesTemp = (uint32_t *) my_malloc(num_vertices * sizeof(uint32_t)); labelsTemp = (uint32_t *) my_malloc(num_vertices * sizeof(uint32_t)); #pragma omp parallel for (j = 0; j < num_vertices; ++j) { labelsTemp[j] = 0; degreesTemp[j] = 0; } for(j = 0 ; j < radix ; j++) { radixSortCountSortEdgesByRanks (&degrees, &degreesTemp, &labels, &labelsTemp, j, buckets, buckets_count, num_vertices); } free(buckets_count); free(degreesTemp); free(labelsTemp); return labels; } // ******************************************************************************************** // *************** Degree relabel ************** // ******************************************************************************************** struct EdgeList *reorderGraphProcessDegree( uint32_t sort, struct EdgeList *edgeList, uint32_t lmode) { uint32_t i; uint32_t *degrees; degrees = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t)); #pragma omp parallel for (i = 0; i < edgeList->num_vertices; ++i) { degrees[i] = 0; } degrees = reorderGraphGenerateInOutDegrees( degrees, edgeList, lmode); edgeList = reorderGraphListDegree( edgeList, degrees, lmode); free(degrees); return edgeList; } struct EdgeList *reorderGraphListDegree(struct EdgeList *edgeList, uint32_t *degrees, uint32_t lmode) { uint32_t v; uint32_t *labelsInverse; uint32_t *labels; struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); labels = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t)); labelsInverse = (uint32_t *) my_malloc(edgeList->num_vertices * 
sizeof(uint32_t)); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Starting Degree Reordering/Relabeling"); printf(" -----------------------------------------------------\n"); switch(lmode) { case 1 : printf("| %-51s | \n", "OUT-DEGREE"); break; case 2 : printf("| %-51s | \n", "IN-DEGREE"); break; case 3 : printf("| %-51s | \n", "(IN+OUT)-DEGREE"); break; case 10 : printf("| %-51s | \n", "RANDOM-DEGREE"); break; default : printf("| %-51s | \n", "OUT-DEGREE"); } printf(" -----------------------------------------------------\n"); Start(timer); #pragma omp parallel for for(v = 0; v < edgeList->num_vertices; v++) { labelsInverse[v] = v; } labelsInverse = radixSortEdgesByDegree(degrees, labelsInverse, edgeList->num_vertices); #pragma omp parallel for for(v = 0; v < edgeList->num_vertices; v++) { labels[labelsInverse[v]] = edgeList->num_vertices - 1 - v; } Stop(timer); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Reordering Complete"); printf(" -----------------------------------------------------\n"); printf("| %-51f | \n", Seconds(timer)); printf(" -----------------------------------------------------\n"); Start(timer); edgeList = relabelEdgeList(edgeList, labels); Stop(timer); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Relabeling Complete"); printf(" -----------------------------------------------------\n"); printf("| %-51f | \n", Seconds(timer)); printf(" -----------------------------------------------------\n"); #pragma omp parallel for for (v = 0; v < edgeList->num_vertices; ++v) { edgeList->label_array[v] = labels[edgeList->label_array[v]]; edgeList->inverse_label_array[edgeList->label_array[v]] = v; } free(timer); free(labelsInverse); free(labels); return edgeList; } // ******************************************************************************************** // *************** DBG relabel ************** // 
******************************************************************************************** struct EdgeList *reorderGraphProcessDBG( uint32_t sort, struct EdgeList *edgeList, uint32_t lmode) { // UINT32_MAX uint32_t i; uint32_t *degrees; uint32_t *thresholds; uint32_t num_buckets = 11; degrees = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t)); thresholds = (uint32_t *) my_malloc(num_buckets * sizeof(uint32_t)); #pragma omp parallel for (i = 0; i < edgeList->num_vertices; ++i) { degrees[i] = 0; } // START initialize thresholds if(edgeList->avg_degree <= 1) thresholds[0] = 1; else thresholds[0] = (edgeList->avg_degree / 2); for ( i = 1; i < (num_buckets - 1); ++i) { thresholds[i] = thresholds[i - 1] * 2; } thresholds[num_buckets - 1] = UINT32_MAX; // END initialize thresholds switch(lmode) { case 4 : printf("| %-51s | \n", "DBG OUT-DEGREE"); break; case 5 : printf("| %-51s | \n", "DBG IN-DEGREE"); break; default : printf("| %-51s | \n", "DBG OUT-DEGREE"); } degrees = reorderGraphGenerateInOutDegrees(degrees, edgeList, lmode); edgeList = reorderGraphListDBG(edgeList, degrees, thresholds, num_buckets, lmode); free(thresholds); free(degrees); return edgeList; } struct EdgeList *reorderGraphListDBG(struct EdgeList *edgeList, uint32_t *degrees, uint32_t *thresholds, uint32_t num_buckets, uint32_t lmode) { uint32_t i = 0; int32_t j = 0; int32_t k = 0; void *iter = 0; uint32_t v = 0; uint32_t t = 0; uint32_t temp_idx = 0; uint32_t P = 1; uint32_t t_id = 0; uint32_t offset_start = 0; uint32_t offset_end = 0; uint32_t *start_idx = NULL; vc_vector **buckets = NULL; uint32_t *labels = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t)); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); Start(timer); #pragma omp parallel default(none) shared(P,labels,buckets,edgeList,num_buckets,degrees,thresholds,start_idx) firstprivate(iter,temp_idx,k,offset_start,offset_end,t_id,i,j,v,t) { t_id = omp_get_thread_num(); if(t_id == 0) { P = 
omp_get_num_threads(); start_idx = (uint32_t *) my_malloc(P * num_buckets * sizeof(uint32_t)); buckets = (vc_vector **) malloc(P * num_buckets * sizeof(vc_vector *)); } #pragma omp barrier for (i = 0; i < num_buckets; ++i) { buckets[(t_id * num_buckets) + i] = vc_vector_create(0, sizeof(uint32_t), NULL); } offset_start = t_id * (edgeList->num_vertices / P); if(t_id == (P - 1)) { offset_end = offset_start + (edgeList->num_vertices / P) + (edgeList->num_vertices % P) ; } else { offset_end = offset_start + (edgeList->num_vertices / P); } for (v = offset_start; v < offset_end; ++v) { for ( i = 0; i < num_buckets; ++i) { if(degrees[v] <= thresholds[i]) { vc_vector_push_back(buckets[(t_id * num_buckets) + i], &v); break; } } } #pragma omp barrier if(t_id == 0) { for ( j = num_buckets - 1; j >= 0; --j) { for (t = 0; t < P; ++t) { start_idx[(t * num_buckets) + j] = temp_idx; temp_idx += vc_vector_count(buckets[(t * num_buckets) + j]); } } } #pragma omp barrier for ( j = num_buckets - 1 ; j >= 0 ; --j) { k = start_idx[(t_id * num_buckets) + j]; for ( iter = vc_vector_begin(buckets[(t_id * num_buckets) + j]); iter != vc_vector_end(buckets[(t_id * num_buckets) + j]); iter = vc_vector_next(buckets[(t_id * num_buckets) + j], iter)) { labels[(*(uint32_t *)iter)] = k++; } } } Stop(timer); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Reordering Complete"); printf(" -----------------------------------------------------\n"); printf("| %-51f | \n", Seconds(timer)); printf(" -----------------------------------------------------\n"); Start(timer); edgeList = relabelEdgeList(edgeList, labels); Stop(timer); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Relabeling Complete"); printf(" -----------------------------------------------------\n"); printf("| %-51f | \n", Seconds(timer)); printf(" -----------------------------------------------------\n"); #pragma omp parallel for for (v = 0; v < 
edgeList->num_vertices; ++v) { edgeList->label_array[v] = labels[edgeList->label_array[v]]; edgeList->inverse_label_array[edgeList->label_array[v]] = v; } for (i = 0; i < (P * num_buckets); ++i) { vc_vector_release(buckets[i]); } free(timer); free(buckets); free(start_idx); free(labels); return edgeList; } // ******************************************************************************************** // *************** Corder relabel ************** // ******************************************************************************************** struct EdgeList *reorderGraphProcessCorder( uint32_t sort, struct EdgeList *edgeList, uint32_t lmode) { // UINT32_MAX uint32_t i; uint32_t *degrees; uint32_t *thresholds; uint32_t num_buckets = 11; degrees = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t)); thresholds = (uint32_t *) my_malloc(num_buckets * sizeof(uint32_t)); #pragma omp parallel for (i = 0; i < edgeList->num_vertices; ++i) { degrees[i] = 0; } // START initialize thresholds if(edgeList->avg_degree <= 1) thresholds[0] = 1; else thresholds[0] = (edgeList->avg_degree / 2); for ( i = 1; i < (num_buckets - 1); ++i) { thresholds[i] = thresholds[i - 1] * 2; } thresholds[num_buckets - 1] = UINT32_MAX; // END initialize thresholds switch(lmode) { case 12 : printf("| %-51s | \n", "Corder OUT-DEGREE"); break; case 13 : printf("| %-51s | \n", "Corder IN-DEGREE"); break; default : printf("| %-51s | \n", "Corder OUT-DEGREE"); } degrees = reorderGraphGenerateInOutDegrees(degrees, edgeList, lmode); edgeList = reorderGraphListDBG(edgeList, degrees, thresholds, num_buckets, lmode); free(thresholds); free(degrees); return edgeList; } struct EdgeList *reorderGraphListCorder(struct EdgeList *edgeList, uint32_t *degrees, uint32_t *thresholds, uint32_t num_buckets, uint32_t lmode) { uint32_t i = 0; int32_t j = 0; int32_t k = 0; void *iter = 0; uint32_t v = 0; uint32_t t = 0; uint32_t temp_idx = 0; uint32_t P = 1; uint32_t t_id = 0; uint32_t offset_start = 0; uint32_t 
offset_end = 0; uint32_t *start_idx = NULL; vc_vector **buckets = NULL; uint32_t *labels = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t)); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); Start(timer); #pragma omp parallel default(none) shared(P,labels,buckets,edgeList,num_buckets,degrees,thresholds,start_idx) firstprivate(iter,temp_idx,k,offset_start,offset_end,t_id,i,j,v,t) { t_id = omp_get_thread_num(); if(t_id == 0) { P = omp_get_num_threads(); start_idx = (uint32_t *) my_malloc(P * num_buckets * sizeof(uint32_t)); buckets = (vc_vector **) malloc(P * num_buckets * sizeof(vc_vector *)); } #pragma omp barrier for (i = 0; i < num_buckets; ++i) { buckets[(t_id * num_buckets) + i] = vc_vector_create(0, sizeof(uint32_t), NULL); } offset_start = t_id * (edgeList->num_vertices / P); if(t_id == (P - 1)) { offset_end = offset_start + (edgeList->num_vertices / P) + (edgeList->num_vertices % P) ; } else { offset_end = offset_start + (edgeList->num_vertices / P); } for (v = offset_start; v < offset_end; ++v) { for ( i = 0; i < num_buckets; ++i) { if(degrees[v] <= thresholds[i]) { vc_vector_push_back(buckets[(t_id * num_buckets) + i], &v); break; } } } #pragma omp barrier if(t_id == 0) { for ( j = num_buckets - 1; j >= 0; --j) { for (t = 0; t < P; ++t) { start_idx[(t * num_buckets) + j] = temp_idx; temp_idx += vc_vector_count(buckets[(t * num_buckets) + j]); } } } #pragma omp barrier for ( j = num_buckets - 1 ; j >= 0 ; --j) { k = start_idx[(t_id * num_buckets) + j]; for ( iter = vc_vector_begin(buckets[(t_id * num_buckets) + j]); iter != vc_vector_end(buckets[(t_id * num_buckets) + j]); iter = vc_vector_next(buckets[(t_id * num_buckets) + j], iter)) { labels[(*(uint32_t *)iter)] = k++; } } } Stop(timer); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Reordering Complete"); printf(" -----------------------------------------------------\n"); printf("| %-51f | \n", Seconds(timer)); printf(" 
-----------------------------------------------------\n"); Start(timer); edgeList = relabelEdgeList(edgeList, labels); Stop(timer); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Relabeling Complete"); printf(" -----------------------------------------------------\n"); printf("| %-51f | \n", Seconds(timer)); printf(" -----------------------------------------------------\n"); #pragma omp parallel for for (v = 0; v < edgeList->num_vertices; ++v) { edgeList->label_array[v] = labels[edgeList->label_array[v]]; edgeList->inverse_label_array[edgeList->label_array[v]] = v; } for (i = 0; i < (P * num_buckets); ++i) { vc_vector_release(buckets[i]); } free(timer); free(buckets); free(start_idx); free(labels); return edgeList; } // ******************************************************************************************** // *************** HUBSort relabel ************** // ******************************************************************************************** struct EdgeList *reorderGraphProcessHUBSort( uint32_t sort, struct EdgeList *edgeList, uint32_t lmode) { // UINT32_MAX uint32_t i; uint32_t *degrees; uint32_t *thresholds; uint32_t num_buckets = 2; degrees = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t)); thresholds = (uint32_t *) my_malloc(num_buckets * sizeof(uint32_t)); #pragma omp parallel for for (i = 0; i < edgeList->num_vertices; ++i) { degrees[i] = 0; } // START initialize thresholds if(edgeList->avg_degree <= 1) thresholds[0] = 1; else thresholds[0] = (edgeList->avg_degree / 2); for ( i = 1; i < (num_buckets - 1); ++i) { thresholds[i] = thresholds[i - 1] * 2; } thresholds[num_buckets - 1] = UINT32_MAX; // END initialize thresholds switch(lmode) { case 6 : printf("| %-51s | \n", "HUBSort OUT-DEGREE"); break; case 7 : printf("| %-51s | \n", "HUBSort IN-DEGREE"); break; default : printf("| %-51s | \n", "HUBSort OUT-DEGREE"); } degrees = reorderGraphGenerateInOutDegrees(degrees, edgeList, lmode); 
edgeList = reorderGraphListHUBSort(edgeList, degrees, thresholds, num_buckets, lmode); free(thresholds); free(degrees); return edgeList; } struct EdgeList *reorderGraphListHUBSort(struct EdgeList *edgeList, uint32_t *degrees, uint32_t *thresholds, uint32_t num_buckets, uint32_t lmode) { uint32_t i = 0; int32_t j = 0; int32_t k = 0; void *iter = 0; uint32_t v = 0; uint32_t t = 0; uint32_t temp_idx = 0; uint32_t P = 1; uint32_t t_id = 0; uint32_t offset_start = 0; uint32_t offset_end = 0; uint32_t *start_idx = NULL; vc_vector **buckets = NULL; uint32_t *labels = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t)); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); uint32_t *sizeHot = (uint32_t *) my_malloc(num_buckets * sizeof(uint32_t)); uint32_t **degreesHot = (uint32_t **) my_malloc(num_buckets * sizeof(uint32_t *)); uint32_t **verticesHot = (uint32_t **) my_malloc(num_buckets * sizeof(uint32_t *)); Start(timer); #pragma omp parallel default(none) shared(P,verticesHot,degreesHot,sizeHot,labels,buckets,edgeList,num_buckets,degrees,thresholds,start_idx) firstprivate(iter,temp_idx,k,offset_start,offset_end,t_id,i,j,v,t) { t_id = omp_get_thread_num(); if(t_id == 0) { P = omp_get_num_threads(); start_idx = (uint32_t *) my_malloc(P * num_buckets * sizeof(uint32_t)); buckets = (vc_vector **) malloc(P * num_buckets * sizeof(vc_vector *)); } #pragma omp barrier for (i = 0; i < num_buckets; ++i) { buckets[(t_id * num_buckets) + i] = vc_vector_create(0, sizeof(uint32_t), NULL); } offset_start = t_id * (edgeList->num_vertices / P); if(t_id == (P - 1)) { offset_end = offset_start + (edgeList->num_vertices / P) + (edgeList->num_vertices % P) ; } else { offset_end = offset_start + (edgeList->num_vertices / P); } for (v = offset_start; v < offset_end; ++v) { for ( i = 0; i < num_buckets; ++i) { if(degrees[v] <= thresholds[i]) { vc_vector_push_back(buckets[(t_id * num_buckets) + i], &v); break; } } } #pragma omp barrier if(t_id == 0) { for ( j = 
num_buckets - 1; j >= 0; --j) { temp_idx = 0; for (t = 0; t < P; ++t) { start_idx[(t * num_buckets) + j] = temp_idx; temp_idx += vc_vector_count(buckets[(t * num_buckets) + j]); } sizeHot[j] = temp_idx; degreesHot[j] = (uint32_t *) my_malloc(sizeHot[j] * sizeof(uint32_t)); verticesHot[j] = (uint32_t *) my_malloc(sizeHot[j] * sizeof(uint32_t)); } } #pragma omp barrier for ( j = num_buckets - 1 ; j >= 0 ; --j) { k = start_idx[(t_id * num_buckets) + j]; for ( iter = vc_vector_begin(buckets[(t_id * num_buckets) + j]); iter != vc_vector_end(buckets[(t_id * num_buckets) + j]); iter = vc_vector_next(buckets[(t_id * num_buckets) + j], iter)) { verticesHot[j][k] = (*(uint32_t *)iter); degreesHot[j][k] = degrees[(*(uint32_t *)iter)]; k++; } } } verticesHot[num_buckets - 1] = radixSortEdgesByDegree(degreesHot[num_buckets - 1], verticesHot[num_buckets - 1], sizeHot[num_buckets - 1]); #pragma omp parallel for for(v = 0; v < sizeHot[1]; v++) { labels[verticesHot[1][v]] = sizeHot[1] - 1 - v; } #pragma omp parallel for for(v = 0; v < sizeHot[0]; v++) { labels[verticesHot[0][v]] = sizeHot[1] + (v); } Stop(timer); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Reordering Complete"); printf(" -----------------------------------------------------\n"); printf("| %-51f | \n", Seconds(timer)); printf(" -----------------------------------------------------\n"); Start(timer); edgeList = relabelEdgeList(edgeList, labels); Stop(timer); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Relabeling Complete"); printf(" -----------------------------------------------------\n"); printf("| %-51f | \n", Seconds(timer)); printf(" -----------------------------------------------------\n"); #pragma omp parallel for for (v = 0; v < edgeList->num_vertices; ++v) { edgeList->label_array[v] = labels[edgeList->label_array[v]]; edgeList->inverse_label_array[edgeList->label_array[v]] = v; } for (i = 0; i < (P * num_buckets); 
++i) { vc_vector_release(buckets[i]); } for (i = 0; i < num_buckets; ++i) { free(degreesHot[i]); free(verticesHot[i]); } free(degreesHot); free(verticesHot); free(sizeHot); free(timer); free(buckets); free(start_idx); free(labels); return edgeList; } // ******************************************************************************************** // *************** HUBCluster relabel ************** // ******************************************************************************************** struct EdgeList *reorderGraphProcessHUBCluster( uint32_t sort, struct EdgeList *edgeList, uint32_t lmode) { // UINT32_MAX uint32_t i; uint32_t *degrees; uint32_t *thresholds; uint32_t num_buckets = 2; degrees = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t)); thresholds = (uint32_t *) my_malloc(num_buckets * sizeof(uint32_t)); #pragma omp parallel for for (i = 0; i < edgeList->num_vertices; ++i) { degrees[i] = 0; } // START initialize thresholds thresholds[0] = (edgeList->avg_degree); thresholds[num_buckets - 1] = UINT32_MAX; // END initialize thresholds switch(lmode) { case 8 : printf("| %-51s | \n", "HUBCluster OUT-DEGREE"); break; case 9 : printf("| %-51s | \n", "HUBCluster IN-DEGREE"); break; default : printf("| %-51s | \n", "HUBCluster OUT-DEGREE"); } degrees = reorderGraphGenerateInOutDegrees(degrees, edgeList, lmode); edgeList = reorderGraphListHUBCluster(edgeList, degrees, thresholds, num_buckets, lmode); free(thresholds); free(degrees); return edgeList; } struct EdgeList *reorderGraphListHUBCluster(struct EdgeList *edgeList, uint32_t *degrees, uint32_t *thresholds, uint32_t num_buckets, uint32_t lmode) { uint32_t i = 0; int32_t j = 0; int32_t k = 0; void *iter = 0; uint32_t v = 0; uint32_t t = 0; uint32_t temp_idx = 0; uint32_t P = 1; uint32_t t_id = 0; uint32_t offset_start = 0; uint32_t offset_end = 0; uint32_t *start_idx = NULL; vc_vector **buckets = NULL; uint32_t *labels = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t)); struct 
Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); Start(timer); #pragma omp parallel default(none) shared(P,labels,buckets,edgeList,num_buckets,degrees,thresholds,start_idx) firstprivate(iter,temp_idx,k,offset_start,offset_end,t_id,i,j,v,t) { t_id = omp_get_thread_num(); if(t_id == 0) { P = omp_get_num_threads(); start_idx = (uint32_t *) my_malloc(P * num_buckets * sizeof(uint32_t)); buckets = (vc_vector **) malloc(P * num_buckets * sizeof(vc_vector *)); } #pragma omp barrier for (i = 0; i < num_buckets; ++i) { buckets[(t_id * num_buckets) + i] = vc_vector_create(0, sizeof(uint32_t), NULL); } offset_start = t_id * (edgeList->num_vertices / P); if(t_id == (P - 1)) { offset_end = offset_start + (edgeList->num_vertices / P) + (edgeList->num_vertices % P) ; } else { offset_end = offset_start + (edgeList->num_vertices / P); } for (v = offset_start; v < offset_end; ++v) { for ( i = 0; i < num_buckets; ++i) { if(degrees[v] <= thresholds[i]) { vc_vector_push_back(buckets[(t_id * num_buckets) + i], &v); break; } } } #pragma omp barrier if(t_id == 0) { for ( j = num_buckets - 1; j >= 0; --j) { for (t = 0; t < P; ++t) { start_idx[(t * num_buckets) + j] = temp_idx; temp_idx += vc_vector_count(buckets[(t * num_buckets) + j]); } } } #pragma omp barrier for ( j = num_buckets - 1 ; j >= 0 ; --j) { k = start_idx[(t_id * num_buckets) + j]; for ( iter = vc_vector_begin(buckets[(t_id * num_buckets) + j]); iter != vc_vector_end(buckets[(t_id * num_buckets) + j]); iter = vc_vector_next(buckets[(t_id * num_buckets) + j], iter)) { labels[(*(uint32_t *)iter)] = k++; } } } Stop(timer); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Reordering Complete"); printf(" -----------------------------------------------------\n"); printf("| %-51f | \n", Seconds(timer)); printf(" -----------------------------------------------------\n"); Start(timer); edgeList = relabelEdgeList(edgeList, labels); Stop(timer); printf(" 
-----------------------------------------------------\n"); printf("| %-51s | \n", "Relabeling Complete"); printf(" -----------------------------------------------------\n"); printf("| %-51f | \n", Seconds(timer)); printf(" -----------------------------------------------------\n"); #pragma omp parallel for for (v = 0; v < edgeList->num_vertices; ++v) { edgeList->label_array[v] = labels[edgeList->label_array[v]]; edgeList->inverse_label_array[edgeList->label_array[v]] = v; } for (i = 0; i < (P * num_buckets); ++i) { vc_vector_release(buckets[i]); } free(timer); free(buckets); free(start_idx); free(labels); return edgeList; } // ******************************************************************************************** // *************** AccelGraph label-Masking ************** // ******************************************************************************************** struct EdgeList *maskGraphProcess(struct EdgeList *edgeList, struct Arguments *arguments) { struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); printf(" *****************************************************\n"); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Mask Process"); printf(" -----------------------------------------------------\n"); Start(timer); uint32_t cache_size = ((arguments->cache_size) >> 2); switch(arguments->lmode) { case 1 : case 2 : case 3 : case 4 : edgeList = maskGraphProcessDegree( edgeList, arguments->mmode, cache_size); // degree break; default : edgeList = maskGraphProcessDegree( edgeList, arguments->mmode, cache_size); // out-degree } Stop(timer); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Total Mask Complete"); printf(" -----------------------------------------------------\n"); printf("| %-51f | \n", Seconds(timer)); printf(" -----------------------------------------------------\n"); printf(" *****************************************************\n"); free(timer); 
return edgeList; } struct EdgeList *maskGraphProcessDegree( struct EdgeList *edgeList, uint32_t mmode, uint32_t cache_size) { // UINT32_MAX uint32_t i; uint32_t *degrees; uint32_t *thresholds; uint32_t num_buckets = 11; degrees = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t)); thresholds = (uint32_t *) my_malloc(num_buckets * sizeof(uint32_t)); #pragma omp parallel for (i = 0; i < edgeList->num_vertices; ++i) { degrees[i] = 0; } // START initialize thresholds if(edgeList->avg_degree <= 1) thresholds[0] = 1; else thresholds[0] = (edgeList->avg_degree / 2); for ( i = 1; i < (num_buckets - 1); ++i) { thresholds[i] = thresholds[i - 1] * 2; } thresholds[num_buckets - 1] = UINT32_MAX; // END initialize thresholds switch(mmode) { case 1 : printf("| %-51s | \n", "Vertex Property OUT-DEGREE"); break; case 2 : printf("| %-51s | \n", "Vertex Structure IN-DEGREE"); break; case 3 : printf("| %-51s | \n", "Vertex Property OUT-DEGREE"); break; case 4 : printf("| %-51s | \n", "Vertex Structure IN-DEGREE"); break; default : printf("| %-51s | \n", "Vertex Property OUT-DEGREE"); } degrees = maskGraphProcessGenerateInOutDegrees(degrees, edgeList, mmode); edgeList = maskGraphProcessGenerateMaskArray(edgeList, degrees, thresholds, num_buckets, mmode, cache_size); free(thresholds); free(degrees); return edgeList; } uint32_t *maskGraphProcessGenerateInOutDegrees(uint32_t *degrees, struct EdgeList *edgeList, uint32_t mmode) { uint32_t i; uint32_t src; uint32_t dest; #pragma omp parallel for default(none) private(i,src,dest) shared(edgeList,degrees,mmode) for(i = 0; i < edgeList->num_edges; i++) { src = edgeList->edges_array_src[i]; dest = edgeList->edges_array_dest[i]; switch(mmode) { case 1 : case 3 : { #pragma omp atomic update degrees[src]++; } break; case 2 : case 4 : { #pragma omp atomic update degrees[dest]++; } break; case 5 : case 6 : { #pragma omp atomic update degrees[dest]++; #pragma omp atomic update degrees[src]++; } break; default : { #pragma omp atomic 
update degrees[src]++; }// out-degree } } return degrees; } struct EdgeList *maskGraphProcessGenerateMaskArray(struct EdgeList *edgeList, uint32_t *degrees, uint32_t *thresholds, uint32_t num_buckets, uint32_t mmode, uint32_t cache_size) { uint32_t i = 0; int32_t j = 0; void *iter = 0; uint32_t v = 0; uint32_t t = 0; uint32_t temp_idx = 0; uint32_t P = 1; uint32_t t_id = 0; uint32_t offset_start = 0; uint32_t offset_end = 0; uint32_t num_masks = 4; uint32_t *start_idx = NULL; vc_vector **buckets = NULL; uint32_t *labels = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t)); uint32_t *mask_array = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t)); struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); uint32_t *cache_regions = (uint32_t *) my_malloc(num_masks * sizeof(uint32_t)); int diff = (int)edgeList->num_vertices - (int)cache_size; if(diff < (2 * (int)cache_size)) { cache_regions[0] = edgeList->num_vertices / 3; // VERTEX_VALUE_HOT_U32 cache_regions[1] = edgeList->num_vertices / 3; // VERTEX_CACHE_WARM_U32 cache_regions[2] = edgeList->num_vertices / 3; // VERTEX_VALUE_LUKEWARM_U32 } else { cache_regions[0] = cache_size*8; // VERTEX_VALUE_HOT_U32 cache_regions[1] = cache_regions[0]*4; // VERTEX_CACHE_WARM_U32 cache_regions[2] = cache_regions[1]*4; // VERTEX_VALUE_LUKEWARM_U32 } cache_regions[3] = UINT32_MAX; // VERTEX_CACHE_COLD_U32 #pragma omp parallel for for (i = 0; i < edgeList->num_vertices; ++i) { mask_array[i] = VERTEX_CACHE_COLD_U32; } Start(timer); #pragma omp parallel default(none) shared(P,mask_array,mmode,cache_regions,labels,buckets,edgeList,num_buckets,degrees,thresholds,start_idx) firstprivate(iter,temp_idx,offset_start,offset_end,t_id,i,j,v,t) { t_id = omp_get_thread_num(); if(t_id == 0) { P = omp_get_num_threads(); start_idx = (uint32_t *) my_malloc(P * num_buckets * sizeof(uint32_t)); buckets = (vc_vector **) malloc(P * num_buckets * sizeof(vc_vector *)); } #pragma omp barrier for (i = 0; i < 
num_buckets; ++i) { buckets[(t_id * num_buckets) + i] = vc_vector_create(0, sizeof(uint32_t), NULL); } offset_start = t_id * (edgeList->num_vertices / P); if(t_id == (P - 1)) { offset_end = offset_start + (edgeList->num_vertices / P) + (edgeList->num_vertices % P) ; } else { offset_end = offset_start + (edgeList->num_vertices / P); } for (v = offset_start; v < offset_end; ++v) { for ( i = 0; i < num_buckets; ++i) { if(degrees[v] <= thresholds[i]) { vc_vector_push_back(buckets[(t_id * num_buckets) + i], &v); break; } } } #pragma omp barrier if(t_id == 0) { for ( j = num_buckets - 1; j >= 0; --j) { for (t = 0; t < P; ++t) { start_idx[(t * num_buckets) + j] = temp_idx; temp_idx += vc_vector_count(buckets[(t * num_buckets) + j]); } } } #pragma omp barrier for ( j = num_buckets - 1 ; j >= 0 ; --j) { for ( iter = vc_vector_begin(buckets[(t_id * num_buckets) + j]); iter != vc_vector_end(buckets[(t_id * num_buckets) + j]); iter = vc_vector_next(buckets[(t_id * num_buckets) + j], iter)) { if(RegionAtomicDecrement(&(cache_regions[0]))) { mask_array[(*(uint32_t *)iter)] = VERTEX_VALUE_HOT_U32; } else if(RegionAtomicDecrement(&(cache_regions[1]))) { mask_array[(*(uint32_t *)iter)] = VERTEX_CACHE_WARM_U32; } else if(RegionAtomicDecrement(&(cache_regions[2]))) { mask_array[(*(uint32_t *)iter)] = VERTEX_VALUE_LUKEWARM_U32; } else { mask_array[(*(uint32_t *)iter)] = VERTEX_CACHE_COLD_U32; } } } } Stop(timer); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Mask Complete"); printf(" -----------------------------------------------------\n"); printf("| %-51f | \n", Seconds(timer)); printf(" -----------------------------------------------------\n"); if(mmode == 1 || mmode == 2) { Start(timer); edgeList = maskEdgeList(edgeList, mask_array); Stop(timer); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Relabeling Complete"); printf(" -----------------------------------------------------\n"); printf("| 
%-51f | \n", Seconds(timer)); printf(" -----------------------------------------------------\n"); } #pragma omp parallel for for (i = 0; i < edgeList->num_vertices; ++i) { edgeList->mask_array[i] = mask_array[i]; } for (i = 0; i < (P * num_buckets); ++i) { vc_vector_release(buckets[i]); } free(mask_array); free(timer); free(buckets); free(start_idx); free(labels); free(cache_regions); return edgeList; } // ******************************************************************************************** // *************** generic functions ************** // ******************************************************************************************** uint32_t *reorderGraphGenerateInOutDegrees(uint32_t *degrees, struct EdgeList *edgeList, uint32_t lmode) { uint32_t i; uint32_t src; uint32_t dest; if(lmode != 10) { #pragma omp parallel for default(none) private(i,src,dest) shared(edgeList,degrees,lmode) for(i = 0; i < edgeList->num_edges; i++) { src = edgeList->edges_array_src[i]; dest = edgeList->edges_array_dest[i]; switch(lmode) { case 1 : case 4 : case 6 : case 8 : { #pragma omp atomic update degrees[src]++; } // degree break; case 2 : case 5 : case 7 : case 9 : { #pragma omp atomic update degrees[dest]++; } break; case 3 : { #pragma omp atomic update degrees[dest]++; #pragma omp atomic update degrees[src]++; } break; default : { #pragma omp atomic update degrees[src]++; }// out-degree } } } if(lmode == 10) { mt19937state *mt19937var = (mt19937state *) my_malloc(sizeof(mt19937state)); initializeMersenneState (mt19937var, 27491095); #pragma omp parallel for firstprivate(mt19937var) for (i = 0; i < edgeList->num_vertices; ++i) { degrees[i] = (generateRandInt(mt19937var) % edgeList->num_vertices) + omp_get_thread_num(); } free(mt19937var); } return degrees; } struct EdgeList *reorderGraphProcess(struct EdgeList *edgeList, struct Arguments *arguments) { struct Timer *timer = (struct Timer *) malloc(sizeof(struct Timer)); // printf("Filename : %s \n",fnameb); printf(" 
*****************************************************\n"); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Reorder Process"); printf(" -----------------------------------------------------\n"); Start(timer); switch(arguments->lmode) { case 1 : case 2 : case 3 : case 10 : edgeList = reorderGraphProcessDegree( arguments->sort, edgeList, arguments->lmode);// degree break; case 4 : case 5 : edgeList = reorderGraphProcessDBG( arguments->sort, edgeList, arguments->lmode);// DBG break; case 6 : case 7 : edgeList = reorderGraphProcessHUBSort( arguments->sort, edgeList, arguments->lmode);// HUBSort break; case 8 : case 9 : edgeList = reorderGraphProcessHUBCluster( arguments->sort, edgeList, arguments->lmode);// HUBCluster break; case 11 : edgeList = relabelEdgeListFromFile(edgeList, arguments->fnamel, edgeList->num_vertices);// load from file break; case 12 : case 13 : edgeList = reorderGraphProcessCorder( arguments->sort, edgeList, arguments->lmode);// Corder break; default : edgeList = reorderGraphProcessDegree( arguments->sort, edgeList, arguments->lmode);// out-degree } Stop(timer); printf(" -----------------------------------------------------\n"); printf("| %-51s | \n", "Total Reorder Complete"); printf(" -----------------------------------------------------\n"); printf("| %-51f | \n", Seconds(timer)); printf(" -----------------------------------------------------\n"); printf(" *****************************************************\n"); free(timer); return edgeList; } struct EdgeList *relabelEdgeList(struct EdgeList *edgeList, uint32_t *labels) { uint32_t i; #pragma omp parallel for for(i = 0; i < edgeList->num_edges; i++) { uint32_t src; uint32_t dest; src = edgeList->edges_array_src[i]; dest = edgeList->edges_array_dest[i]; edgeList->edges_array_src[i] = labels[src]; edgeList->edges_array_dest[i] = labels[dest]; } return edgeList; } struct EdgeList *maskEdgeList(struct EdgeList *edgeList, uint32_t *mask_array) { uint32_t i; 
#pragma omp parallel for for(i = 0; i < edgeList->num_edges; i++) { uint32_t src; uint32_t dest; src = edgeList->edges_array_src[i]; dest = edgeList->edges_array_dest[i]; edgeList->edges_array_src[i] = src | mask_array[src]; edgeList->edges_array_dest[i] = dest | mask_array[dest]; } return edgeList; } // ******************************************************************************************** // *************** File relabel ************** // ******************************************************************************************** struct EdgeList *relabelEdgeListFromFile(struct EdgeList *edgeList, const char *fnameb, uint32_t size) { FILE *pText; uint32_t i; uint32_t v = 0; uint32_t dest = 0; uint32_t x = 0; uint32_t *labels = (uint32_t *) my_malloc(edgeList->num_vertices * sizeof(uint32_t)); // char *fname_txt = (char *) malloc((strlen(fnameb) + 10) * sizeof(char)); // fname_txt = strcpy (fname_txt, fnameb); printf("%s\n", fnameb ); pText = fopen(fnameb, "r"); if (pText == NULL) { return NULL; } while (1) { i = fscanf(pText, "%u\n", &dest); labels[x] = dest; x++; if( x == edgeList->num_vertices ) break; if( i == EOF ) break; } fclose(pText); edgeList = relabelEdgeList(edgeList, labels); #pragma omp parallel for for (v = 0; v < edgeList->num_vertices; ++v) { edgeList->label_array[v] = labels[edgeList->label_array[v]]; edgeList->inverse_label_array[edgeList->label_array[v]] = v; } free(labels); // free(fname_txt); return edgeList; } void writeLabelsToFile(const char *fnameb, uint32_t *labels, uint32_t size) { FILE *fptr; uint32_t x; fptr = fopen(fnameb, "w"); for(x = 0; x < size; x++) { fprintf(fptr, "%u %u\n", x, labels[x]); } fclose(fptr); }
channel.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC H H AAA N N N N EEEEE L % % C H H A A NN N NN N E L % % C HHHHH AAAAA N N N N N N RRR L % % C H H A A N NN N NN E L % % CCCC H H A A N N N N EEEEE LLLLL % % % % % % MagickCore Image Channel Methods % % % % Software Design % % John Cristy % % December 2003 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/cache-private.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/exception-private.h" #include "magick/enhance.h" #include "magick/image.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/resource_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m b i n e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CombineImages() combines one or more images into a single image. The % grayscale value of the pixels of each image in the sequence is assigned in % order to the specified channels of the combined image. The typical % ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc. % % The format of the CombineImages method is: % % Image *CombineImages(const Image *image,const ChannelType channel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CombineImages(const Image *image,const ChannelType channel, ExceptionInfo *exception) { #define CombineImageTag "Combine/Image" CacheView *combine_view; const Image *next; Image *combine_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Ensure the image are the same size. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { if ((next->columns != image->columns) || (next->rows != image->rows)) ThrowImageException(OptionError,"ImagesAreNotTheSameSize"); } combine_image=CloneImage(image,0,0,MagickTrue,exception); if (combine_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(combine_image,DirectClass) == MagickFalse) { InheritException(exception,&combine_image->exception); combine_image=DestroyImage(combine_image); return((Image *) NULL); } if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(combine_image,sRGBColorspace); if ((channel & OpacityChannel) != 0) combine_image->matte=MagickTrue; (void) SetImageBackgroundColor(combine_image); /* Combine images. 
*/ status=MagickTrue; progress=0; combine_view=AcquireAuthenticCacheView(combine_image,exception); for (y=0; y < (ssize_t) combine_image->rows; y++) { CacheView *image_view; const Image *next; PixelPacket *pixels; register const PixelPacket *restrict p; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns, 1,exception); if (pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } next=image; if (((channel & RedChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelRed(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & GreenChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelGreen(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & BlueChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelBlue(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); 
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (next != (Image *) NULL)) { IndexPacket *indexes; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewAuthenticIndexQueue(combine_view); for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelIndex(indexes+x,ClampToQuantum(GetPixelIntensity(image,p))); p++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CombineImageTag,progress++, combine_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } combine_view=DestroyCacheView(combine_view); if (IsGrayColorspace(combine_image->colorspace) != MagickFalse) (void) TransformImageColorspace(combine_image,sRGBColorspace); if (status == MagickFalse) combine_image=DestroyImage(combine_image); return(combine_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e A l p h a C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageAlphaChannel() returns MagickFalse if the image alpha channel is % not activated. That is, the image is RGB rather than RGBA or CMYK rather % than CMYKA. 
% % The format of the GetImageAlphaChannel method is: % % MagickBooleanType GetImageAlphaChannel(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image) { assert(image != (const Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); return(image->matte); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImageChannel() separates a channel from the image and returns it as % a grayscale image. A channel is a particular color component of each pixel % in the image. % % The format of the SeparateImageChannel method is: % % MagickBooleanType SeparateImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: Identify which channel to extract: RedChannel, GreenChannel, % BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or BlackChannel. % */ MagickExport MagickBooleanType SeparateImageChannel(Image *image, const ChannelType channel) { #define SeparateImageTag "Separate/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (channel == GrayChannels) image->matte=MagickTrue; /* Separate image channels. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); switch (channel) { case RedChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelGreen(q,GetPixelRed(q)); SetPixelBlue(q,GetPixelRed(q)); q++; } break; } case GreenChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelGreen(q)); SetPixelBlue(q,GetPixelGreen(q)); q++; } break; } case BlueChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelBlue(q)); SetPixelGreen(q,GetPixelBlue(q)); q++; } break; } case OpacityChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelOpacity(q)); SetPixelGreen(q,GetPixelOpacity(q)); SetPixelBlue(q,GetPixelOpacity(q)); q++; } break; } case BlackChannel: { if ((image->storage_class != PseudoClass) && (image->colorspace != CMYKColorspace)) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelIndex(indexes+x)); SetPixelGreen(q,GetPixelIndex(indexes+x)); SetPixelBlue(q,GetPixelIndex(indexes+x)); q++; } break; } case TrueAlphaChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelAlpha(q)); SetPixelGreen(q,GetPixelAlpha(q)); SetPixelBlue(q,GetPixelAlpha(q)); q++; } break; } case GrayChannels: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,q))); q++; } break; } default: break; } if 
(SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SeparateImageChannel) #endif proceed=SetImageProgress(image,SeparateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); if (channel != GrayChannels) image->matte=MagickFalse; (void) SetImageColorspace(image,GRAYColorspace); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImages() returns a separate grayscale image for each channel % specified. % % The format of the SeparateImages method is: % % MagickBooleanType SeparateImages(const Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: Identify which channels to extract: RedChannel, GreenChannel, % BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or BlackChannel. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *SeparateImages(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
  Image
    *images,
    *separate_image;

  /*
    Return a list with one grayscale image per requested channel.  Each
    clone is checked for NULL (CloneImage can fail under memory pressure);
    the original passed a possible NULL straight into SeparateImageChannel(),
    which asserts/dereferences its argument.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  images=NewImageList();
  if ((channel & RedChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,RedChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & GreenChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,GreenChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & BlueChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,BlueChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if (((channel & BlackChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,BlackChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & AlphaChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,TrueAlphaChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  return(images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e A l p h a C h a n n e l                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
%  channel.
%
%  The format of the SetImageAlphaChannel method is:
%
%      MagickBooleanType SetImageAlphaChannel(Image *image,
%        const AlphaChannelType alpha_type)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o alpha_type: The alpha channel type: ActivateAlphaChannel, % CopyAlphaChannel, DeactivateAlphaChannel, ExtractAlphaChannel, % OpaqueAlphaChannel, ResetAlphaChannel, SetAlphaChannel, % ShapeAlphaChannel, and TransparentAlphaChannel. % */ MagickExport MagickBooleanType SetImageAlphaChannel(Image *image, const AlphaChannelType alpha_type) { MagickBooleanType status; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); status=MagickTrue; switch (alpha_type) { case ActivateAlphaChannel: { image->matte=MagickTrue; break; } case BackgroundAlphaChannel: { CacheView *image_view; ExceptionInfo *exception; IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; ssize_t y; /* Set transparent pixels to background color. */ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image,DirectClass) == MagickFalse) break; GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *) NULL,&background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index=0; SetPixelPacket(image,&background,&pixel,&index); status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (q->opacity == TransparentOpacity) { SetPixelRed(q,pixel.red); SetPixelGreen(q,pixel.green); SetPixelBlue(q,pixel.blue); } q++; } 
if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,index); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } case CopyAlphaChannel: case ShapeAlphaChannel: { /* Special usage case for SeparateImageChannel(): copy grayscale color to the alpha channel. */ status=SeparateImageChannel(image,GrayChannels); image->matte=MagickTrue; /* make sure transparency is now on! */ if (alpha_type == ShapeAlphaChannel) { MagickPixelPacket background; /* Reset all color channels to background color. */ GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&(image->background_color),(IndexPacket *) NULL,&background); (void) LevelColorsImage(image,&background,&background,MagickTrue); } break; } case DeactivateAlphaChannel: { image->matte=MagickFalse; break; } case ExtractAlphaChannel: { status=SeparateImageChannel(image,TrueAlphaChannel); image->matte=MagickFalse; break; } case RemoveAlphaChannel: case FlattenAlphaChannel: { CacheView *image_view; ExceptionInfo *exception; IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; ssize_t y; /* Flatten image pixels over the background pixels. 
*/ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image,DirectClass) == MagickFalse) break; GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *) NULL,&background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index=0; SetPixelPacket(image,&background,&pixel,&index); status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma, opacity; gamma=1.0-QuantumScale*QuantumScale*q->opacity*pixel.opacity; opacity=(double) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); q->red=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->red, (MagickRealType) q->opacity,(MagickRealType) pixel.red, (MagickRealType) pixel.opacity)); q->green=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->green, (MagickRealType) q->opacity,(MagickRealType) pixel.green, (MagickRealType) pixel.opacity)); q->blue=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->blue, (MagickRealType) q->opacity,(MagickRealType) pixel.blue, (MagickRealType) pixel.opacity)); q->opacity=ClampToQuantum(opacity); q++; } if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,index); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } 
case ResetAlphaChannel: /* deprecated */ case OpaqueAlphaChannel: { status=SetImageOpacity(image,OpaqueOpacity); break; } case SetAlphaChannel: { if (image->matte == MagickFalse) status=SetImageOpacity(image,OpaqueOpacity); break; } case TransparentAlphaChannel: { status=SetImageOpacity(image,TransparentOpacity); break; } case UndefinedAlphaChannel: break; } if (status == MagickFalse) return(status); return(SyncImagePixelCache(image,&image->exception)); }
set_value_x_coo.c
#include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Set A(row, col) = value in a COO-format sparse matrix.
 *
 * Scans the nonzero triplets in parallel; every entry whose (row, col)
 * matches is overwritten (COO storage may legally contain duplicates).
 *
 * Returns ALPHA_SPARSE_STATUS_SUCCESS if at least one entry was updated,
 * ALPHA_SPARSE_STATUS_INVALID_VALUE if (row, col) is not stored in A.
 */
alphasparse_status_t ONAME(ALPHA_SPMAT_COO *A, const ALPHA_INT row, const ALPHA_INT col, const ALPHA_Number value)
{
    ALPHA_INT num_thread = alpha_get_thread_num();
    ALPHA_INT find = 0;

#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread) reduction(+:find)
#endif
    for (ALPHA_INT ai = 0; ai < A->nnz; ++ai)
    {
        if (A->row_indx[ai] == row && A->col_indx[ai] == col)
        {
            A->values[ai] = value;
            find++;
            /* FIX: the original wrote `ai = A->nnz;` here to break out early.
             * Modifying the iteration variable of an OpenMP canonical-form
             * loop is non-conforming (undefined behavior), and with multiple
             * threads it does not stop the other threads anyway.  Scanning
             * the full range is correct and also keeps duplicate (row, col)
             * entries consistent. */
        }
    }

    if (find)
        return ALPHA_SPARSE_STATUS_SUCCESS;
    else
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;
}
GB_binop__bset_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__bset_uint16 // A.*B function (eWiseMult): GB_AemultB__bset_uint16 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__bset_uint16 // C+=b function (dense accum): GB_Cdense_accumb__bset_uint16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bset_uint16 // C=scalar+B GB_bind1st__bset_uint16 // C=scalar+B' GB_bind1st_tran__bset_uint16 // C=A+scalar GB_bind2nd__bset_uint16 // C=A'+scalar GB_bind2nd_tran__bset_uint16 // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = GB_BITSET (aij, bij, uint16_t, 16) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] 
#define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_BITSET (x, y, uint16_t, 16) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSET || GxB_NO_UINT16 || GxB_NO_BSET_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__bset_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__bset_uint16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__bset_uint16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, 
bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__bset_uint16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__bset_uint16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct 
*GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__bset_uint16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t bij = Bx [p] ; Cx [p] = GB_BITSET (x, bij, uint16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__bset_uint16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint16_t aij = Ax [p] ; Cx [p] = GB_BITSET (aij, y, uint16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) 
#undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = GB_BITSET (x, aij, uint16_t, 16) ; \ } GrB_Info GB_bind1st_tran__bset_uint16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = GB_BITSET (aij, y, uint16_t, 16) ; \ } GrB_Info GB_bind2nd_tran__bset_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
daxpy.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h> /* FIX: time() was called without a prototype (implicit declaration) */

#define N 20480

/*
 * DAXPY demo: computes y = a*x + y once with OpenMP on the host and once
 * with OpenACC on the accelerator, then compares the two results.
 * Exits 0 (prints "Success!") when the maximum relative difference is
 * below 1e-12, 1 otherwise.
 */
int main(void)
{
    double *x, *y;
    size_t size = N*sizeof(double);
    x = (double *)malloc(size);
    y = (double *)malloc(size);
    if (x == NULL || y == NULL) {     /* FIX: unchecked allocations */
        fprintf(stderr, "allocation failure\n");
        return 1;
    }

    /* initialize x and y */
    /* FIX: the original called srand(), which seeds rand(), while the
     * values are drawn from random() (seeded by srandom()) — the seed
     * never took effect. */
    srandom(time(NULL));
    double a = (double)random() / RAND_MAX;
    int i;
    for (i=0; i<N; i++)
        x[i] = (double)random() / RAND_MAX;
    for (i=0; i<N; i++)
        y[i] = (double)random() / RAND_MAX;

    /* compute axpy on the host CPU cores */
    double *yomp = (double *)malloc(size);
    /* compute axpy on the accelerator */
    double *yacc = (double *)malloc(size);
    if (yomp == NULL || yacc == NULL) {
        fprintf(stderr, "allocation failure\n");
        return 1;
    }

#pragma omp parallel for
    for (i=0; i<N; i++) {
        yomp[i] = a * x[i] + y[i];
    }

#pragma acc kernels copyin(x[0:N],y[0:N]), copyout(yacc[0:N])
    for (i=0; i<N; i++) {
        yacc[i] = a*x[i] + y[i];
    }

    /* verify the results: maximum relative difference between the two runs */
    double m = -1.;
    double tmp;
#pragma omp parallel for private(tmp) reduction(max:m)
    for (i=0; i<N; i++) {
        tmp = fabs( (yacc[i]-yomp[i])/yomp[i] );
        if ( tmp > m ) m = tmp;
    }

    /* release memory */
    free(x);
    free(y);
    free(yomp);
    free(yacc);

    if ( m < 1E-12 ) {
        printf("Success!\n");
        return 0;
    } else {
        printf("Failure!\n");
        return 1;
    }
}
common_functions.h
/* * Evan Lezar * 18 November 2010 * * Post processing of the eigenvalues calcuated using the ARPACK-based solvers routines * */ void apply_shift ( int N, float* S, float* T, int LDMAT, float shift ) { // T = T - shift*S checkpoint t0 = tic(); int row, col; #pragma omp parallel default(shared) private(row, col) { #pragma omp for schedule(runtime) for ( col = 0; col < N; ++col ) for ( row = 0; row < N; ++row ) T[col*LDMAT + row] = T[col*LDMAT + row] - shift*S[col*LDMAT + row]; } } struct float_complex { float x, y; }; typedef struct float_complex float_complex; inline float_complex make_float_complex ( float x, float y ) { float_complex c = {x, y}; return c; } float_complex invert ( float_complex v ) { float_complex l; l.x = v.x/(v.x*v.x + v.y*v.y); l.y = -v.y/(v.x*v.x + v.y*v.y); return l; } // return v / ( 1.0 + v*shift ); float_complex unshift ( float_complex v, float shift ) { float_complex l = v; // calculate ( 1.0 + v*shift ) l.x = l.x*shift + 1.0; l.y *= shift; float_complex result; // calculate v/l result.x = ( v.x*l.x + v.y*l.y ) / ( l.x*l.x + l.y*l.y ); result.y = ( v.y*l.x - v.x*l.y ) / ( l.x*l.x + l.y*l.y ); return result; } float eigenvalue_magnitude ( const float real, const float imag ) { return sqrt ( real*real + imag*imag ); } int insert_smallest_into_list ( const float* value_list, int* index_list, const int N, const float value, const int index ) { if ( isnan(value) | isinf(value) ) { return -1; } if ( index == 0 ) { // first element to insert index_list[0] = 0; return 0; } int i, j, insert_at = -1; int limit = index; if ( limit > (N-1) ) limit = (N-1); for ( i=0; i < limit; ++i ) { if ( value_list[index_list[i]] > value ) { insert_at = i; int last = index_list[limit]; for ( j = limit; j > i; j-- ) { index_list[j] = index_list[j-1]; } if ( (limit+1) < N ) index_list[limit+1] = last; break; } } if ( insert_at < 0 ) { insert_at = limit; } index_list[insert_at] = index; return insert_at; } int a_is_larger_than_b ( float a, float b ) { if ( isnan(a) | 
isinf(a) ) return 1; if ( isnan(b) | isinf(b) ) return 0; if ( a > b ) return 1; else return 0; } int insert_largest_into_list ( const float* value_list, int* index_list, const int N, const float value, const int index ) { if ( index == 0 ) { // first element to insert index_list[0] = 0; return 0; } int i, j, insert_at = -1; int limit = index; if ( limit > (N-1) ) limit = (N-1); for ( i=0; i < limit; ++i ) { if ( a_is_larger_than_b(value, value_list[index_list[i]]) ) { insert_at = i; int last = index_list[limit]; for ( j = limit; j > i; j-- ) { index_list[j] = index_list[j-1]; } if ( (limit+1) < N ) index_list[limit+1] = last; break; } } if ( insert_at < 0 ) { insert_at = limit; } index_list[insert_at] = index; return insert_at; } void get_smallest_magnitude_eigenvalues ( int N, int NEV, int num_calculated, float shift, float_complex* eigenvalues, float* eigenvectors, float* residuals, float_complex* temp_eigenvalues, const float* temp_eigenvectors, const float* temp_residuals ) { printf("pointers: %p %p %p %p %p %p\n", eigenvalues, eigenvectors, residuals, temp_eigenvalues, temp_eigenvectors, temp_residuals ); int i; int index; int smallest_index[num_calculated]; float magnitude[num_calculated]; float_complex v; float_complex l; for ( i = 0; i < num_calculated; ++i ) { v = temp_eigenvalues[i]; l = unshift(v, shift); printf("%d: %f +i%f : %f ::: %f +i%f\n", i, v.x, v.y, temp_residuals[i], l.x, l.y ); magnitude[i] = eigenvalue_magnitude ( l.x, l.y ); insert_smallest_into_list( magnitude, smallest_index, num_calculated, magnitude[i], i ); } int j = 0; i = 0; while ( i < NEV && j < num_calculated ) { index = smallest_index[j]; if ( 0 <= index && index < num_calculated ) { v = temp_eigenvalues[index]; l = unshift(v, shift); if ( v.x < 0 && l.x > 0 && temp_residuals[index] < 0.5 ) { eigenvalues[i] = l; residuals[i] = temp_residuals[index]; memcpy ( eigenvectors + N*i, temp_eigenvectors + N*index, N*sizeof(float) ); printf("using: %d : %f+j%f : %e\n", index, v.x, v.y, 
residuals[i] ); i++; } } j++; } } void calculate_eigen_values ( int N, void* DATA, int NEV, float shift, float* eigenvalues, float* eigenvectors, char* which ) { int i; int use_N_ev = 2*NEV; if ( use_N_ev > ( N/2 - 1 ) ) use_N_ev = N/2 - 1; // select the number of Arnoldi vectors to generate int NCV = 2*use_N_ev + 1; if ( NCV > N ) NCV = N; // allocate temporary storage for the vectors float* temp_ev = (float*)malloc ( NCV*2*sizeof(float) ); float* temp_vectors = (float*) malloc (N*NCV*sizeof(float)); float* temp_residuals = (float*)malloc ( (NCV )*sizeof(float)); float* residuals = (float*)malloc ( (use_N_ev)*sizeof(float)); // solve the eigenvalue problem using ARPACK arpack_ssev(N, (void*)DATA, use_N_ev, NCV, temp_ev, temp_vectors, temp_residuals, which ); // Copy the resultant eigenvalues memcpy(eigenvalues, temp_ev, NEV*2*sizeof(float)); memcpy(eigenvectors, temp_vectors, NEV*N*sizeof(float)); for (i=0; i < NEV; ++i) { printf("%d: %f\n", i, temp_residuals[i] ); } // free the temporary storage free ( temp_ev ); printf("1:\n"); free ( temp_vectors ); printf("2:\n"); free ( temp_residuals ); printf("3:\n"); free ( residuals ); printf("4:\n"); } void calculate_desired_eigenvalues ( int N, void* DATA, int NEV, float shift, float* eigenvalues, float* eigenvectors ) { // solve the eigenvalue problem using ARPACK int use_N_ev = 2*NEV; if ( use_N_ev > ( N/2 - 1 ) ) use_N_ev = N/2 - 1; if ( use_N_ev <= 0 ) use_N_ev = 1; // select the number of Arnoldi vectors to generate int NCV = 2*use_N_ev + 1; if ( NCV > N ) NCV = N; // allocate temporary storage for the vectors float_complex* temp_ev = (float_complex*)malloc ( NCV*sizeof(float_complex) ); float* temp_vectors = (float*) malloc (N*NCV*sizeof(float)); float* temp_residuals = (float*)malloc ( (NCV )*sizeof(float)); float* residuals = (float*)malloc ( (NCV)*sizeof(float)); #ifdef DEBUG_OUTPUT printf( "N=%d, use_N_ev = %d, NCV = %d, NEV = %d\n", N, use_N_ev, NCV, NEV ); #endif arpack_ssev( (int*)&N, (void*)DATA, 
&use_N_ev, &NCV, (float*)temp_ev, temp_vectors, temp_residuals, "LR" ); // free the temporary storage int largest_index[NCV]; float real[NCV]; float_complex v; float_complex l; int i; for ( i = 0; i < NCV; i++ ) { v = temp_ev[i]; l = invert (v); #ifdef DEBUG_OUTPUT printf("%d: %f+j%f :: %f(%f)+j%f :: %e\n", i, v.x, v.y, l.x, l.x + shift, l.y, temp_residuals[i]); #endif real[i] = v.x; insert_largest_into_list ( real, largest_index, NCV, real[i], i ); } int index, j = 0; i = 0; while ( i < NEV && j < NCV ) { index = largest_index[j]; if ( 0 <= index && index < NCV ) { v = temp_ev[index]; l = invert(v); float_complex t = invert(l); if ( temp_residuals[index] > 1e-9 && temp_residuals[index] < 0.5 && t.x > 1e-5 ) { eigenvalues[2*i] = l.x + shift; eigenvalues[2*i+1] = l.y; residuals[i] = temp_residuals[index]; memcpy ( eigenvectors + N*i, temp_vectors + N*index, N*sizeof(float) ); #ifdef DEBUG_OUTPUT printf("using: %d : %f+j%f : %e+j%e : %e (%f +j%f)\n", index, v.x, v.y, l.x, l.y, residuals[i], t.x, t.y ); #endif i++; } } j++; } free ( temp_ev ); free ( temp_vectors ); free ( temp_residuals ); free ( residuals ); }
ParFriends.h
#ifndef _PAR_FRIENDS_H_   /* NOTE(review): leading-underscore+capital guard name is reserved for the implementation */
#define _PAR_FRIENDS_H_
#include "mpi.h"
#include <iostream>
#include <cstdarg>
#include "SpParMat.h"
#include "SpParHelper.h"
#include "MPIType.h"
#include "Friends.h"
#include "OptBuf.h"
using namespace std;

template <class IT, class NT, class DER>
class SpParMat;

/*************************************************************************************************/
/**************************** FRIEND FUNCTIONS FOR PARALLEL CLASSES ******************************/
/*************************************************************************************************/

/**
 ** Concatenate all the FullyDistVec<IT,NT> objects into a single one
 **/
template <typename IT, typename NT>
FullyDistVec<IT,NT> Concatenate ( vector< FullyDistVec<IT,NT> > & vecs)
{
	if(vecs.size() < 1)
	{
		SpParHelper::Print("Warning: Nothing to concatenate, returning empty ");
		return FullyDistVec<IT,NT>();
	}
	else if (vecs.size() < 2)
	{
		/* NOTE(review): size() < 2 means exactly one element, so the only
		 * valid index is 0 -- vecs[1] is out of bounds here. BUG: should
		 * almost certainly be vecs[0]. */
		return vecs[1];
	}
	else
	{
		typename vector< FullyDistVec<IT,NT> >::iterator it = vecs.begin();
		shared_ptr<CommGrid> commGridPtr = it->getcommgrid();
		MPI_Comm World = commGridPtr->GetWorld();

		IT nglen = it->TotalLength();	// new global length
		IT cumloclen = it->MyLocLength();	// existing cumulative local lengths
		++it;
		// All vectors must live on the same communication grid.
		for(; it != vecs.end(); ++it)
		{
			if(*(commGridPtr) != *(it->getcommgrid()))
			{
				SpParHelper::Print("Grids are not comparable for FullyDistVec<IT,NT>::EWiseApply\n");
				MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
			}
			nglen += it->TotalLength();
			cumloclen += it->MyLocLength();
		}
		FullyDistVec<IT,NT> ConCat (commGridPtr, nglen, NT());
		int nprocs = commGridPtr->GetSize();

		// Bucket every local entry by the rank that owns it in the concatenated vector.
		vector< vector< NT > > data(nprocs);
		vector< vector< IT > > inds(nprocs);
		IT gloffset = 0;
		for(it = vecs.begin(); it != vecs.end(); ++it)
		{
			IT loclen = it->LocArrSize();
			for(IT i=0; i < loclen; ++i)
			{
				IT locind;
				IT loffset = it->LengthUntil();
				int owner = ConCat.Owner(gloffset+loffset+i, locind);
				data[owner].push_back(it->arr[i]);
				inds[owner].push_back(locind);
			}
			gloffset += it->TotalLength();
		}

		// Exchange counts, then values, then destination indices (two all-to-alls).
		int * sendcnt = new int[nprocs];
		int * sdispls = new int[nprocs];
		for(int i=0; i<nprocs; ++i)
			sendcnt[i] = (int) data[i].size();

		int * rdispls = new int[nprocs];
		int * recvcnt = new int[nprocs];
		MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World);  // share the request counts
		sdispls[0] = 0;
		rdispls[0] = 0;
		for(int i=0; i<nprocs-1; ++i)
		{
			sdispls[i+1] = sdispls[i] + sendcnt[i];
			rdispls[i+1] = rdispls[i] + recvcnt[i];
		}
		IT totrecv = accumulate(recvcnt,recvcnt+nprocs,static_cast<IT>(0));
		NT * senddatabuf = new NT[cumloclen];
		for(int i=0; i<nprocs; ++i)
		{
			copy(data[i].begin(), data[i].end(), senddatabuf+sdispls[i]);
			vector<NT>().swap(data[i]);	// delete data vectors
		}
		NT * recvdatabuf = new NT[totrecv];
		MPI_Alltoallv(senddatabuf, sendcnt, sdispls, MPIType<NT>(), recvdatabuf, recvcnt, rdispls, MPIType<NT>(), World);  // send data
		delete [] senddatabuf;

		IT * sendindsbuf = new IT[cumloclen];
		for(int i=0; i<nprocs; ++i)
		{
			copy(inds[i].begin(), inds[i].end(), sendindsbuf+sdispls[i]);
			vector<IT>().swap(inds[i]);	// delete inds vectors
		}
		IT * recvindsbuf = new IT[totrecv];
		MPI_Alltoallv(sendindsbuf, sendcnt, sdispls, MPIType<IT>(), recvindsbuf, recvcnt, rdispls, MPIType<IT>(), World);  // send new inds
		DeleteAll(sendindsbuf, sendcnt, sdispls);

		// Scatter received values into their local slots of the result.
		for(int i=0; i<nprocs; ++i)
		{
			for(int j = rdispls[i]; j < rdispls[i] + recvcnt[i]; ++j)
			{
				ConCat.arr[recvindsbuf[j]] = recvdatabuf[j];
			}
		}
		DeleteAll(recvindsbuf, recvcnt, rdispls);
		/* NOTE(review): recvdatabuf is never freed -- memory leak. */
		return ConCat;
	}
}

/* Abort (and return false) unless A*B is dimensionally valid and the
 * operands do not alias each other. */
template <typename MATRIXA, typename MATRIXB>
bool CheckSpGEMMCompliance(const MATRIXA & A, const MATRIXB & B)
{
	if(A.getncol() != B.getnrow())
	{
		ostringstream outs;
		outs << "Can not multiply, dimensions does not match"<< endl;
		outs << A.getncol() << " != " << B.getnrow() << endl;
		SpParHelper::Print(outs.str());
		MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
		return false;
	}
	if((void*) &A == (void*) &B)
	{
		ostringstream outs;
		outs << "Can not multiply, inputs alias (make a temporary copy of one of them first)"<< endl;
		SpParHelper::Print(outs.str());
		MPI_Abort(MPI_COMM_WORLD, MATRIXALIAS);
		return false;
	}
	return true;
}

/**
 * Parallel C = A*B routine that uses a double buffered broadcasting scheme
 * @pre { Input matrices, A and B, should not alias }
 * Most memory efficient version available. Total stages: 2*sqrt(p)
 * Memory requirement during first sqrt(p) stages: <= (3/2)*(nnz(A)+nnz(B))+(1/2)*nnz(C)
 * Memory requirement during second sqrt(p) stages: <= nnz(A)+nnz(B)+nnz(C)
 * Final memory requirement: nnz(C) if clearA and clearB are true
 **/
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,NUO,UDERO> Mult_AnXBn_DoubleBuff
		(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false )
{
	if(!CheckSpGEMMCompliance(A,B) )
	{
		return SpParMat< IU,NUO,UDERO >();
	}
	int stages, dummy; 	// last two parameters of ProductGrid are ignored for Synch multiplication
	shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
	IU C_m = A.spSeq->getnrow();
	IU C_n = B.spSeq->getncol();

	// Halve each operand so every SUMMA stage broadcasts a smaller piece.
	UDERA * A1seq = new UDERA();
	UDERA * A2seq = new UDERA();
	UDERB * B1seq = new UDERB();
	UDERB * B2seq = new UDERB();
	(A.spSeq)->Split( *A1seq, *A2seq);
	const_cast< UDERB* >(B.spSeq)->Transpose();
	(B.spSeq)->Split( *B1seq, *B2seq);
	MPI_Barrier(GridC->GetWorld());

	IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
	IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages);
	SpParHelper::GetSetSizes( *A1seq, ARecvSizes, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( *B1seq, BRecvSizes, (B.commGrid)->GetColWorld());

	// Remotely fetched matrices are stored as pointers
	UDERA * ARecv;
	UDERB * BRecv;
	vector< SpTuples<IU,NUO> *> tomerge;

	int Aself = (A.commGrid)->GetRankInProcRow();
	int Bself = (B.commGrid)->GetRankInProcCol();

	// First round: multiply using the first halves.
	for(int i = 0; i < stages; ++i)
	{
		vector<IU> ess;
		if(i == Aself)
		{
			ARecv = A1seq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERA::esscount);
			for(int j=0; j< UDERA::esscount; ++j)
			{
				ess[j] = ARecvSizes[j][i];		// essentials of the ith matrix in this row
			}
			ARecv = new UDERA();				// first, create the object
		}
		SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);	// then, receive its elements
		ess.clear();
		if(i == Bself)
		{
			BRecv = B1seq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERB::esscount);
			for(int j=0; j< UDERB::esscount; ++j)
			{
				ess[j] = BRecvSizes[j][i];
			}
			BRecv = new UDERB();
		}
		SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);	// then, receive its elements
		SpTuples<IU,NUO> * C_cont = MultiplyReturnTuples<SR, NUO>
						(*ARecv, *BRecv,	// parameters themselves
						false, true,		// transpose information (B is transposed)
						i != Aself,		// 'delete A' condition
						i != Bself);	// 'delete B' condition
		if(!C_cont->isZero())
			tomerge.push_back(C_cont);
		else
			delete C_cont;
	}
	if(clearA) delete A1seq;
	if(clearB) delete B1seq;

	// Set the new dimensions
	SpParHelper::GetSetSizes( *A2seq, ARecvSizes, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( *B2seq, BRecvSizes, (B.commGrid)->GetColWorld());

	// Start the second round
	for(int i = 0; i < stages; ++i)
	{
		vector<IU> ess;
		if(i == Aself)
		{
			ARecv = A2seq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERA::esscount);
			for(int j=0; j< UDERA::esscount; ++j)
			{
				ess[j] = ARecvSizes[j][i];		// essentials of the ith matrix in this row
			}
			ARecv = new UDERA();				// first, create the object
		}
		SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);	// then, receive its elements
		ess.clear();
		if(i == Bself)
		{
			BRecv = B2seq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERB::esscount);
			for(int j=0; j< UDERB::esscount; ++j)
			{
				ess[j] = BRecvSizes[j][i];
			}
			BRecv = new UDERB();
		}
		SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);	// then, receive its elements
		SpTuples<IU,NUO> * C_cont = MultiplyReturnTuples<SR, NUO>
						(*ARecv, *BRecv,	// parameters themselves
						false, true,		// transpose information (B is transposed)
						i != Aself,		// 'delete A' condition
						i != Bself);	// 'delete B' condition
		if(!C_cont->isZero())
			tomerge.push_back(C_cont);
		else
			delete C_cont;
	}
	SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
	SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);

	// Restore (or release) the operands.
	if(clearA)
	{
		delete A2seq;
		delete A.spSeq;
		A.spSeq = NULL;
	}
	else
	{
		(A.spSeq)->Merge(*A1seq, *A2seq);
		delete A1seq;
		delete A2seq;
	}
	if(clearB)
	{
		delete B2seq;
		delete B.spSeq;
		B.spSeq = NULL;
	}
	else
	{
		(B.spSeq)->Merge(*B1seq, *B2seq);
		delete B1seq;
		delete B2seq;
		const_cast< UDERB* >(B.spSeq)->Transpose();	// transpose back to original
	}
	UDERO * C = new UDERO(MergeAll<SR>(tomerge, C_m, C_n,true), false);
	// First get the result in SpTuples, then convert to UDER
	return SpParMat<IU,NUO,UDERO> (C, GridC);	// return the result object
}

/**
 * Parallel A = B*C routine that uses only MPI-1 features
 * Relies on simple blocking broadcast
 * @pre { Input matrices, A and B, should not alias }
 **/
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU, NUO, UDERO> Mult_AnXBn_Synch
		(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false )
{
	if(!CheckSpGEMMCompliance(A,B) )
	{
		return SpParMat< IU,NUO,UDERO >();
	}
	int stages, dummy; 	// last two parameters of ProductGrid are ignored for Synch multiplication
	shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
	IU C_m = A.spSeq->getnrow();
	IU C_n = B.spSeq->getncol();

	const_cast< UDERB* >(B.spSeq)->Transpose();
	MPI_Barrier(GridC->GetWorld());

	IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
	IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages);
	SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld());
	SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld());

	// Remotely fetched matrices are stored as pointers
	UDERA * ARecv;
	UDERB * BRecv;
	vector< SpTuples<IU,NUO> *> tomerge;

	int Aself = (A.commGrid)->GetRankInProcRow();
	int Bself = (B.commGrid)->GetRankInProcCol();

	for(int i = 0; i < stages; ++i)
	{
		vector<IU> ess;
		if(i == Aself)
		{
			ARecv = A.spSeq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERA::esscount);
			for(int j=0; j< UDERA::esscount; ++j)
			{
				ess[j] = ARecvSizes[j][i];		// essentials of the ith matrix in this row
			}
			ARecv = new UDERA();				// first, create the object
		}
		SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);	// then, receive its elements
		ess.clear();
		if(i == Bself)
		{
			BRecv = B.spSeq;	// shallow-copy
		}
		else
		{
			ess.resize(UDERB::esscount);
			for(int j=0; j< UDERB::esscount; ++j)
			{
				ess[j] = BRecvSizes[j][i];
			}
			BRecv = new UDERB();
		}
		SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);	// then, receive its elements
		SpTuples<IU,NUO> * C_cont = MultiplyReturnTuples<SR, NUO>
						(*ARecv, *BRecv,	// parameters themselves
						false, true,		// transpose information (B is transposed)
						i != Aself,		// 'delete A' condition
						i != Bself);	// 'delete B' condition
		/* NOTE(review): unlike Mult_AnXBn_DoubleBuff, a zero C_cont is not
		 * deleted here -- looks like a small leak; confirm against upstream. */
		if(!C_cont->isZero())
			tomerge.push_back(C_cont);
#ifndef NDEBUG
		ostringstream outs;
		outs << i << "th SUMMA iteration"<< endl;
		SpParHelper::Print(outs.str());
#endif
	}
	if(clearA && A.spSeq != NULL)
	{
		delete A.spSeq;
		A.spSeq = NULL;
	}
	if(clearB && B.spSeq != NULL)
	{
		delete B.spSeq;
		B.spSeq = NULL;
	}
	SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
	SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);

	UDERO * C = new UDERO(MergeAll<SR>(tomerge, C_m, C_n,true), false);
	// First get the result in SpTuples, then convert to UDER
	// the last parameter to MergeAll deletes tomerge arrays

	if(!clearB)
		const_cast< UDERB* >(B.spSeq)->Transpose();	// transpose back to original

	return SpParMat<IU,NUO,UDERO> (C, GridC);	// return the result object
}

/* Abort unless A*x is dimensionally valid and both live on the same grid. */
template <typename MATRIX, typename VECTOR>
void CheckSpMVCompliance(const MATRIX & A, const VECTOR & x)
{
	if(A.getncol() != x.TotalLength())
	{
		ostringstream outs;
		outs << "Can not multiply, dimensions does not match"<< endl;
		outs << A.getncol() << " != " << x.TotalLength() << endl;
		SpParHelper::Print(outs.str());
		MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
	}
	if(! ( *(A.getcommgrid()) == *(x.getcommgrid())) )
	{
		cout << "Grids are not comparable for SpMV" << endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
	}
}

// Forward declaration of the optimized-buffer sparse SpMV (defined below).
template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote>  SpMV
	(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf);

// Convenience overload: constructs a default (disabled) OptBuf.
template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote>  SpMV
	(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue)
{
	typedef typename promote_trait<NUM,IU>::T_promote T_promote;
	OptBuf<int32_t, T_promote > optbuf = OptBuf<int32_t, T_promote >();
	return SpMV<SR>(A, x, indexisvalue, optbuf);
}

/**
 * Step 1 of the sparse SpMV algorithm
 * @param[in,out] trxlocnz, lenuntil,trxinds,trxnums { set or allocated }
 * @param[in] indexisvalue
 **/
template<typename IU, typename NV>
void TransposeVector(MPI_Comm & World, const FullyDistSpVec<IU,NV> & x, int32_t & trxlocnz, IU & lenuntil, int32_t * & trxinds, NV * & trxnums, bool indexisvalue)
{
	int32_t xlocnz = (int32_t) x.getlocnnz();
	int32_t roffst = (int32_t) x.RowLenUntil();	// since trxinds is int32_t
	int32_t roffset;
	IU luntil = x.LengthUntil();
	int diagneigh = x.commGrid->GetComplementRank();

	MPI_Status status;
	// Exchange scalars with the transpose (complement) processor first.
	MPI_Sendrecv(&roffst, 1, MPIType<int32_t>(), diagneigh, TROST, &roffset, 1, MPIType<int32_t>(), diagneigh, TROST, World, &status);
	MPI_Sendrecv(&xlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, &trxlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, World, &status);
	MPI_Sendrecv(&luntil, 1, MPIType<IU>(), diagneigh, TRLUT, &lenuntil, 1, MPIType<IU>(), diagneigh, TRLUT, World, &status);

	// ABAB: Important observation is that local indices (given by x.ind) is 32-bit addressible
	// Copy them to 32 bit integers and transfer that to save 50% of off-node bandwidth
	trxinds = new int32_t[trxlocnz];
	int32_t * temp_xind = new int32_t[xlocnz];
	for(int i=0; i< xlocnz; ++i)
		temp_xind[i] = (int32_t) x.ind[i];
	MPI_Sendrecv(temp_xind, xlocnz, MPIType<int32_t>(), diagneigh, TRI, trxinds, trxlocnz, MPIType<int32_t>(), diagneigh, TRI, World, &status);
	delete [] temp_xind;
	if(!indexisvalue)
	{
		trxnums = new NV[trxlocnz];
		MPI_Sendrecv(const_cast<NV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NV>(), diagneigh, TRX, World, &status);
	}
	/* NOTE(review): bind2nd is deprecated in C++11 and removed in C++17;
	 * a lambda or bind would be needed to modernize. */
	transform(trxinds, trxinds+trxlocnz, trxinds, bind2nd(plus<int32_t>(), roffset));	// fullydist indexing (p pieces) -> matrix indexing (sqrt(p) pieces)
}

/**
 * Step 2 of the sparse SpMV algorithm
 * @param[in,out] trxinds, trxnums { deallocated }
 * @param[in,out] indacc, numacc { allocated }
 * @param[in,out] accnz { set }
 * @param[in] trxlocnz, lenuntil, indexisvalue
 **/
template<typename IU, typename NV>
void AllGatherVector(MPI_Comm & ColWorld, int trxlocnz, IU lenuntil, int32_t * & trxinds, NV * & trxnums, int32_t * & indacc, NV * & numacc, int & accnz, bool indexisvalue)
{
	int colneighs, colrank;
	MPI_Comm_size(ColWorld, &colneighs);
	MPI_Comm_rank(ColWorld, &colrank);
	int * colnz = new int[colneighs];
	colnz[colrank] = trxlocnz;
	MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld);
	int * dpls = new int[colneighs]();	// displacements (zero initialized pid)
	std::partial_sum(colnz, colnz+colneighs-1, dpls+1);
	accnz = std::accumulate(colnz, colnz+colneighs, 0);
	indacc = new int32_t[accnz];
	numacc = new NV[accnz];

	// ABAB: Future issues here, colnz is of type int (MPI limitation)
	// What if the aggregate vector size along the processor row/column is not 32-bit addressible?
	// This will happen when n/sqrt(p) > 2^31
	// Currently we can solve a small problem (scale 32) with 4096 processor
	// For a medium problem (scale 35), we'll need 32K processors which gives sqrt(p) ~ 180
	// 2^35 / 180 ~ 2^29 / 3 which is not an issue !

#ifdef TIMING
	double t0=MPI_Wtime();
#endif
	MPI_Allgatherv(trxinds, trxlocnz, MPIType<int32_t>(), indacc, colnz, dpls, MPIType<int32_t>(), ColWorld);
	delete [] trxinds;
	if(indexisvalue)
	{
		// Reconstruct numerical values from the global index offset instead of
		// communicating them (BFS-style optimization).
		IU lenuntilcol;
		if(colrank == 0)
			lenuntilcol = lenuntil;
		MPI_Bcast(&lenuntilcol, 1, MPIType<IU>(), 0, ColWorld);
		for(int i=0; i< accnz; ++i)	// fill numerical values from indices
		{
			numacc[i] = indacc[i] + lenuntilcol;
		}
	}
	else
	{
		MPI_Allgatherv(trxnums, trxlocnz, MPIType<NV>(), numacc, colnz, dpls, MPIType<NV>(), ColWorld);
		delete [] trxnums;
	}
#ifdef TIMING
	double t1=MPI_Wtime();
	cblas_allgathertime += (t1-t0);
#endif
	DeleteAll(colnz,dpls);
}

/**
 * Step 3 of the sparse SpMV algorithm, with the semiring
 * @param[in,out] optbuf {scratch space for all-to-all (fold) communication}
 * @param[in,out] indacc, numacc {index and values of the input vector, deleted upon exit}
 * @param[in,out] sendindbuf, sendnumbuf {index and values of the output vector, created}
 **/
template<typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void LocalSpMV(const SpParMat<IU,NUM,UDER> & A, int rowneighs, OptBuf<int32_t, OVT > & optbuf, int32_t * & indacc, IVT * & numacc, int32_t * & sendindbuf, OVT * & sendnumbuf, int * & sdispls, int * sendcnt, int accnz, bool indexisvalue)
{
	if(optbuf.totmax > 0)	// graph500 optimization enabled
	{
		if(A.spSeq->getnsplit() > 0)
		{
			// optbuf.{inds/nums/dspls} and sendcnt are all pre-allocated and only filled by dcsc_gespmv_threaded
			dcsc_gespmv_threaded_setbuffers<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs);
		}
		else
		{
			dcsc_gespmv<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs, indexisvalue);
		}
		DeleteAll(indacc,numacc);
	}
	else
	{
		if(A.spSeq->getnsplit() > 0)
		{
			// sendindbuf/sendnumbuf/sdispls are all allocated and filled by dcsc_gespmv_threaded
			int totalsent = dcsc_gespmv_threaded<SR> (*(A.spSeq), indacc, numacc, accnz, sendindbuf, sendnumbuf, sdispls, rowneighs);
			DeleteAll(indacc, numacc);
			for(int i=0; i<rowneighs-1; ++i)
				sendcnt[i] = sdispls[i+1] - sdispls[i];
			sendcnt[rowneighs-1] = totalsent - sdispls[rowneighs-1];
		}
		else
		{
			// serial SpMV with sparse vector
			vector< int32_t > indy;
			vector< OVT > numy;
			dcsc_gespmv<SR>(*(A.spSeq), indacc, numacc, accnz, indy, numy);	// actual multiplication
			DeleteAll(indacc, numacc);
			int32_t bufsize = indy.size();	// as compact as possible
			sendindbuf = new int32_t[bufsize];
			sendnumbuf = new OVT[bufsize];
			int32_t perproc = A.getlocalrows() / rowneighs;
			int k = 0;	// index to buffer
			// Partition output entries by destination row-neighbor, rebasing indices.
			for(int i=0; i<rowneighs; ++i)
			{
				int32_t end_this = (i==rowneighs-1) ? A.getlocalrows(): (i+1)*perproc;
				while(k < bufsize && indy[k] < end_this)
				{
					sendindbuf[k] = indy[k] - i*perproc;
					sendnumbuf[k] = numy[k];
					++sendcnt[i];
					++k;
				}
			}
			sdispls = new int[rowneighs]();
			partial_sum(sendcnt, sendcnt+rowneighs-1, sdispls+1);
		}
	}
}

/* Merge per-neighbor received (index,value) runs into the sorted sparse
 * result vector y.  Two strategies: linear sweep + sort, or heap merge
 * (HEAPMERGE).  Duplicate indices: sweep keeps the first (lowest-rank)
 * contribution; heap merge combines with SR::add. */
template <typename SR, typename IU, typename OVT>
void MergeContributions(FullyDistSpVec<IU,OVT> & y, int * & recvcnt, int * & rdispls, int32_t * & recvindbuf, OVT * & recvnumbuf, int rowneighs)
{
	// free memory of y, in case it was aliased
	vector<IU>().swap(y.ind);
	vector<OVT>().swap(y.num);

#ifndef HEAPMERGE
	IU ysize = y.MyLocLength();	// my local length is only O(n/p)
	bool * isthere = new bool[ysize];
	vector< pair<IU,OVT> > ts_pairs;
	fill_n(isthere, ysize, false);

	// We don't need to keep a "merger" because minimum will always come from the processor
	// with the smallest rank; so a linear sweep over the received buffer is enough
	for(int i=0; i<rowneighs; ++i)
	{
		for(int j=0; j< recvcnt[i]; ++j)
		{
			int32_t index = recvindbuf[rdispls[i] + j];
			if(!isthere[index])
				ts_pairs.push_back(make_pair(index, recvnumbuf[rdispls[i] + j]));
		}
	}
	DeleteAll(recvcnt, rdispls);
	DeleteAll(isthere, recvindbuf, recvnumbuf);
	sort(ts_pairs.begin(), ts_pairs.end());
	int nnzy = ts_pairs.size();
	y.ind.resize(nnzy);
	y.num.resize(nnzy);
	for(int i=0; i< nnzy; ++i)
	{
		y.ind[i] = ts_pairs[i].first;
		y.num[i] = ts_pairs[i].second;
	}

#else
	// Alternative 2: Heap-merge
	int32_t hsize = 0;
	int32_t inf = numeric_limits<int32_t>::min();
	int32_t sup = numeric_limits<int32_t>::max();
	KNHeap< int32_t, int32_t > sHeap(sup, inf);
	int * processed = new int[rowneighs]();
	for(int i=0; i<rowneighs; ++i)
	{
		if(recvcnt[i] > 0)
		{
			// key, proc_id
			sHeap.insert(recvindbuf[rdispls[i]], i);
			++hsize;
		}
	}
	int32_t key, locv;
	if(hsize > 0)
	{
		sHeap.deleteMin(&key, &locv);
		y.ind.push_back( static_cast<IU>(key));
		y.num.push_back(recvnumbuf[rdispls[locv]]);	// nothing is processed yet
		if( (++(processed[locv])) < recvcnt[locv] )
			sHeap.insert(recvindbuf[rdispls[locv]+processed[locv]], locv);
		else
			--hsize;
	}
	while(hsize > 0)
	{
		sHeap.deleteMin(&key, &locv);
		IU deref = rdispls[locv] + processed[locv];
		if(y.ind.back() == static_cast<IU>(key))	// y.ind is surely not empty
		{
			y.num.back() = SR::add(y.num.back(), recvnumbuf[deref]);
			// ABAB: Benchmark actually allows us to be non-deterministic in terms of parent selection
			// We can just skip this addition operator (if it's a max/min select)
		}
		else
		{
			y.ind.push_back(static_cast<IU>(key));
			y.num.push_back(recvnumbuf[deref]);
		}
		if( (++(processed[locv])) < recvcnt[locv] )
			sHeap.insert(recvindbuf[rdispls[locv]+processed[locv]], locv);
		else
			--hsize;
	}
	DeleteAll(recvcnt, rdispls,processed);
	DeleteAll(recvindbuf, recvnumbuf);
#endif
}

/**
 * This version is the most flexible sparse matrix X sparse vector [Used in KDT]
 * It accepts different types for the matrix (NUM), the input vector (IVT) and the output vector (OVT)
 * without relying on automatic type promotion
 * Input (x) and output (y) vectors can be ALIASED because y is not written until the algorithm is done with x.
 */
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, OptBuf<int32_t, OVT > & optbuf)
{
	CheckSpMVCompliance(A,x);
	optbuf.MarkEmpty();

	MPI_Comm World = x.commGrid->GetWorld();
	MPI_Comm ColWorld = x.commGrid->GetColWorld();
	MPI_Comm RowWorld = x.commGrid->GetRowWorld();

	int accnz;
	int32_t trxlocnz;
	IU lenuntil;
	int32_t *trxinds, *indacc;
	IVT *trxnums, *numacc;

	/*  (disabled PAPI hardware-counter instrumentation)
	char errorstring[PAPI_MAX_STR_LEN+1];
	int Events2Add [] = {PAPI_TOT_INS, PAPI_L1_TCM, PAPI_L2_TCM, PAPI_L3_TCM};
	string EventNames [] = {"PAPI_TOT_INS", "PAPI_L1_TCM", "PAPI_L2_TCM", "PAPI_L3_TCM"};
	int arraysize = sizeof(Events2Add) / sizeof(int);
	long long ptr2values[arraysize];
	int errorcode = PAPI_start_counters(Events2Add, arraysize);
	if (errorcode != PAPI_OK)
	{
		PAPI_perror(errorcode, errorstring, PAPI_MAX_STR_LEN);
		fprintf(stderr, "PAPI error (%d): %s\n", errorcode, errorstring);
	}
	*/

	// Steps 1-2: move x to the transpose processor, then gather along columns.
	TransposeVector(World, x, trxlocnz, lenuntil, trxinds, trxnums, indexisvalue);
	AllGatherVector(ColWorld, trxlocnz, lenuntil, trxinds, trxnums, indacc, numacc, accnz, indexisvalue);

	/*
	errorcode = PAPI_read_counters(ptr2values, arraysize);
	if (errorcode != PAPI_OK)
	{
		PAPI_perror(errorcode, errorstring, PAPI_MAX_STR_LEN);
		fprintf(stderr, "PAPI error (%d): %s\n", errorcode, errorstring);
	}
	errorcode = PAPI_stop_counters(ptr2values, arraysize);
	*/

	int rowneighs;
	MPI_Comm_size(RowWorld, &rowneighs);
	int * sendcnt = new int[rowneighs]();
	int32_t * sendindbuf;
	OVT * sendnumbuf;
	int * sdispls;
	// Step 3: local multiply. indacc/numacc deallocated, sendindbuf/sendnumbuf/sdispls allocated
	LocalSpMV<SR>(A, rowneighs, optbuf, indacc, numacc, sendindbuf, sendnumbuf, sdispls, sendcnt, accnz, indexisvalue);

	int * rdispls = new int[rowneighs];
	int * recvcnt = new int[rowneighs];
	MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld);	// share the request counts

	// receive displacements are exact whereas send displacements have slack
	rdispls[0] = 0;
	for(int i=0; i<rowneighs-1; ++i)
	{
		rdispls[i+1] = rdispls[i] + recvcnt[i];
	}
	int totrecv = accumulate(recvcnt,recvcnt+rowneighs,0);
	int32_t * recvindbuf = new int32_t[totrecv];
	OVT * recvnumbuf = new OVT[totrecv];

#ifdef TIMING
	double t2=MPI_Wtime();
#endif
	if(optbuf.totmax > 0 )	// graph500 optimization enabled
	{
		MPI_Alltoallv(optbuf.inds, sendcnt, optbuf.dspls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld);
		MPI_Alltoallv(optbuf.nums, sendcnt, optbuf.dspls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld);
		delete [] sendcnt;
	}
	else
	{
		/*  (disabled per-neighbor send-buffer debug dump)
		ofstream oput;
		x.commGrid->OpenDebugFile("Send", oput);
		oput << "To displacements: "; copy(sdispls, sdispls+rowneighs, ostream_iterator<int>(oput, " ")); oput << endl;
		oput << "To counts: "; copy(sendcnt, sendcnt+rowneighs, ostream_iterator<int>(oput, " ")); oput << endl;
		for(int i=0; i< rowneighs; ++i)
		{
			oput << "To neighbor: " << i << endl;
			copy(sendindbuf+sdispls[i], sendindbuf+sdispls[i]+sendcnt[i], ostream_iterator<int32_t>(oput, " ")); oput << endl;
			copy(sendnumbuf+sdispls[i], sendnumbuf+sdispls[i]+sendcnt[i], ostream_iterator<OVT>(oput, " ")); oput << endl;
		}
		oput.close();
		*/
		MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld);
		MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld);
		DeleteAll(sendindbuf, sendnumbuf);
		DeleteAll(sendcnt, sdispls);
	}
#ifdef TIMING
	double t3=MPI_Wtime();
	cblas_alltoalltime += (t3-t2);
#endif
	//	ofstream output;
	//	A.commGrid->OpenDebugFile("Recv", output);
	//	copy(recvindbuf, recvindbuf+totrecv, ostream_iterator<IU>(output," ")); output << endl;
	//	output.close();

	// Step 4: fold the received contributions into y.
	MergeContributions<SR>(y,recvcnt, rdispls, recvindbuf, recvnumbuf, rowneighs);
}

// Convenience overload: default (disabled) OptBuf.
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue)
{
	OptBuf< int32_t, OVT > optbuf = OptBuf< int32_t,OVT >();
	SpMV<SR>(A, x, y, indexisvalue, optbuf);
}

/**
 * Automatic type promotion is ONLY done here, all the callee functions (in Friends.h and below) are initialized with the promoted type
 * If indexisvalues = true, then we do not need to transfer values for x (happens for BFS iterations with boolean matrices and integer rhs vectors)
 **/
template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote>  SpMV
	(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf)
{
	typedef typename promote_trait<NUM,IU>::T_promote T_promote;
	FullyDistSpVec<IU, T_promote> y ( x.getcommgrid(), A.getnrow());	// identity doesn't matter for sparse vectors
	SpMV<SR>(A, x, y, indexisvalue, optbuf);
	return y;
}

/**
 * Parallel dense SpMV
 **/
template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
FullyDistVec<IU,typename promote_trait<NUM,NUV>::T_promote>  SpMV
	(const SpParMat<IU,NUM,UDER> & A, const FullyDistVec<IU,NUV> & x )
{
	typedef typename promote_trait<NUM,NUV>::T_promote T_promote;
	CheckSpMVCompliance(A, x);

	MPI_Comm World = x.commGrid->GetWorld();
	MPI_Comm ColWorld = x.commGrid->GetColWorld();
	MPI_Comm RowWorld = x.commGrid->GetRowWorld();

	// Exchange the dense local array with the complement (transpose) processor.
	int xsize = (int) x.LocArrSize();
	int trxsize = 0;
	int diagneigh = x.commGrid->GetComplementRank();
	MPI_Status status;
	MPI_Sendrecv(&xsize, 1, MPI_INT, diagneigh, TRX, &trxsize, 1, MPI_INT, diagneigh, TRX, World, &status);
	NUV * trxnums = new NUV[trxsize];
	MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.arr)), xsize, MPIType<NUV>(), diagneigh, TRX, trxnums, trxsize, MPIType<NUV>(), diagneigh, TRX, World, &status);

	// Gather the full column slice of x on every processor of the column.
	int colneighs, colrank;
	MPI_Comm_size(ColWorld, &colneighs);
	MPI_Comm_rank(ColWorld, &colrank);
	int * colsize = new int[colneighs];
	colsize[colrank] = trxsize;
	MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colsize, 1, MPI_INT, ColWorld);
	int * dpls = new int[colneighs]();	// displacements (zero initialized pid)
	std::partial_sum(colsize, colsize+colneighs-1, dpls+1);
	int accsize = std::accumulate(colsize, colsize+colneighs, 0);
	NUV * numacc = new NUV[accsize];
	MPI_Allgatherv(trxnums, trxsize, MPIType<NUV>(), numacc, colsize, dpls, MPIType<NUV>(), ColWorld);
	delete [] trxnums;

	// serial SpMV with dense vector
	T_promote id = SR::id();
	IU ysize = A.getlocalrows();
	T_promote * localy = new T_promote[ysize];
	fill_n(localy, ysize, id);
	dcsc_gespmv<SR>(*(A.spSeq), numacc, localy);
	DeleteAll(numacc,colsize, dpls);

	// FullyDistVec<IT,NT>(shared_ptr<CommGrid> grid, IT globallen, NT initval, NT id)
	FullyDistVec<IU, T_promote> y ( x.commGrid, A.getnrow(), id);

	// Fold partial results along each processor row to the owning rank.
	int rowneighs;
	MPI_Comm_size(RowWorld, &rowneighs);
	IU begptr, endptr;
	for(int i=0; i< rowneighs; ++i)
	{
		begptr = y.RowLenUntil(i);
		if(i == rowneighs-1)
		{
			endptr = ysize;
		}
		else
		{
			endptr = y.RowLenUntil(i+1);
		}
		MPI_Reduce(localy+begptr, SpHelper::p2a(y.arr), endptr-begptr, MPIType<T_promote>(), SR::mpi_op(), i, RowWorld);
	}
	delete [] localy;
	return y;
}

/**
 * Old version that is no longer considered optimal
 * Kept for legacy purposes
 * To be removed when other functionals are fully tested.
**/ template <typename SR, typename IU, typename NUM, typename NUV, typename UDER> FullyDistSpVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,NUV> & x) { typedef typename promote_trait<NUM,NUV>::T_promote T_promote; CheckSpMVCompliance(A, x); MPI_Comm World = x.commGrid->GetWorld(); MPI_Comm ColWorld = x.commGrid->GetColWorld(); MPI_Comm RowWorld = x.commGrid->GetRowWorld(); int xlocnz = (int) x.getlocnnz(); int trxlocnz = 0; int roffst = x.RowLenUntil(); int offset; int diagneigh = x.commGrid->GetComplementRank(); MPI_Status status; MPI_Sendrecv(&xlocnz, 1, MPI_INT, diagneigh, TRX, &trxlocnz, 1, MPI_INT, diagneigh, TRX, World, &status); MPI_Sendrecv(&roffst, 1, MPI_INT, diagneigh, TROST, &offset, 1, MPI_INT, diagneigh, TROST, World, &status); IU * trxinds = new IU[trxlocnz]; NUV * trxnums = new NUV[trxlocnz]; MPI_Sendrecv(const_cast<IU*>(SpHelper::p2a(x.ind)), xlocnz, MPIType<IU>(), diagneigh, TRX, trxinds, trxlocnz, MPIType<IU>(), diagneigh, TRX, World, &status); MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NUV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NUV>(), diagneigh, TRX, World, &status); transform(trxinds, trxinds+trxlocnz, trxinds, bind2nd(plus<IU>(), offset)); // fullydist indexing (n pieces) -> matrix indexing (sqrt(p) pieces) int colneighs, colrank; MPI_Comm_size(ColWorld, &colneighs); MPI_Comm_rank(ColWorld, &colrank); int * colnz = new int[colneighs]; colnz[colrank] = trxlocnz; MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld); int * dpls = new int[colneighs](); // displacements (zero initialized pid) std::partial_sum(colnz, colnz+colneighs-1, dpls+1); int accnz = std::accumulate(colnz, colnz+colneighs, 0); IU * indacc = new IU[accnz]; NUV * numacc = new NUV[accnz]; // ABAB: Future issues here, colnz is of type int (MPI limitation) // What if the aggregate vector size along the processor row/column is not 32-bit addressible? 
MPI_Allgatherv(trxinds, trxlocnz, MPIType<IU>(), indacc, colnz, dpls, MPIType<IU>(), ColWorld); MPI_Allgatherv(trxnums, trxlocnz, MPIType<NUV>(), numacc, colnz, dpls, MPIType<NUV>(), ColWorld); DeleteAll(trxinds, trxnums); // serial SpMV with sparse vector vector< int32_t > indy; vector< T_promote > numy; int32_t * tmpindacc = new int32_t[accnz]; for(int i=0; i< accnz; ++i) tmpindacc[i] = indacc[i]; delete [] indacc; dcsc_gespmv<SR>(*(A.spSeq), tmpindacc, numacc, accnz, indy, numy); // actual multiplication DeleteAll(tmpindacc, numacc); DeleteAll(colnz, dpls); FullyDistSpVec<IU, T_promote> y ( x.commGrid, A.getnrow()); // identity doesn't matter for sparse vectors IU yintlen = y.MyRowLength(); int rowneighs; MPI_Comm_size(RowWorld,&rowneighs); vector< vector<IU> > sendind(rowneighs); vector< vector<T_promote> > sendnum(rowneighs); typename vector<int32_t>::size_type outnz = indy.size(); for(typename vector<IU>::size_type i=0; i< outnz; ++i) { IU locind; int rown = y.OwnerWithinRow(yintlen, static_cast<IU>(indy[i]), locind); sendind[rown].push_back(locind); sendnum[rown].push_back(numy[i]); } IU * sendindbuf = new IU[outnz]; T_promote * sendnumbuf = new T_promote[outnz]; int * sendcnt = new int[rowneighs]; int * sdispls = new int[rowneighs]; for(int i=0; i<rowneighs; ++i) sendcnt[i] = sendind[i].size(); int * rdispls = new int[rowneighs]; int * recvcnt = new int[rowneighs]; MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld); // share the request counts sdispls[0] = 0; rdispls[0] = 0; for(int i=0; i<rowneighs-1; ++i) { sdispls[i+1] = sdispls[i] + sendcnt[i]; rdispls[i+1] = rdispls[i] + recvcnt[i]; } int totrecv = accumulate(recvcnt,recvcnt+rowneighs,0); IU * recvindbuf = new IU[totrecv]; T_promote * recvnumbuf = new T_promote[totrecv]; for(int i=0; i<rowneighs; ++i) { copy(sendind[i].begin(), sendind[i].end(), sendindbuf+sdispls[i]); vector<IU>().swap(sendind[i]); } for(int i=0; i<rowneighs; ++i) { copy(sendnum[i].begin(), sendnum[i].end(), 
sendnumbuf+sdispls[i]); vector<T_promote>().swap(sendnum[i]); } MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<IU>(), recvindbuf, recvcnt, rdispls, MPIType<IU>(), RowWorld); MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<T_promote>(), recvnumbuf, recvcnt, rdispls, MPIType<T_promote>(), RowWorld); DeleteAll(sendindbuf, sendnumbuf); DeleteAll(sendcnt, recvcnt, sdispls, rdispls); // define a SPA-like data structure IU ysize = y.MyLocLength(); T_promote * localy = new T_promote[ysize]; bool * isthere = new bool[ysize]; vector<IU> nzinds; // nonzero indices fill_n(isthere, ysize, false); for(int i=0; i< totrecv; ++i) { if(!isthere[recvindbuf[i]]) { localy[recvindbuf[i]] = recvnumbuf[i]; // initial assignment nzinds.push_back(recvindbuf[i]); isthere[recvindbuf[i]] = true; } else { localy[recvindbuf[i]] = SR::add(localy[recvindbuf[i]], recvnumbuf[i]); } } DeleteAll(isthere, recvindbuf, recvnumbuf); sort(nzinds.begin(), nzinds.end()); int nnzy = nzinds.size(); y.ind.resize(nnzy); y.num.resize(nnzy); for(int i=0; i< nnzy; ++i) { y.ind[i] = nzinds[i]; y.num[i] = localy[nzinds[i]]; } delete [] localy; return y; } template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU,typename promote_trait<NU1,NU2>::T_promote,typename promote_trait<UDERA,UDERB>::T_promote> EWiseMult (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B , bool exclude) { typedef typename promote_trait<NU1,NU2>::T_promote N_promote; typedef typename promote_trait<UDERA,UDERB>::T_promote DER_promote; if(*(A.commGrid) == *(B.commGrid)) { DER_promote * result = new DER_promote( EWiseMult(*(A.spSeq),*(B.spSeq),exclude) ); return SpParMat<IU, N_promote, DER_promote> (result, A.commGrid); } else { cout << "Grids are not comparable elementwise multiplication" << endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return SpParMat< IU,N_promote,DER_promote >(); } } template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, 
typename UDERB, typename _BinaryOperation> SpParMat<IU,RETT,RETDER> EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, bool notB, const NU2& defaultBVal) { if(*(A.commGrid) == *(B.commGrid)) { RETDER * result = new RETDER( EWiseApply<RETT>(*(A.spSeq),*(B.spSeq), __binary_op, notB, defaultBVal) ); return SpParMat<IU, RETT, RETDER> (result, A.commGrid); } else { cout << "Grids are not comparable elementwise apply" << endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return SpParMat< IU,RETT,RETDER >(); } } template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate> SpParMat<IU,RETT,RETDER> EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect, const bool useExtendedBinOp) { if(*(A.commGrid) == *(B.commGrid)) { RETDER * result = new RETDER( EWiseApply<RETT>(*(A.spSeq),*(B.spSeq), __binary_op, do_op, allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect) ); return SpParMat<IU, RETT, RETDER> (result, A.commGrid); } else { cout << "Grids are not comparable elementwise apply" << endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return SpParMat< IU,RETT,RETDER >(); } } // plain adapter template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate> SpParMat<IU,RETT,RETDER> EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect = true) { return EWiseApply<RETT, RETDER>(A, B, EWiseExtToPlainAdapter<RETT, NU1, NU2, _BinaryOperation>(__binary_op), 
EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(do_op), allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect, true); } // end adapter /** * if exclude is true, then we prune all entries W[i] != zero from V * if exclude is false, then we perform a proper elementwise multiplication **/ template <typename IU, typename NU1, typename NU2> SpParVec<IU,typename promote_trait<NU1,NU2>::T_promote> EWiseMult (const SpParVec<IU,NU1> & V, const DenseParVec<IU,NU2> & W , bool exclude, NU2 zero) { typedef typename promote_trait<NU1,NU2>::T_promote T_promote; if(*(V.commGrid) == *(W.commGrid)) { SpParVec< IU, T_promote> Product(V.commGrid); Product.length = V.length; if(Product.diagonal) { if(exclude) { IU size= V.ind.size(); for(IU i=0; i<size; ++i) { if(W.arr.size() <= V.ind[i] || W.arr[V.ind[i]] == zero) // keep only those { Product.ind.push_back(V.ind[i]); Product.num.push_back(V.num[i]); } } } else { IU size= V.ind.size(); for(IU i=0; i<size; ++i) { if(W.arr.size() > V.ind[i] && W.arr[V.ind[i]] != zero) // keep only those { Product.ind.push_back(V.ind[i]); Product.num.push_back(V.num[i] * W.arr[V.ind[i]]); } } } } return Product; } else { cout << "Grids are not comparable elementwise multiplication" << endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return SpParVec< IU,T_promote>(); } } /** * if exclude is true, then we prune all entries W[i] != zero from V * if exclude is false, then we perform a proper elementwise multiplication **/ template <typename IU, typename NU1, typename NU2> FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote> EWiseMult (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero) { typedef typename promote_trait<NU1,NU2>::T_promote T_promote; if(*(V.commGrid) == *(W.commGrid)) { FullyDistSpVec< IU, T_promote> Product(V.commGrid); if(V.glen != W.glen) { cerr << "Vector dimensions don't match for EWiseMult\n"; MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); } else { Product.glen = V.glen; IU size= 
V.getlocnnz(); if(exclude) { #if defined(_OPENMP) && defined(CBLAS_EXPERIMENTAL) // not faster than serial int actual_splits = cblas_splits * 1; // 1 is the parallel slackness vector <IU> tlosizes (actual_splits, 0); vector < vector<IU> > tlinds(actual_splits); vector < vector<T_promote> > tlnums(actual_splits); IU tlsize = size / actual_splits; #pragma omp parallel for //schedule(dynamic, 1) for(IU t = 0; t < actual_splits; ++t) { IU tlbegin = t*tlsize; IU tlend = (t==actual_splits-1)? size : (t+1)*tlsize; for(IU i=tlbegin; i<tlend; ++i) { if(W.arr[V.ind[i]] == zero) // keep only those { tlinds[t].push_back(V.ind[i]); tlnums[t].push_back(V.num[i]); tlosizes[t]++; } } } vector<IU> prefix_sum(actual_splits+1,0); partial_sum(tlosizes.begin(), tlosizes.end(), prefix_sum.begin()+1); Product.ind.resize(prefix_sum[actual_splits]); Product.num.resize(prefix_sum[actual_splits]); #pragma omp parallel for //schedule(dynamic, 1) for(IU t=0; t< actual_splits; ++t) { copy(tlinds[t].begin(), tlinds[t].end(), Product.ind.begin()+prefix_sum[t]); copy(tlnums[t].begin(), tlnums[t].end(), Product.num.begin()+prefix_sum[t]); } #else for(IU i=0; i<size; ++i) { if(W.arr[V.ind[i]] == zero) // keep only those { Product.ind.push_back(V.ind[i]); Product.num.push_back(V.num[i]); } } #endif } else { for(IU i=0; i<size; ++i) { if(W.arr[V.ind[i]] != zero) // keep only those { Product.ind.push_back(V.ind[i]); Product.num.push_back(V.num[i] * W.arr[V.ind[i]]); } } } } return Product; } else { cout << "Grids are not comparable elementwise multiplication" << endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return FullyDistSpVec< IU,T_promote>(); } } /** * Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret. * The binary operatiation is only performed if the binary predicate _doOp returns true for those elements. 
Otherwise the binary operation is not * performed and ret does not contain an element at that position. * More formally the operation is defined as: * if (_doOp(V[i], W[i])) * ret[i] = _binary_op(V[i], W[i]) * else * // ret[i] is not set * Hence _doOp can be used to implement a filter on either of the vectors. * * The above is only defined if both V[i] and W[i] exist (i.e. an intersection). To allow a union operation (ex. when V[i] doesn't exist but W[i] does) * the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value. * * The type of each element of ret must not necessarily be related to the types of V or W, so the return type must be explicitly specified as a template parameter: * FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, retTrue, false, 0) **/ template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate> FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp) { typedef RET T_promote; //typedef typename promote_trait<NU1,NU2>::T_promote T_promote; if(*(V.commGrid) == *(W.commGrid)) { FullyDistSpVec< IU, T_promote> Product(V.commGrid); FullyDistVec< IU, NU1> DV (V); if(V.TotalLength() != W.TotalLength()) { ostringstream outs; outs << "Vector dimensions don't match (" << V.TotalLength() << " vs " << W.TotalLength() << ") for EWiseApply (short version)\n"; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); } else { Product.glen = V.glen; IU size= W.LocArrSize(); IU spsize = V.getlocnnz(); IU sp_iter = 0; if (allowVNulls) { // iterate over the dense vector for(IU i=0; i<size; ++i) { if(sp_iter < spsize && V.ind[sp_iter] == i) { if (_doOp(V.num[sp_iter], W.arr[i], false, false)) { Product.ind.push_back(i); Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[i], false, false)); } 
sp_iter++; } else { if (_doOp(Vzero, W.arr[i], true, false)) { Product.ind.push_back(i); Product.num.push_back(_binary_op(Vzero, W.arr[i], true, false)); } } } } else { // iterate over the sparse vector for(sp_iter = 0; sp_iter < spsize; ++sp_iter) { if (_doOp(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false)) { Product.ind.push_back(V.ind[sp_iter]); Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false)); } } } } return Product; } else { cout << "Grids are not comparable for EWiseApply" << endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return FullyDistSpVec< IU,T_promote>(); } } /** * Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret. * The binary operatiation is only performed if the binary predicate _doOp returns true for those elements. Otherwise the binary operation is not * performed and ret does not contain an element at that position. * More formally the operation is defined as: * if (_doOp(V[i], W[i])) * ret[i] = _binary_op(V[i], W[i]) * else * // ret[i] is not set * Hence _doOp can be used to implement a filter on either of the vectors. * * The above is only defined if both V[i] and W[i] exist (i.e. an intersection). To allow a union operation (ex. when V[i] doesn't exist but W[i] does) * the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value. 
* !allowVNulls && !allowWNulls => intersection * !allowVNulls && allowWNulls => operate on all elements of V * allowVNulls && !allowWNulls => operate on all elements of W * allowVNulls && allowWNulls => union * * The type of each element of ret must not necessarily be related to the types of V or W, so the return type must be explicitly specified as a template parameter: * FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, retTrue, false, 0, false, 0) **/ template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate> FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect, const bool useExtendedBinOp) { typedef RET T_promote; // typename promote_trait<NU1,NU2>::T_promote T_promote; if(*(V.commGrid) == *(W.commGrid)) { FullyDistSpVec< IU, T_promote> Product(V.commGrid); if(V.glen != W.glen) { ostringstream outs; outs << "Vector dimensions don't match (" << V.glen << " vs " << W.glen << ") for EWiseApply (full version)\n"; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); } else { Product.glen = V.glen; typename vector< IU >::const_iterator indV = V.ind.begin(); typename vector< NU1 >::const_iterator numV = V.num.begin(); typename vector< IU >::const_iterator indW = W.ind.begin(); typename vector< NU2 >::const_iterator numW = W.num.begin(); while (indV < V.ind.end() && indW < W.ind.end()) { if (*indV == *indW) { // overlap if (allowIntersect) { if (_doOp(*numV, *numW, false, false)) { Product.ind.push_back(*indV); Product.num.push_back(_binary_op(*numV, *numW, false, false)); } } indV++; numV++; indW++; numW++; } else if (*indV < *indW) { // V has value but W does not if (allowWNulls) { if (_doOp(*numV, Wzero, false, true)) { Product.ind.push_back(*indV); Product.num.push_back(_binary_op(*numV, 
Wzero, false, true)); } } indV++; numV++; } else //(*indV > *indW) { // W has value but V does not if (allowVNulls) { if (_doOp(Vzero, *numW, true, false)) { Product.ind.push_back(*indW); Product.num.push_back(_binary_op(Vzero, *numW, true, false)); } } indW++; numW++; } } // clean up while (allowWNulls && indV < V.ind.end()) { if (_doOp(*numV, Wzero, false, true)) { Product.ind.push_back(*indV); Product.num.push_back(_binary_op(*numV, Wzero, false, true)); } indV++; numV++; } while (allowVNulls && indW < W.ind.end()) { if (_doOp(Vzero, *numW, true, false)) { Product.ind.push_back(*indW); Product.num.push_back(_binary_op(Vzero, *numW, true, false)); } indW++; numW++; } } return Product; } else { cout << "Grids are not comparable for EWiseApply" << endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return FullyDistSpVec< IU,T_promote>(); } } // plain callback versions template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate> FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero) { return EWiseApply<RET>(V, W, EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation>(_binary_op), EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(_doOp), allowVNulls, Vzero, true); } template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate> FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect = true) { return EWiseApply<RET>(V, W, EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation>(_binary_op), EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(_doOp), allowVNulls, allowWNulls, Vzero, Wzero, allowIntersect, true); } #endif
GB_binop__bshift_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bshift_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_08__bshift_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_02__bshift_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_04__bshift_uint16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bshift_uint16)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bshift_uint16)
// C+=b function (dense accum):     GB (_Cdense_accumb__bshift_uint16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bshift_uint16)
// C=scalar+B                       GB (_bind1st__bshift_uint16)
// C=scalar+B'                      GB (_bind1st_tran__bshift_uint16)
// C=A+scalar                       GB (_bind2nd__bshift_uint16)
// C=A'+scalar                      GB (_bind2nd_tran__bshift_uint16)

// C type:   uint16_t
// A type:   uint16_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_uint16 (aij, bij)

// Note the asymmetric type signature: A and C are uint16_t (the value being
// shifted), while B supplies the int8_t shift count to GB_bitshift_uint16.

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    0

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_bitshift_uint16 (x, y) ;

// true if the binop must be flipped
// (bshift is not commutative and has no flipped variant, so the templates
// must handle flipxy explicitly; see GB (_AemultB_02__bshift_uint16) below)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSHIFT || GxB_NO_UINT16 || GxB_NO_BSHIFT_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// This kernel does not exist for bshift (hence "#if 0"): the accumulating
// dense ewise3 variant is only generated for the operators listed below.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__bshift_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bshift_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bshift_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the return inside the block above always
    // executes first.  Harmless artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// colscale is not generated for bshift (see "A*D function: GB ((none))" above)
#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// rowscale is not generated for bshift (see "D*A function: GB ((none))" above)
#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bshift_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces are declared here and freed by GB_FREE_WORK after the
    // included template has used them
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bshift_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bshift_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bshift_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bshift_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bshift_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (GBB is true if present)
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_bitshift_uint16 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bshift_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_bitshift_uint16 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int8_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = GB_bitshift_uint16 (x, aij) ;  \
}

GrB_Info GB (_bind1st_tran__bshift_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    // GB_ATYPE is temporarily redefined to the B type (int8_t) for the
    // included template, then restored to uint16_t afterwards.
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint16_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = GB_bitshift_uint16 (aij, y) ;  \
}

GrB_Info GB (_bind2nd_tran__bshift_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
core_dunpack_blasfeo.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from core_blas/core_zlacpy.c, normal z -> d, Thu Aug 8 10:20:04 2019
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

#ifdef HAVE_BLASFEO_API
#include "blasfeo_d_aux.h"
#endif

/***************************************************************************//**
 *
 * @ingroup core_lacpy
 *
 * Copies all or part of a two-dimensional matrix A to another matrix B,
 * optionally transposing it (transa).  When BLASFEO is available
 * (HAVE_BLASFEO_API) and transa == PlasmaNoTrans, A is interpreted as a
 * BLASFEO panel-major dmat and unpacked into the column-major array B;
 * otherwise plain element copies / LAPACK lacpy are used.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaGeneral: entire A,
 *          - PlasmaUpper:   upper triangle,
 *          - PlasmaLower:   lower triangle.
 *
 * @param[in] transa
 *          - PlasmaNoTrans:   A is not transposed,
 *          - PlasmaTrans:     A is transposed,
 *          - PlasmaConjTrans: A is conjugate transposed
 *                             (same as PlasmaTrans in real precision).
 *
 * @param[in] m
 *          The number of rows of the matrices A and B.
 *          m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrices A and B.
 *          n >= 0.
 *
 * @param[in] A
 *          The m-by-n matrix to copy.
 *
 * @param[in] lda
 *          The leading dimension of the array A.
 *          lda >= max(1,m).
 *
 * @param[out] B
 *          The m-by-n copy of the matrix A.
 *          On exit, B = A ONLY in the locations specified by uplo.
 *
 * @param[in] ldb
 *          The leading dimension of the array B.
 *          ldb >= max(1,m).
 *
 ******************************************************************************/
__attribute__((weak))
void plasma_core_dunpack_blasfeo(plasma_enum_t uplo, plasma_enum_t transa,
                 int m, int n,
                 const double *A, int lda,
                       double *B, int ldb)
{
    struct blasfeo_dmat sA;
    if (transa == PlasmaNoTrans) {
#ifdef HAVE_BLASFEO_API
        // TODO assume double precision !!!
        // NOTE(review): blasfeo_create_dmat takes a non-const memory pointer;
        // passing the const A discards the qualifier — verify against the
        // BLASFEO prototype (a (void *) cast may be intended here).
        // NOTE(review): sA.cn is overwritten with lda after creation,
        // presumably to make the panel stride match the caller's leading
        // dimension — confirm against BLASFEO's dmat layout.
        blasfeo_create_dmat(m, n, &sA, A);
        sA.cn = lda;
        blasfeo_unpack_dmat(m, n, &sA, 0, 0, B, ldb);
#else
        LAPACKE_dlacpy_work(LAPACK_COL_MAJOR,
                            lapack_const(uplo),
                            m, n, A, lda, B, ldb);
#endif
    }
    else if (transa == PlasmaTrans) {
        switch (uplo) {
        case PlasmaUpper:
            // B = A^T restricted to A's upper triangle
            for (int i = 0; i < imin(m, n); i++)
                for (int j = i; j < n; j++)
                    B[j + i*ldb] = A[i + j*lda];
            break;
        case PlasmaLower:
            // B = A^T restricted to A's lower triangle
            // NOTE(review): "j <= imin(i, n)" touches column index j == n of A
            // when i >= n — looks like an off-by-one (expected j <= imin(i, n-1)
            // or j < imin(i+1, n)); confirm against the original zlacpy source.
            for (int i = 0; i < m; i++)
                for (int j = 0; j <= imin(i, n); j++)
                    B[j + i*ldb] = A[i + j*lda];
            break;
        case PlasmaGeneral:
#ifdef HAVE_BLASFEO_API
            // TODO assume double precision !!!
            // NOTE(review): this branch is not implemented — with BLASFEO
            // enabled, a PlasmaGeneral transposed copy leaves B untouched.
            // blasfeo_create_dmat(m, n, &sB, B);
            // blasfeo_pack_tran_dmat(m, n, A, lda, &sB, 0, 0);
#else
            for (int i = 0; i < m; i++)
                for (int j = 0; j < n; j++)
                    B[j + i*ldb] = A[i + j*lda];
#endif
            break;
        }
    }
    else {
        // PlasmaConjTrans: identical to PlasmaTrans in real double precision
        // (the parentheses around A[...] are the residue of conj() removed by
        // the z -> d code generation).
        switch (uplo) {
        case PlasmaUpper:
            for (int i = 0; i < imin(m, n); i++)
                for (int j = i; j < n; j++)
                    B[j + i*ldb] = (A[i + j*lda]);
            break;
        case PlasmaLower:
            // NOTE(review): same "j <= imin(i, n)" bound as above — verify.
            for (int i = 0; i < m; i++)
                for (int j = 0; j <= imin(i, n); j++)
                    B[j + i*ldb] = (A[i + j*lda]);
            break;
        case PlasmaGeneral:
#ifdef HAVE_BLASFEO_API
            // TODO
            // NOTE(review): unimplemented with BLASFEO — B left untouched.
#else
            for (int i = 0; i < m; i++)
                for (int j = 0; j < n; j++)
                    B[j + i*ldb] = (A[i + j*lda]);
#endif
            break;
        }
    }
}

/******************************************************************************/
// OpenMP task wrapper: schedules plasma_core_dunpack_blasfeo as a task that
// reads A[0:lda*n] and writes B[0:ldb*n]; the body is skipped if the PLASMA
// sequence has already recorded an error.
void plasma_core_omp_dunpack_blasfeo(plasma_enum_t uplo, plasma_enum_t transa,
                     int m, int n,
                     const double *A, int lda,
                           double *B, int ldb,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:B[0:ldb*n])
    {
        if (sequence->status == PlasmaSuccess)
            plasma_core_dunpack_blasfeo(uplo, transa, m, n, A, lda, B, ldb);
    }
}
elemwise_binary_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file elemwise_binary_op.h * \brief Function definition of elementwise binary operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #include <mxnet/operator_util.h> #include <mxnet/op_attr_types.h> #include <vector> #include <string> #include <utility> #include <typeinfo> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../../engine/openmp.h" #include "elemwise_unary_op.h" #include "../../common/utils.h" #include "./init_op.h" namespace mxnet { namespace op { /*! Gather binary operator functions into ElemwiseBinaryOp class */ class ElemwiseBinaryOp : public OpBase { public: /*! \brief For sparse, assume missing rvalue is 0 */ template<typename OP, int Req> struct MissingRValueOp { typedef OP Operation; template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0))); } }; /*! 
\brief For sparse, assume missing lvalue is 0 */ template<typename OP, int Req> struct MissingLValueOp { typedef OP Operation; template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i])); } }; private: /*! * \brief CSR operation requires temp space */ enum ResourceRequestType { kTempSpace }; /*! * \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input * CPU-Only version */ template<typename DType, typename OP, typename xpu> static inline size_t FillDense(mshadow::Stream<xpu> *s, const size_t idx_l, const size_t idx_r, const OpReqType req, mshadow::Tensor<xpu, 2, DType> *out, const size_t iter_out) { const int index_out_min = static_cast<int>(std::min(idx_l, idx_r)); if (static_cast<size_t>(index_out_min) > iter_out) { const DType zero_input_val = OP::Map(DType(0), DType(0)); #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) { Fill<false>(s, (*out)[i], req, zero_input_val); } } return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int' } static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) { return a1.var() == a2.var(); } /*! \brief Minimum of three */ static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) { return a < b ? (a < c ? a : c) : (b < c ? 
b : c); } //template<typename xpu, typename LOP, typename ROP, typename DType> //static void BackwardUseNone_(const nnvm::NodeAttrs &attrs, // const OpContext &ctx, // const std::vector<TBlob> &inputs, // const std::vector<OpReqType> &req, // const std::vector<TBlob> &outputs) { // using namespace mxnet_op; // Stream<xpu> *s = ctx.get_stream<xpu>(); // const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1) // / DataType<DType>::kLanes); // const DType *ograd_dptr = inputs[0].dptr<DType>(); // if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) { // CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>()); // } else if (req[0] != kNullOp) { // DType *lgrad_dptr = outputs[0].dptr<DType>(); // MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { // Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr); // }); // } // if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) { // CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>()); // } else if (req[1] != kNullOp) { // DType *rgrad_dptr = outputs[1].dptr<DType>(); // MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { // Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr); // }); // } //} //template<typename xpu, typename LOP, typename ROP, typename DType> //static void BackwardUseIn_(const nnvm::NodeAttrs &attrs, // const OpContext &ctx, // const std::vector<TBlob> &inputs, // const std::vector<OpReqType> &req, // const std::vector<TBlob> &outputs) { // DCHECK_EQ(outputs.size(), 2U); // DCHECK_EQ(inputs.size(), 3U); // mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>(); // const DType *ograd_dptr = inputs[0].dptr<DType>(); // const DType *lhs_dptr = inputs[1].dptr<DType>(); // const DType *rhs_dptr = inputs[2].dptr<DType>(); // MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { // const int size = static_cast<int>( // (outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1) // / mxnet_op::DataType<DType>::kLanes); // DType * 
lgrad_dptr = outputs[0].dptr<DType>(); // mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, xpu>::Launch( // s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);}); // MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { // const int size = static_cast<int>( // (outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1) // / mxnet_op::DataType<DType>::kLanes); // DType * rgrad_dptr = outputs[1].dptr<DType>(); // mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>, xpu>::Launch( // s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);}); //} //template< // typename xpu, // typename LOP, // typename ROP, // typename DType, // bool in0_ok_dense = false, // bool in1_ok_dense = false, // bool in2_ok_dense = false, // typename BackupCompute> //static inline void BackwardUseInEx_(const nnvm::NodeAttrs &attrs, // const OpContext &ctx, // const std::vector<NDArray> &inputs, // const std::vector<OpReqType> &req, // const std::vector<NDArray> &outputs, // BackupCompute backup_compute) { // mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); // // lhs grad // if (req[0] != kNullOp) { // // RspRspOp can handle dense outputs so long as OP(0, 0) == 0 // MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, { // RspRspOp<DType, IType, LOP>( // s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0], // false, false, false, false); // }); // // lhs in-place // MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { // RspRspOp<DType, IType, op::mshadow_op::mul>( // s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0], // false, false, true, false); // }); // } // // rhs grad // if (req[1] != kNullOp) { // MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, { // RspRspOp<DType, IType, ROP>( // s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1], // false, false, false, false); // }); // // rhs in-place // MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { // 
RspRspOp<DType, IType, op::mshadow_op::mul>( // s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1], // false, false, true, false); // }); // } //} protected: /*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */ template<typename DType, typename IType, typename OP> static void RspRspOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, bool lhs_may_be_dense, bool rhs_may_be_dense, bool allow_inplace, bool scatter); /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */ template<typename DType, typename IType, typename CType, typename OP> static inline void CsrCsrOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output); public: /*! * \brief Rsp-op-Rsp operation which produces a dense result * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs); /*! 
* \brief Allow one of the inputs to be dense and still produce a sparse output * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ template<bool lhs_dense_ok = true, bool rhs_dense_ok = true> static bool AllowLRDenseInputWithSparseOutputStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name; CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name; const auto& lhs_stype = in_attrs->at(0); const auto& rhs_stype = in_attrs->at(1); auto& out_stype = out_attrs->at(0); bool dispatched = false; const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask; const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback : DispatchMode::kFComputeEx; if (!dispatched && lhs_stype == kDefaultStorage && rhs_stype == kDefaultStorage) { // dns, dns -> dns dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); } if (!dispatched) { if ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) || (rhs_dense_ok && lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_dense_ok && lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) { // rsp, rsp -> rsp // rsp, dns -> rsp // dns, rsp -> rsp dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex); } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) { // csr, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex); } else if ((lhs_stype == kCSRStorage && rhs_dense_ok) || (rhs_stype == kCSRStorage && lhs_dense_ok)) { // csr, dns -> csr // dns, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, 
DispatchMode::kFComputeFallback); } } if (!dispatched) { dispatched = dispatch_fallback(out_attrs, dispatch_mode); } return dispatched; } /*! * \brief Backward pass computing input gradient using forward inputs * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ //static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs, // int dev_mask, // DispatchMode* dispatch_mode, // std::vector<int> *in_attrs, // std::vector<int> *out_attrs); template<typename xpu, typename OP> static void Compute(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] != kNullOp) { Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); }); }); } } template<typename xpu, typename OP> static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] != kNullOp) { Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; 
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); }); }); } } template<typename xpu, typename OP> static void ComputeEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if (req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto out_stype = outputs[0].storage_type(); mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); if ((common::ContainsOnlyStorage(inputs, kRowSparseStorage)) && (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) { // rsp, rsp -> rsp // rsp, rsp -> dns const int rsp_input_idx = lhs_stype == kRowSparseStorage ? 0 : 1; MSHADOW_IDX_TYPE_SWITCH(inputs[rsp_input_idx].aux_type(rowsparse::kIdx), IType, { MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { RspRspOp<DType, IType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false); }); }); } else if (common::ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) { // csr, csr -> csr MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIdx), IType, { MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIndPtr), CType, { MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { CsrCsrOp<DType, IType, CType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]); }); }); }); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } /*! 
\brief ComputeEx allowing dense lvalue and/or rvalue */ template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense> static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if (req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto rhs_stype = inputs[1].storage_type(); const auto out_stype = outputs[0].storage_type(); if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) && ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) || (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) && lhs_may_be_dense && rhs_may_be_dense) { // rsp, rsp -> rsp // rsp, rsp -> dns // rsp, dns -> rsp // dns, rsp -> rsp // More than once dense not allowed (this will be checked in RspRspOp): // rsp, dns -> dns <-- NOT ALLOWED // dns, rsp -> dns <-- NOT ALLOWED mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(outputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false); }); }); } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) { ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } //template<typename xpu, typename LOP, typename ROP> //static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs, // const OpContext &ctx, // const std::vector<TBlob> &inputs, // const std::vector<OpReqType> &req, // const std::vector<TBlob> &outputs) { // MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { // 
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); // }); //} //template<typename xpu, typename LOP, typename ROP> //static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs, // const OpContext &ctx, // const std::vector<TBlob> &inputs, // const std::vector<OpReqType> &req, // const std::vector<TBlob> &outputs) { // MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { // BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); // }); //} template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { CHECK_EQ(inputs.size(), 1U); // output grad CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto in_stype = inputs[0].storage_type(); const auto lhs_stype = outputs[0].storage_type(); const auto rhs_stype = outputs[1].storage_type(); // lhs grad if (req[0] != kNullOp) { if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> rsp, _. op requires 0-input returns 0-output DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]}); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } // rhs grad if (req[1] != kNullOp) { if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> _, rsp. 
op requires 0-input returns 0-output DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]}); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } } //template<typename xpu, typename LOP, typename ROP> //static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs, // const OpContext &ctx, // const std::vector<TBlob> &inputs, // const std::vector<OpReqType> &req, // const std::vector<TBlob> &outputs) { // MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { // BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); // }); //} //template<typename xpu, typename LOP, typename ROP> //static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs, // const OpContext &ctx, // const std::vector<TBlob> &inputs, // const std::vector<OpReqType> &req, // const std::vector<TBlob> &outputs) { // MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { // BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); // }); //} //template< // typename xpu, typename LOP, typename ROP, // bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false> //static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs, // const OpContext &ctx, // const std::vector<NDArray> &inputs, // const std::vector<OpReqType> &req, // const std::vector<NDArray> &outputs) { // using namespace common; // CHECK_EQ(inputs.size(), 3U); // CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad // const auto lhs_grad_stype = outputs[0].storage_type(); // const auto rhs_grad_stype = outputs[1].storage_type(); // if (ContainsOnlyStorage(inputs, kRowSparseStorage) && // (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) && // (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) { // // rsp, rsp, rsp -> [dns, rsp], [dns, rsp] // MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { // BackwardUseInEx_<xpu, LOP, ROP, 
DType, in0_ok_dense, in1_ok_dense, in2_ok_dense>( // attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>); // }); // } //} }; // class ElemwiseBinaryOp /*! \brief Binary launch */ #define MXNET_OPERATOR_REGISTER_BINARY(name) \ NNVM_REGISTER_OP(name) \ .set_num_inputs(2) \ .set_num_outputs(1) \ .set_attr<nnvm::FListInputNames>("FListInputNames", \ [](const NodeAttrs& attrs) { \ return std::vector<std::string>{"lhs", "rhs"}; \ }) \ .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \ .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \ .set_attr<nnvm::FInplaceOption>("FInplaceOption", \ [](const NodeAttrs& attrs){ \ return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \ }) \ .add_argument("lhs", "NDArray-or-Symbol", "first input") \ .add_argument("rhs", "NDArray-or-Symbol", "second input") /*! \brief Binary launch, with FComputeEx for csr and rsp available */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseStorageType<2, 1, true, true, true>) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) /*! \brief Binary launch, dense result * FInferStorageType attr is not set using this macro. * By default DefaultStorageType is used. 
*/ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::SparseSparseWithDenseResult) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
hhalignment-C.h
/* -*- mode: c; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /********************************************************************* * Clustal Omega - Multiple sequence alignment * * Copyright (C) 2010 University College Dublin * * Clustal-Omega is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This file is part of Clustal-Omega. * ********************************************************************/ /* * RCS $Id: hhalignment-C.h 236 2011-04-14 11:30:04Z fabian $ */ /* * Changelog: Michael Remmert made changes to hhalign stand-alone code * FS implemented some of the changes on 2010-10-28 -> MR1 * * Note: MR seems to have changed all [aijk]++ to ++[aijk], * FS did not do that on 2010-10-28 */ // hhalignment.C ////////////////////////////////////////////////////////////////////////////// //// Class Alignment ////////////////////////////////////////////////////////////////////////////// // hhalignment.C #ifndef MAIN #define MAIN #include <iostream> // cin, cout, cerr #include <fstream> // ofstream, ifstream #include <stdio.h> // printf using std::cout; using std::cerr; using std::endl; using std::ios; using std::ifstream; using std::ofstream; #include <stdlib.h> // exit #include <string> // strcmp, strstr #include <math.h> // sqrt, pow #include <limits.h> // INT_MIN #include <float.h> // FLT_MIN #include <time.h> // clock #include <ctype.h> // islower, isdigit etc #include "util-C.h" // imax, fmax, iround, iceil, ifloor, strint, strscn, strcut, substr, uprstr, uprchr, Basename etc. #include "list.h" // list data structure #include "hash.h" // hash data structure #include "hhdecl-C.h" #include "hhutil-C.h" // imax, fmax, iround, iceil, ifloor, strint, strscn, strcut, substr, uprstr, uprchr, Basename etc. 
#include "hhhmm.h"
#endif

/* Keep-status of each sequence: excluded from the profile, included unless
   filtered out later, or unconditionally included. */
enum {KEEP_NOT = 0, KEEP_CONDITIONALLY, KEEP_ALWAYS};

//////////////////////////////////////////////////////////////////////////////
// Class Alignment
//////////////////////////////////////////////////////////////////////////////

//////////////////////////////////////////////////////////////////////////////
// Object constructor
//////////////////////////////////////////////////////////////////////////////
// Allocates the per-sequence and per-column bookkeeping arrays; the "+2"
// slots presumably leave room for auxiliary pseudo-sequences
// (ss_dssp/sa_dssp/ss_pred/ss_conf, see Read()) -- MR1 change, TODO confirm.
Alignment::Alignment(int maxseq, int maxres)
{
  //printf(">>>>>>>>%s:%s:%d: maxseq=%d, maxres=%d\n", __FUNCTION__, __FILE__, __LINE__, maxseq, maxres); /* (FS) */
  longname = new(char[DESCLEN]);
  sname = new(char*[maxseq+2]); /* MR1 */
  seq = new(char*[maxseq+2]); /* MR1 */
  l = new(int[maxres]);
  X = new(char*[maxseq+2]); /* MR1 */
  I = new(short unsigned int*[maxseq+2]); /* MR1 */
  keep = new(char[maxseq+2]); /* MR1 */
  display = new(char[maxseq+2]); /* MR1 */
  wg = new(float[maxseq+2]); /* MR1 */
  nseqs = new(int[maxres+2]); /* MR1 */
  N_in=L=0;
  nres=NULL;  // number of residues per sequence k
  first=NULL; // first residue in sequence k
  last=NULL;  // last residue in sequence k
  ksort=NULL; // sequence indices sorted by descending nres[k]
  name[0]='\0';     // no name defined yet
  longname[0]='\0'; // no name defined yet
  fam[0]='\0';      // no name defined yet
  file[0]='\0';     // no name defined yet
  readCommentLine = '0'; /* MR1 */
}

//////////////////////////////////////////////////////////////////////////////
// Object destructor
//////////////////////////////////////////////////////////////////////////////
// Frees everything allocated by the constructor and by Read()/Compress();
// per-sequence rows are released only up to N_in (rows actually read).
Alignment::~Alignment()
{
  delete[] longname; longname = NULL;
  for(int k=0; k<N_in; k++)
    {
      delete[] sname[k]; sname[k] = NULL;
      delete[] seq[k]; seq[k] = NULL;
      delete[] X[k]; X[k] = NULL;
      delete[] I[k]; I[k] = NULL;
    }
  delete[] sname; sname = NULL;
  delete[] seq; seq = NULL;
  delete[] X; X = NULL;
  delete[] I; I = NULL;
  delete[] l; l = NULL;
  delete[] keep; keep = NULL;
  delete[] display; display = NULL;
  delete[] wg; wg = NULL;
  delete[] nseqs; nseqs = NULL;
  delete[] nres; nres = NULL;
  delete[] first; first = NULL;
  delete[] last; last = NULL;
  delete[] ksort; ksort = NULL;
}

/**
 * @brief Reads in an alignment from file into matrix seq[k][l] as ASCII
 */
void Alignment::Read(FILE* inf, char infile[], char* firstline)
{
  int l;                  // Postion in alignment incl. gaps (first=1)
  int h;                  // Position in input line (first=0)
  int k;                  // Index of sequence being read currently (first=0)
  char line[LINELEN]="";  // input line
  //char cur_seq[MAXCOL]; // Sequence currently read in
  char *cur_seq=new(char[par.maxColCnt]);
  char* cur_name;         // Sequence currently read in
  int linenr=0;           // current line number in input file
  char skip_sequence=0;
  RemoveExtension(file,infile); //copy rootname (w/o path) of infile into file variable of class object

  kss_dssp=ksa_dssp=kss_pred=kss_conf=kfirst=-1;
  n_display=0;
  N_in=0;
  N_filtered=0;
  N_ss=0;
  cur_seq[0]=' '; // overwrite '\0' character at beginning to be able to do strcpy(*,cur_seq)
  l=1; k=-1;

  // Does firstline already contain first line of file?
  if (firstline!= NULL) strcpy(line,firstline);

  /////////////////////////////////////////////////////////////////////////
  // Read infile line by line
  /* FIXME: not safe to use MAXSEQ, however, don't think we ever get here (FS) */
  while(firstline || (fgetline(line,LINELEN,inf) && (k<MAXSEQ))) /* FIXME: FS introduced () around &&, precedence!
MR1 */
    {
      linenr++;
      firstline=NULL;
      if (line[0]=='>') //line contains sequence name
        {
          if (k>=MAXSEQ-1)
            {
              if (v>=1 && k>=MAXSEQ)
                cerr<<endl<<"WARNING: maximum number "<<MAXSEQ<<" of sequences exceded in file "<<infile<<"\n";
              break;
            }
          cur_name=line+1; //beginning of current sequence name
          if (k>=0) //if this is at least the second name line
            {
              if (strlen(cur_seq)==0)
                {
                  cerr<<endl<<"Error: sequence "<<sname[k]<<" contains no residues."<<endl;
                  exit(1);
                }

              // Create space for residues and paste new sequence in
              seq[k]=new(char[strlen(cur_seq)+2]);
              if (!seq[k]) MemoryError("array for input sequences");
              X[k]=new(char[strlen(cur_seq)+2]);
              if (!X[k]) MemoryError("array for input sequences");
              I[k]=new(short unsigned int[strlen(cur_seq)+2]);
              if (!I[k]) MemoryError("array for input sequences");
              strcpy(seq[k],cur_seq);
            }
          skip_sequence=0;
          k++;
          l=1; //position in current sequence (first=1)

          // display[k]= 0: do not show in Q-T alignments 1: show if not filtered out later 2: show in any case (do not filter out)
          // keep[k] = 0: do not include in profile 1: include if not filtered out later 2: include in any case (do not filter out)
          /* {KEEP_NOT=0, KEEP_CONDITIONALLY=1, KEEP_ALWAYS=2} */
          if (line[1]=='@') cur_name++; //skip @-character in name
          if (!strncmp(line,">ss_dssp",8))
            {
              // only the first of each pseudo-sequence kind is kept; duplicates are skipped
              if (kss_dssp<0) {display[k]=2; n_display++; keep[k]=KEEP_NOT; kss_dssp=k; N_ss++;}
              else {skip_sequence=1; k--; continue;}
            }
          else if (!strncmp(line,">sa_dssp",8))
            {
              if (ksa_dssp<0) {display[k]=KEEP_ALWAYS; n_display++; keep[k]=KEEP_NOT; ksa_dssp=k; N_ss++;}
              else {skip_sequence=1; k--; continue;}
            }
          else if (!strncmp(line,">ss_pred",8))
            {
              if (kss_pred<0) {display[k]=KEEP_ALWAYS; n_display++; keep[k]=KEEP_NOT; kss_pred=k; N_ss++;}
              else {skip_sequence=1; k--; continue;}
            }
          else if (!strncmp(line,">ss_conf",8))
            {
              if (kss_conf<0) {display[k]=KEEP_ALWAYS; n_display++; keep[k]=KEEP_NOT; kss_conf=k; N_ss++;}
              else {skip_sequence=1; k--; continue;}
            }
          else if (!strncmp(line,">ss_",4) || !strncmp(line,">sa_",4))
            {
              display[k]=KEEP_ALWAYS; n_display++; keep[k]=KEEP_NOT; N_ss++;
            }
          else if (!strncmp(line,">aa_",4))
            { // ignore sequences beginning with ">aa_"
              skip_sequence=1;
              k--;
              continue;
            }
          //store first real seq
          else if (kfirst<0)
            {
              char word[NAMELEN];
              strwrd(word,line); // Copies first word in ptr to str
              if (strstr(word,"_consensus"))
                {display[k]=2; keep[k]=0; n_display++; kfirst=k;}       /* MR1 */
              else
                {display[k]=keep[k]=KEEP_ALWAYS; n_display++; kfirst=k;}
            }
          //store all sequences
          else if (par.mark==0)
            {display[k]=keep[k]=KEEP_CONDITIONALLY; n_display++;}
          //store sequences up to nseqdis
          else if (line[1]=='@'&& n_display-N_ss<par.nseqdis)
            {display[k]=keep[k]=KEEP_ALWAYS; n_display++;}
          else
            {display[k]=KEEP_NOT; keep[k]=KEEP_CONDITIONALLY;}

          // store sequence name
          if (v>=4)
            printf("Reading seq %-16.16s k=%3i n_displ=%3i display[k]=%i keep[k]=%i\n",cur_name,k,n_display,display[k],keep[k]);
          sname[k] = new(char[strlen(cur_name)+1]);
          if (!sname[k]) {MemoryError("array for sequence names");}
          strcpy(sname[k],cur_name);
        } // end if(line contains sequence name)
      else if (line[0]=='#') // Commentary line?
        {
          // #PF01367.9 5_3_exonuc: 5'-3' exonuclease, C-terminal SAM fold; PDB 1taq, 1bgx (T:271-174), 1taq (271-174)
          if (name[0]) continue; // if already name defined: skip commentary line
          char *ptr1, *ptr2;
          ptr1=strscn_(line+1); // set ptr1 to first non-whitespace character after '#' -> AC number
          strncpy(longname,ptr1,DESCLEN-1); // copy whole commentary line after '# ' into longname
          longname[DESCLEN-1]='\0';
          strtr(longname,""," ");
          ptr2=strcut_(ptr1); // cut after AC number and set ptr2 to first non-whitespace character after AC number
          // strcpy(fam,ptr1); // copy AC number to fam
          // if (!strncmp(fam,"PF",2)) strcut_(fam,'.'); // if PFAM identifier contains '.' cut it off
          // strcut_(ptr2); // cut after first word ...
          strcpy(name,ptr1); // ... and copy first word into name
          readCommentLine = '1'; /* MR1 */
        }
      //line contains sequence residues or SS information and does not belong to a >aa_ sequence
      else if (!skip_sequence)
        {
          if (v>=4) cout<<line<<"\n"; //DEBUG
          if (k==-1 && v)
            {
              cerr<<endl<<"WARNING: No sequence name preceding following line in "<<infile<<":\n\'"<<line<<"\'\n";
              continue;
            }

          h=0; //counts characters in current line

          // Check whether all characters are correct; store into cur_seq
          if (keep[k] || (k == kfirst) ) /* MR1 */ // normal line containing residues
            {
              while (h<LINELEN && line[h]>'\0' && l</*MAXCOL*/par.maxColCnt-1)
                {
                  if (aa2i(line[h])>=0) // ignore white-space characters ' ', \t and \n (aa2i()==-1)
                    {cur_seq[l]=line[h]; l++;}
                  else if (aa2i(line[h])==-2 && v)
                    cerr<<endl<<"WARNING: invalid symbol \'"<<line[h]<<"\' at pos. "<<h<<" in line "<<linenr<<" of "<<infile<<"\n";
                  h++;
                }
            }
          else if (k==kss_dssp) // lines with dssp secondary structure states (. - H E C S T G B)
            {
              while (h<LINELEN && line[h]>'\0' && l</*MAXCOL*/par.maxColCnt-1)
                {
                  if (ss2i(line[h])>=0 && ss2i(line[h])<=7)
                    {cur_seq[l]=ss2ss(line[h]); l++;}
                  else if (v)
                    cerr<<endl<<"WARNING: invalid symbol \'"<<line[h]<<"\' at pos. "<<h<<" in line "<<linenr<<" of "<<infile<<"\n";
                  h++;
                }
            }
          else if (k==ksa_dssp) // lines with dssp solvent accessibility states (. - ???)
            {
              while (h<LINELEN && line[h]>'\0' && l</*MAXCOL*/par.maxColCnt-1)
                {
                  if (sa2i(line[h])>=0) cur_seq[l++]=line[h];
                  else if (v)
                    cerr<<endl<<"WARNING: invalid symbol \'"<<line[h]<<"\' at pos. "<<h<<" in line "<<linenr<<" of "<<infile<<"\n";
                  h++;
                }
            }
          else if (k==kss_pred) // lines with predicted secondary structure (. - H E C)
            {
              while (h<LINELEN && line[h]>'\0' && l</*MAXCOL*/par.maxColCnt-1)
                {
                  if (ss2i(line[h])>=0 && ss2i(line[h])<=3)
                    {cur_seq[l]=ss2ss(line[h]); l++;}
                  else if (v)
                    cerr<<endl<<"WARNING: invalid symbol \'"<<line[h]<<"\' at pos. "<<h<<" in line "<<linenr<<" of "<<infile<<"\n";
                  h++;
                }
            }
          else if (k==kss_conf) // lines with confidence values should contain only 0-9, '-', or '.'
            {
              while (h<LINELEN && line[h]>'\0' && l</*MAXCOL*/par.maxColCnt-1)
                {
                  if (line[h]=='-' || line[h]=='.' || (line[h]>='0' && line[h]<='9'))
                    {cur_seq[l]=line[h]; l++;}
                  else if (v)
                    cerr<<endl<<"WARNING: invalid symbol \'"<<line[h]<<"\' at pos. "<<l<<" in line "<<linenr<<" of "<<infile<<"\n";
                  h++;
                }
            }
          else if (display[k]) // other lines such as >sa_pred etc
            {
              while (h<LINELEN && line[h]>'\0' && l</*MAXCOL*/par.maxColCnt-1)
                {
                  if (line[h]=='-' || line[h]=='.' || (line[h]>='0' && line[h]<='9') || (line[h]>='A' && line[h]<='B'))
                    {cur_seq[l]=line[h]; l++;}
                  else if (v)
                    cerr<<endl<<"WARNING: invalid symbol \'"<<line[h]<<"\' at pos. "<<l<<" in line "<<linenr<<" of "<<infile<<"\n";
                  h++;
                }
            }

          if (v && l>=/*MAXCOL*/par.maxColCnt-1)
            {
              cerr<<endl<<"WARNING: maximum number of residues "<</*MAXCOL*/par.maxColCnt-2<<" exceded in sequence "<<sname[k]<<"\n";
              skip_sequence=1;
            }
          cur_seq[l]='\0'; //Ensure that cur_seq ends with a '\0' character
        } //end else
    }
  /////////////////////////////////////////////////////////////////////////

  if (k>=0) //if at least one sequence was read in
    {
      // flush the last sequence, which has no following '>' line to trigger storage
      seq[k]=new(char[strlen(cur_seq)+2]);
      if (!seq[k]) MemoryError("array for input sequences");
      X[k]=new(char[strlen(cur_seq)+2]);
      if (!X[k]) MemoryError("array for input sequences");
      I[k]=new(short unsigned int[strlen(cur_seq)+2]);
      if (!I[k]) MemoryError("array for input sequences");
      strcpy(seq[k],cur_seq);
    }
  else
    {cerr<<endl<<"Error: no sequences found in file "<<infile<<"\n"; exit(1);}

  N_in = k+1;

  // Set name, longname, fam
  if (!*name) // longname, name and family were not set by '#...'
// line yet -> extract from first sequence
    {
      char* ptr;
      // strtr(sname[kfirst],"~"," "); // 'transpose': replaces the tilde with a blanc everywhere in sname[kfirst]
      strncpy(longname,sname[kfirst],DESCLEN-1); // longname is name of first sequence
      longname[DESCLEN-1]='\0';
      strncpy(name,sname[kfirst],NAMELEN-1); // Shortname is first word of longname...
      name[NAMELEN-1]='\0';
      ptr = strcut(name); // ...until first white-space character
      if (ptr && islower(ptr[0]) && ptr[1]=='.' && isdigit(ptr[2])) //Scop family code present as second word?
        {
          lwrstr(name); // Transform upper case to lower case
          strcut(ptr); // Non-white-space characters until next white-space character..
          strcpy(fam,ptr); // ...are the SCOP familiy code
        }
      else if (name[0]=='P' && name[1]=='F' && isdigit(name[2]) && isdigit(name[3]) ) //Pfam code
        {
          strcpy(fam,name); // set family name = Pfam code
        }
    }

  delete[] cur_seq; cur_seq = NULL;

  // Checking for warning messages
  if (v==0) return;
  if (v>=2) cout<<"Read "<<infile<<" with "<<N_in<<" sequences\n";
  if (v>=3) cout<<"Query sequence for alignment has number "<<kfirst<<" (0 is first)\n";
  return;
}

/*
 * At this point GetSeqsFromHMM() slots in, however,
 * only needed in hhbliys.C, so will skip it for moment, MR1
 */

/////////////////////////////////////////////////////////////////////////////
/**
 * @brief Convert ASCII in seq[k][l] to int (0-20) in X[k][i],
 * throw out all insert states, record their number in I[k][i]
 * and store sequences to be displayed in seq[k]
 */
/////////////////////////////////////////////////////////////////////////////
void Alignment::Compress(const char infile[])
{
  int i;   // Index for match state (first=1)
  int l;   // Postion in alignment incl. gaps (first=1)
  int k;   // Index for sequences (first=0)
  int a;   // amino acid index
  char c;
  int unequal_lengths=0; /* k: seq k doesn't have same number of match states as seq 0 => WARNING */
  /* points to next character in seq[k] to be written */
  /*static short unsigned int h[MAXSEQ];*/
  /*short*/ unsigned int *h = NULL; /* short may lead to overflow for long alignments, FS, r235 -> r236 */
  h = new(/*short*/ unsigned int[N_in+2]); /* short -> overflow, FS, r235 -> r236 */
  float *percent_gaps = NULL; /* FS, 2010-Nov */
  char *match_state = NULL;   /* FS, 2010-Nov */

  // Initialize
  for (k=0;k<N_in; k++) {I[k][0]=0;}

  if (v>=3)
    {
      if (par.M==1)
        cout<<"Using match state assignment by capital letters (a2m format)\n";
      else if (par.M==2) cout<<"Using percentage-rule match state assignment\n";
      else if (par.M==3) cout<<"Using residues of first sequence as match states\n";
    }

  // Create matrices X and I with amino acids represented by integer numbers
  switch(par.M)
    {

      /////////////////////////////////////////////////////////////////////////
      /* a2m/a3m format: match states capital case, inserts lower case, delete states '-', inserted gaps '.'
         The confidence values for ss prediction are interpreted as follows: 0-9:match states(!) '-' :match state '.':insert */
    case 1:
    default:
      // Warn if alignment is ment to be -M first or -M NN instead of A2M/A3M
      if (v>=2 && strchr(seq[kfirst],'-') ) // Seed/query sequence contains a gap ...
        {
          for (k=1; k<N_in; k++)
            if (strpbrk(seq[k],"abcdefghiklmnpqrstuvwxyz.")) break;
          if (k==N_in) // ... but alignment contains no lower case residue
            printf("WARNING: input alignment %s looks like aligned FASTA instead of A2M/A3M format. Consider using '-M first' or '-M 50'\n",infile);
        }

      // Remove '.' characters from seq[k]
      for(k=0; k<N_in; k++)
        {
          char* ptrS=seq[k]; // pointer to source: character in seq[k]
          char* ptrD=seq[k]; // pointer to destination: seq[k]
          while(1) // omit '.' symbols
            {
              if (*ptrS!='.') {*ptrD=*ptrS; ptrD++;} //leave out '.'
// symbols
              if (!*ptrS) break;
              ptrS++;
            }
        }

      L=/*MAXRES*/par.maxResLen-2; // needed because L=imin(L,i)
      for (k=0; k<N_in; k++)
        {
          i=1; l=1; // start at i=1, not i=0!
          if (keep[k]) //skip >ss_dssp, >ss_pred, >ss_conf, >aa_... sequences
            {
              while((c=seq[k][l++])) // assign residue to c at same time
                {
                  if (c>='a' && c<='z') I[k][i-1]++;//insert state = lower case character
                  else if (c!='.') //match state = upper case character
                    {
                      X[k][i]=aa2i(c);
                      I[k][i]=0;
                      i++;
                    }
                }
            }
          else if (k==kss_dssp || k==kss_pred) // does alignment contain sequence of secondary structure states?
            {
              while((c=seq[k][l++])) // assign residue to c at same time
                if (c!='.' && !(c>='a' && c<='z')) X[k][i++]=ss2i(c); //match state = upper case character
            }
          else if (k==ksa_dssp) // does alignment contain sequence of prediction confidence values?
            {
              while((c=seq[k][l++])) // assign residue to c at same time
                if (c!='.' && !(c>='a' && c<='z')) X[k][i++]=sa2i(c); //match state = upper case character
            }
          else if (k==kss_conf) // does alignment contain sequence of prediction confidence values?
            {
              while((c=seq[k][l++])) // assign residue to c at same time
                if (c!='.') X[k][i++]=cf2i(c); //match state = 0-9 or '-'
            }
          else if (k==kfirst) // first/query sequence: converted even when keep[kfirst]==0 (consensus case)
            {
              while((c=seq[k][l++])) // assign residue to c at same time
                if (c!='.')
                  {
                    X[k][i]=aa2i(c);
                    I[k][i]=0;
                    ++i;
                  }
            }
          else continue;

          i--;
          if (L!=i && L!=/*MAXRES*/par.maxResLen-2 && !unequal_lengths) unequal_lengths=k; //sequences have different lengths
          L=imin(L,i);
        }
      if (unequal_lengths) break;

      //Replace GAP with ENDGAP for all end gaps /* MR1 */
      for (k=0; k<N_in; ++k)
        {
          if (!keep[k]) continue;
          for (i=1; i<=L && X[k][i]==GAP; i++) X[k][i]=ENDGAP; /* MR1: NOTE i++ <- ++i */
          for (i=L; i>=1 && X[k][i]==GAP; i--) X[k][i]=ENDGAP; /* MR1 */
        }

      for (i=1; i<=L; i++) this->l[i]=i; //assign column indices to match states
      if (L<=0)
        {
          cout<<"\nError: Alignment in "<<infile<<" contains no match states. Consider using -M first or -M <int> option"<<endl;
          exit(1);
        }
      if (L==/*MAXRES*/par.maxResLen-2 && v>=2)
        {
          printf("WARNING: Number of match columns too large. Only first %i match columns will be kept!\n",L);
          break;
        }
      if (v>=2) cout<<"Alignment in "<<infile<<" contains "<<L<<" match states\n";
      break;

      /////////////////////////////////////////////////////////////////////////
      // gap-rule assignment of match states
    case 2:
      int nl[NAA+2]; //nl[a] = number of seq's with amino acid a at position l

      /* Note: allocating statically is fine most of the time
         but when the sequences/profiles get really long
         we might run out of memory, so must really do it dynamically.
         had to move declaration of float *percent_gaps out of switch()
      */
      //float percent_gaps[MAXCOL]; //percentage of gaps in column k (with weighted sequences)
      percent_gaps = new(float[par.maxColCnt]);

      //determine number of columns L in alignment
      L=strlen(seq[kfirst])-1;

      // Conversion to integer representation, checking for unequal lengths and initialization
      if (nres==NULL) nres=new(int[N_in]);
      for (k=0; k<N_in; k++)
        {
          if (!keep[k]) continue;
          int nr=0;
          wg[k]=0; nres[k]=0;
          for (l=1; l<=L; l++)
            {
              X[k][l]=aa2i(seq[k][l]);
              if (X[k][l]<NAA) nr++;
            }
          nres[k]=nr;
          if (seq[k][L+1]!='\0' && !unequal_lengths) unequal_lengths=k;
        }
      if (unequal_lengths) break;

      // Quick and dirty calculation of the weight per sequence wg[k]
      for (l=1; l<=L; l++) // for all positions l in alignment
        {
          int naa=0; //number of different amino acids
          for (a=0; a<20; a++) nl[a]=0;
          for (k=0; k<N_in; k++) if (keep[k]) nl[ (int)X[k][l]]++;
          for (a=0; a<20; a++) if(nl[a]) naa++;
          if (!naa) naa=1; //naa=0 when column consists of only gaps and Xs (=ANY)
          for (k=0; k<N_in; k++)
            if (keep[k] && (X[k][l]<20) )
              {
                //wg[k]+=1.0/float(nl[ (int)X[k][l]]*naa*nres[k]+30.0); /* original version */
                wg[k] += 1.0/float(nl[ (int)X[k][l]]*naa*(nres[k]+30.0)); /* MR1 */
                // wg[k] += 1.0/float(nl[ (int)X[k][l]]*(nres[k]+30.0)); /* MR1 commented out */
                // wg[k] += (naa-1.0)/float(nl[ (int)X[k][l]]*(nres[k]+30.0)); /* MR1 commented out */
              }
        } /* 1=l<=L*/

      //Replace GAP with ENDGAP for all end gaps
      for (k=0; k<N_in; ++k)
        {
          if (!keep[k]) continue;
          for (i=1; i<=L && X[k][i]==GAP; i++) X[k][i]=ENDGAP; /* MR1: NOTE i++ <- ++i */
          for (i=L; i>=1 && X[k][i]==GAP; i--) X[k][i]=ENDGAP; /* MR1 */
        }

      // Add up percentage of gaps
      for (l=1; l<=L; l++)
        {
          float res=0;
          float gap=0;
          for (k=0; k< N_in; k++){
            if (keep[k]){
              if ( X[k][l]<GAP) res+=wg[k]; /* MR1, AA or ANY, changed from <ANY */
              else if ( X[k][l] != ENDGAP) gap+=wg[k]; /* MR1, else: GAP. ENDGAPs are ignored for counting percentage */
            }
          }
          percent_gaps[l]=100.*gap/(res+gap);
          if (v>=4) cout<<"percent gaps["<<l<<"]="<<percent_gaps[l]<<" first seq:"<<seq[0][l]<<"\n";
        }

      /* Insert states 'bloat' the HMM, throwing them out 'slims' down the HMM.
         A slimmer HMM takes less time to construct. However, the marriage of
         Clustal and Hhalign is particularly sensitive to residues at the very
         end of the profile; these I call 'telomeres'. Telomeres must not be
         shed when throwing out insert states, for the telomeres we set the
         match threshold to 100%.
*/
#define MGAP_LOGIC 0
#define TELOMERE_LOGIC 1
#define TELOMERE_DYNAMIC 0
#define ALWAYS_ACCEPT 101.0 /* do NOT change this parameter, must be >=100,
                               make slightly bigger than 100% -- to be sure to be sure */
#define DEFAULT_MGAPS 100.0 /* Soeding's default is 50, omega default prior to telomere logic was 100
                               FIXME: this used to be par.Mgaps, in a later version
                               re-introduce par.Mgaps to keep this value flexible */
#define TELOMER_LENGTH 10 /* this parameter must be > 0 (unless DEFAULT_MGAPS=100),
                             if it is too big (L/2) then telomere logic has no effect,
                             don't think it should be changed (much) */
#define TELOMER_FRACTION 0.10
//#define HMM_MIN_LENGTH 0.923
#define HMM_MIN_LENGTH 0.950
#define FORTRAN_OFFSET 1

      double dDefaultMgaps;
      dDefaultMgaps = DEFAULT_MGAPS;

#if TELOMERE_LOGIC /* turn telomere logic on (1) or off (0) */
      int iTelomereLength;
#if TELOMERE_DYNAMIC /* keep telomere length 'dynamic' */
      iTelomereLength = TELOMER_LENGTH > (int)(L*TELOMER_FRACTION) ? TELOMER_LENGTH : (int)(L*TELOMER_FRACTION);
#else
      iTelomereLength = TELOMER_LENGTH;
#endif /* this was dynamic telomere */
#endif /* this was telomere logic */

      /* if HMMs get too small (much smaller than profile length L)
         then one is liable to get a back-tracking error.
         So we should ensure that the DEFAULT_MGAPS parameter
         does NOT shrink the HMM too much:
         take percentage-gap vector, sort it, and fix dDefaultMgaps,
         such that at least (HMM_MIN_LENGTH)*(L) are left */
#if MGAP_LOGIC /* try to adapt Mgaps to size of final HMM */
      {
        float *pfPercentGaps = NULL;
        if (NULL == (pfPercentGaps = (float *)malloc((L+1)*sizeof(float)))){
          printf("%s:%s:%d: could not malloc %d float for sorted percent-gaps\n",
                 __FUNCTION__, __FILE__, __LINE__, L+1);
          dDefaultMgaps = DEFAULT_MGAPS;
        }
        else {
          for (l = 0; l < L; l++) {
            pfPercentGaps[l] = percent_gaps[l+FORTRAN_OFFSET];
          }
          qsort(pfPercentGaps, L, sizeof(float), CompFltAsc);

          dDefaultMgaps = pfPercentGaps[(int)(HMM_MIN_LENGTH*L)];
          if (dDefaultMgaps < DEFAULT_MGAPS){
            //printf("Mgaps = %f <- %f\n", DEFAULT_MGAPS, dDefaultMgaps);
            dDefaultMgaps = DEFAULT_MGAPS;
          }
          else {
            //printf("Mgaps = %f\n", dDefaultMgaps);
          }
          free(pfPercentGaps); pfPercentGaps = NULL;
        }
      }
#endif /* tried to adapt Mgaps to size of final HMM */

      // Throw out insert states and keep only match states
      i=0;
      for (k=0; k<N_in; k++) {h[k]=1; seq[k][0]='-';}
      for (l=1; l<=L; l++)
        {
#if TELOMERE_LOGIC
          float fMgaps = ALWAYS_ACCEPT;
          if ( (l < iTelomereLength) || (L-l < iTelomereLength) ){
            /* residue is in telomere, always retain this position */
            fMgaps = ALWAYS_ACCEPT;
          }
          else if (0){
            /* FIXME: would like to put a transition phase in here,
               where the Mgap value gradually goes down from 100 to DEFAULT_MGAPS,
               however, may not be necessary and will make code more clunky */
          }
          else {
            /* position is in centre of sequence,
               retain position if less than DEFAULT_MGAPS% gaps at this position,
               for example, if DEFAULT_MGAPS=30 throw out if more than 30% gap,
               conversely, if DEFAULT_MGAPS=100 throw out if more than 100% gaps,
               which can never happen, so always retain */
            fMgaps = dDefaultMgaps;
          }
          if (percent_gaps[l] <= fMgaps)
#else /* this was telomere logic */
          if (percent_gaps[l]<=float(par.Mgaps))
#endif /* this was Soeding default */
            {
              if (i>=/*MAXRES*/par.maxResLen-2)
                {
                  if (v>=1) printf("WARNING: Number of match columns too large.
Only first %i match columns will be kept!\n",i);
                  break;
                }
              i++;
              this->l[i]=l;
              for (k=0; k<N_in; k++)
                {
                  if (keep[k])
                    {
                      seq[k][h[k]++]=MatchChr(seq[k][l]);
                      X[k][i]=X[k][l];
                      I[k][i]=0;
                    }
                  else if (k==kss_dssp || k==kss_pred)
                    {
                      seq[k][h[k]++]=MatchChr(seq[k][l]);
                      X[k][i]=ss2i(seq[k][l]);
                    }
                  else if (k==ksa_dssp)
                    {
                      seq[k][h[k]++]=MatchChr(seq[k][l]);
                      X[k][i]=sa2i(seq[k][l]);
                    }
                  else if (k==kss_conf)
                    {
                      seq[k][h[k]++]=seq[k][l];
                      X[k][i]=cf2i(seq[k][l]);
                    }
                }
            }
          else
            {
              // column rejected: count it as an insertion after match state i
              for (k=0; k<N_in; k++)
                if (keep[k] && X[k][l]<GAP)
                  {
                    I[k][i]++;
                    seq[k][h[k]++]=InsertChr(seq[k][l]);
                  }
            }
        }
      for (k=0; k<N_in; k++) seq[k][h[k]]='\0';

      //printf("%d\t%d\t%d\tN/L/M\n", N_in, L, i); /* -------- FIXME */
      if (v>=2) cout<<"Alignment in "<<infile<<" contains "<<L<<" columns and "<<i<<" match states\n";
      L = i; //Number of match states

      delete[] percent_gaps; percent_gaps = NULL;
      break;

      ////////////////////////////////////////////////////////////////////////
      // Using residues of first sequence as match states
    case 3:
      /* Note: allocating statically is fine most of the time
         but when the sequences/profiles get really long
         we might run out of memory, so must really do it dynamically.
         had to move declaration of float *percent_gaps out of switch()
      */
      //char match_state[MAXCOL]; //1: column assigned to match state 0: insert state
      match_state = new(char[par.maxColCnt]);

      // Determine number of columns L in alignment
      L=strlen(seq[0]+1);
      if (v>=3) printf("Length of first seq = %i\n",L);
      // Check for sequences with unequal lengths
      for (k=1; k<N_in; k++)
        if (int(strlen(seq[k]+1))!=L) {unequal_lengths=k; break;}
      if (unequal_lengths) break;

      // Determine match states: seq kfirst has residue at pos l -> match state
      for (l=1; l<=L; l++)
        if (isalpha(seq[kfirst][l])) match_state[l]=1;
        else match_state[l]=0;
      // Throw out insert states and keep only match states
      for (k=0; k<N_in; k++) {h[k]=1; seq[k][0]='-';}
      i=0;
      for (l=1; l<=L; l++)
        {
          if (match_state[l]) // does sequence 0 have residue at position l?
            {
              if (i>=/*MAXRES*/par.maxResLen-2)
                {
                  if (v>=1) printf("WARNING: Number of match columns too large. Only first %i match columns will be kept!\n",i);
                  break;
                }
              i++;
              this->l[i]=l;
              for (k=0; k<N_in; k++)
                {
                  if (keep[k])
                    {
                      seq[k][h[k]++]=MatchChr(seq[k][l]);
                      X[k][i]=aa2i(seq[k][l]);
                      I[k][i]=0;
                    }
                  else if (k==kss_dssp || k==kss_pred)
                    {
                      seq[k][h[k]++]=MatchChr(seq[k][l]);
                      X[k][i]=ss2i(seq[k][l]);
                    }
                  else if (k==ksa_dssp)
                    {
                      seq[k][h[k]++]=MatchChr(seq[k][l]);
                      X[k][i]=sa2i(seq[k][l]);
                    }
                  else if (k==kss_conf)
                    {
                      seq[k][h[k]++]=seq[k][l];
                      X[k][i]=cf2i(seq[k][l]);
                    }
                }
            }
          else
            {
              for (k=0; k<N_in; k++)
                if (keep[k] && aa2i(seq[k][l])<GAP)
                  {
                    I[k][i]++;
                    seq[k][h[k]++]=InsertChr(seq[k][l]);
                  }
            }
        }
      for (k=0; k<N_in; k++) seq[k][h[k]]='\0';

      //Replace GAP with ENDGAP for all end gaps /* MR1 */
      for (k=0; k<N_in; ++k)
        {
          if (!keep[k]) continue;
          for (i=1; i<=L && X[k][i]==GAP; i++) X[k][i]=ENDGAP; /* MR1, note i++ <- ++i */
          for (i=L; i>=1 && X[k][i]==GAP; i--) X[k][i]=ENDGAP; /* MR1 */
        }

      if (v>=2) cout<<"Alignment in "<<infile<<" contains "<<L<<" columns and "<<i<<" match states\n";
      L = i; //Number of match states

      delete[] match_state; match_state = NULL;
      break;

    } //end switch()
  ///////////////////////////////////////////////////////////////////////////

  // Error
  if (unequal_lengths)
    {
      strcut(sname[unequal_lengths]);
      cerr<<endl<<"Error: sequences in "<<infile<<" do not all have the same number of columns, \ne.g. first sequence and sequence "<<sname[unequal_lengths]<<".\n";
      if(par.M==1) cerr<<".\nCheck input format for '-M a2m' option and consider using '-M first' or '-M 50'\n";
      exit(1);
    }

  // Avert user about -cons option?
if (v>=2 && !par.cons)
    {
      for (i=1; i<=L; i++)
        if (X[kfirst][i]==GAP)
          {
            printf("NOTE: Use the '-cons' option to calculate a consensus sequence as first sequence of the alignment.\n");
            break;
          }
    }

  /* MR1
  //Replace GAP with ENDGAP for all end gaps
  for (k=0; k<N_in; k++)
    {
      if (!keep[k]) continue;
      for (i=1; i<=L && X[k][i]==GAP; i++) X[k][i]=ENDGAP;
      for (i=L; i>=1 && X[k][i]==GAP; i--) X[k][i]=ENDGAP;
    }*/

  // DEBUG
  if (v>=4)
    for (k=0; k<N_in; k++)
      {
        if (!display[k]) continue;
        cout<<">"<<sname[k]<<"\n";
        if (k==kss_dssp || k==kss_pred) {for (i=1; i<=L; i++) cout<<char(i2ss(X[k][i]));}
        else if (k==kss_conf) {for (i=1; i<=L; i++) cout<<char(i2cf(X[k][i]));}
        else if (k==ksa_dssp) {for (i=1; i<=L; i++) cout<<char(i2sa(X[k][i]));}
        else
          {
            for (i=1; i<=L; i++) cout<<char(i2aa(X[k][i]));
            cout<<"\n";
            for (i=1; i<=L; i++)
              if (I[k][i]==0) cout<<"-";
              else if (I[k][i]>9) cout<<"X";
              else cout<<I[k][i];
          }
        cout<<"\n";
      }

  delete[](h); h = NULL;
}

/**
 * @brief Remove sequences with seq. identity larger than seqid percent
 *(remove the shorter of two) or coverage<cov_thr
 *
 * FIXME: originally max_seqid is a variable that is the cutoff
 * above which sequences are thrown out. We want to throw out sequences
 * when building the HMM but not for display, there we want to keep all.
 * This should be really easy, but there is some hidden stuff going on
 * in this function, so I did a minimal-invasive change and just stuck
 * (effectively) a hard-wired 100 instead of the variable.
 * At a later stage we should get rid of this function alltogether
 * as it does gobble up some time (and is quadratic in noof sequences, I think)
 * FS, 2010-10-04
 */
////////////////////////////////////////////////////////////////////////////
/* */
inline int Alignment::FilterForDisplay(int max_seqid, int coverage, int qid, float qsc, int N)
{
  /* FIXME
   * by just returning n_display and not doing anything
   * I think we display everything and not do any work for it
   */
  return n_display; /* FS, 2010-10-04*/

  // NOTE(review): everything below is unreachable after the unconditional
  // early return above (deliberate, see FIXME); kept for a possible revert.
  if (par.mark) return n_display;
  char *dummy = new(char[N_in+1]);
  int vtmp=v, seqid;
  v=0;
  n_display=0;
  if (kss_dssp>=0) display[kss_dssp]=KEEP_NOT;
  if (ksa_dssp>=0) display[ksa_dssp]=KEEP_NOT;
  if (kss_pred>=0) display[kss_pred]=KEEP_NOT;
  if (kss_conf>=0) display[kss_conf]=KEEP_NOT;
  for (seqid=imin(10,max_seqid); n_display<N && seqid<=max_seqid; seqid++)
    {
      for (int k=0; k<N_in; k++) dummy[k]=display[k];
      n_display = Filter2(dummy,coverage,qid,qsc,20,seqid,0);
      // printf("Seqid=%3i n_display=%4i\n",seqid,n_display);
    }
  if (n_display>N)
    {
      for (int k=0; k<N_in; k++) dummy[k]=display[k];
      n_display = Filter2(dummy,coverage,qid,qsc,20,--(--seqid),0);
    }
  v=vtmp;
  for (int k=0; k<N_in; k++) display[k]=dummy[k];
  if (kss_dssp>=0) {display[kss_dssp]=KEEP_CONDITIONALLY; n_display++;}
  if (ksa_dssp>=0) {display[ksa_dssp]=KEEP_CONDITIONALLY; n_display++;}
  if (kss_pred>=0) {display[kss_pred]=KEEP_CONDITIONALLY; n_display++;}
  if (kss_conf>=0) {display[kss_conf]=KEEP_CONDITIONALLY; n_display++;}
  delete[] dummy; dummy = NULL;
  return n_display;
}

/////////////////////////////////////////////////////////////////////////////////////
// Remove sequences with seq.
// identity larger than seqid percent (remove the shorter of two) or coverage<cov_thr
/////////////////////////////////////////////////////////////////////////////////////
inline int Alignment::Filter(int max_seqid, int coverage, int qid, float qsc, int N)
{
  // Thin wrapper: delegates to Filter2() on this alignment's 'keep' array
  // with a fixed lower seqid threshold of 20.
  return Filter2(keep,coverage,qid,qsc,20,max_seqid,N);
}

/////////////////////////////////////////////////////////////////////////////
/*
 * @brief Select set of representative sequences in the multiple sequence alignment
 *
 * Filter criteria:
 *   Remove sequences with coverage of query less than "coverage" percent
 *   Remove sequences with sequence identity to query of less than "qid" percent
 *   If Ndiff==0, remove sequences with seq. identity larger than seqid2(=max_seqid) percent
 *   If Ndiff>0, remove sequences with minimum-sequence-identity filter of between seqid1
 *   and seqid2 (%), where the minimum seqid threshold is determined such that,
 *   in all column blocks of at least WMIN=25 residues, at least Ndiff sequences are left.
 *   This ensures that in multi-domain proteins sequences covering one domain are not
 *   removed completely because sequences covering other domains are more diverse.
 *
 * Allways the shorter of two compared sequences is removed (=> sort sequences by length first).
 * Please note: sequence identity of sequence x with y when filtering x is calculated as
 * number of residues in sequence x that are identical to an aligned residue in y / number of residues in x
 * Example: two sequences x and y are 100% identical in their overlapping region but one overlaps by 10% of its
 * length on the left and the other by 20% on the right. Then x has 10% seq.id with y and y has 20% seq.id. with x.
 */
//////////////////////////////////////////////////////////////////////////////
int Alignment::Filter2(char keep[], int coverage, int qid, float qsc, int seqid1, int seqid2, int Ndiff)
{
  // In the beginnning, keep[k] is 1 for all regular amino acid sequences and 0 for all others (ss_conf, ss_pred,...)
// In the end, keep[k] will be 1 for all regular representative sequences kept in the alignment, 0 for all others char* in=new(char[N_in+1]); // in[k]=1: seq k has been accepted; in[k]=0: seq k has not yet been accepted at current seqid char* inkk=new(char[N_in+1]); // inkk[k]=1 iff in[ksort[k]]=1 else 0; int* Nmax=new(int[L+2]); // position-dependent maximum-sequence-identity threshold for filtering /* MR1, used to be called idmax*/ int* idmaxwin=new(int[L+2]); // minimum value of idmax[i-WFIL,i+WFIL] int* seqid_prev=new(int[N_in+1]); // maximum-sequence-identity threshold used in previous round of filtering (with lower seqid) int* N=new(int[L+2]); // N[i] number of already accepted sequences at position i const int WFIL=25; // see previous line int diffNmax=Ndiff; // current maximum difference of Nmax[i] and Ndiff /* MR1 */ int diffNmax_prev=0; // previous maximum difference of Nmax[i] and Ndiff /* MR1 */ int seqid; // current maximum value for the position-dependent maximum-sequence-identity thresholds in idmax[] int seqid_step=0; // previous increment of seqid /* MR1 */ float diff_min_frac; // minimum fraction of differing positions between sequence j and k needed to accept sequence k float qdiff_max_frac=0.9999-0.01*qid; // maximum allowable number of residues different from query sequence int diff; // number of differing positions between sequences j and k (counted so far) int diff_suff; // number of differing positions between sequences j and k that would be sufficient int qdiff_max; // maximum number of residues required to be different from query int cov_kj; // upper limit of number of positions where both sequence k and j have a residue int first_kj; // first non-gap position in sequence j AND k int last_kj; // last non-gap position in sequence j AND k int kk, jj; // indices for sequence from 1 to N_in int k, j; // kk=ksort[k], jj=ksort[j] int i; // counts residues int n; // number of sequences accepted so far // Initialize in[k] for (n=k=0; k<N_in; k++) 
if (keep[k]==KEEP_ALWAYS) {in[k]=2/*KEEP_ALWAYS??*/; n++;} else in[k]=0; // Determine first[k], last[k]? if (first==NULL) { first=new(int[N_in]);// first non-gap position in sequence k last =new(int[N_in]);// last non-gap position in sequence k for (k=0; k<N_in; k++) // do this for ALL sequences, not only those with in[k]==1 (since in[k] may be display[k]) { for (i=1; i<=L; i++) if (X[k][i]<NAA) break; first[k]=i; for (i=L; i>=1; i--) if (X[k][i]<NAA) break; last[k]=i; } } // Determine number of residues nres[k]? if ( (nres==NULL) || (sizeof(nres)<N_in*sizeof(int)) ) { nres=new(int[N_in]); for (k=0; k<N_in; k++) // do this for ALL sequences, not only those with in[k]==1 (since in[k] may be display[k]) { int nr=0; for (i=first[k]; i<=last[k]; i++) if (X[k][i]<NAA) nr++; nres[k]=nr; // printf("%20.20s nres=%3i first=%3i last=%3i\n",sname[k],nr,first[k],last[k]); } } // Sort sequences according to length; afterwards, nres[ksort[kk]] is sorted by size if (ksort==NULL) { ksort=new(int[N_in]); // never reuse alignment object for new alignment with more sequences for (k=0; k<N_in; k++) ksort[k]=k; QSortInt(nres,ksort,kfirst+1,N_in-1,-1); //Sort sequences after kfirst (query) in descending order } for (kk=0; kk<N_in; kk++) inkk[kk]=in[ksort[kk]]; // Initialize N[i], idmax[i], idprev[i] for (i=1; i<first[kfirst]; i++) N[i]=0; for (i=first[kfirst]; i<=last[kfirst]; i++) N[i]=1; for (i=last[kfirst]+1; i<=L; i++) N[i]=0; //for (i=1; i<=L; i++) {idmax[i]=seqid1; idmaxwin[i]=-1;} for (i=1; i<=L; ++i) {Nmax[i]=0; idmaxwin[i]=-1;} /* MR1 */ for (k=0; k<N_in; k++) seqid_prev[k]=-1; if (Ndiff<=0 || Ndiff>=N_in) {seqid1=seqid2; Ndiff=N_in; diffNmax=Ndiff;} // Check coverage and sim-to-query criteria for each sequence k for (k=0; k<N_in; k++) { if (keep[k]==KEEP_NOT || keep[k]==KEEP_ALWAYS) continue; // seq k not regular sequence OR is marked sequence if (100*nres[k]<coverage*L) {keep[k]=KEEP_NOT; continue;} // coverage too low? 
=> reject once and for all float qsc_sum=0.0; // Check if score-per-column with query is at least qsc if (qsc>-10) { float qsc_min = qsc*nres[k]; // minimum total score of seq k with query int gapq=0, gapk=0; // number of consecutive gaps in query or k'th sequence at position i for (int i=first[k]; i<=last[k]; i++) { if (X[k][i]<20) { gapk=0; if (X[kfirst][i]<20) { gapq=0; qsc_sum += S[(int)X[kfirst][i]][(int)X[k][i]]; } else if (gapq++) qsc_sum-=PLTY_GAPEXTD; else qsc_sum-=PLTY_GAPOPEN; } else if (X[kfirst][i]<20) { gapq=0; if (gapk++) qsc_sum-=PLTY_GAPEXTD; else qsc_sum-=PLTY_GAPOPEN; } } // printf("k=%3i qsc=%6.2f\n",k,qsc_sum); if (qsc_sum<qsc_min) {keep[k]=KEEP_NOT; continue;} // too different from query? => reject once and for all } //Check if sequence similarity with query at least qid? if (qdiff_max_frac<0.999) { qdiff_max=int(qdiff_max_frac*nres[k]+0.9999); // printf("k=%-4i nres=%-4i qdiff_max=%-4i first=%-4i last=%-4i",k,nres[k],qdiff_max,first[k],last[k]); diff=0; for (int i=first[k]; i<=last[k]; i++) // enough different residues to reject based on minimum qid with query? => break if (X[k][i]<20 && X[k][i]!=X[kfirst][i] && ++diff>=qdiff_max) break; // printf(" diff=%4i\n",diff); if (diff>=qdiff_max) {keep[k]=KEEP_NOT; continue;} // too different from query? => reject once and for all } // printf(" qsc=%6.2f qid=%6.2f \n",qsc_sum/nres[k],100.0*(1.0-(float)(diff)/nres[k])); } if (seqid1>seqid2) { for (n=k=0; k<N_in; k++) if (keep[k]>KEEP_NOT) n++; return n; } // Successively increment idmax[i] at positons where N[i]<Ndiff //for (seqid=seqid1; seqid<=seqid2; seqid+=1+(seqid>=50)) /* MR1 */ seqid=seqid1; while (seqid<=seqid2) { /* // Update idmax[i] for (i=1; i<=L; i++) if (N[i]<Ndiff) idmax[i]=seqid; // Update idmaxwin[i] as minimum of idmax[i-WFIL,i+WFIL]. 
If idmaxwin[] has not changed then stop char stop=1; for (i=1; i<=L; i++) { int idmax_min=seqid2; for (j=imax(1,imin(L-2*WFIL+1,i-WFIL)); j<=imin(L,imax(2*WFIL,i+WFIL)); j++) if (idmax[j]<idmax_min) idmax_min=idmax[j]; if (idmax_min>idmaxwin[i]) stop=0; // idmaxwin[i] has changed => do not stop idmaxwin[i]=idmax_min; } */ char stop=1; // Update Nmax[i] diffNmax_prev = diffNmax; diffNmax = 0; for (i=1; i<=L; ++i) { int max=0; for (j=imax(1,imin(L-2*WFIL+1,i-WFIL)); j<=imin(L,imax(2*WFIL,i+WFIL)); ++j) if (N[j]>max) max=N[j]; if (Nmax[i]<max) Nmax[i]=max; if (Nmax[i]<Ndiff) { stop=0; idmaxwin[i]=seqid; if (diffNmax<Ndiff-Nmax[i]) diffNmax=Ndiff-Nmax[i]; } } //printf("seqid=%3i diffNmax_prev= %-4i diffNmax= %-4i n=%-5i N_in-N_ss=%-5i\n",seqid,diffNmax_prev,diffNmax,n,N_in-N_ss); if (stop) break; // // DEBUG // printf("idmax "); // for (i=1; i<=L; i++) printf("%2i ",idmax[i]); // printf("\n"); // printf("idmaxwin "); // for (i=1; i<=L; i++) printf("%2i ",idmaxwin[i]); // printf("\n"); // printf("N[i] "); // for (i=1; i<=L; i++) printf("%2i ",N[i]); // printf("\n"); // Loop over all candidate sequences kk (-> k) for (kk=0; kk<N_in; kk++) { if (inkk[kk]) continue; // seq k already accepted k=ksort[kk]; if (!keep[k]) continue; // seq k is not regular aa sequence or already suppressed by coverage or qid criterion if (keep[k]==KEEP_ALWAYS) {inkk[kk]=2; continue;} // accept all marked sequences (no n++, since this has been done already) // Calculate max-seq-id threshold seqidk for sequence k (as maximum over idmaxwin[i]) if (seqid>=100) {in[k]=inkk[kk]=1; n++; continue;} float seqidk=seqid1; for (i=first[k]; i<=last[k]; i++) if (idmaxwin[i]>seqidk) seqidk=idmaxwin[i]; if (seqid==seqid_prev[k]) continue; // sequence has already been rejected at this seqid threshold => reject this time seqid_prev[k]=seqid; diff_min_frac =0.9999-0.01*seqidk; // min fraction of differing positions between sequence j and k needed to accept sequence k // Loop over already accepted sequences for 
(jj=0; jj<kk; jj++) { if (!inkk[jj]) continue; j=ksort[jj]; first_kj=imax(first[k],first[j]); last_kj =imin(last[k],last[j]); cov_kj = last_kj-first_kj+1; diff_suff=int(diff_min_frac*imin(nres[k],cov_kj)+0.999); // nres[j]>nres[k] anyway because of sorting /* MR1 0.999 */ diff=0; for (int i=first_kj; i<=last_kj; i++) { // enough different residues to accept? => break if (X[k][i]>=NAA || X[j][i]>=NAA) cov_kj--; else if (X[k][i]!=X[j][i] && ++diff>=diff_suff) break; // accept (k,j) } // // DEBUG // printf("%20.20s with %20.20s: diff=%i diff_min_frac*cov_kj=%f diff_suff=%i nres=%i cov_kj=%i\n",sname[k],sname[j],diff,diff_min_frac*cov_kj,diff_suff,nres[k],cov_kj); // printf("%s\n%s\n\n",seq[k],seq[j]); //if (float(diff)<fmin(diff_min_frac*cov_kj,diff_suff)) break; //similarity > acceptace threshold? Reject! /* MR1 */ if (diff<diff_suff && float(diff)<=diff_min_frac*cov_kj) break; //dissimilarity < acceptace threshold? Reject! /* MR1 */ } if (jj>=kk) // did loop reach end? => accept k. Otherwise reject k (the shorter of the two) { in[k]=inkk[kk]=1; n++; for (i=first[k]; i<=last[k]; i++) N[i]++; // update number of sequences at position i // printf("%i %20.20s accepted\n",k,sname[k]); } // else // { // printf("%20.20s rejected: too similar with seq %20.20s diff=%i diff_min_frac*cov_kj=%f diff_suff=%i nres=%i cov_kj=%i\n",sname[k],sname[j],diff,diff_min_frac*cov_kj,diff_suff,nres[k],cov_kj); // printf("%s\n%s\n\n",seq[k],seq[j]); // } } // End Loop over all candidate sequences kk // // DEBUG // printf("\n"); // printf("seqid_prev[k]= \n"); // for (k=0; k<N_in; k++) printf("%2i ",seqid_prev[k]); // printf("\n"); // Increment seqid /* MR1 */ seqid_step = imax(1,imin(5,diffNmax/(diffNmax_prev-diffNmax+1)*seqid_step/2)); seqid += seqid_step; } // End Loop over seqid if (v>=2) { printf("%i out of %i sequences passed filter (",n,N_in-N_ss); if (par.coverage) printf("%i%% min coverage, ",coverage); if (qid) printf("%i%% min sequence identity to query, ",qid); if (qsc>-10) 
printf("%.2f bits min score per column to query, ",qsc); if (Ndiff<N_in && Ndiff>0) printf("up to %i%% position-dependent max pairwise sequence identity)\n",seqid); else printf("%i%% max pairwise sequence identity)\n",seqid1); } for (k=0; k<N_in; k++) keep[k]=in[k]; delete[] in; in = NULL; delete[] inkk; inkk = NULL; //delete[] idmax; idmax = NULL; delete[] Nmax; /* MR1 */ delete[] idmaxwin; idmaxwin = NULL; delete[] seqid_prev; seqid_prev = NULL; delete[] N; N = NULL; #if 0 printf("%s:%s:%d: sequences accepted = %d/%d\n", __FUNCTION__, __FILE__, __LINE__, n, N_in-N_ss); #endif return n; } /* MR1: the Alignment::HomologyFilter is no longer needed in hhalign-stand-alone */ ///////////////////////////////////////////////////////////////////////////// /** * @brief Filter for min score per column coresc with core query profile, * defined by coverage_core and qsc_core */ ///////////////////////////////////////////////////////////////////////////// int Alignment::HomologyFilter(int coverage_core, float qsc_core, float coresc) { const int seqid_core=90; //maximum sequence identity in core alignment const int qid_core=0; const int Ndiff_core=0; int n; HMM qcore; char* coreseq=new(char[N_in]); // coreseq[k]=1 if sequence belongs to core of alignment (i.e. it is very similar to query) for (int k=0; k<N_in; k++) coreseq[k]=keep[k]; // Copy keep[] into coreseq[] // Remove sequences with seq. 
    // identity larger than seqid percent (remove the shorter of two)
    int v1=v;
    v=1; // temporarily lower verbosity for the inner Filter2 call
    n = Filter2(coreseq,coverage_core,qid_core,qsc_core,seqid_core,seqid_core,Ndiff_core);
    v=v1;

    if (v>=2)
    {
        printf("%i out of %i core alignment sequences passed filter (",n,N_in-N_ss);
        if (par.coverage_core)
            printf("%i%% min coverage, ",coverage_core);
        if (qid_core)
            printf("%i%% min sequence identity to query, ",qid_core);
        if (qsc_core>-10)
            printf("%.2f bits min score per column to query, ",qsc_core);
        printf("%i%% max pairwise sequence identity)\n",seqid_core);
    }

    // Calculate bare AA frequencies and transition probabilities -> qcore.f[i][a], qcore.tr[i][a]
    FrequenciesAndTransitions(qcore,coreseq);

    // Add transition pseudocounts to query -> q.p[i][a] (gapd=1.0, gape=0.333, gapf=gapg=1.0, gaph=gapi=1.0, gapb=1.0
    qcore.AddTransitionPseudocounts(1.0,0.333,1.0,1.0,1.0,1.0,1.0);

    // Generate an amino acid frequency matrix from f[i][a] with full pseudocount admixture (tau=1) -> g[i][a]
    qcore.PreparePseudocounts();

    // Add amino acid pseudocounts to query: qcore.p[i][a] = (1-tau)*f[i][a] + tau*g[i][a]
    // NOTE(review): the trailing comment below does not match the actual arguments
    // (call passes 2, 1.5, 2.0, 1.0) -- confirm which set of values is intended.
    qcore.AddAminoAcidPseudocounts(2,1.5,2.0,1.0); // pcm=2, pca=1.0, pcb=2.5, pcc=1.0

    // Filter out all sequences below min score per column with qcore
    n=FilterWithCoreHMM(keep, coresc, qcore);

    if (v>=2) cout<<n<<" out of "<<N_in-N_ss<<" sequences filtered by minimum score-per-column threshold of "<<qsc_core<<"\n";
    delete[] coreseq; coreseq = NULL;
    return n;
}

/////////////////////////////////////////////////////////////////////////////////////
/**
 * @brief Filter out all sequences below a minimum score per column with profile qcore
 *
 * @param in     per-sequence accept flags; entries are cleared (set 0) for rejected sequences
 * @param coresc minimum score per column (bits) required against qcore
 * @param qcore  core HMM to score against
 * @return number of sequences that passed the filter
 */
int Alignment::FilterWithCoreHMM(char in[], float coresc, HMM& qcore)
{
    int k;   // count sequences in alignment
    int i;   // column in query alignment
    int a;   // amino acid (0..19)
    int n=1; // number of sequences that passed filter
    float** logodds=new(float*[L+1]); // log-odds ratios for HMM qcore
    char gap; // 1: previous state in seq k was a gap  0: previous state in seq k was an amino acid
    float score; // score of sequence k aligned with qcore

    for (i=1; i<=L; i++) logodds[i]=new(float[21]);

    // Determine first[k], last[k]?
    if (first==NULL)
    {
        first=new(int[N_in]); // first non-gap position in sequence k
        last =new(int[N_in]); // last non-gap position in sequence k
        for (k=0; k<N_in; k++) // do this for ALL sequences, not only those with in[k]==1 (since in[k] may be display[k])
        {
            for (i=1; i<=L; i++) if (X[k][i]<NAA) break;
            first[k]=i;
            for (i=L; i>=1; i--) if (X[k][i]<NAA) break;
            last[k]=i;
        }
    }
    // Determine number of residues nres[k]?
    if (nres==NULL)
    {
        nres=new(int[N_in]);
        for (k=0; k<N_in; k++) // do this for ALL sequences, not only those with in[k]==1 (since in[k] may be display[k])
        {
            int nr=0;
            for (i=first[k]; i<=last[k]; i++)
                if (X[k][i]<NAA) nr++;
            nres[k]=nr;
            // printf("%20.20s nres=%3i first=%3i last=%3i\n",sname[k],nr,f,l);
        }
    }

    // Precalculate the log-odds for qcore
    for (i=1; i<=L; i++)
    {
        for (a=0; a<NAA; a++)
            logodds[i][a]=fast_log2(qcore.p[i][a]/pb[a]);
        logodds[i][ANY]=-0.5; // half a bit penalty for X
        // printf("   A    R    N    D    C    Q    E    G    H    I    L    K    M    F    P    S    T    W    Y    V\n");
        // printf("%6i ",i);
        // for (a=0; a<20; ++a) fprintf(stdout,"%5.1f ",100*qcore.f[i][a]);
        // printf("\n");
        // printf("       ");
        // for (a=0; a<20; ++a) fprintf(stdout,"%5.1f ",100*qcore.g[i][a]);
        // printf("\n");
        // printf("       ");
        // for (a=0; a<20; ++a) fprintf(stdout,"%5.1f ",100*qcore.p[i][a]);
        // printf("\n");
        // printf("       ");
        // for (a=0; a<20; ++a) fprintf(stdout,"%5.1f ",100*pb[a]);
        // printf("\n");
        // printf("       ");
        // for (a=0; a<20; ++a) fprintf(stdout,"%5.2f ",fast_log2(qcore.p[i][a]/pb[a]));
        // printf("\n");
    }

    // Main loop: test all sequences k
    for (k=kfirst+1; k<N_in; k++)
    {
        if (!in[k]) continue; // if in[k]==0 sequence k will be suppressed directly
        float score_M=0.0;
        float score_prev=0.0;
        // Calculate score of sequence k with core HMM
        score=0;
        gap=0;
        for (i=first[k]; i<=last[k]; i++)
        {
            score_M=0.0;
            if (X[k][i]<=ANY) // current state is Match
            {
                score_M=logodds[i][ (int)X[k][i]];
                score+=logodds[i][ (int)X[k][i]];
                // transition into Match: from Delete if previous state was a gap, else from Match
                if (gap) score+=qcore.tr[i][D2M]; else score+=qcore.tr[i][M2M];
                gap=0;
            }
            else if (X[k][i]==GAP) // current state is Delete (ignore ENDGAPs)
            {
                if (gap) score+=qcore.tr[i][D2D]; else score+=qcore.tr[i][M2D];
                gap=1;
            }
            // Insertions after column i: one M2I, (count-1) I2I extensions, one I2M
            if (I[k][i]) score+=qcore.tr[i][M2I]+(I[k][i]-1)*qcore.tr[i][I2I]+qcore.tr[i][I2M];
            // if (k==2) printf("i=%3i %c:%c score_M=%6.2f score=%6.2f score_sum=%6.2f \n",i,i2aa(X[kfirst][i]),i2aa(X[k][i]),score_M,score-score_prev,score);
            score_prev=score;
        }
        // NOTE(review): unconditional debug print below runs regardless of verbosity
        // level v -- looks left in by accident; confirm before removing.
        printf("k=%3i score=%6.2f\n",k,score);
        if (score<nres[k]*coresc) in[k]=0; else n++; // reject sequence k?
    }

    for (i=1; i<=L; i++){
        delete[] logodds[i]; logodds[i] = NULL;
    }
    delete[] logodds; logodds = NULL;
    return n;
}

/* MR1 */
#if 0
/////////////////////////////////////////////////////////////////////////////////////
/**
 * @brief Filter alignment to given diversity/Neff
 *        (secant-method search for the qsc threshold that yields par.Neff)
 */
bool Alignment::FilterNeff()
{
    int v1=v;
    v=v1-1;
    const float TOLX=0.001; // convergence tolerance on the qsc threshold x
    const float TOLY=0.02;  // convergence tolerance on the resulting Neff y
    char dummy[N_in+1];
    for (int k=0; k<N_in; ++k) dummy[k]=keep[k];
    float x=0.0,y=0.0;
    float x0=-1.0;
    float x1=+2.0;
    float y0=filter_by_qsc(x0,dummy);
    float y1=filter_by_qsc(x1,dummy);
    int i=2;
    while (y0-par.Neff>0 && par.Neff-y1>0)
    {
        x = x0 + (par.Neff-y0)*(x1-x0)/(y1-y0); // linear interpolation between (x0,y0) and (x1,y1)
        y = filter_by_qsc(x,dummy);
        if (v>=2) printf(" %3i  x0=%6.3f -> %6.3f   x=%6.3f -> %6.3f   x1=%6.3f -> %6.3f \n",++i,x0,y0,x,y,x1,y1);
        if (y>par.Neff) {x0=x; y0=y;} else {x1=x; y1=y;} // keep the bracketing interval
        if (fabs(par.Neff-y)<TOLY || x1-x0<TOLX) break;
    }
    v=v1;
    if (y0>=par.Neff && y1<=par.Neff)
    {
        // Write filtered alignment WITH insert states (lower case) to alignment file
        if (v>=2) printf("Found Neff=%6.3f at filter threshold qsc=%6.3f\n",y,x);
        return true;
    }
    else if (v>=1)
        printf("Diversity of unfiltered alignment %.2f is below target diversity %.2f. No alignment written\n",y0,par.Neff);
    return false;
}

// Apply Filter2 at score-per-column threshold qsc and return the resulting alignment diversity (Neff)
float Alignment::filter_by_qsc(float qsc, char* dummy)
{
    HMM q;
    for (int k=0; k<N_in; ++k) keep[k]=dummy[k];
    Filter2(keep,par.coverage,0,qsc,par.max_seqid+1,par.max_seqid,0);
    FrequenciesAndTransitions(q);
    // printf("qsc=%4.1f  N_filtered=%-3i  Neff=%6.3f\n",qsc,n,q.Neff_HMM);
    return q.Neff_HMM;
}
#endif

/////////////////////////////////////////////////////////////////////////////////////
/**
 * @brief Calculate AA frequencies q.p[i][a] and transition probabilities q.tr[i][a] from alignment
 *
 * @param q  target HMM to fill
 * @param in per-sequence inclusion flags; when NULL, the member array keep[] is used
 */
void Alignment::FrequenciesAndTransitions(HMM& q, char* in)
{
    int k;          // index of sequence
    int i;          // position in alignment
    int a;          // amino acid (0..19)
    int ni[NAA+3];  // number of times amino acid a occurs at position i
    int naa;        // number of different amino acids

    if (v>=3) cout<<"Calculating position-dependent weights on subalignments\n";

    if (in==NULL) in=keep; // what's this good for?

    if (N_filtered>1)
    {
        for (k=0; k<N_in; k++) wg[k]=0.0; // initialized wg[k]
        // Calculate global weights
        for (i=1; i<=L; i++) // for all positions i in alignment
        {
            for (a=0; a<20; a++) ni[a]=0;
            for (k=0; k<N_in; k++) if (in[k]) ni[ (int)X[k][i]]++;
            naa=0;
            for (a=0; a<20; a++) if(ni[a]) naa++;
            if (!naa) naa=1; //naa=0 when column consists of only gaps and Xs (=ANY)
            for (k=0; k<N_in; k++)
                if (in[k] && X[k][i]<20)
                    wg[k] += 1.0/float(ni[ (int)X[k][i]]*naa*(nres[k]+30));
            // ensure that each residue of a short sequence contributes as much as a residue of a long sequence:
            // contribution is proportional to one over sequence length nres[k] plus 30.
        }
        NormalizeTo1(wg,N_in);

        // Do pos-specific sequence weighting and calculate amino acid frequencies and transitions
        for (k=0; k<N_in; k++) X[k][0]=ENDGAP; // make sure that sequences ENTER subalignment j for j=1
        for (k=0; k<N_in; k++) X[k][L+1]=ENDGAP; // does it have an influence?
#ifdef HAVE_OPENMP
        if(par.wg != 1)
        {
#pragma omp parallel sections
            {
#pragma omp section
                Amino_acid_frequencies_and_transitions_from_M_state(q,in); // use subalignments of seqs with residue in i
#pragma omp section
                Transitions_from_I_state(q,in); // use subalignments of seqs with insert in i
#pragma omp section
                Transitions_from_D_state(q,in); // use subalignments of seqs with delete in i. Must be last of these three calls if par.wg==1!
            }
        }
        else
        {
            // par.wg==1: D-state call depends on results of the other two, so it runs after the sections
#pragma omp parallel sections
            {
#pragma omp section
                Amino_acid_frequencies_and_transitions_from_M_state(q,in); // use subalignments of seqs with residue in i
#pragma omp section
                Transitions_from_I_state(q,in); // use subalignments of seqs with insert in i
            }
            Transitions_from_D_state(q,in); // use subalignments of seqs with delete in i. Must be last of these three calls if par.wg==1!
        }
#else
        Amino_acid_frequencies_and_transitions_from_M_state(q,in);
        Transitions_from_I_state(q,in);
        Transitions_from_D_state(q,in);
#endif
    }
    else // N_filtered==1
    {
        // Single-sequence profile: frequencies come directly from the query sequence
        X[kfirst][0]=X[kfirst][L+1]=ANY; // (to avoid anallowed access within loop)
        q.Neff_HMM=1.0f;
        for (i=0; i<=L+1; i++) // for all positions i in alignment
        {
            q.Neff_M[i]=1.0f;
            q.Neff_I[i]=q.Neff_D[i]=0.0f;
            for (a=0; a<20; a++) q.f[i][a]=0.0;
            /* this is the crucial change that makes terminal-X work */
            //q.f[i][ (int)(X[kfirst][i]) ] = 1.0; /* MR1 */
            if (X[kfirst][i] < ANY) /* MR1 */
                q.f[i][(unsigned int) X[kfirst][i] ] = 1.0;
            else
                for (a=0; a<20; ++a) q.f[i][a]=pb[a]; // X (ANY): fall back to background frequencies
            q.tr[i][M2M]=0;
            q.tr[i][M2I]=-100000.0;
            q.tr[i][M2D]=-100000.0;
            q.tr[i][I2M]=-100000.0;
            q.tr[i][I2I]=-100000.0;
            q.tr[i][D2M]=-100000.0;
            q.tr[i][D2D]=-100000.0;
        }
        q.tr[0][I2M]=0;
        q.tr[L][I2M]=0;
        q.tr[0][D2M]=0;
        q.Neff_M[0]=q.Neff_I[0]=q.Neff_D[0]=99.999; // Neff_av[0] is used for calculation of transition pseudocounts for the start state
    }

    if (v>=3)
    {
        printf("\nMatches:\n");
        printf("col Neff nseqs\n");
        for (i=1; i<=imin(L,100); i++)
            printf("%3i %5.2f %3i\n",i,q.Neff_M[i],nseqs[i]);
        printf("\nInserts:\n");
        printf("col Neff nseqs\n");
        for (i=1; i<=imin(L,100); i++)
            printf("%3i %5.2f %3i\n",i,q.Neff_I[i],nseqs[i]);
        printf("\nDeletes:\n");
        printf("col Neff nseqs\n");
        for (i=1; i<=imin(L,100); i++)
            printf("%3i %5.2f %3i\n",i,q.Neff_D[i],nseqs[i]);
    }

    // Copy column information into HMM q
    q.L=L;
    q.N_in=N_in;
    q.N_filtered=N_filtered;
    for (i=1; i<=L; i++) q.l[i]=l[i];

    // Set names in HMM q
    if (strlen(q.name)==0) strcpy(q.name,name);
    if (strlen(q.longname)==0) strcpy(q.longname,longname);
    if (strlen(q.fam)==0) strcpy(q.fam,fam);
    ScopID(q.cl,q.fold,q.sfam,q.fam); // derive superfamily, fold and class code from family name
    strcpy(q.file,file); // Store basename of alignment file name in q.file

    // Copy sequences to be displayed into HMM
    q.nss_dssp=q.nsa_dssp=q.nss_pred=q.nss_conf=q.nfirst=-1;
    int n=0;
    if (kss_dssp>=0) q.nss_dssp=n++; // copy dssp sequence?
    if (ksa_dssp>=0) q.nsa_dssp=n++; // copy dssp sequence?
    if (kss_pred>=0) q.nss_pred=n++; // copy psipred sequence?
    if (kss_conf>=0) q.nss_conf=n++; // copy confidence value sequence?

    // Calculate consensus sequence?
    if (par.showcons || par.cons)
    {
        float maxw;
        int maxa;
        if (par.showcons)
        {
            // Reserve space for consensus/conservation sequence as Q-T alignment mark-up
            q.ncons=n++;
            q.sname[q.ncons]=new(char[10]);
            if (!q.sname[q.ncons]) {MemoryError("array of names for displayed sequences");}
            strcpy(q.sname[q.ncons],"Consensus");
            q.seq[q.ncons]=new(char[L+2]);
            if (!q.seq[q.ncons]) {MemoryError("array of names for displayed sequences");}
        }
        if (par.cons)
        {
            // Reserve space for consensus sequence as first sequence in alignment
            q.nfirst=n++;
            kfirst=-1;
            q.sname[q.nfirst]=new(char[strlen(name)+11]);
            if (!q.sname[q.nfirst]) {MemoryError("array of names for displayed sequences");}
            strcpy(q.sname[q.nfirst],name);
            strcat(q.sname[q.nfirst],"_consensus");
            q.seq[q.nfirst]=new(char[L+2]);
            if (!q.seq[q.nfirst]) {MemoryError("array of names for displayed sequences");}
        }
        // Calculate consensus amino acids using similarity matrix
        for (i=1; i<=L; i++)
        {
            // maxa = amino acid with largest excess frequency over background
            maxw=0.0;
            maxa=0;
            for (a=0; a<20; a++)
                if (q.f[i][a]-pb[a]>maxw) {maxw = q.f[i][a]-pb[a]; maxa = a;}

            if (par.showcons)
            {
                // conservation weight: similarity-weighted frequency, damped by gappiness
                maxw =0.0;
                for (int b=0; b<20; b++)
                    maxw += q.f[i][b]*Sim[maxa][b]*Sim[maxa][b];
                maxw *= q.Neff_M[i]/(q.Neff_HMM+1); // columns with many gaps don't get consensus symbol
                if (maxw>0.6) q.seq[q.ncons][i] = uprchr(i2aa(maxa));
                else if (maxw>0.4) q.seq[q.ncons][i] = lwrchr(i2aa(maxa));
                else q.seq[q.ncons][i] = 'x';
            }
            if (par.cons) q.seq[q.nfirst][i] = uprchr(i2aa(maxa));
        }
        if (par.showcons) { q.seq[q.ncons][0]='-'; q.seq[q.ncons][L+1]='\0'; }
        if (par.cons) { q.seq[q.nfirst][0]='-'; q.seq[q.nfirst][L+1]='\0'; }
    }

    // Copy sequences to be displayed from alignment to HMM
    for (k=0; k<N_in; k++)
    {
        int nn;
        if (display[k])
        {
            if (0 && (n>=MAXSEQDIS))
            {
                /* FIXME: the test was if(n>=MAXSEQDIS),
                   this test was necessary because alignment memory was static,
                   now it should be dynamic, and should always have the right size,
                   there are at least number-of-sequences plus a 'bit' more
                   however, I do not know what that 'bit' is likely to be (in the future).
                   at the moment it is 1 for the consnseus and 1 for structure,
                   but this might change (FS) */
                if (par.mark) cerr<<"WARNING: maximum number "<<MAXSEQDIS<<" of sequences for display of alignment exceeded\n";
                break;
            }
            if (k==kss_dssp) nn=q.nss_dssp; // copy dssp sequence to nss_dssp
            else if (k==ksa_dssp) nn=q.nsa_dssp;
            else if (k==kss_pred) nn=q.nss_pred;
            else if (k==kss_conf) nn=q.nss_conf;
            else if (k==kfirst) nn=q.nfirst=n++;
            else nn=n++;
            // strcut(sname[k]," "); // delete rest of name line beginning with two spaces "  " // Why this?? Problem for pdb seqs without chain
            q.sname[nn]=new(char[strlen(sname[k])+1]);
            if (!q.sname[nn]) {MemoryError("array of names for displayed sequences");}
            strcpy(q.sname[nn],sname[k]);
            q.seq[nn]=new(char[strlen(seq[k])+1]);
            if (!q.seq[nn]) {MemoryError("array of names for displayed sequences");}
            strcpy(q.seq[nn],seq[k]);
        }
    }
    q.n_display=n; // how many sequences to be displayed in alignments?

    // Copy secondary structure information into HMM
    if (kss_dssp>=0)
        for (i=1; i<=L; i++) q.ss_dssp[i]=X[kss_dssp][i];
    if (ksa_dssp>=0)
        for (i=1; i<=L; i++) q.sa_dssp[i]=X[ksa_dssp][i];
    if (kss_pred>=0)
    {
        for (i=1; i<=L; i++) q.ss_pred[i]=X[kss_pred][i];
        if (kss_conf>=0)
            for (i=1; i<=L; i++) q.ss_conf[i]=X[kss_conf][i];
        else
            for (i=1; i<=L; i++) q.ss_conf[i]=5; // no confidence track: use neutral value 5
    }

    q.lamda=0.0;
    q.mu=0.0;

    // Debug: print occurence of amino acids for each position i
    if (v>=2) printf("Effective number of sequences exp(entropy) = %-4.1f\n",q.Neff_HMM); //PRINT
    if (v>=3)
    {
        cout<<"\nMatr: ";
        for (a=0; a<20; a++) printf("%4.1f ",100*pb[a]);
        cout<<"\nAmino acid frequencies without pseudocounts:\n";
        cout<<"         A    R    N    D    C    Q    E    G    H    I    L    K    M    F    P    S    T    W    Y    V\n";
        for (i=1; i<=L; i++)
        {
            printf("%3i:  ",i);
            for (a=0; a<20; a++) printf("%4.0f ",100*q.f[i][a]);
            cout<<endl;
        }
        cout<<"\n";
        printf("\nListing transition probabilities without pseudocounts:\n");
        printf("   i    M->M   M->I   M->D   I->M   I->I   D->M   D->D  Neff_M Neff_I Neff_D\n");
        for (i=0; i<=L; i++)
        {
            printf("%4i  %6.3f %6.3f %6.3f ",i,pow(2.0,q.tr[i][M2M]),pow(2.0,q.tr[i][M2I]),pow(2.0,q.tr[i][M2D]));
            printf("%6.3f %6.3f ",pow(2.0,q.tr[i][I2M]),pow(2.0,q.tr[i][I2I]));
            printf("%6.3f %6.3f ",pow(2.0,q.tr[i][D2M]),pow(2.0,q.tr[i][D2D]));
            printf("%6.3f %6.3f %6.3f\n",q.Neff_M[i],q.Neff_I[i],q.Neff_D[i]);
        }
    }
    q.trans_lin=0;
    q.has_pseudocounts=false; /* MR1 */
    return;
}

/////////////////////////////////////////////////////////////////////////////////////
/*
 * FIXME: one of the most time consuming routines (according to gprof on r112)
 */
/**
 * @brief Calculate freqs q.f[i][a] and transitions q.tr[i][a] (a=MM,MI,MD) with pos-specific subalignments
 * Pos-specific weights are calculated like in "GetPositionSpecificWeights()"
 */
void Alignment::Amino_acid_frequencies_and_transitions_from_M_state(HMM& q, char* in)
{
    // Calculate position-dependent weights wi[k] for each i.
    // For calculation of weights in column i use sub-alignment
    // over sequences which have a *residue* in column i (no gap, no end gap)
    // and over columns where none of these sequences has an end gap.
    // This is done by updating the arrays n[j][a] at each step i-1->i while letting i run from 1 to L.
    // n[j][a] = number of occurences of amino acid a at column j of the subalignment,
    //   => only columns with n[j][ENDGAP]=0 are contained in the subalignment!
    // If no sequences enter or leave the subalignment at the step i-1 -> i (i.e. change=0)
    // then the old values wi[k], Neff[i-1], and ncol are used for the new position i.
    // Index a can be an amino acid (0-19), ANY=20, GAP=21, or ENDGAP=22
    int k;        // index of sequence
    int i,j;      // position in alignment
    int a;        // amino acid (0..19)
    int naa;      // number of different amino acids
    int** n;      // n[j][a] = number of seq's with some residue at column i AND a at position j
    //float wi[MAXSEQ]; // weight of sequence k in column i, calculated from subalignment i
    float *wi=NULL;     // weight of sequence k in column i, calculated from subalignment i
    //float Neff[MAXRES]; // diversity of subalignment i
    float *Neff = new(float[par.maxResLen]); // diversity of subalignment i
    int nseqi=0;  // number of sequences in subalignment i
    int ncol=0;   // number of columns j that contribute to Neff[i]
    char change;  // has the set of sequences in subalignment changed? 0:no 1:yes
    float fj[NAA+3]; // to calculate entropy
    float sum;

    wi = new(float[N_in+2]);

    // Global weights?
    if (par.wg==1)
        for (k=0; k<N_in; k++) wi[k]=wg[k];

    // Initialization
    q.Neff_HMM=0.0f;
    Neff[0]=0.0; // if the first column has no residues (i.e. change==0), Neff[i]=Neff[i-1]=Neff[0]
    n = new(int*[L+2]);
    for (j=1; j<=L; j++) n[j]=new(int[NAA+3]);
    for (j=1; j<=L; j++)
        for (a=0; a<NAA+3; a++) n[j][a]=0;

    //////////////////////////////////////////////////////////////////////////////////////////////
    // Main loop through alignment columns
    for (i=1; i<=L; i++) // Calculate wi[k] at position i as well as Neff[i]
    {
        if (par.wg==0)
        {
            change=0;
            // Check all sequences k and update n[j][a] and ri[j] if necessary
            for (k=0; k<N_in; k++)
            {
                if (!in[k]) continue;
                if (X[k][i-1]>=ANY && X[k][i]<ANY)
                {
                    // ... if sequence k was NOT included in i-1 and has to be included for column i
                    change=1;
                    nseqi++;
                    for (int j=1; j<=L; j++) n[j][ (int)X[k][j]]++;
                }
                else if (X[k][i-1]<ANY && X[k][i]>=ANY)
                {
                    // ... if sequence k WAS included in i-1 and has to be thrown out for column i
                    change=1;
                    nseqi--;
                    for (int j=1; j<=L; j++) n[j][ (int)X[k][j]]--;
                }
            } //end for (k)
            nseqs[i]=nseqi;

            // If subalignment changed: update weights wi[k] and Neff[i]
            if (change)
            {
                // Initialize weights and numbers of residues for subalignment i
                ncol=0;
                for (k=0; k<N_in; k++) wi[k]=1E-8; // for pathological alignments all wi[k] can get 0; /* MR1 */

                // sum wi[k] over all columns j and sequences k of subalignment
                for (j=1; j<=L; j++)
                {
                    // do at least a fraction MAXENDGAPFRAC of sequences in subalignment contain an end gap in j?
                    if (n[j][ENDGAP]>MAXENDGAPFRAC*nseqi) continue;
                    naa=0;
                    for (a=0; a<20; a++) if(n[j][a]) naa++;
                    if (naa==0) continue;
                    ncol++;
                    for (k=0; k<N_in; k++)
                    {
                        if (in[k] && X[k][i]<ANY && X[k][j]<ANY)
                        {
                            // if (!n[j][ (int)X[k][j]]) {fprintf(stderr,"Error: Mi=%i: n[%i][X[%i]]=0! (X[%i]=%i)\n",i,j,k,k,X[k][j]);}
                            wi[k]+=1.0/float(n[j][ (int)X[k][j] ]*naa);
                        }
                    }
                }

                // Check whether number of columns in subalignment is sufficient
                if (ncol<NCOLMIN)
                    // Take global weights
                    // (note: the else below binds to the inner if -- wi[k]=0.0 for sequences not in the subalignment)
                    for (k=0; k<N_in; k++)
                        if(in[k] && X[k][i]<ANY) wi[k]=wg[k]; else wi[k]=0.0;

                // Calculate Neff[i]
                Neff[i]=0.0;
                for (j=1; j<=L; j++)
                {
                    // do at least a fraction MAXENDGAPFRA of sequences in subalignment contain an end gap in j?
                    if (n[j][ENDGAP]>MAXENDGAPFRAC*nseqi) continue;
                    for (a=0; a<20; a++) fj[a]=0;
                    for (k=0; k<N_in; k++)
                        if (in[k] && X[k][i]<ANY && X[k][j]<ANY)
                            fj[ (int)X[k][j] ]+=wi[k];
                    NormalizeTo1(fj,NAA);
                    for (a=0; a<20; a++)
                        if (fj[a]>1E-10) Neff[i]-=fj[a]*fast_log2(fj[a]);
                }
                if (ncol>0) Neff[i]=pow(2.0,Neff[i]/ncol);
                else Neff[i]=1.0;
            }
            else //no update was necessary; copy values for i-1
            {
                Neff[i]=Neff[i-1];
            }
        }

        // Calculate amino acid frequencies q.f[i][a] from weights wi[k]
        for (a=0; a<20; a++) q.f[i][a]=0;
        for (k=0; k<N_in; k++)
            if (in[k]) q.f[i][ (int)X[k][i] ]+=wi[k];
        NormalizeTo1(q.f[i],NAA,pb);

        // Calculate transition probabilities from M state
        q.tr[i][M2M]=q.tr[i][M2D]=q.tr[i][M2I]=0.0;
        for (k=0; k<N_in; k++) //for all sequences
        {
            if (!in[k]) continue;
            //if input alignment is local ignore transitions from and to end gaps
            if (X[k][i]<ANY) //current state is M
            {
                if (I[k][i]) //next state is I
                    q.tr[i][M2I]+=wi[k];
                else if (X[k][i+1]<=ANY) //next state is M
                    q.tr[i][M2M]+=wi[k];
                else if (X[k][i+1]==GAP) //next state is D
                    q.tr[i][M2D]+=wi[k];
            }
        } // end for(k)
        // Normalize and take log (FLT_MIN guards against log2(0/0))
        sum = q.tr[i][M2M]+q.tr[i][M2I]+q.tr[i][M2D]+FLT_MIN;
        q.tr[i][M2M]=log2(q.tr[i][M2M]/sum);
        q.tr[i][M2I]=log2(q.tr[i][M2I]/sum);
        q.tr[i][M2D]=log2(q.tr[i][M2D]/sum);

        // for (k=0; k<N_in; k++) if (in[k]) w[k][i]=wi[k];
    } // DD TODO:fill in all the missing Neff values
    // end loop through alignment columns i
    //////////////////////////////////////////////////////////////////////////////////////////////

    delete[](wi); wi=NULL;
    // delete n[][]
    for (j=1; j<=L; j++){
        delete[](n[j]); (n[j]) = NULL;
    }
    delete[](n); (n) = NULL;

    q.tr[0][M2M]=0;
    q.tr[0][M2I]=-100000;
    q.tr[0][M2D]=-100000;
    q.tr[L][M2M]=0;
    q.tr[L][M2I]=-100000;
    q.tr[L][M2D]=-100000;
    q.Neff_M[0]=99.999; // Neff_av[0] is used for calculation of transition pseudocounts for the start state

    // Set emission probabilities of zero'th (begin) state and L+1st (end) state to background probabilities
    for (a=0; a<20; a++) q.f[0][a]=q.f[L+1][a]=pb[a];
    //
    // Assign Neff_M[i] and calculate average over alignment, Neff_M[0]
    if (par.wg==1)
    {
        // Global weights: Neff_HMM = average over columns of exp(column entropy)
        for (i=1; i<=L; i++)
        {
            float sum=0.0f;
            for (a=0; a<20; a++)
                if (q.f[i][a]>1E-10) sum -= q.f[i][a]*fast_log2(q.f[i][a]);
            q.Neff_HMM+=pow(2.0,sum);
        }
        q.Neff_HMM/=L;
        float Nlim=fmax(10.0,q.Neff_HMM+1.0); // limiting Neff
        float scale=log2((Nlim-q.Neff_HMM)/(Nlim-1.0)); // for calculating Neff for those seqs with inserts at specific pos
        for (i=1; i<=L; i++)
        {
            float w_M=-1.0/N_filtered;
            for (k=0; k<N_in; k++)
                if (in[k] && X[k][i]<=ANY) w_M+=wg[k];
            if (w_M<0) q.Neff_M[i]=1.0;
            else q.Neff_M[i] = Nlim - (Nlim-1.0)*fpow2(scale*w_M);
            // fprintf(stderr,"M  i=%3i  ncol=---  Neff_M=%5.2f  Nlim=%5.2f  w_M=%5.3f  Neff_M=%5.2f\n",i,q.Neff_HMM,Nlim,w_M,q.Neff_M[i]);
        }
    }
    else
    {
        // Position-specific weights: Neff values were computed per column in the main loop
        for (i=1; i<=L; i++)
        {
            q.Neff_HMM+=Neff[i];
            q.Neff_M[i]=Neff[i];
            if (q.Neff_M[i] == 0) { q.Neff_M[i] = 1; } /* MR1 */
        }
        q.Neff_HMM/=L;
    }

    delete[] Neff; Neff = NULL;
    return;
}
/* this is the end of Alignment::Amino_acid_frequencies_and_transitions_from_M_state() */

/////////////////////////////////////////////////////////////////////////////////////
/**
 * @brief Calculate transitions q.tr[i][a] (a=IM,II) with pos-specific subalignments
 *        NOTE(review): header comment previously said a=DM,DD, but this function
 *        fills the I-state transitions (see q.tr[i][I2M], q.tr[i][I2I] in the body).
 */
void Alignment::Transitions_from_I_state(HMM& q, char* in)
{
    // Calculate position-dependent weights wi[k] for each i.
    // For calculation of weights in column i use sub-alignment
    // over sequences which have a INSERT in column i
    // and over columns where none of these sequences has an end gap.
    // This is done by calculating the arrays n[j][a] and rj[j] at each step i-1->i while letting i run from 1 to L.
    // n[j][a] = number of occurences of amino acid a at column j of the subalignment,
    //   => only columns with n[j][ENDGAP]=0 are contained in the subalignment!
    // If no sequences enter or leave the subalignment at the step i-1 -> i (i.e. change=0)
    // then the old values wi[k], Neff[i-1], and ncol are used for the new position i.
// Index a can be an amino acid (0-19), ANY=20, GAP=21, or ENDGAP=22 int k; // index of sequence int i,j; // position in alignment int a; // amino acid (0..19) int naa; // number of different amino acids int** n; // n[j][a] = number of seq's with some residue at column i AND a at position j //float wi[MAXSEQ]; // weight of sequence k in column i, calculated from subalignment i float *wi = NULL; // weight of sequence k in column i, calculated from subalignment i //float Neff[MAXRES]; // diversity of subalignment i float *Neff = new(float[par.maxResLen]); // diversity of subalignment i int nseqi; // number of sequences in subalignment i int ncol; // number of columns j that contribute to Neff[i] float fj[NAA+3]; // to calculate entropy float sum; float Nlim=0.0; // only for global weights float scale=0.0; // only for global weights wi = new(float[N_in+2]); // Global weights? if (par.wg==1) { for (k=0; k<N_in; k++) wi[k]=wg[k]; Nlim=fmax(10.0,q.Neff_HMM+1.0); // limiting Neff scale=log2((Nlim-q.Neff_HMM)/(Nlim-1.0)); // for calculating Neff for those seqs with inserts at specific pos } // Initialization n = new(int*[L+2]); for (j=1; j<=L; j++) n[j]=new(int[NAA+3]); ////////////////////////////////////////////////////////////////////////////////////////////// // Main loop through alignment columns for (i=1; i<=L; i++) // Calculate wi[k] at position i as well as Neff[i] { if (par.wg==0) // local weights? { // Calculate n[j][a] and ri[j] nseqi=0; for (k=0; k<N_in; k++) { if (in[k] && I[k][i]>0) { if (nseqi==0) // Initialize only if inserts present! Otherwise O(L*L) even for single sequences! { // Initialization of n[j][a] for (j=1; j<=L; j++) for (a=0; a<NAA+3; a++) n[j][a]=0; } nseqi++; for (int j=1; j<=L; j++) n[j][ (int)X[k][j]]++; } } //end for (k) nseqs[i]=nseqi; // If there is no sequence in subalignment j ... if (nseqi==0) { ncol=0; Neff[i]=0.0; // effective number of sequence = 0! 
q.tr[i][I2M]=-100000; q.tr[i][I2I]=-100000; continue; } // update weights wi[k] and Neff[i] // if (1) { // Initialize weights and numbers of residues for subalignment i ncol=0; for (k=0; k<N_in; k++) wi[k]=0.0; // sum wi[k] over all columns j and sequences k of subalignment for (j=1; j<=L; j++) { if (n[j][ENDGAP]>MAXENDGAPFRAC*nseqi) continue; naa=0; for (a=0; a<20; a++) if(n[j][a]) naa++; if (naa==0) continue; ncol++; for (k=0; k<N_in; k++) { if (in[k] && I[k][i]>0 && X[k][j]<ANY) { if (!n[j][ (int)X[k][j]]) {fprintf(stderr,"Error: Ii=%i: n[%i][X[%i]]=0! (X[%i]=%i)\n",i,j,k,k,X[k][j]);} wi[k]+=1.0/float(n[j][ (int)X[k][j] ]*naa); } } } // Check whether number of columns in subalignment is sufficient if (ncol>=NCOLMIN) // Take global weights for (k=0; k<N_in; k++) if(in[k] && I[k][i]>0) wi[k]=wg[k]; else wi[k]=0.0; // Calculate Neff[i] Neff[i]=0.0; for (j=1; j<=L; j++) { if (n[j][ENDGAP]>MAXENDGAPFRAC*nseqi) continue; for (a=0; a<20; a++) fj[a]=0; for (k=0; k<N_in; k++) if (in[k] && I[k][i]>0 && X[k][j]<ANY) fj[ (int)X[k][j] ]+=wi[k]; NormalizeTo1(fj,NAA); for (a=0; a<20; a++) if (fj[a]>1E-10) Neff[i]-=fj[a]*fast_log2(fj[a]); } if (ncol>0) Neff[i]=pow(2.0,Neff[i]/ncol); else Neff[i]=1.0; } // Calculate transition probabilities from I state q.tr[i][I2M]=q.tr[i][I2I]=0.0; for (k=0; k<N_in; k++) //for all sequences { if (in[k] && I[k][i]>0) //current state is I { q.tr[i][I2M]+=wi[k]; q.tr[i][I2I]+=wi[k]*(I[k][i]-1); } } // end for(k) } else // fast global weights? 
{ float w_I=-1.0/N_filtered; ncol=0; q.tr[i][I2M]=q.tr[i][I2I]=0.0; // Calculate amino acid frequencies fj[a] from weights wg[k] for (k=0; k<N_in; k++) if (in[k] && I[k][i]>0) { ncol++; w_I+=wg[k]; q.tr[i][I2M]+=wi[k]; q.tr[i][I2I]+=wi[k]*(I[k][i]-1); } if (ncol>0) { if (w_I<0) Neff[i]=1.0; else Neff[i] = Nlim - (Nlim-1.0)*fpow2(scale*w_I); // fprintf(stderr,"I i=%3i ncol=%3i Neff_M=%5.2f Nlim=%5.2f w_I=%5.3f Neff_I=%5.2f\n",i,ncol,q.Neff_HMM,Nlim,w_I,Neff[i]); } else { Neff[i]=0.0; q.tr[i][I2M]=-100000; q.tr[i][I2I]=-100000; continue; } } // Normalize and take log sum = q.tr[i][I2M]+q.tr[i][I2I]; q.tr[i][I2M]=log2(q.tr[i][I2M]/sum); q.tr[i][I2I]=log2(q.tr[i][I2I]/sum); } // end loop through alignment columns i ////////////////////////////////////////////////////////////////////////////////////////////// delete[](wi); wi = NULL; // delete n[][] for (j=1; j<=L; j++){ delete[](n[j]); (n[j]) = NULL; } delete[](n); (n) = NULL; q.tr[0][I2M]=0; q.tr[0][I2I]=-100000; q.tr[L][I2M]=0; q.tr[L][I2I]=-100000; q.Neff_I[0]=99.999; // Assign Neff_I[i] for (i=1; i<=L; i++) // Calculate wi[k] at position i as well as Neff[i] and Neff[i] q.Neff_I[i]=Neff[i]; delete[] Neff; Neff = NULL; return; } /* this is the end of Alignment::Transitions_from_I_state() */ ///////////////////////////////////////////////////////////////////////////////////// /** * @brief Calculate transitions q.tr[i][a] (a=DM,DD) with pos-specific subalignments */ void Alignment::Transitions_from_D_state(HMM& q, char* in) { // Calculate position-dependent weights wi[k] for each i. // For calculation of weights in column i use sub-alignment // over sequences which have a DELETE in column i // and over columns where none of these sequences has an end gap. // This is done by updating the arrays n[j][a] and rj[j] at each step i-1->i while letting i run from 1 to L. // n[j][a] = number of occurences of index a at column j of the subalignment, // => only columns with n[j][ENDGAP]=0 are contained in the subalignment! 
// If no sequences enter or leave the subalignment at the step i-1 -> i (i.e. change=0)
  // then the old values wi[k], Neff[i-1], and ncol are used for the new position i.
  // Index a can be an amino acid (0-19), ANY=20, GAP=21, or ENDGAP=22
  int k;                      // index of sequence
  int i,j;                    // position in alignment
  int a;                      // amino acid (0..19)
  int naa;                    // number of different amino acids
  int** n;                    // n[j][a] = number of seq's with some residue at column i AND a at position j
  //float wi[MAXSEQ];         // weight of sequence k in column i, calculated from subalignment i
  float *wi=NULL;             // weight of sequence k in column i, calculated from subalignment i
  //float Neff[MAXRES];       // diversity of subalignment i
  float *Neff = new(float[par.maxResLen]); // diversity of subalignment i
  int nseqi=0;                // number of sequences in subalignment i (for DEBUGGING)
  int ncol=0;                 // number of columns j that contribute to Neff[i]
  char change;                // has the set of sequences in subalignment changed? 0:no 1:yes
  float fj[NAA+3];            // to calculate entropy
  float sum;
  float Nlim=0.0;             // only for global weights
  float scale=0.0;            // only for global weights

  wi = new(float[N_in+2]); /* FIXME: FS */

  // Global weights?
  if (par.wg==1)
  {
    for (k=0; k<N_in; k++) wi[k]=wg[k];
    Nlim=fmax(10.0,q.Neff_HMM+1.0);             // limiting Neff
    scale=log2((Nlim-q.Neff_HMM)/(Nlim-1.0));   // for calculating Neff for those seqs with dels at specific pos
  }

  // Initialization
  n = new(int*[L+2]);
  for (j=1; j<=L; j++) n[j]=new(int[NAA+3]);
  for (j=1; j<=L; j++)
    for (a=0; a<NAA+3; a++)
      n[j][a]=0;

  //////////////////////////////////////////////////////////////////////////////////////////////
  // Main loop through alignment columns
  for (i=1; i<=L; i++) // Calculate wi[k] at position i as well as Neff[i]
  {
    if (par.wg==0) // if local weights
    {
      change=0;
      // Check all sequences k and update n[j][a] and ri[j] if necessary
      for (k=0; k<N_in; k++)
      {
        if (!in[k]) continue;
        if (X[k][i-1]!=GAP && X[k][i]==GAP)
        { // ... if sequence k was NOT included in i-1 and has to be included for column i
          change=1;
          nseqi++;
          for (int j=1; j<=L; j++) n[j][ (int)X[k][j]]++;
        }
        else if (X[k][i-1]==GAP && X[k][i]!=GAP)
        { // ... if sequence k WAS included in i-1 and has to be thrown out for column i
          change=1;
          nseqi--;
          for (int j=1; j<=L; j++) n[j][ (int)X[k][j]]--;
        }
      } //end for (k)
      nseqs[i]=nseqi;

      // If there is no sequence in subalignment j ...
      if (nseqi==0)
      {
        ncol=0;
        Neff[i]=0.0; // effective number of sequences = 0!
        q.tr[i][D2M]=-100000;
        q.tr[i][D2D]=-100000;
        continue;
      }

      // If subalignment changed: update weights wi[k] and Neff[i]
      if (change)
      {
        // Initialize weights and numbers of residues for subalignment i
        ncol=0;
        for (k=0; k<N_in; k++) wi[k]=0.0;

        // sum wg[k][i] over all columns j and sequences k of subalignment
        for (j=1; j<=L; j++)
        {
          if (n[j][ENDGAP]>MAXENDGAPFRAC*nseqi) continue;
          naa=0; for (a=0; a<20; a++) if(n[j][a]) naa++;
          if (naa==0) continue;
          ncol++;
          for (k=0; k<N_in; k++)
          {
            if (in[k] && X[k][i]==GAP && X[k][j]<ANY)
            {
              if (!n[j][ (int)X[k][j]]) {fprintf(stderr,"Error: Di=%i: n[%i][X[%i]]=0! (X[%i]=%i)\n",i,j,k,k,X[k][j]);}
              wi[k]+=1.0/float(n[j][ (int)X[k][j] ]*naa);
            }
          }
        }

        // Check whether number of columns in subalignment is sufficient
        if (ncol<NCOLMIN)
          // Take global weights
          for (k=0; k<N_in; k++)
            if(in[k] && X[k][i]==GAP) wi[k]=wg[k]; else wi[k]=0.0;

        // Calculate Neff[i]
        Neff[i]=0.0;
        for (j=1; j<=L; j++)
        {
          if (n[j][ENDGAP]>MAXENDGAPFRAC*nseqi) continue;
          for (a=0; a<20; a++) fj[a]=0;
          for (k=0; k<N_in; k++)
            if (in[k] && X[k][i]==GAP && X[k][j]<ANY)
              fj[ (int)X[k][j] ]+=wi[k];
          NormalizeTo1(fj,NAA);
          for (a=0; a<20; a++)
            if (fj[a]>1E-10) Neff[i]-=fj[a]*fast_log2(fj[a]);
        }
        if (ncol>0) Neff[i]=pow(2.0,Neff[i]/ncol); else Neff[i]=1.0;
      }
      else //no update was necessary; copy values for i-1
      {
        Neff[i]=Neff[i-1];
      }

      // Calculate transition probabilities from D state
      q.tr[i][D2M]=q.tr[i][D2D]=0.0;
      for (k=0; k<N_in; k++) //for all sequences
      {
        if (in[k] && X[k][i]==GAP) //current state is D
        {
          if (X[k][i+1]==GAP) //next state is D
            q.tr[i][D2D]+=wi[k];
          else if (X[k][i+1]<=ANY) //next state is M
            q.tr[i][D2M]+=wi[k];
        }
      } // end for(k)
    }
    else // fast global weights?
    {
      float w_D=-1.0/N_filtered;
      ncol=0;
      q.tr[i][D2M]=q.tr[i][D2D]=0.0;
      // Calculate amino acid frequencies fj[a] from weights wg[k]
      for (k=0; k<N_in; k++) //for all sequences
        if (in[k] && X[k][i]==GAP) //current state is D
        {
          ncol++;
          w_D+=wg[k];
          if (X[k][i+1]==GAP) //next state is D
            q.tr[i][D2D]+=wi[k];
          else if (X[k][i+1]<=ANY) //next state is M
            q.tr[i][D2M]+=wi[k];
        }
      if (ncol>0)
      {
        if (w_D<0) Neff[i]=1.0;
        else Neff[i] = Nlim - (Nlim-1.0)*fpow2(scale*w_D);
        // fprintf(stderr,"D i=%3i ncol=%3i Neff_M=%5.2f Nlim=%5.2f w_D=%5.3f Neff_D=%5.2f\n",i,ncol,q.Neff_HMM,Nlim,w_D,Neff[i]);
      }
      else
      {
        Neff[i]=0.0; // effective number of sequences = 0!
        q.tr[i][D2M]=-100000;
        q.tr[i][D2D]=-100000;
        continue;
      }
    }

    // Normalize and take log
    // NOTE(review): no FLT_MIN guard here (unlike the M-state version), so a
    // zero sum would yield NaN/-inf; the 'continue' paths above are relied on
    // to exclude that case -- confirm against upstream hhalign.
    sum = q.tr[i][D2M]+q.tr[i][D2D];
    q.tr[i][D2M]=log2(q.tr[i][D2M]/sum);
    q.tr[i][D2D]=log2(q.tr[i][D2D]/sum);
  } // end loop through alignment columns i
  //////////////////////////////////////////////////////////////////////////////////////////////

  // Boundary transitions for the virtual begin state
  q.tr[0][D2M]=0;
  q.tr[0][D2D]=-100000;
  q.Neff_D[0]=99.999;

  // Assign Neff_D[i]
  for (i=1; i<=L; i++)
    q.Neff_D[i]=Neff[i];

  delete[](wi); wi = NULL;/* FIXME: FS */
  // delete n[][]
  for (j=1; j<=L; j++){
    delete[](n[j]); (n[j]) = NULL;
  }
  delete[](n); (n) = NULL;
  delete[] Neff; Neff = NULL;
  return;
} /* this is the end of Alignment::Transitions_from_D_state() */

/////////////////////////////////////////////////////////////////////////////////////
/**
 * @brief Write alignment without insert states (lower case) to alignment file?
 *
 * Writes one FASTA record per kept sequence, emitting only the L match-state
 * columns X[k][1..L] (converted back to characters with i2aa).
 * Appends to alnfile when par.append is set, otherwise overwrites.
 */
void Alignment::WriteWithoutInsertsToFile(char* alnfile)
{
  if (v>=2) cout<<"Writing alignment to "<<alnfile<<"\n";
  FILE* alnf;
  if (!par.append) alnf = fopen(alnfile,"w"); else alnf = fopen(alnfile,"a");
  if (!alnf) OpenFileError(alnfile);
  // If alignment name is different from that of query: write name into commentary line
  if (strncmp(longname,sname[kfirst],DESCLEN-1)) fprintf(alnf,"#%s\n",longname);
  // NOTE(review): this progress message duplicates the identical line above
  if (v>=2) cout<<"Writing alignment to "<<alnfile<<"\n";
  for (int k=0; k<N_in; k++)
    if (keep[k] || display[k]==KEEP_ALWAYS) // print if either in profile (keep[k]>0) or display is obligatory (display[k]==2)
    {
      fprintf(alnf,">%s\n",sname[k]);
      for (int i=1; i<=L; i++) fprintf(alnf,"%c",i2aa(X[k][i]));
      fprintf(alnf,"\n");
    }
  fclose(alnf);
}

/////////////////////////////////////////////////////////////////////////////////////
// Write stored,filtered sequences WITH insert states (lower case) to alignment file?
/////////////////////////////////////////////////////////////////////////////////////
/**
 * @brief Write stored, filtered sequences (with insert states) to alignment file.
 *
 * format==NULL or "a3m": write seq[k] (which retains lower-case insert states).
 * Otherwise: PSI-BLAST format -- name column plus upper-case/'-' residues only.
 * Appends when par.append is set, otherwise overwrites.
 */
void Alignment::WriteToFile(char* alnfile, const char format[])
{
  FILE* alnf;
  if (!par.append) alnf = fopen(alnfile,"w"); else alnf = fopen(alnfile,"a");
  if (!alnf) OpenFileError(alnfile);
  // If alignment name is different from that of query: write name into commentary line
  if (strncmp(longname,sname[kfirst],DESCLEN-1)) fprintf(alnf,"#%s\n",longname);
  if (!format || !strcmp(format,"a3m"))
  {
    if (v>=2) cout<<"Writing A3M alignment to "<<alnfile<<"\n";
    for (int k=0; k<N_in; k++)
      if (keep[k] || display[k]==KEEP_ALWAYS) // print if either in profile (keep[k]>0) or display obligatory (display[k]==2)
        fprintf(alnf,">%s\n%s\n",sname[k],seq[k]+1);
  }
  else // PSI-BLAST format
  {
    if (v>=2) cout<<"Writing PSI-BLAST-formatted alignment to "<<alnfile<<"\n";
    for (int k=kfirst; k<N_in; k++) // skip sequences before kfirst!!
      if (keep[k] || display[k]==KEEP_ALWAYS) // print if either in profile (keep[k]>0) or display obligatory (display[k]==2)
      {
        strcut(sname[k]);
        fprintf(alnf,"%-20.20s ",sname[k]);
        // for (int i=1; i<=L; i++) fprintf(alnf,"%c",i2aa(X[k][i]));
        // fprintf(alnf,"\n");
        char* ptr=seq[k];
        // keep only '-' (ASCII 45) and upper-case letters (ASCII 65..90),
        // i.e. match-state columns; lower-case inserts are dropped
        for (; *ptr!='\0'; ptr++)
          if (*ptr==45 || (*ptr>=65 && *ptr<=90)) fprintf(alnf,"%c",*ptr);
        fprintf(alnf,"\n");
      }
  }
  fclose(alnf);
}

/*
 * FIXME: this function contains a reference to MAXSEQ & MAXCOL
 * however, this may not be accessed (FS)
 */
/////////////////////////////////////////////////////////////////////////////////////
/**
 * @brief Read a3m slave alignment of hit from file and merge into (query) master alignment
 */
void Alignment::MergeMasterSlave(Hit& hit, char ta3mfile[])
{
  Alignment Tali;
  char* cur_seq = new(char[MAXCOL]); // Sequence currently read in
  int maxcol=MAXCOL;
  int l,ll;       // position in unaligned template (T) sequence Tali.seq[l]
  int i;          // counts match states in query (Q) HMM
  int j;          // counts match states in T sequence Tali.seq[l]
  int h;          // position in aligned T sequence cur_seq[h]
  int k;          // sequence index
  char c;         //

  // printf("****************%s:%s:%d: did get into MergeMasterSlave\n", __FUNCTION__, __FILE__, __LINE__);
  if (v>=3) printf("Merging %s to query alignment\n",ta3mfile);

  // If par.append==1 do not print query alignment
  if (par.append) for (k=0; k<N_in; k++) keep[k]=display[k]=KEEP_NOT;

  // Read template alignment into Tali
  FILE* ta3mf=fopen(ta3mfile,"r");
  if (!ta3mf) OpenFileError(ta3mfile);
  Tali.Read(ta3mf,ta3mfile);
  fclose(ta3mf);

  // Filter Tali alignment
  Tali.Compress(ta3mfile);
  N_filtered = Tali.Filter(par.max_seqid,par.coverage,par.qid,par.qsc,par.Ndiff);

  // Record imatch[j]: query match state aligned to template match state j
  int* imatch=new(int[hit.j2+1]);
  int step = hit.nsteps;
  for (j=hit.j1; j<=hit.j2; j++)
  {
    // Advance to position of next T match state j
    while (hit.j[step]<j) step--;
    imatch[j] = hit.i[step];
    // printf("step=%-3i i=%-3i j=%-3i\n",step,imatch[j],j);
  }

  // Determine number of match states of Qali
  for (L=0,l=1; seq[kfirst][l]>'\0'; l++)
    if ((seq[kfirst][l]>='A' && seq[kfirst][l]<='Z') || seq[kfirst][l]=='-') L++;

  // For each sequence in T alignment: align to Qali
  for (k=0; k<Tali.N_in; k++)
  {
    if (!Tali.keep[k]) continue;
    if (N_in>=MAXSEQ)
    {
      fprintf(stderr,"WARNING in %s: maximum number of %i sequences exceeded while reading %s. Skipping all following sequences\n",program_name,MAXSEQ,ta3mfile);
      break;
    }
    cur_seq[0]=' '; // 0'th position not used

    // Add the hit.i1-1 left end gaps to aligned sequence
    for (h=1; h<hit.i1; h++) cur_seq[h]='-';

    // Advance to match state hit.j1 of Tali.seq[k]
    for (j=0, l=1; (c=Tali.seq[k][l])>'\0'; l++)
      if ((c>='A' && c<='Z') || c=='-') // match state at position l?
        if ((++j)==hit.j1) break;       // yes: increment j. Reached hit,j1? yes: break
    if (j<hit.j1) {printf("Error: did not find %i match states in sequence %i of %s. Sequence:\n%s\n",hit.j1,k,Tali.name,Tali.seq[k]); exit(1);}

    // Write first match state to cur_seq
    int iprev=hit.i1; // index of previous query match state
    int lprev=l;      // previous T match state in Tali.seq[k][l]
    cur_seq[h++] = Tali.seq[k][l]; // first column of alignment is Match-Match state

    // For each further match state j in alignment
    step = hit.nsteps;
    for (j=hit.j1+1; j<=hit.j2; j++)
    {
      // Advance to position of next T match state j
      i=imatch[j];

      // Advance to position of next T match state j
      while ((c=Tali.seq[k][++l])>'\0' && ((c>='a' && c<='z') || c=='.')) ;

      int di=i-iprev; // number of Match states in Q between T match state j-1 and j
      int dl=l-lprev; // 1 + number of inserted residues in T sequence between T match state j-1 and j
      if (di==1)
      {
        // One Q match state for one T match state (treated as special case for speed reasons)
        // i:     i-1   i       di=1
        // Q:  XXXXXX.....XXXXXX
        // T:  YYYYYYyyyyyYYYYYY
        // j:     j-1   j
        // l:   lprev   l       dl=6

        // Inserts in lower case
        for (ll=lprev+1; ll<l; ll++)
          if (Tali.seq[k][ll]!='-' && Tali.seq[k][ll]!='.') cur_seq[h++] = lwrchr(Tali.seq[k][ll]);

        // Template Match state -> upper case
        cur_seq[h++] = Tali.seq[k][ll];
      }
      else if (di==0)
      {
        // Gap in query: no Q match state for on T match state (special case for speed reasons)
        // i:     i-1 i-1       di=0
        // Q:  XXXXXX.....~~~XXX
        // T:  YYYYYYyyyyyYYYYYY
        // j:     j-1   j
        // l:   lprev   l       dl=6

        // All T residues (including T match state) in lower case
        for (ll=lprev+1; ll<=l; ll++)
          if (Tali.seq[k][ll]!='-' && Tali.seq[k][ll]!='.') cur_seq[h++] = lwrchr(Tali.seq[k][ll]);
      }
      else if (di>=dl)
      {
        // More Match states in Q than Inserts in the T sequence
        // => half T inserts y left, half right-aligned in uc, gaps to fill up
        // Number of T insert residues to be left-aligned: (int)(dl/2)
        // i:    iprev    i     di=7
        // Q:  XXXXXXXXXXXXXXXXXX
        // T:  YYYYYYYyyy-yyYYYYY
        // j:      j-1    j
        // l:    lprev    l     dl=6

        // Add left-bounded template residues
        for (ll=lprev+1; ll<=lprev+(int)(dl/2); ll++)
          cur_seq[h++]=uprchr(Tali.seq[k][ll]);

        // Add central gaps
        for (int gap=1; gap<=di-dl; gap++) cur_seq[h++]='-';

        // Add right-bounded residues
        for (; ll<=l; ll++) cur_seq[h++]=uprchr(Tali.seq[k][ll]);
      }
      else if (di<dl)
      {
        // Fewer Match states in Q than inserts in T sequence
        // => half of available space di for left- half for right-aligned T inserts, rest in lc
        // number of T inserts to be left-aligned in uc: (int)(di/2),
        // i:    iprev    i     di=5
        // Q:  XXXXXXXXX.XXXXXXX
        // T:  YYYYYYYyyyyyYYYYY
        // j:      j-1    j
        // l:    lprev    l     dl=6

        // Add left-bounded template residues
        for (ll=lprev+1; ll<=lprev+(int)(di/2); ll++)
          cur_seq[h++]=uprchr(Tali.seq[k][ll]);

        // Add central inserts
        for (int ins=1; ins<=dl-di; ins++,ll++)
          if (Tali.seq[k][ll]!='-' && Tali.seq[k][ll]!='.') cur_seq[h++] = lwrchr(Tali.seq[k][ll]);

        // Add right-bounded residues
        for (; ll<=l; ll++) cur_seq[h++]=uprchr(Tali.seq[k][ll]);
      }
      // printf("i=%-3i j=%-3i l=%-3i cur_seq=%s\n",i,j,l,cur_seq);

      iprev=i; lprev=l;
      if (h>=maxcol-1000) // too few columns? Reserve double space
      {
        char* new_seq=new(char[2*maxcol]);
        strncpy(new_seq,cur_seq,maxcol); //////// check: maxcol-1 ????
        delete[](cur_seq); (cur_seq) = NULL;
        cur_seq=new_seq;
        maxcol*=2;
      }
    }

    // Add the remaining gaps '-' to the end of the template sequence
    for (i=hit.i2+1; i<=L; i++) cur_seq[h++]='-';
    cur_seq[h++]='\0'; // h now includes the terminator and is used as the copy size below

    // Append the merged sequence as new alignment member N_in
    keep[N_in] = display[N_in] = KEEP_CONDITIONALLY;
    seq[N_in]=new(char[h]);
    if (!seq[N_in]) MemoryError("array for input sequences");
    strcpy(seq[N_in],cur_seq);
    X[N_in]=new(char[h]);
    if (!X[N_in]) MemoryError("array for input sequences");
    I[N_in]=new(short unsigned int[h]);
    if (!I[N_in]) MemoryError("array for input sequences");
    sname[N_in]=new(char[strlen(Tali.sname[k])+1]);
    if (!sname[N_in]) MemoryError("array for input sequences");
    strcpy(sname[N_in],Tali.sname[k]);
    N_in++;

    // printf("k=%-3i %s\n",k,Tali.seq[k]);
    // printf("Query %s\n",seq[kfirst]);
    // printf("k=%-3i %s\n\n",k,cur_seq);
  } // end for (k)

  // printf("N_in=%-5i HMM=%s with %i sequences\n",N_in,ta3mfile,N_filtered);

  delete[] cur_seq; cur_seq = NULL;
  delete[] imatch; imatch = NULL;
  delete[] ksort; ksort=NULL; // if ksort already existed it will be to short for merged alignment
  delete[] first; first=NULL; // if first already existed it will be to short for merged alignment
  delete[] last; last=NULL;   // if last already existed it will be to short for merged alignment
} /* this is the end of Alignment::MergeMasterSlave() */

/////////////////////////////////////////////////////////////////////////////////////
/**
 * @brief Add a sequence to Qali
 *
 * Copies positions 0..L+1 of Xk into X[N_in]; I[N_in] is copied from Ik, or
 * zero-filled when Ik==NULL. Requires L to be set already.
 * NOTE(review): I[N_in] is written but never (re)allocated here -- it is
 * presumably allocated by an earlier call (e.g. Transfer()); confirm callers.
 */
void Alignment::AddSequence(char Xk[], int Ik[])
{
  int i; // position in query and target
  if (L<=0) InternalError("L is not set in AddSequence()");
  X[N_in]=new(char[L+2]);
  for (i=0; i<=L+1; i++) X[N_in][i]=Xk[i];
  if (Ik==NULL)
    for (i=0; i<=L+1; i++) I[N_in][i]=0;
  else
    for (i=0; i<=L+1; i++) I[N_in][i]=Ik[i];
  N_in++;
}

/////////////////////////////////////////////////////////////////////////////////////
/**
 * @brief Determine matrix of position-specific weights w[k][i] for multiple alignment
 * Pos-specific weights are calculated like in 
"Amino_acid_frequencies_and_transitions_from_M_state()" */ void Alignment::GetPositionSpecificWeights(float* w[]) { // Calculate position-dependent weights wi[k] for each i. // For calculation of weights in column i use sub-alignment // over sequences which have a *residue* in column i (no gap, no end gap) // and over columns where none of these sequences has an end gap. // This is done by updating the arrays n[j][a] at each step i-1->i while letting i run from 1 to L. // n[j][a] = number of occurences of amino acid a at column j of the subalignment, // => only columns with n[j][ENDGAP]=0 are contained in the subalignment! // If no sequences enter or leave the subalignment at the step i-1 -> i (i.e. change=0) // then the old values w[k][i] and ncol are used for the new position i. // Index a can be an amino acid (0-19), ANY=20, GAP=21, or ENDGAP=22 char* in=keep; // to keep the code similar to Amino_acid_frequencies_and_transitions_from_M_state() int k; // index of sequence int i,j; // position in alignment int a; // amino acid (0..19) int naa; // number of different amino acids int** n; // n[j][a] = number of seq's with some residue at column i AND a at position j int nseqi=0; // number of sequences in subalignment i int ncol=0; // number of columns j that contribute to Neff[i] char change; // has the set of sequences in subalignment changed? 0:no 1:yes // Global weights? if (par.wg==1) { for (k=0; k<N_in; k++) for (i=1; i<=L; i++) w[k][i]=wg[k]; } else { // Initialization n = new(int*[L+2]); for (j=1; j<=L; j++) n[j]=new(int[NAA+3]); for (j=1; j<=L; j++) for (a=0; a<NAA+3; a++) n[j][a]=0; ////////////////////////////////////////////////////////////////////////////////////////////// // Main loop through alignment columns for (i=1; i<=L; i++) // Calculate w[k][i] { change=0; // Check all sequences k and update n[j][a] and ri[j] if necessary for (k=0; k<N_in; k++) { if (!in[k]) continue; if (X[k][i-1]>=ANY && X[k][i]<ANY) { // ... 
if sequence k was NOT included in i-1 and has to be included for column i change=1; nseqi++; for (int j=1; j<=L; j++) n[j][ (int)X[k][j]]++; } else if (X[k][i-1]<ANY && X[k][i]>=ANY) { // ... if sequence k WAS included in i-1 and has to be thrown out for column i change=1; nseqi--; for (int j=1; j<=L; j++) n[j][ (int)X[k][j]]--; } } //end for (k) nseqs[i]=nseqi; // If subalignment changed: update weights w[k][i] and Neff[i] if (change) { // Initialize weights and numbers of residues for subalignment i ncol=0; for (k=0; k<N_in; k++) w[k][i]=0.0; // sum wi[k] over all columns j and sequences k of subalignment for (j=1; j<=L; j++) { // do at least a fraction MAXENDGAPFRAC of sequences in subalignment contain an end gap in j? if (n[j][ENDGAP]>MAXENDGAPFRAC*nseqi) continue; naa=0; for (a=0; a<20; a++) if(n[j][a]) naa++; if (naa==0) continue; ncol++; for (k=0; k<N_in; k++) { if (in[k] && X[k][i]<ANY && X[k][j]<ANY) { // if (!n[j][ (int)X[k][j]]) {fprintf(stderr,"Error: Mi=%i: n[%i][X[%i]]=0! (X[%i]=%i)\n",i,j,k,k,X[k][j]);} w[k][i]+=1.0/float(n[j][ (int)X[k][j] ]*naa); } } } // Check whether number of columns in subalignment is sufficient if (ncol<NCOLMIN) // Take global weights for (k=0; k<N_in; k++) if(in[k]) {if(X[k][i]<ANY) w[k][i]=wg[k]; else w[k][i]=0.0;} } } // end loop through alignment columns i /////////////////////////////////////////////////////////////////////// // delete n[][] for (j=1; j<=L; j++){ delete[](n[j]); (n[j]) = NULL; } delete[](n); (n) = NULL; } return; } #ifdef CLUSTALO /* @* Transfer * * take sequence data from Clustal and transfer it into * hhalign accessible information (structure/class) * * Note that hhalign does not see all sequences/profiles * but only sequences that are elements of the 2 profiles * to be aligned. * * References to the required sequences are passed into hhalign * through auxilliary pointers that are shallow copies of the * sequence/profile data available to Clustal. 
* Re-allocating memory for these auxilliary pointers
 * would be desaterous, as it might detach the memory
 * seen by Clustal.
 */
void Alignment::Transfer(char **ppcProf, int iCnt){

  /* @<variables local to Transfer@> */
  int iLen;  /* length of profile */
  int k;     /* generic iterator */

  /* @<initialisation@> */
  N_in = iCnt;
  N_filtered = N_ss = 0;
  kss_dssp = ksa_dssp = kss_pred = kss_conf = -1;
  kfirst = 0;
  strcpy(longname, "unknown_long_seq_name");
  strcpy(name, "unknown_seq_name");
  strcpy(file, "unknown_file_name");
  n_display = iCnt;

  /* @<determine length of profile@>
     all sequences in profile should have same length,
     so only do it for 1st */
  for (iLen = 0; '\0' != ppcProf[0][iLen]; iLen++);

  /* @<allocate memory for sequences etc@>
     seq[k][0] is a dummy blank (position 0 unused), the profile data
     from Clustal is then appended with strcat */
  for (k = 0; k < iCnt; k++){
#define GOOD_MEASURE 1000 /* Temporary -- can be removed once rest in place */
    I[k] = new(short unsigned int[iLen+2+GOOD_MEASURE]);
    X[k] = new(char[iLen+2+GOOD_MEASURE]);
    seq[k] = new(char[iLen+2+GOOD_MEASURE]);
    seq[k][0] = ' ';
    seq[k][1] = '\0';
    if (NULL == ppcProf[k]){
      printf("%s:%d: Arena[%d]=NULL, cnt=%d\n", __FILE__, __LINE__, k, iCnt);
      exit(-1);
    }
    strcat(seq[k], ppcProf[k]);
    keep[k] = KEEP_CONDITIONALLY;
    display[k] = KEEP_CONDITIONALLY;
    sname[k] = new(char[GOOD_MEASURE]);
    strcpy(sname[k], "unknown_sname");
  } /* (0 <= k < iCnt) */

  /* FIXME: Soeding always makes 1st sequence permanent */
  /*keep[0] = KEEP_ALWAYS; display[k] = KEEP_ALWAYS;*/
#if 1
  /* Believe that the first and last positions are most important
     in stability of this algorithm. Must make sure that at least
     2 sequences with residues in these positions are kept.
     Think any sequence will do, but better to keep the one
     with the longest 'contig' */
  int iSeq;                        /* sequence iterator */
  int iHeadLen = 0, iHeadID = -1;  /* length & ID of longest head contig */
  int iTailLen = 0, iTailID = -1;  /* length & ID of longest head contig */
  int iCont = -1;
  char *pcFind = NULL;
#if 0
  printf("%s:%s:%d: NEW PROFILE (%d seq) ================\n", __FUNCTION__, __FILE__, __LINE__, iCnt);
#endif
  for (iSeq = 0; iSeq < iCnt; iSeq++){
#if 0
    printf("%s:%s:%d: consider seq %d ------------------\n", __FUNCTION__, __FILE__, __LINE__, iSeq);
#endif
    /* head contig = residues before the first gap */
    pcFind = strchr(&seq[iSeq][1], '-');
    if (NULL == pcFind){
      /* no gap at all in this sequences, spans entire profile */
      iHeadID = iTailID = iSeq;
      iHeadLen = iTailLen = iLen;
      break;
    }
    iCont = (int)(pcFind - &seq[iSeq][1]);
    if (iCont > iHeadLen){
      iHeadLen = iCont;
      iHeadID = iSeq;
    }
    /* tail contig = residues after the last gap */
    pcFind = strrchr(seq[iSeq], '-');
    iCont = iLen - (int)(pcFind - seq[iSeq]);
    if (iCont > iTailLen){
      iTailLen = iCont;
      iTailID = iSeq;
    }
#if 0
    printf("%s:%s:%d: seq %3d: len = %d(%d) %s\n", __FUNCTION__, __FILE__, __LINE__, iSeq, iCont, iLen, seq[iSeq]);
#endif
  } /* 0 <= iSeq < iCnt */
#if 0
  printf("%s:%s:%d: seq %d is winner with head contig of %d, seq %d tail contig of %d\n"
         , __FUNCTION__, __FILE__, __LINE__, iHeadID, iHeadLen, iTailID, iTailLen);
#endif
  if ( (-1 == iHeadID) || (-1 == iTailID) ){
    printf("%s:%s:%d: profile has no leading and/or trailing residues (h=%d:t=%d:#=%d)\n",
           __FUNCTION__, __FILE__, __LINE__, iHeadID, iTailID, iCnt);
  }
  else{
    keep[iHeadID] = KEEP_ALWAYS;
    keep[iTailID] = KEEP_ALWAYS;
  }
#endif
  /* @= */
  return;
} /* this is the end of Transfer() */
#endif

#ifdef CLUSTALO
/* @* Alignment::ClobberGlobal (eg: qali)
 *
 * Note: originally hhalign() was stand-alone code,
 * there are a couple of GLOBAL (!) variables,
 * which would have been destroyed on exit.
 * However, now there is no 'exit' from hhalign(),
 * and on re-entry the global variable must be clean again.
 */
void Alignment::ClobberGlobal(void){

  /* @<essentials@>
     these are essential to re-set (as some of them are used as flags) */
  for(int k=0; k<N_in; k++)
  {
    delete[] sname[k]; sname[k] = NULL;
    delete[] seq[k]; seq[k] = NULL;
    delete[] X[k]; X[k] = NULL;
    delete[] I[k]; I[k] = NULL;
  }
  delete[] nres; nres = NULL;
  delete[] first; first = NULL;
  delete[] last; last = NULL;
  delete[] ksort; ksort = NULL;
  N_in = N_filtered = n_display = 0;
  L = 0;
  kss_dssp = ksa_dssp = kss_pred = kss_conf = kfirst = -1;

  /* @<re-set but keep memory@>
     do not free the memory but re-set content */
  longname[0] = '\0'; //delete[] longname; longname = NULL;
  keep[0] = '\0';     //delete[] keep; keep = NULL;
  display[0] = '\0';  //delete[] display; display = NULL;
  wg[0] = 0;          //delete[] wg; wg = NULL;
  nseqs[0] = 0;       //delete[] nseqs; nseqs = NULL;
  name[0]='\0'; fam[0]='\0'; file[0]='\0';
  //delete[] sname; sname = NULL;
  //delete[] seq; seq = NULL;
  //delete[] X; X = NULL;
  //delete[] I; I = NULL;
  //delete[] l; l = NULL;

  /* @= */
  return;
}
#endif
GB_binop__max_int32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__max_int32
// A.*B function (eWiseMult): GB_AemultB__max_int32
// A*D function (colscale): GB_AxD__max_int32
// D*A function (rowscale): GB_DxB__max_int32
// C+=B function (dense accum): GB_Cdense_accumB__max_int32
// C+=b function (dense accum): GB_Cdense_accumb__max_int32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__max_int32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__max_int32
// C=scalar+B GB_bind1st__max_int32
// C=scalar+B' GB_bind1st_tran__max_int32
// C=A+scalar GB_bind2nd__max_int32
// C=A'+scalar GB_bind2nd_tran__max_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
    int32_t
#define GB_BTYPE \
    int32_t
#define GB_CTYPE \
    int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
    z = GB_IMAX (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
    0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
// (set via GB_control.h / GxB_NO_* compile-time switches)
#define GB_DISABLE \
    (GxB_NO_MAX || GxB_NO_INT32 || GxB_NO_MAX_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
//------------------------------------------------------------------------------
// NOTE(review): this translation unit is auto-generated (see file header:
// "If this file is in the Generated/ folder, do not edit it").  Only comments
// and line layout are touched below; every code token is unchanged.  Function
// bodies are pulled in via #include of shared template .c files, so behavior
// lives in those templates, not here.
//------------------------------------------------------------------------------

// C += A+B, all 3 matrices dense.  The op must be MIN, MAX, PLUS, MINUS,
// RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__max_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // body supplied by the shared dense ewise3-accum template
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__max_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator/type disabled at compile time; caller falls back to generic
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__max_int32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__max_int32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns); kept as-is
    // because the file is generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__max_int32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__max_int32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__max_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__max_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__max_int32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int32_t bij = Bx [p] ;
        Cx [p] = GB_IMAX (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__max_int32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int32_t aij = Ax [p] ;
        Cx [p] = GB_IMAX (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int32_t aij = Ax [pA] ;             \
    Cx [pC] = GB_IMAX (x, aij) ;        \
}

GrB_Info GB_bind1st_tran__max_int32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (generated boilerplate; same definition)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int32_t aij = Ax [pA] ;             \
    Cx [pC] = GB_IMAX (aij, y) ;        \
}

GrB_Info GB_bind2nd_tran__max_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ===== transform.h — start of a second, unrelated file in this chunk ===== */
/*
 * transform.h
 *
 * Element-wise transform loops (CPU + CUDA) for libnd4j.
 *
 * Created on: Dec 28, 2015
 * @author: agibsonccc
 * @author: raver119@gmail.com
 */

#ifndef TRANSFORM_H_
#define TRANSFORM_H_
#include <vector>
#include <templatemath.h>
#include <ops/ops.h>
#include <ops/special_ops.h>

#ifdef _OPENMP
#include <omp.h>
#endif
#include <pairwise_util.h>
#include <dll.h>

#include <loops/reduce.h>
#include <loops/scalar.h>
#include <loops/indexreduce.h>
#include <loops/broadcasting.h>

#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#endif

// Serial fallbacks so the OpenMP calls below compile without OpenMP.
#ifndef _OPENMP
#define omp_get_thread_num() 0
#define omp_get_max_threads() 1
#endif

#include "legacy_ops.h"

namespace functions {
    namespace transform {

        // Applies a unary op (selected by opNum / OpType) element-wise:
        // result[i] = Op(dx[i], extraParams).  Host and device entry points.
        template<typename T>
        class Transform {
        public:

#ifdef __CUDACC__

            /**
             * CUDA transform through an explicit index buffer:
             * result[indexes[i]] = op(dy[indexes[i]], params).
             * Grid-wide loop: each thread advances by the total thread count.
             */
            virtual __inline__ __device__ void transform(
                    T *dy,
                    int *shapeInfo,
                    T *params,
                    T *result,
                    int *indexes) {
                Nd4jIndex n = shape::length(shapeInfo);
                int totalThreads = gridDim.x * blockDim.x;
                Nd4jIndex i = blockIdx.x * blockDim.x + threadIdx.x;

                /* equal, positive, non-unit increments. */
#pragma unroll
                for (; i < n; i += totalThreads) {
                    result[indexes[i]] = op(dy[indexes[i]], params);
                }
            }

            /**
             * CUDA transform over full shape information.
             * Routes to OpType::execSpecialCuda when the op requires it;
             * otherwise uses the flat strided path when both buffers have a
             * usable element-wise stride and matching order, or falls back to
             * coordinate (ind2sub/getOffset) indexing.
             */
            template<typename OpType>
            static  __inline__ __device__ void transformCuda(
                    T *dy,
                    int *shapeInfo,
                    T *params,
                    T *result,
                    int *resultShapeInfo,
                    int *allocationPointer,
                    T *reductionPointer,
                    UnifiedSharedMemory *manager,
                    int *tadShapeInfo,
                    Nd4jIndex *tadOffsets) {

                if(OpType::requiresSpecial) {
                    OpType::execSpecialCuda(dy,shapeInfo,result,resultShapeInfo,params, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets);
                    return;
                } else {

                    int *xShape = shape::shapeOf(shapeInfo);
                    int *xStride = shape::stride(shapeInfo);
                    char xOrder = shape::order(shapeInfo);
                    char resultOrder = shape::order(resultShapeInfo);
                    int xRank = shape::rank(shapeInfo);
                    int xOffset = shape::offset(shapeInfo);

                    int xElementWiseStride = shape::elementWiseStride(shapeInfo);
                    int resultElementWiseStride = shape::elementWiseStride(resultShapeInfo);
                    int tid = blockIdx.x * blockDim.x + threadIdx.x;

                    // length is block-shared; written by thread 0 only, then
                    // published to the block via __syncthreads()
                    __shared__ int length;
                    if(threadIdx.x == 0)
                        length = shape::length(shapeInfo);
                    __syncthreads();

                    if(xElementWiseStride >= 1 && resultElementWiseStride >= 1 && xOrder == resultOrder) {
                        transformCuda<OpType>(
                                length,
                                dy,
                                xElementWiseStride,
                                params,
                                result,
                                resultElementWiseStride, allocationPointer, reductionPointer, manager);
                    } else {
                        /* equal, positive, non-unit increments. */
                        //long allocSize = sizeof(int) * xRank;
                        //int *xIdx = shape::cuMalloc(manager->getT1ShapeBuffer(), allocSize);
                        int xCoord[MAX_RANK];

#pragma unroll
                        for (Nd4jIndex i = tid; i < length; i += gridDim.x * blockDim.x) {
                            //int *xIdx = shape::ind2sub(xRank, xShape, i, xIdx);
                            shape::ind2sub(xRank,shape::shapeOf(shapeInfo),i, xCoord);
                            Nd4jIndex xOffset2 = shape::getOffset(xOffset, xShape, xStride, xCoord, xRank);
                            // NOTE(review): result offset is computed against
                            // xShape with result strides — assumes same shape
                            // for input and output; confirm against callers.
                            Nd4jIndex resultOffset2 = shape::getOffset(0,xShape,shape::stride(resultShapeInfo),xCoord,xRank);
                            result[resultOffset2] = OpType::op(dy[xOffset2], params);
                        }
                    }
                }
            }

            /**
             * CUDA transform over flat strided buffers:
             * result[i * resultStride] = op(dy[i * incy], params).
             */
            template<typename OpType>
            static  __inline__ __device__ void transformCuda(
                    Nd4jIndex n,
                    T *dy,
                    int incy,
                    T *params,
                    T *result,
                    int resultStride,
                    int *allocationPointer,
                    T *reductionPointer,
                    UnifiedSharedMemory *manager) {
                int totalThreads = gridDim.x * blockDim.x;
                Nd4jIndex i = blockIdx.x * blockDim.x + threadIdx.x;

                if(incy == 1 && resultStride == 1) {
                    /* equal, positive, non-unit increments. */
#pragma unroll
                    for (; i < n; i += totalThreads) {
                        result[i] = OpType::op(dy[i], params);
                    }
                } else {
                    /* equal, positive, non-unit increments. */
#pragma unroll
                    for (; i < n; i += totalThreads) {
                        result[i * resultStride] = OpType::op(dy[i * incy], params);
                    }
                }
            }

            // opNum → OpType dispatch (shape-info variant)
            static  __inline__ __device__ void transformCuda(
                    const int opNum,
                    T *dy,
                    int *shapeInfo,
                    T *params,
                    T *result,
                    int *resultShapeInfo,
                    int *allocationPointer,
                    T *reductionPointer,
                    UnifiedSharedMemory *manager,
                    int *tadShapeInfo,
                    Nd4jIndex *tadOffsets) {
                DISPATCH_BY_OPNUM(transformCuda, PARAMS(dy, shapeInfo, params, result, resultShapeInfo, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets), TRANSFORM_OPS);
            }

            // opNum → OpType dispatch (flat strided variant)
            static  __inline__ __device__ void transformCuda(
                    const int opNum,
                    Nd4jIndex n,
                    T *dy,
                    int incy,
                    T *params,
                    T *result,
                    int resultStride,
                    int *allocationPointer,
                    T *reductionPointer,
                    UnifiedSharedMemory *manager) {
                DISPATCH_BY_OPNUM(transformCuda, PARAMS(n, dy, incy, params, result, resultStride, allocationPointer, reductionPointer, manager), TRANSFORM_OPS);
            }
#endif

            // Host-side opNum dispatchers (flat strided / indexed / shaped)
            static void exec(int opNum, T *dx, int xStride, T *result, int resultStride, T *extraParams, const int n) {
                DISPATCH_BY_OPNUM(exec, PARAMS(dx, xStride, result, resultStride, extraParams, n), TRANSFORM_OPS);
            }

            static void exec(
                    int opNum,
                    T *dx,
                    int *xShapeInfo,
                    T *result,
                    int *resultShapeInfo,
                    T *extraParams,
                    int *indexes,
                    int *resultIndexes, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
                DISPATCH_BY_OPNUM(exec, PARAMS(dx, xShapeInfo, result, resultShapeInfo, extraParams, indexes, resultIndexes, tadShapeInfo, tadOffsets), TRANSFORM_OPS);
            }

            static void exec(
                    int opNum,
                    T *dx,
                    int *xShapeInfo,
                    T *result,
                    int *resultShapeInfo,
                    T *extraParams, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
                DISPATCH_BY_OPNUM(exec, PARAMS(dx, xShapeInfo, result, resultShapeInfo, extraParams, tadShapeInfo, tadOffsets), TRANSFORM_OPS);
            }

            /**
             * Host transform over full shape information.  Special ops are
             * delegated to OpType::execSpecial; otherwise uses the flat
             * strided path when possible, or a raw two-array iterator.
             */
            template<typename OpType>
            static void _CUDA_H exec(
                    T *dx,
                    int *xShapeInfo,
                    T *result,
                    int *resultShapeInfo,
                    T *extraParams, int *tadShapeInfo, Nd4jIndex *tadOffsets) {

                if(OpType::requiresSpecial) {
                    OpType::execSpecial(dx,xShapeInfo,result,resultShapeInfo,extraParams, tadShapeInfo, tadOffsets);
                    return;
                }

                int n = shape::length(xShapeInfo);
                int xElementWiseStride = shape::elementWiseStride(xShapeInfo);
                int resultElementWiseStride = shape::elementWiseStride(resultShapeInfo);

                if(xElementWiseStride >= 1 && resultElementWiseStride >= 1 && shape::order(xShapeInfo) == shape::order(resultShapeInfo)) {
                    exec<OpType>(dx,xElementWiseStride,result,resultElementWiseStride,extraParams,n);
                } else {
                    int shapeIter[MAX_RANK];
                    int coord[MAX_RANK];
                    int dim;
                    int xStridesIter[MAX_RANK];
                    int resultStridesIter[MAX_RANK];
                    int *xShape = shape::shapeOf(xShapeInfo);
                    int *xStride = shape::stride(xShapeInfo);
                    int *resultStride = shape::stride(resultShapeInfo);
                    int rank = shape::rank(xShapeInfo);
                    if(PrepareTwoRawArrayIter<T>(rank,
                                                 xShape,
                                                 dx,
                                                 xStride,
                                                 result,
                                                 resultStride,
                                                 &rank,
                                                 shapeIter,
                                                 &dx,
                                                 xStridesIter,
                                                 &result,
                                                 resultStridesIter) >= 0) {
                        ND4J_RAW_ITER_START(dim, rank, coord, shapeIter); {
                            // Process the innermost dimension: the iterator
                            // advances dx/result one element per step.
                            T *xIter = dx;
                            T *resultIter = result;
                            resultIter[0] = OpType::op(xIter[0], extraParams);
                        }
                        ND4J_RAW_ITER_TWO_NEXT(dim,
                                               rank,
                                               coord,
                                               shapeIter,
                                               dx,
                                               xStridesIter,
                                               result,
                                               resultStridesIter);
                    }
                }
            }

            /**
             * Host transform through explicit input/output index buffers.
             */
            template<typename OpType>
            static void exec(
                    T *dx,
                    int *xShapeInfo,
                    T *result,
                    int *resultShapeInfo,
                    T *extraParams,
                    int *indexes,
                    int *resultIndexes, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
                int n = shape::length(xShapeInfo);
#pragma omp parallel for simd schedule(guided) proc_bind(AFFINITY) default(shared)
                for (Nd4jIndex i = 0; i < n; i++) {
                    result[resultIndexes[i]] = OpType::op(dx[indexes[i]], extraParams);
                }
            }

            /**
             * Host transform over flat strided buffers.  Thread count scales
             * with n / ELEMENT_THRESHOLD, capped at omp_get_max_threads();
             * each thread handles a contiguous span of (n/num_threads)+8
             * elements.
             */
            template<typename OpType>
            static void exec(T *dx,
                             int xStride,
                             T *result,
                             int resultStride,
                             T *extraParams,
                             const int n) {
                int elementsPerThread = n / ELEMENT_THRESHOLD;
                int num_threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
                num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());

                int span = (n / num_threads) + 8;

                if (xStride == 1
                    && resultStride == 1) {
                    // unit-stride fast path
#pragma omp parallel num_threads(num_threads) if (num_threads>1) proc_bind(AFFINITY) default(shared)
                    {
                        int tid = omp_get_thread_num();
                        int start = span * tid;
                        int end = span * (tid + 1);
                        if (end > n) end = n;

#pragma omp simd
                        for (Nd4jIndex i = start; i < end; i++) {
                            result[i] = OpType::op(dx[i], extraParams);
                        }
                    }
                } else {
                    // general strided path
#pragma omp parallel num_threads(num_threads) if (num_threads>1) proc_bind(AFFINITY) default(shared)
                    {
                        int tid = omp_get_thread_num();
                        int start = span * tid;
                        int end = span * (tid + 1);
                        if (end > n) end = n;

#pragma omp simd
                        for (Nd4jIndex i = start; i < end; i++) {
                            result[i*resultStride] = OpType::op(dx[i * xStride], extraParams);
                        }
                    }
                }
            }
        };
    }
}

#ifdef __CUDACC__

/**
 * Device-side driver: opNum-dispatched flat strided transform.
 * Thread 0 placement-news a UnifiedSharedMemory manager into dynamic shared
 * memory; all threads synchronize before using it.
 */
template <typename T>
__device__ void transformGeneric(
        int opNum,
        Nd4jIndex n,
        T *dy,
        int incy,
        T *params,
        T *result,
        int resultStride,
        int *allocationPointer, T *reductionPointer) {

    __shared__ UnifiedSharedMemory *manager;

    if(threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::transform::Transform<T>), sizeof(shape::TAD), 0);
    }
    __syncthreads();

    functions::transform::Transform<T>::transformCuda(
        opNum,
        n,
        dy,
        incy,
        params,
        result,
        resultStride,
        allocationPointer,
        reductionPointer,
        manager);
}

/**
 * Device-side driver: compile-time OpClass, flat strided transform.
 */
template <typename T, typename OpClass>
__device__ void transformSimpleGeneric(
        Nd4jIndex n,
        T *dy,
        int incy,
        T *params,
        T *result,
        int resultStride,
        int *allocationPointer, T *reductionPointer) {

    __shared__ UnifiedSharedMemory *manager;

    if(threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::transform::Transform<T>), sizeof(shape::TAD), 0);
    }
    __syncthreads();

    functions::transform::Transform<T>::template transformCuda<OpClass>(
        n,
        dy,
        incy,
        params,
        result,
        resultStride,
        allocationPointer,
        reductionPointer,
        manager);
}

// Dead code retained from the original (runtime-opNum, shape-info variant):
/*
template <typename T>
__device__ void transformGeneric(
        int opNum,
        T *dy,
        int *xShapeInfo, int xRank,
        T *params,
        T *result, int *resultShapeInfo, int zRank,
        int *allocationPointer, T *reductionPointer) {

    __shared__ UnifiedSharedMemory *manager;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::transform::Transform<T>), sizeof(shape::TAD), xRank);
    }
    __syncthreads();

    functions::transform::Transform<T>::transformCuda(
        opNum,
        dy,
        xShapeInfo,
        params,
        result,
        resultShapeInfo,
        allocationPointer,
        reductionPointer, manager);
}
*/

/**
 * Device-side driver: compile-time OpClass, shape-info transform.
 */
template <typename T, typename OpClass>
__device__ void transformSimpleGeneric(
        T *dy,
        int *xShapeInfo, int xRank,
        T *params,
        T *result, int *resultShapeInfo, int zRank,
        int *allocationPointer, T *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets) {

    __shared__ UnifiedSharedMemory *manager;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::transform::Transform<T>), sizeof(shape::TAD), xRank);
    }
    __syncthreads();

    functions::transform::Transform<T>::template transformCuda<OpClass>(
        dy,
        xShapeInfo,
        params,
        result,
        resultShapeInfo,
        allocationPointer,
        reductionPointer,
        manager, tadShapeInfo, tadOffsets);
}

/**
 * Device-side driver for index-buffer transforms.
 * NOTE(review): the actual dispatch call is commented out below, so this
 * function currently only sets up shared memory and does no work — confirm
 * whether the indexed CUDA path is intentionally disabled.
 */
template <typename T>
__device__ void transformGenericIndexes(
        int opNum,
        T *dy,
        int *xShapeInfo, int xRank,
        T *params,
        T *result,int *indexes,
        int *allocationPointer, T *reductionPointer) {

    __shared__ UnifiedSharedMemory *manager;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::transform::Transform<T>), sizeof(shape::TAD), xRank);
    }
    __syncthreads();

/*
    functions::transform::Transform<T>::transformCuda(
        opNum,
        dy,
        xShapeInfo,
        params,
        result,
        indexes,
        allocationPointer,
        reductionPointer, manager);
*/
}

// __global__ entry points for the indexed transform, one per element type.

extern "C" __global__ void transformDoubleIndexes(
        int opNum,
        double *dy,
        int *shapeInfo, int xRank,
        double *params,
        double *result,int *indexes,
        int *allocationPointer, double *reductionPointer) {

    transformGenericIndexes<double>(
            opNum,
            dy,
            shapeInfo, xRank,
            params,
            result,indexes,
            allocationPointer, reductionPointer);
}

extern "C" __global__ void transformFloatIndexes(
        int opNum,
        float *dy,
        int *shapeInfo, int xRank,
        float *params,
        float *result,int *indexes,
        int *allocationPointer, float *reductionPointer) {

    transformGenericIndexes<float>(
            opNum,
            dy,
            shapeInfo, xRank,
            params,
            result,indexes,
            allocationPointer, reductionPointer);
}

extern "C" __global__ void transformHalfIndexes(
        int opNum,
        float16 *dy,
        int *shapeInfo, int xRank,
        float16 *params,
        float16 *result,int *indexes,
        int *allocationPointer, float16 *reductionPointer) {

    transformGenericIndexes<float16>(
            opNum,
            dy,
            shapeInfo, xRank,
            params,
            result,indexes,
            allocationPointer, reductionPointer);
}

/**
 * Utility kernel: writes a fixed 2D-row shape descriptor into device memory.
 * Only thread 0 of the grid does the write.
 */
extern "C" __global__ void prepareShapeBuffer(int *dimension, int *maxDimension, int *specialPointer, int rows) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid > 0)
        return;

    dimension[0] = 0;
    maxDimension[0] = 1;

    specialPointer[0] = 2;
    specialPointer[1] = rows;
    specialPointer[2] = 1;
    specialPointer[3] = 1;
    specialPointer[4] = 1;
    specialPointer[5] = 0;
    specialPointer[6] = 1;
    specialPointer[7] = 99;
}

/**
 * Debug kernel: prints the target dimension/width of a shape buffer.
 * NOTE(review): device printf only — appears to be diagnostic scaffolding.
 */
extern "C" __global__ void prepareDimensionalShapeBuffer(int *xShapeInfoBuffer, float *extraParams, int *zShapeInfo) {
    // extraParams[0] - number of dimensions
    // extraParams[1] - dimension
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid > 0)
        return;

    int targetDimension = (int) extraParams[1];
    printf("Target dimension: [%i]\n", targetDimension);

    int targetWidth = shape::shapeOf(xShapeInfoBuffer)[targetDimension];
    printf("Target rank: [%i]\n", targetWidth);
}

/**
 * Sets dx to a one-hot vector: 1.0 at position idx, 0.0 elsewhere.
 */
template <typename T>
__device__ void fillIsMaxGeneric(T *dx, long length, long idx) {

    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (long i = tid; i < length; i += blockDim.x * gridDim.x) {
        dx[i] = (i == idx? 1.0 : 0.0);
    }
}

extern "C" __global__ void fillIsMaxFloat(float *dx, long length, long idx) {
    fillIsMaxGeneric<float>(dx, length, idx);
}

extern "C" __global__ void fillIsMaxDouble(double *dx, long length, long idx) {
    fillIsMaxGeneric<double>(dx, length, idx);
}

extern "C" __global__ void fillIsMaxHalf(float16 *dx, long length, long idx) {
    fillIsMaxGeneric<float16>(dx, length, idx);
}

/**
 * Per-TAD one-hot fill: for each TAD r, sets dZ to 1 at the position stored
 * in dX[r] and 0 elsewhere.  One block per TAD (grid-strided over r).
 */
template <typename T>
__device__ void fillDimensionalIsMaxGeneric(T *dX, int *xShapeInfo, T *dZ, int *zShapeInfo, int *tadOnlyShapeInfo, int *dimension, int dimensionLength, Nd4jIndex *tadOffsets) {

    __shared__ int tadLength;
    __shared__ int tadEWS;
    __shared__ int numTads;
    __shared__ int *tadShape;
    __shared__ int *tadStride;
    __shared__ int tadRank;
    __shared__ char tadOrder;

    if (threadIdx.x == 0) {
        tadLength = shape::tadLength(zShapeInfo, dimension, dimensionLength);
        tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
        numTads = shape::length(zShapeInfo) / tadLength;

        tadShape = shape::shapeOf(tadOnlyShapeInfo);
        tadStride = shape::stride(tadOnlyShapeInfo);
        tadRank = shape::rank(tadOnlyShapeInfo);
        tadOrder = shape::order(tadOnlyShapeInfo);
    }
    __syncthreads();

    for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
        // NOTE(review): Nd4jIndex offset truncated to int here — may overflow
        // for very large tensors; confirm against Nd4jIndex width.
        int tadOffsetForBlock = tadOffsets[r];

        int highestElement = (int) dX[r];

        if (dimensionLength > 1 || tadEWS < 1) {
            // coordinate-based addressing when no usable EWS
            int xCoord[MAX_RANK];

            for (int e = threadIdx.x; e < tadLength; e += blockDim.x) {
                shape::ind2subC(tadRank,tadShape, e, xCoord);

                Nd4jIndex xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank);
                dZ[xOffset] = (e == highestElement? (T) 1.0f : (T) 0.0f);
            }
        } else {
            for (int e = threadIdx.x; e < tadLength; e += blockDim.x) {
                // so, we just set dZ[e] for each TAD. Sure, e should be replaced with
                int idx = tadOffsetForBlock + (e * tadEWS);
                dZ[idx] = (e == highestElement?
(T) 1.0f : (T) 0.0f); } } } } extern "C" __global__ void fillDimensionalIsMaxFloat(float *dx, int *xShapeInfo, float *dz, int *zShapeInfo, int *tadOnlyShapeInfo, int *dimension, int dimensionLength, Nd4jIndex *tadOffsets) { fillDimensionalIsMaxGeneric<float>(dx, xShapeInfo, dz, zShapeInfo, tadOnlyShapeInfo, dimension, dimensionLength, tadOffsets); } extern "C" __global__ void fillDimensionalIsMaxDouble(double *dx, int *xShapeInfo, double *dz, int *zShapeInfo, int *tadOnlyShapeInfo, int *dimension, int dimensionLength, Nd4jIndex *tadOffsets) { fillDimensionalIsMaxGeneric<double>(dx, xShapeInfo, dz, zShapeInfo, tadOnlyShapeInfo, dimension, dimensionLength, tadOffsets); } extern "C" __global__ void fillDimensionalIsMaxHalf(float16 *dx, int *xShapeInfo, float16 *dz, int *zShapeInfo, int *tadOnlyShapeInfo, int *dimension, int dimensionLength, Nd4jIndex *tadOffsets) { fillDimensionalIsMaxGeneric<float16>(dx, xShapeInfo, dz, zShapeInfo, tadOnlyShapeInfo, dimension, dimensionLength, tadOffsets); } template <typename T> __device__ void concatKernelGeneric(int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfos, T *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers, int *zTadShape, Nd4jIndex *zOffsets) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int zRank = shape::rank(resultShapeInfo); T **dataT = (T **) data; int **shapeInfoPointers = (int **) inputShapeInfos; int **tadShapes = (int **) tadPointers; Nd4jIndex **tadOffsets = (Nd4jIndex **) offsetPointers; //__shared__ int tDim[1]; __shared__ int baseIdx; __shared__ int yLength; __shared__ char yOrder; __shared__ int yEWS; char zOrder = shape::order(resultShapeInfo); int zEWS = shape::elementWiseStride(resultShapeInfo); int tadEWS = shape::elementWiseStride(zTadShape); int zLength = shape::length(resultShapeInfo); __shared__ int arrOffset; __shared__ int numTads; if (shape::isVector(resultShapeInfo)) { //if (threadIdx.x == 0) //printf("Vector here\n"); if (zEWS >= 
1) { for (int r = blockIdx.x; r < numArrays; r += gridDim.x) { if(shape::isVector(shapeInfoPointers[r]) || shape::order(shapeInfoPointers[r]) == shape::order(resultShapeInfo)) { yLength = shape::length(shapeInfoPointers[r]); yEWS = shape::elementWiseStride(shapeInfoPointers[r]); // FIXME: this is bad __shared__ int baseIdx; if (threadIdx.x == 0) { baseIdx = 0; for (int f = 0; f < r; f++) { baseIdx += shape::length(shapeInfoPointers[f]); } } __syncthreads(); for (int i = threadIdx.x; i < yLength && baseIdx + i < zLength; i += blockDim.x) { result[baseIdx + i * zEWS] = dataT[r][i * yEWS]; } __syncthreads(); } else { if (tid == 0) printf("Non-matched order for vector\n"); } } } else { if (tid == 0) printf("Vector Non-1 zEWS\n"); } return; } // TODO: to be pulled into separate kernel. matrix concatenation for (int r = 0; r < numArrays; r ++) { int *currentShape = shapeInfoPointers[r]; T *currentData = dataT[r]; int *currentTad = tadShapes[r]; Nd4jIndex *currentOffsets = tadOffsets[r]; if (threadIdx.x == 0) { yLength = shape::length(currentTad); yOrder = shape::order(currentTad); yEWS = shape::elementWiseStride(currentTad); numTads = shape::length(currentShape) / yLength; arrOffset = 0; for (int f = 0; f < r; f++) { arrOffset += shape::length(tadShapes[f]); } } __syncthreads(); if (yLength == 1) { // edge case, each thread will handle it's own tad then for (int j = tid; j < numTads; j += blockDim.x * gridDim.x) { Nd4jIndex inputOffset = currentOffsets[j]; Nd4jIndex resultOffset = zOffsets[j]; T *dataTAD = currentData + inputOffset; T *resultTAD = result + resultOffset; int sub[MAX_RANK]; if (shape::order(zTadShape) == 'f') { shape::ind2sub(shape::rank(zTadShape),shape::shapeOf(zTadShape),arrOffset, sub); } else { shape::ind2subC(shape::rank(zTadShape),shape::shapeOf(zTadShape),arrOffset, sub); } Nd4jIndex baseOffset = shape::getOffset(0,shape::shapeOf(zTadShape),shape::stride(zTadShape), sub, shape::rank(zTadShape)); resultTAD += baseOffset; int yRank = 
shape::rank(currentTad); int tadRank = shape::rank(zTadShape); shape::ind2subC(yRank, shape::shapeOf(currentTad), 0,sub); Nd4jIndex yOffset = shape::getOffset(0, shape::shapeOf(currentTad), shape::stride(currentTad), sub, yRank); resultOffset = shape::getOffset(0, shape::shapeOf(zTadShape), shape::stride(zTadShape), sub, tadRank); resultTAD[resultOffset] = dataTAD[yOffset]; } } else { for (int j = blockIdx.x; j < numTads; j += gridDim.x) { Nd4jIndex inputOffset = currentOffsets[j]; Nd4jIndex resultOffset = zOffsets[j]; T *dataTAD = currentData + inputOffset; T *resultTAD = result + resultOffset; int sub[MAX_RANK]; shape::ind2subC(shape::rank(zTadShape),shape::shapeOf(zTadShape),arrOffset, sub); Nd4jIndex baseOffset = shape::getOffset(0,shape::shapeOf(zTadShape),shape::stride(zTadShape), sub, shape::rank(zTadShape)); resultTAD += baseOffset; if (zOrder == yOrder && yEWS > 0 && tadEWS > 0) { //if (threadIdx.x == 0) // printf("Branch A\n"); for (int i = threadIdx.x; i < yLength; i += blockDim.x) { resultTAD[i * tadEWS] = dataTAD[i * yEWS]; } } else { if(tadEWS > 0 && shape::order(resultShapeInfo) == shape::order(currentTad)) { //if (threadIdx.x == 0) // printf("Branch B\n"); if (threadIdx.x == 0) { baseIdx = 0; for (int f = 0; f < r; f++) { baseIdx += shape::length(shapeInfoPointers[f]); } //printf("R: %i; baseIdx: %i;\n", baseIdx); } __syncthreads(); if (numTads == 1) { for(int k = threadIdx.x; k < yLength; k+= blockDim.x) { resultTAD[baseIdx + k * tadEWS] = dataTAD[k]; } } else { int yIdx[MAX_RANK]; int yRank = shape::rank(currentTad); for (int i = threadIdx.x; i < yLength; i+= blockDim.x) { shape::ind2subC(yRank, shape::shapeOf(currentTad), i, yIdx); int yOffset = shape::getOffset(0, shape::shapeOf(currentTad), shape::stride(currentTad), yIdx, yRank); resultTAD[baseIdx + i * tadEWS] = dataTAD[yOffset]; } } __syncthreads(); } else { //if (threadIdx.x == 0) // printf("Branch C; yLength: %i;\n", yLength); int yIdx[MAX_RANK]; int yRank = shape::rank(currentTad); int 
tadRank = shape::rank(zTadShape); for (int i = threadIdx.x; i < yLength; i+= blockDim.x) { shape::ind2subC(yRank, shape::shapeOf(currentTad), i,yIdx); int yOffset = shape::getOffset(0, shape::shapeOf(currentTad), shape::stride(currentTad), yIdx, yRank); int resultOffset = shape::getOffset(0, shape::shapeOf(zTadShape), shape::stride(zTadShape), yIdx, tadRank); resultTAD[resultOffset] = dataTAD[yOffset]; } } } __syncthreads(); } } __syncthreads(); } } template <typename T> __device__ void concatKernelScalarGeneric(int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfos, T *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { int tid = blockIdx.x * blockDim.x + threadIdx.x; T **input = (T **) data; for (int i = tid; i < numArrays; i += blockDim.x * gridDim.x) { result[i] = input[i][0]; } } extern "C" __global__ void concatKernelScalarFloat(int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { concatKernelScalarGeneric<float>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers); } extern "C" __global__ void concatKernelScalarHalf(int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float16 *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { concatKernelScalarGeneric<float16>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers); } extern "C" __global__ void concatKernelScalarDouble(int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, double *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) { concatKernelScalarGeneric<double>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers); } template <typename T> __device__ void concatKernelHStackGeneric(int 
dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfos, T *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
    // we expect all data coming in as vectors, and result as 2D matrix
    // the only significant difference here is the fact that input lengths might be different
    int **inputShapes = (int**) inputShapeInfos;
    T **input = (T **) data;

    __shared__ int inputEWS;
    __shared__ int resultEWS;
    __shared__ int inputLength;

    if (threadIdx.x == 0) {
        resultEWS = shape::elementWiseStride(resultShapeInfo);
    }
    __syncthreads();

    // one block per input vector; baseIdx is the running element offset of
    // vector r inside the flattened result (sum of preceding input lengths)
    for (int r = blockIdx.x; r < numArrays; r += gridDim.x) {
        __shared__ int baseIdx;
        if (threadIdx.x == 0) {
            baseIdx = 0;
            for (int f = 0; f < r; f++) {
                baseIdx += shape::length(inputShapes[f]);
            }
        }
        __syncthreads();

        T *inputData = (T *) input[r];

        if (threadIdx.x == 0) {
            inputEWS = shape::elementWiseStride(inputShapes[r]);
            inputLength = shape::length(inputShapes[r]);
        }
        __syncthreads();

        for (int i = threadIdx.x; i < inputLength; i += blockDim.x) {
            result[baseIdx + i * resultEWS] = inputData[i * inputEWS];
        }
        __syncthreads();
    }
}

/* float entry point for hstack concat */
extern "C" __global__ void concatKernelHStackFloat(int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
    concatKernelHStackGeneric<float>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers);
}

/* double entry point for hstack concat */
extern "C" __global__ void concatKernelHStackDouble(int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, double *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
    concatKernelHStackGeneric<double>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers);
}

/* float16 entry point; the parameter list continues in the next chunk */
extern "C" __global__ void concatKernelHStackHalf(int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float16 *result, int *resultShapeInfo, Nd4jPointer
*tadPointers, Nd4jPointer *offsetPointers) {
    concatKernelHStackGeneric<float16>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers);
}

/* vstack concat: equal-length vectors stacked as rows of a 2D result */
template <typename T>
__device__ void concatKernelVStackGeneric(int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfos, T *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
    /*
     this is special case for concat: we group bunch of vectors into 2D matrix
     also: we expect each inputShapeInfo to have EWS, be a vector, and have equal size
     */
    int **inputShapes = (int**) inputShapeInfos;
    T **input = (T **) data;

    __shared__ int inputEWS;
    __shared__ int resultEWS;
    __shared__ int inputLength;

    if (threadIdx.x == 0) {
        // all inputs are assumed equal-sized, so shape of input[0] suffices
        inputLength = shape::length(inputShapes[0]);
        inputEWS = shape::elementWiseStride(inputShapes[0]);
        resultEWS = shape::elementWiseStride(resultShapeInfo);
    }
    __syncthreads();

    // one block per input vector; row r starts at r * inputLength * resultEWS
    for (int r = blockIdx.x; r < numArrays; r += gridDim.x) {
        int resultOffset = r * inputLength * resultEWS;
        T *inputData = (T *) input[r];

        for (int i = threadIdx.x; i < inputLength; i += blockDim.x) {
            result[resultOffset + i * resultEWS] = inputData[i * inputEWS];
        }
    }
}

/* float entry point for vstack concat */
extern "C" __global__ void concatKernelVStackFloat(int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
    concatKernelVStackGeneric<float>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers);
}

/* double entry point for vstack concat */
extern "C" __global__ void concatKernelVStackDouble(int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, double *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
    concatKernelVStackGeneric<double>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers);
}

/* float16 entry point; the parameter list continues in the next chunk */
extern "C" __global__ void concatKernelVStackHalf(int dimension, int numArrays, Nd4jPointer
*data, Nd4jPointer *inputShapeInfo, float16 *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
    concatKernelVStackGeneric<float16>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers);
}

/* double entry point for general concat (implementation begins before this chunk) */
extern "C" __global__ void concatKernelDouble(int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, double *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers, int *zTadShape, Nd4jIndex *zOffsets) {
    concatKernelGeneric<double>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers, zTadShape, zOffsets);
}

/* float entry point for general concat */
extern "C" __global__ void concatKernelFloat(int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers, int *zTadShape, Nd4jIndex *zOffsets) {
    concatKernelGeneric<float>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers, zTadShape, zOffsets);
}

/* float16 entry point for general concat */
extern "C" __global__ void concatKernelHalf(int dimension, int numArrays, Nd4jPointer *data, Nd4jPointer *inputShapeInfo, float16 *result, int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers, int *zTadShape, Nd4jIndex *zOffsets) {
    concatKernelGeneric<float16>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers, zTadShape, zOffsets);
}

/* Copies n selected TADs (rows) of x, chosen via indexes[], into consecutive
   TADs of z; one block handles one selected row (body continues in next chunk) */
template <typename T>
__device__ void pullRowsKernelGeneric(T *x, int *xShapeInfo, T *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, Nd4jIndex *tadOffsets, int *zTadShapeInfo, Nd4jIndex *zTadOffsets) {
    int xEWS = shape::elementWiseStride(tadShapeInfo);
    int zEWS = shape::elementWiseStride(zTadShapeInfo);
    int tadLength = shape::length(tadShapeInfo);

    for (int idx = blockIdx.x; idx < n; idx += gridDim.x) {
        int tadOffsetForBlock = tadOffsets[indexes[idx]];
        T *rX = x + tadOffsetForBlock;
        T *rZ = z +
zTadOffsets[idx];
        for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
            rZ[i * zEWS] = rX[i * xEWS];
        }
    }
}

/* float16 entry point for pullRows */
extern "C" __global__ void pullRowsKernelHalf( float16 *x, int *xShapeInfo, float16 *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, Nd4jIndex *tadOffsets, int *zTadShapeInfo, Nd4jIndex *zTadOffsets) {
    pullRowsKernelGeneric<float16>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
}

/* float entry point for pullRows */
extern "C" __global__ void pullRowsKernelFloat(float *x, int *xShapeInfo, float *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, Nd4jIndex *tadOffsets, int *zTadShapeInfo, Nd4jIndex *zTadOffsets) {
    pullRowsKernelGeneric<float>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
}

/* double entry point for pullRows */
extern "C" __global__ void pullRowsKernelDouble(double *x, int *xShapeInfo, double *z, int *zShapeInfo, int n, int *indexes, int *tadShapeInfo, Nd4jIndex *tadOffsets, int *zTadShapeInfo, Nd4jIndex *zTadOffsets) {
    pullRowsKernelGeneric<double>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
}

/*
 * Element-wise conversion dx[i] -> FP16 dz[i].
 *
 * BUG FIX: the original computed the starting index as
 *   threadIdx.x + blockIdx.x * gridDim.x
 * which is not a valid grid-stride start: whenever blockDim.x != gridDim.x
 * some elements are skipped and others written twice. Canonical form is
 *   threadIdx.x + blockIdx.x * blockDim.x
 * (the stride blockDim.x * gridDim.x below was already correct).
 */
template <typename T>
__device__ void convertToHalfGeneric(T *dx, int n, half *dz) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    for (Nd4jIndex i = tid; i < n; i += blockDim.x * gridDim.x ) {
        dz[i] = __float2half((float) dx[i]);
    }
}

extern "C" __global__ void kernelFloatsToHalfs(float *dx, int n, half *dz) {
    convertToHalfGeneric<float>(dx, n, dz);
}

extern "C" __global__ void kernelDoublesToHalfs(double *dx, int n, half *dz) {
    convertToHalfGeneric<double>(dx, n, dz);
}

/* Element-wise conversion FP16 dx[i] -> T dz[i]; same start-index fix as above. */
template <typename T>
__device__ void convertHalfsToGeneric(half *dx, int n, T *dz) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    for (Nd4jIndex i = tid; i < n; i += blockDim.x * gridDim.x ) {
        dz[i] = (T) __half2float(dx[i]);
    }
}

extern "C" __global__ void kernelHalfsToDoubles(half *dx, int n, double *dz) {
    convertHalfsToGeneric<double>(dx, n, dz);
}

/* declaration continues in the next chunk */
extern "C" __global__ void
kernelHalfsToFloats(half *dx, int n, float *dz) {
    convertHalfsToGeneric<float>(dx, n, dz);
}

/**
 * This kernel accumulates X arrays, and stores result into Z
 *
 * @tparam T
 * @param x
 * @param z
 * @param n
 * @param length
 */
template<typename T>
__device__ void accumulateKernelGeneric(T **x, T *z, int n, const Nd4jIndex length) {
    // dynamic shared memory buffer, one accumulator slot per thread
    __shared__ T *shmem;
    if (threadIdx.x == 0) {
        extern __shared__ unsigned char sharedmem[];
        shmem = (T *) sharedmem;
    }
    __syncthreads();

    // each block sums its own blockDim.x-wide window [r, r + blockDim.x)
    for (int r = blockDim.x * blockIdx.x; r < length; r += blockDim.x * gridDim.x) {
        shmem[threadIdx.x] = 0.0f;

        Nd4jIndex baseIdx = r;

        // aggregation step, we roll over all arrays
        for (int ar = 0; ar < n; ar++) {
            T *cdata = (T *) x[ar];
            cdata += baseIdx;

            if (baseIdx + threadIdx.x < length)
                shmem[threadIdx.x] += cdata[threadIdx.x];
        }

        T *wdata = z + baseIdx;

        // saving accumulated values
        if (baseIdx + threadIdx.x < length) {
            wdata[threadIdx.x] = shmem[threadIdx.x];
        }
    }
}

extern "C" __global__ void accumulateKernelHalf(float16 **dx, float16 *dz, int n, Nd4jIndex length) {
    accumulateKernelGeneric<float16>(dx, dz, n, length);
}

extern "C" __global__ void accumulateKernelFloat(float **dx, float *dz, int n, Nd4jIndex length) {
    accumulateKernelGeneric<float>(dx, dz, n, length);
}

extern "C" __global__ void accumulateKernelDouble(double **dx, double *dz, int n, Nd4jIndex length) {
    accumulateKernelGeneric<double>(dx, dz, n, length);
}

/* Averages n arrays element-wise into dz (and optionally back into all inputs
   when propagate is set); body continues in the next chunk */
template <typename T>
__device__ void averagingKernelGeneric(T **dx, T *dz, int n, Nd4jIndex length, bool propagate) {
    __shared__ T *shmem;
    if (threadIdx.x == 0) {
        extern __shared__ unsigned char sharedmem[];
        shmem = (T *) sharedmem;
    }
    __syncthreads();

    // each block cycles over it's own part of arrays
    for (int r = blockDim.x * blockIdx.x; r < length; r += blockDim.x * gridDim.x) {
        shmem[threadIdx.x] = (T) 0.0f;

        Nd4jIndex baseIdx = r;

        // aggregation step, we roll over all arrays
        for (int ar = 0; ar < n; ar++) {
            T *cdata = (T *) dx[ar];
            cdata += baseIdx;

            if (baseIdx + threadIdx.x < length)
shmem[threadIdx.x] += cdata[threadIdx.x];
        }

        // average data in shared memory
        if (baseIdx + threadIdx.x < length)
            shmem[threadIdx.x] /= n;

        // div step & write out step
        if (dz != nullptr) {
            T *wdata = dz + baseIdx;
            if (baseIdx + threadIdx.x < length) {
                wdata[threadIdx.x] = shmem[threadIdx.x];
            }
        }

        // propagate averaged data to all arrays
        if (propagate)
            for (int ar = 0; ar < n; ar++) {
                T *cdata = (T *) dx[ar];
                cdata += baseIdx;

                if (baseIdx + threadIdx.x < length)
                    cdata[threadIdx.x] = shmem[threadIdx.x];
            }
    }
/* FIX: the chunk as received closed only the propagate-loop and the outer
   grid loop; the function's own closing brace was missing, leaving the body
   unbalanced. Restored here. */
}

extern "C" __global__ void averagingKernelHalf(float16 **dx, float16 *dz, int n, Nd4jIndex length, bool propagate) {
    averagingKernelGeneric<float16>(dx, dz, n, length, propagate);
}

extern "C" __global__ void averagingKernelFloat(float **dx, float *dz, int n, Nd4jIndex length, bool propagate) {
    averagingKernelGeneric<float>(dx, dz, n, length, propagate);
}

extern "C" __global__ void averagingKernelDouble(double **dx, double *dz, int n, Nd4jIndex length, bool propagate) {
    averagingKernelGeneric<double>(dx, dz, n, length, propagate);
}

/* Splits x into its TADs and copies TAD r into the standalone buffer
   targets[r]; body continues in the next chunk */
template<typename T>
__device__ void tearKernelGeneric(T *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
    __shared__ Nd4jIndex tadLength;
    __shared__ int tadEWS;
    __shared__ int zEWS;
    __shared__ int tadRank;
    __shared__ Nd4jIndex numTads;
    __shared__ int zRank;
    __shared__ int *tadShape;
    __shared__ int *tadStride;
    __shared__ int *zShape;
    __shared__ int *zStride;

    if (threadIdx.x == 0) {
        tadLength = shape::length(tadShapeInfo);
        tadEWS = shape::elementWiseStride(tadShapeInfo);
        zEWS = shape::elementWiseStride(zShapeInfo);
        tadRank = shape::rank(tadShapeInfo);
        numTads = shape::length(xShapeInfo) / tadLength;
        zRank = shape::rank(zShapeInfo);
        tadShape = shape::shapeOf(tadShapeInfo);
        tadStride = shape::stride(tadShapeInfo);
        zShape = shape::shapeOf(zShapeInfo);
        zStride = shape::stride(zShapeInfo);
    }
    __syncthreads();

    for (Nd4jIndex r = blockIdx.x; r < numTads; r += gridDim.x) {
        T *z = (T *)
targets[r];
        T *s = x + tadOffsets[r];

        if (zEWS > 0 && tadEWS > 0) {
            // fast path: both sides have a usable element-wise stride
            for (Nd4jIndex i = threadIdx.x; i < tadLength; i += blockDim.x) {
                z[i * zEWS] = s[i * tadEWS];
            }
        } else {
            // generic path: per-element coordinate translation
            int xCoord[MAX_RANK];
            int zCoord[MAX_RANK];

            for (Nd4jIndex j = 0; j < tadLength; j++) {
                shape::ind2sub(tadRank, tadShape, j, xCoord);
                shape::ind2sub(zRank, zShape, j, zCoord);

                Nd4jIndex xOffset = shape::getOffset(0, tadShape, tadStride, xCoord, tadRank);
                Nd4jIndex zOffset = shape::getOffset(0, zShape, zStride, zCoord, zRank);

                z[zOffset] = s[xOffset];
            }
        }
    }
}

extern "C" __global__ void tearKernelDouble(double *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
    tearKernelGeneric<double>(x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
}

extern "C" __global__ void tearKernelFloat(float *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
    tearKernelGeneric<float>(x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
}

extern "C" __global__ void tearKernelHalf(float16 *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
    tearKernelGeneric<float16>(x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
}

/* Swaps TADs between positions according to shuffleMap for each of the N
   array pairs dX[f]/dZ[f]; body continues in the next chunk */
template<typename T>
__device__ void shuffleKernelGeneric(T **dX, int **xShapeInfo, T **dZ, int **zShapeInfo, int N, int *shuffleMap, int **tadOnlyShapeInfo, Nd4jIndex **tadOffsets) {
    // we assume that shuffle map for each X contains pair TAD Y
    __shared__ int tadLength;
    __shared__ int tadEWS;
    __shared__ int tadRank;
    __shared__ int numTads;
    __shared__ int *tadShape;
    __shared__ int *tadStride;
    __shared__ int yStride;

    for (int f = 0; f < N; f++) {
        T *x = (T *) dX[f];
        T *z = (T *) dZ[f];

        __syncthreads();
        if (threadIdx.x == 0) {
            tadLength = shape::length(tadOnlyShapeInfo[f]);
            tadEWS = shape::elementWiseStride(tadOnlyShapeInfo[f]);
            tadRank = shape::rank(tadOnlyShapeInfo[f]);
            numTads = shape::length(xShapeInfo[f]) / tadLength;
tadShape = shape::shapeOf(tadOnlyShapeInfo[f]);
            tadStride = shape::stride(tadOnlyShapeInfo[f]);
        }
        __syncthreads();

        // we roll over the pairs of TADs, thus limit is numTads / 2
        // BUG FIX: a block-stride loop over TADs must advance by gridDim.x
        // (number of blocks), not blockDim.x (threads per block); the original
        // stride caused TADs to be skipped or processed by multiple blocks
        // whenever gridDim.x != blockDim.x.
        for (Nd4jIndex r = blockIdx.x; r < numTads; r += gridDim.x) {
            if (shuffleMap[r] < 0)
                continue;

            Nd4jIndex oldOffset = tadOffsets[f][r];
            Nd4jIndex newOffset = tadOffsets[f][shuffleMap[r]];

            T *rX = x + oldOffset;
            T *rY = x + newOffset;

            T *zX = z + oldOffset;
            T *zY = z + newOffset;

            // so we're going to change TAD[oldOffset] with TAD[newOffset]
            if (tadEWS == 1) {
                for (Nd4jIndex i = threadIdx.x; i < tadLength; i += blockDim.x) {
                    T oldX = rX[i];
                    rX[i] = rY[i];
                    zY[i] = oldX;
                }
            } else {
                // well have to iterate using ind2sub
                int xCoord[MAX_RANK];
                int yCoord[MAX_RANK];

                for (Nd4jIndex i = threadIdx.x; i < tadLength; i += blockDim.x) {
                    shape::ind2subC(tadRank, tadShape, i, xCoord);
                    shape::ind2subC(tadRank, tadShape, i, yCoord);

                    Nd4jIndex xOffset = shape::getOffset(oldOffset, tadShape, tadStride, xCoord, tadRank);
                    Nd4jIndex yOffset = shape::getOffset(newOffset, tadShape, tadStride, yCoord, tadRank);

                    T oldX = x[xOffset];
                    z[xOffset] = x[yOffset];
                    z[yOffset] = oldX;
                }
            }
        }
    }
}

extern "C" __global__ void shuffleKernelDouble(double **x, int **xShapeInfo, double **z, int **zShapeInfo, int N, int *shuffleMap, int **tadOnlyShapeInfo, Nd4jIndex **tadOffsets) {
    shuffleKernelGeneric<double>(x, xShapeInfo, z, zShapeInfo, N, shuffleMap, tadOnlyShapeInfo, tadOffsets);
}

extern "C" __global__ void shuffleKernelFloat(float **x, int **xShapeInfo, float **z, int **zShapeInfo, int N, int *shuffleMap, int **tadOnlyShapeInfo, Nd4jIndex **tadOffsets) {
    shuffleKernelGeneric<float>(x, xShapeInfo, z, zShapeInfo, N, shuffleMap, tadOnlyShapeInfo, tadOffsets);
}

extern "C" __global__ void shuffleKernelHalf(float16 **x, int **xShapeInfo, float16 **z, int **zShapeInfo, int N, int *shuffleMap, int **tadOnlyShapeInfo,
tadOffsets);
}

// transform strided
// (DISPATCH_KERNEL_SIMPLE expands to one __global__ kernel per op in
//  TRANSFORM_OPS for the given element type)
DISPATCH_KERNEL_SIMPLE(transformStrided_, transformSimpleGeneric, float, INPUT(Nd4jIndex n, float *x, int xStride, float *extraParams, float *z, int zStride, int *allocationPointer, float *reductionPointer), PARAMS(n, x, xStride, extraParams, z, zStride, allocationPointer, reductionPointer), OPS_A(TRANSFORM_OPS))
DISPATCH_KERNEL_SIMPLE(transformStrided_, transformSimpleGeneric, double, INPUT(Nd4jIndex n, double *x, int xStride, double *extraParams, double *z, int zStride, int *allocationPointer, double *reductionPointer), PARAMS(n, x, xStride, extraParams, z, zStride, allocationPointer, reductionPointer), OPS_A(TRANSFORM_OPS))
DISPATCH_KERNEL_SIMPLE(transformStrided_, transformSimpleGeneric, float16, INPUT(Nd4jIndex n, float16 *x, int xStride, float16 *extraParams, float16 *z, int zStride, int *allocationPointer, float16 *reductionPointer), PARAMS(n, x, xStride, extraParams, z, zStride, allocationPointer, reductionPointer), OPS_A(TRANSFORM_OPS))

// transform shaped
DISPATCH_KERNEL_SIMPLE(transformShaped_, transformSimpleGeneric, float, INPUT(float *x, int *xShape, int xRank, float *extraParams, float *z, int *zShape, int zRank, int *allocationPointer, float *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(TRANSFORM_OPS))
DISPATCH_KERNEL_SIMPLE(transformShaped_, transformSimpleGeneric, double, INPUT(double *x, int *xShape, int xRank, double *extraParams, double *z, int *zShape, int zRank, int *allocationPointer, double *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(TRANSFORM_OPS))
DISPATCH_KERNEL_SIMPLE(transformShaped_, transformSimpleGeneric, float16, INPUT(float16 *x, int *xShape, int xRank, float16 *extraParams, float16 *z, int *zShape, int zRank, int *allocationPointer, float16 *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(TRANSFORM_OPS))

#endif
#endif /* TRANSFORM_H_ */
opencl_encfs_fmt_plug.c
/* * Modified by Dhiru Kholia <dhiru at openwall.com> for Keychain format. * * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_encfs; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_encfs); #else #include <string.h> #include <openssl/opensslv.h> #include <openssl/crypto.h> #include <openssl/ssl.h> #include <openssl/bio.h> #include <openssl/evp.h> #include <openssl/hmac.h> #include <openssl/engine.h> #include "common-opencl.h" #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "formats.h" #include "common.h" #include "options.h" #include "misc.h" #define OUTLEN (32 + 16) #include "opencl_pbkdf2_hmac_sha1.h" #define FORMAT_LABEL "encfs-opencl" #define FORMAT_NAME "EncFS" #define OCL_ALGORITHM_NAME "PBKDF2-SHA1 OpenCL" #define CPU_ALGORITHM_NAME " AES/Blowfish" #define ALGORITHM_NAME OCL_ALGORITHM_NAME CPU_ALGORITHM_NAME #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BINARY_SIZE 0 #define PLAINTEXT_LENGTH 64 #define SALT_SIZE sizeof(encfs_cpu_salt) #define BINARY_ALIGN MEM_ALIGN_WORD #define SALT_ALIGN MEM_ALIGN_WORD #define uint8_t unsigned char #define uint16_t unsigned short #define uint32_t unsigned int #define MIN(a, b) (((a) > (b)) ? (b) : (a)) #define MAX(a, b) (((a) > (b)) ? 
(a) : (b)) /* This handles all widths */ #define GETPOS(i, index) (((index) % v_width) * 4 + ((i) & ~3U) * v_width + (((i) & 3) ^ 3) + ((index) / v_width) * 64 * v_width) static int *cracked; static int any_cracked; static const int MAX_KEYLENGTH = 32; // in bytes (256 bit) static const int MAX_IVLENGTH = 16; static const int KEY_CHECKSUM_BYTES = 4; typedef struct { unsigned int keySize; unsigned int iterations; unsigned int cipher; unsigned int saltLen; unsigned char salt[40]; unsigned int dataLen; unsigned char data[128]; unsigned int ivLength; const EVP_CIPHER *streamCipher; const EVP_CIPHER *blockCipher; } encfs_cpu_salt; static encfs_cpu_salt *cur_salt; static struct fmt_tests tests[] = { {"$encfs$192*181474*0*20*f1c413d9a20f7fdbc068c5a41524137a6e3fb231*44*9c0d4e2b990fac0fd78d62c3d2661272efa7d6c1744ee836a702a11525958f5f557b7a973aaad2fd14387b4f", "openwall"}, {NULL} }; static size_t key_buf_size; static unsigned int *inbuffer; static pbkdf2_out *output; static pbkdf2_salt currentsalt; static cl_mem mem_in, mem_out, mem_salt, mem_state; static unsigned int v_width = 1; /* Vector width of kernel */ static size_t key_buf_size; static int new_keys; static cl_kernel pbkdf2_init, pbkdf2_loop, pbkdf2_final; #define cracked_size (sizeof(*cracked) * global_work_size * v_width) /* * HASH_LOOPS is ideally made by factors of (iteration count - 1) and should * be chosen for a kernel duration of not more than 200 ms */ #define HASH_LOOPS (3 * 251) #define ITERATIONS 181474 /* Just for auto tune */ #define LOOP_COUNT (((currentsalt.iterations - 1 + HASH_LOOPS - 1)) / HASH_LOOPS) #define OCL_CONFIG "encfs" #define STEP 0 #define SEED 128 static const char * warn[] = { "P xfer: " , ", init: " , ", loop: " , ", final: ", ", res xfer: " }; static int split_events[] = { 2, -1, -1 }; //This file contains auto-tuning routine(s). Has to be included after formats definitions. 
#include "opencl-autotune.h"
#include "memdbg.h"

/* ------- Helper functions ------- */
/* Largest LWS the three PBKDF2 kernels all support on this device. */
static size_t get_task_max_work_group_size()
{
	size_t s;

	s = autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_init);
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_loop));
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_final));
	return s;
}

/* 0 = no fixed upper bound; autotune decides. */
static size_t get_task_max_size()
{
	return 0;
}

/* Default LWS: small for CPU devices (8 on Intel, else 1), 64 on GPUs. */
static size_t get_default_workgroup()
{
	if (cpu(device_info[gpu_id]))
		return get_platform_vendor_id(platform_id) == DEV_INTEL ?
			8 : 1;
	else
		return 64;
}

#if 0
struct fmt_main *me;
#endif

/* Allocates host buffers and device cl_mem objects for gws work items
   (scaled by the kernel vector width) and binds the static kernel args. */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	gws *= v_width;
	key_buf_size = 64 * gws;

	/// Allocate memory
	inbuffer = mem_calloc(key_buf_size);
	output = mem_alloc(sizeof(pbkdf2_out) * gws);
	cracked = mem_calloc(cracked_size);

	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, key_buf_size, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem in");
	mem_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, sizeof(pbkdf2_salt), NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem setting");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, sizeof(pbkdf2_out) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem out");
	mem_state = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(pbkdf2_state) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem_state");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 1, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_loop, 0, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 0, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");
}

/* Releases the device buffers and host allocations made in create_clobj(). */
static void release_clobj(void)
{
	HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
	HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem setting");
	HANDLE_CLERROR(clReleaseMemObject(mem_state), "Release mem state");
	HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

	MEM_FREE(inbuffer);
	MEM_FREE(output);
	MEM_FREE(cracked);
}

/* Format teardown: buffers, kernels and program. */
static void done(void)
{
	release_clobj();

	HANDLE_CLERROR(clReleaseKernel(pbkdf2_init), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(pbkdf2_loop), "Release kernel");
	HANDLE_CLERROR(clReleaseKernel(pbkdf2_final), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
}

/* Derives the per-block IV: seeds the stored IV material (key[keySize..])
   with the 64-bit seed through HMAC-SHA1 keyed by the master key.
   NOTE(review): the second all-zero HMAC_Init_ex() restarts the digest with
   the same key — presumably mirroring the EncFS reference code; confirm
   against upstream before changing. */
static void setIVec( unsigned char *ivec, uint64_t seed,
	unsigned char *key)
{
	unsigned char md[EVP_MAX_MD_SIZE];
	unsigned int mdLen = EVP_MAX_MD_SIZE;
	int i;
	HMAC_CTX mac_ctx;

	memcpy( ivec, &key[cur_salt->keySize], cur_salt->ivLength );

	for(i=0; i<8; ++i) {
		md[i] = (unsigned char)(seed & 0xff);
		seed >>= 8;
	}

	// combine ivec and seed with HMAC
	HMAC_CTX_init(&mac_ctx);
	HMAC_Init_ex( &mac_ctx, key, cur_salt->keySize, EVP_sha1(), 0 );
	HMAC_Init_ex( &mac_ctx, 0, 0, 0, 0 );
	HMAC_Update( &mac_ctx, ivec, cur_salt->ivLength );
	HMAC_Update( &mac_ctx, md, 8 );
	HMAC_Final( &mac_ctx, md, &mdLen );
	HMAC_CTX_cleanup(&mac_ctx);

	memcpy( ivec, md, cur_salt->ivLength );
}

/* Undoes the EncFS byte-shuffle: each byte had the previous byte XORed in. */
static void unshuffleBytes(unsigned char *buf, int size)
{
	int i;
	for(i=size-1; i; --i)
		buf[i] ^= buf[i-1];
}

static int MIN_(int a, int b)
{
	return (a < b) ?
a : b;
}

/* Reverses buf in place, 64 bytes at a time via a scratch buffer. */
static void flipBytes(unsigned char *buf, int size)
{
	unsigned char revBuf[64];

	int bytesLeft = size;
	int i;
	while(bytesLeft) {
		int toFlip = MIN_( sizeof(revBuf), bytesLeft );
		for(i=0; i<toFlip; ++i)
			revBuf[i] = buf[toFlip - (i+1)];
		memcpy( buf, revBuf, toFlip );
		bytesLeft -= toFlip;
		buf += toFlip;
	}
	memset(revBuf, 0, sizeof(revBuf));
}

/* HMAC-SHA1 over data (optionally mixed with the chained IV), folded down to
   a 64-bit value. Continues past this chunk. */
static uint64_t _checksum_64(unsigned char *key,
		const unsigned char *data, int dataLen, uint64_t *chainedIV)
{
	unsigned char md[EVP_MAX_MD_SIZE];
	unsigned int mdLen = EVP_MAX_MD_SIZE;
	int i;
	unsigned char h[8] = {0,0,0,0,0,0,0,0};
	uint64_t value;
	HMAC_CTX mac_ctx;

	HMAC_CTX_init(&mac_ctx);
	HMAC_Init_ex( &mac_ctx, key, cur_salt->keySize, EVP_sha1(), 0 );
	HMAC_Init_ex( &mac_ctx, 0, 0, 0, 0 );
	HMAC_Update( &mac_ctx, data, dataLen );
	if(chainedIV) {
		// toss in the chained IV as well
		uint64_t tmp = *chainedIV;
		/* NOTE(review): this local h[] intentionally shadows the outer
		   accumulator h[] — it only serializes the chained IV; matches
		   the EncFS reference implementation, but confirm if refactoring. */
		unsigned char h[8];
		for(i=0; i<8; ++i) {
			h[i] = tmp & 0xff;
			tmp >>= 8;
		}
		HMAC_Update( &mac_ctx, h, 8 );
	}
	HMAC_Final( &mac_ctx, md, &mdLen );
	HMAC_CTX_cleanup(&mac_ctx);

	// chop this down to a 64bit value..
	/* XOR-fold all but the last digest byte into 8 accumulator bytes */
	for(i=0; i < (mdLen - 1); ++i)
		h[i%8] ^= (unsigned char)(md[i]);

	value = (uint64_t)h[0];
	for(i=1; i<8; ++i)
		value = (value << 8) | (uint64_t)h[i];

	return value;
}

/* 64-bit MAC; updates the chained IV in place when one is supplied. */
static uint64_t MAC_64( const unsigned char *data, int len,
		unsigned char *key, uint64_t *chainedIV )
{
	uint64_t tmp = _checksum_64( key, data, len, chainedIV );

	if(chainedIV)
		*chainedIV = tmp;

	return tmp;
}

/* 32-bit MAC: upper and lower halves of the 64-bit MAC XORed together. */
static unsigned int MAC_32( unsigned char *src, int len,
		unsigned char *key )
{
	uint64_t *chainedIV = NULL;
	uint64_t mac64 = MAC_64( src, len, key, chainedIV );
	unsigned int mac32 = ((mac64 >> 32) & 0xffffffff) ^ (mac64 & 0xffffffff);
	return mac32;
}

/* EncFS stream decryption: two CFB passes with IVs derived from iv64+1 and
   iv64, with unshuffle/flip between them. Always returns 1. */
static int streamDecode(unsigned char *buf, int size,
		uint64_t iv64, unsigned char *key)
{
	unsigned char ivec[ MAX_IVLENGTH ];
	int dstLen=0, tmpLen=0;
	EVP_CIPHER_CTX stream_dec;

	setIVec( ivec, iv64 + 1, key);
	EVP_CIPHER_CTX_init(&stream_dec);
	EVP_DecryptInit_ex( &stream_dec, cur_salt->streamCipher, NULL, NULL, NULL);
	EVP_CIPHER_CTX_set_key_length( &stream_dec, cur_salt->keySize );
	EVP_CIPHER_CTX_set_padding( &stream_dec, 0 );
	EVP_DecryptInit_ex( &stream_dec, NULL, NULL, key, NULL);
	EVP_DecryptInit_ex( &stream_dec, NULL, NULL, NULL, ivec);

	EVP_DecryptUpdate( &stream_dec, buf, &dstLen, buf, size );
	EVP_DecryptFinal_ex( &stream_dec, buf+dstLen, &tmpLen );
	unshuffleBytes( buf, size );
	flipBytes( buf, size );

	setIVec( ivec, iv64, key );
	EVP_DecryptInit_ex( &stream_dec, NULL, NULL, NULL, ivec);
	EVP_DecryptUpdate( &stream_dec, buf, &dstLen, buf, size );
	EVP_DecryptFinal_ex( &stream_dec, buf+dstLen, &tmpLen );
	EVP_CIPHER_CTX_cleanup(&stream_dec);
	unshuffleBytes( buf, size );

	dstLen += tmpLen;
	if(dstLen != size) {
		/* NOTE(review): a size mismatch is silently ignored here —
		   there is no error path; confirm against upstream intent. */
	}

	return 1;
}

static int crypt_all(int *pcount, struct db_salt *salt);
static int crypt_all_benchmark(int *pcount, struct db_salt *salt);

/* Format init: builds the PBKDF2 kernels and auto-tunes GWS/LWS.
   Continues past this chunk. */
static void init(struct fmt_main *self)
{
	char build_opts[64];
	static char valgo[sizeof(ALGORITHM_NAME) + 8] = "";

#if 0
	me = self;
#endif
	if ((v_width = opencl_get_vector_width(gpu_id, sizeof(cl_int))) > 1) {
		/* Run
vectorized kernel */
		/* advertise the effective vector width in the algorithm name */
		snprintf(valgo, sizeof(valgo),
		         OCL_ALGORITHM_NAME " %ux" CPU_ALGORITHM_NAME, v_width);
		self->params.algorithm_name = valgo;
	}

	snprintf(build_opts, sizeof(build_opts),
	         "-DHASH_LOOPS=%u -DOUTLEN=%u "
	         "-DPLAINTEXT_LENGTH=%u -DV_WIDTH=%u",
	         HASH_LOOPS, OUTLEN, PLAINTEXT_LENGTH, v_width);
	opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_kernel.cl", gpu_id, build_opts);

	pbkdf2_init = clCreateKernel(program[gpu_id], "pbkdf2_init", &ret_code);
	HANDLE_CLERROR(ret_code, "Error creating kernel");
	crypt_kernel = pbkdf2_loop = clCreateKernel(program[gpu_id], "pbkdf2_loop", &ret_code);
	HANDLE_CLERROR(ret_code, "Error creating kernel");
	pbkdf2_final = clCreateKernel(program[gpu_id], "pbkdf2_final", &ret_code);
	HANDLE_CLERROR(ret_code, "Error creating kernel");

	//Initialize openCL tuning (library) for this format.
	opencl_init_auto_setup(SEED, 2*HASH_LOOPS, split_events, warn, 2, self, create_clobj, release_clobj, sizeof(pbkdf2_state), 0);

	//Auto tune execution from shared/included code.
	self->methods.crypt_all = crypt_all_benchmark;
	autotune_run(self, 2 * (ITERATIONS - 1) + 4, 0,
	             (cpu(device_info[gpu_id]) ?
1000000000 : 10000000000ULL));
	self->methods.crypt_all = crypt_all;

	self->params.min_keys_per_crypt = local_work_size * v_width;
	self->params.max_keys_per_crypt = global_work_size * v_width;
}

/* True when q consists only of hex digits (per the atoi16 lookup table). */
static int ishex(char *q)
{
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q;
}

/* Structural validation of a "$encfs$ks*iter*cipher*slen*salt*dlen*data"
   ciphertext: field presence, length limits, and hex digits. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p;
	int res;
	if (strncmp(ciphertext, "$encfs$", 7))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 7;
	if ((p = strtok(ctcopy, "*")) == NULL)	/* key size */
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* iterations */
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* cipher */
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* salt length */
		goto err;
	res = atoi(p);
	if (res > 40)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* salt */
		goto err;
	if (res * 2 != strlen(p))
		goto err;
	if (!ishex(p))
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* data length */
		goto err;
	res = atoi(p);
	if (res > 128)
		goto err;
	if ((p = strtok(NULL, "*")) == NULL)	/* data */
		goto err;
	if (res * 2 != strlen(p))
		goto err;
	if (!ishex(p))
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}

/* Parses the ciphertext into a static encfs_cpu_salt, selecting AES-CBC/CFB
   variants from the key size. Continues past this chunk. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static encfs_cpu_salt cs;
	ctcopy += 7;
	p = strtok(ctcopy, "*");
	cs.keySize = atoi(p);
	switch(cs.keySize) {
		case 128:
			cs.blockCipher = EVP_aes_128_cbc();
			cs.streamCipher = EVP_aes_128_cfb();
			break;
		case 192:
			cs.blockCipher = EVP_aes_192_cbc();
			cs.streamCipher = EVP_aes_192_cfb();
			break;
		case 256:
		default:
			cs.blockCipher = EVP_aes_256_cbc();
			cs.streamCipher = EVP_aes_256_cfb();
			break;
	}
	cs.keySize = cs.keySize / 8;	/* bits -> bytes */
	p = strtok(NULL, "*");
	cs.iterations = atoi(p);
	p = strtok(NULL, "*");
	cs.cipher = atoi(p);
	p = strtok(NULL, "*");
	cs.saltLen = atoi(p);
	p = strtok(NULL, "*");
	for (i = 0; i < cs.saltLen; i++)
		cs.salt[i] =
			atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtok(NULL, "*");
	cs.dataLen = atoi(p);
	p = strtok(NULL, "*");
	for (i = 0; i < cs.dataLen; i++)
		cs.data[i] =
			atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];

	cs.ivLength = EVP_CIPHER_iv_length( cs.blockCipher );
	MEM_FREE(keeptr);
	return (void *) &cs;
}

/* Installs the salt and uploads the PBKDF2 parameters to the device. */
static void set_salt(void *salt)
{
	cur_salt = (encfs_cpu_salt*)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->saltLen);
	currentsalt.length = cur_salt->saltLen;
	currentsalt.iterations = cur_salt->iterations;
	currentsalt.outlen = cur_salt->keySize + cur_salt->ivLength;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, sizeof(pbkdf2_salt), &currentsalt, 0, NULL, NULL), "Copy salt to gpu");
}

static void clear_keys(void)
{
	memset(inbuffer, 0, key_buf_size);
}

/* Stores a candidate password into the transposed GPU layout (GETPOS). */
static void set_key(char *key, int index)
{
	int i;
	int length = strlen(key);

	for (i = 0; i < length; i++)
		((char*)inbuffer)[GETPOS(i, index)] = key[i];

	new_keys = 1;
}

/* Reads a candidate back out of the transposed GPU layout. */
static char* get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	int i = 0;

	while (i < PLAINTEXT_LENGTH &&
	       (ret[i] = ((char*)inbuffer)[GETPOS(i, index)]))
		i++;
	ret[i] = 0;

	return ret;
}

/* Runs PBKDF2 on the device, then verifies each derived key on the CPU by
   decoding the stored volume key and comparing its MAC-32 checksum.
   Continues past this chunk. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int i, j, index;
	size_t scalar_gws;

	global_work_size = ((count + (v_width * local_work_size - 1)) / (v_width * local_work_size)) * local_work_size;
	scalar_gws = global_work_size * v_width;

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	if (new_keys) {
		HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, key_buf_size, inbuffer, 0, NULL, NULL), "Copy data to gpu");
		new_keys = 0;
	}

	/// Run kernels
	HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_init, 1, NULL, &global_work_size, &local_work_size, 0, NULL, firstEvent), "Run initial kernel");

	for (j = 0; j < ((currentsalt.outlen + 19) / 20); j++) {
		for (i = 0; i < LOOP_COUNT; i++) {
			HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_loop,
1, NULL, &global_work_size, &local_work_size, 0, NULL, NULL), "Run loop kernel"); HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel"); opencl_process_event(); } HANDLE_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_final, 1, NULL, &global_work_size, &local_work_size, 0, NULL, NULL), "Run intermediate kernel"); } /// Read the result back HANDLE_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, sizeof(pbkdf2_out) * scalar_gws, output, 0, NULL, NULL), "Copy result back"); #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { int i; unsigned char master[MAX_KEYLENGTH + MAX_IVLENGTH]; unsigned char tmpBuf[cur_salt->dataLen]; unsigned int checksum = 0; unsigned int checksum2 = 0; memcpy(master, output[index].dk, cur_salt->keySize + cur_salt->ivLength); // First N bytes are checksum bytes. for(i=0; i<KEY_CHECKSUM_BYTES; ++i) checksum = (checksum << 8) | (unsigned int)cur_salt->data[i]; memcpy( tmpBuf, cur_salt->data+KEY_CHECKSUM_BYTES, cur_salt->keySize + cur_salt->ivLength ); streamDecode(tmpBuf, cur_salt->keySize + cur_salt->ivLength ,checksum, master); checksum2 = MAC_32( tmpBuf, cur_salt->keySize + cur_salt->ivLength, master); if(checksum2 == checksum) { cracked[index] = 1; #ifdef _OPENMP #pragma omp atomic #endif any_cracked |= 1; } } return count; } static int crypt_all_benchmark(int *pcount, struct db_salt *salt) { size_t scalar_gws; size_t *lws = local_work_size ? &local_work_size : NULL; global_work_size = local_work_size ? 
((*pcount + (v_width * local_work_size - 1)) / (v_width * local_work_size)) * local_work_size : *pcount / v_width; scalar_gws = global_work_size * v_width; #if 0 fprintf(stderr, "%s(%d) lws %zu gws %zu sgws %zu kpc %d/%d\n", __FUNCTION__, *pcount, local_work_size, global_work_size, scalar_gws, me->params.min_keys_per_crypt, me->params.max_keys_per_crypt); #endif /// Copy data to gpu BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, key_buf_size, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); /// Run kernels BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_init, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run initial kernel"); BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_loop, 1, NULL, &global_work_size, lws, 0, NULL, NULL), "Run loop kernel"); BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_loop, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[2]), "Run loop kernel"); BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_final, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[3]), "Run intermediate kernel"); /// Read the result back BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, sizeof(pbkdf2_out) * scalar_gws, output, 0, NULL, multi_profilingEvent[4]), "Copy result back"); return *pcount; } static int cmp_all(void *binary, int count) { return any_cracked; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } #if FMT_MAIN_VERSION > 11 static unsigned int iteration_count(void *salt) { encfs_cpu_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->iterations; } #endif struct fmt_main fmt_opencl_encfs = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if 
FMT_MAIN_VERSION > 11 { "iteration count", }, #endif tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, #if FMT_MAIN_VERSION > 11 { iteration_count, }, #endif fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, set_salt, set_key, get_key, clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
dynamic_fmt.c
/* * This software was written by Jim Fougeron jfoug AT cox dot net * in 2009-2013. No copyright is claimed, and the software is hereby * placed in the public domain. In case this attempt to disclaim * copyright and place the software in the public domain is deemed * null and void, then the software is Copyright (c) 2009-2013 Jim Fougeron * and it is hereby released to the general public under the following * terms: * * This software may be modified, redistributed, and used for any * purpose, in source and binary forms, with or without modification. * * Generic 'scriptable' hash cracker for JtR * * Renamed and changed from md5_gen* to dynamic*. We handle MD5 and SHA1 * at the present time. More crypt types 'may' be added later. * Added SHA2 (SHA224, SHA256, SHA384, SHA512), GOST, Whirlpool crypt types. * Whirlpool use oSSSL if OPENSSL_VERSION_NUMBER >= 0x10000000, otherwise use sph_* code. * * There used to be a todo list, and other commenting here. It has been * moved to ./docs/dynamic_history.txt * * KNOWN issues, and things to do. * * 1. create a new optimize flag, MGF_PASS_AFTER_FIXEDSALT and * MGF_PASS_BEFORE_FIXEDSALT. Then create DynamicFunc__appendsalt_after_pass[12] * These would only be valid for a FIXED length salted format. Then * we can write the pass right into the buffer, and get_key() would read * it back from there, either skipping over the salt, or removing the salt * from the end. This would allow crypt($s.$p) and crypt($p.s) to be optimized * in the way of string loading, and many fewer buffer copies. So dyna_1 could * be optimized to something like: // dynamic_1 Joomla md5($p.$s) static DYNAMIC_primitive_funcp _Funcs_1[] = { //Flags=MGF_PASS_BEFORE_FIXEDSALT | MGF_SALTED // saltlen=3 (or whatever). This fixed size is 'key' DynamicFunc__appendsalt_after_pass1, DynamicFunc__crypt_md5, NULL }; * WELL, the fixed size salt, it 'may' not be key for the MGF_PASS_BEFORE_FIXEDSALT, * I think I can make that 'work' for variable sized salts. 
But for the * MGF_PASS_AFTER_FIXEDSALT, i.e. crypt($s.$p) the fixed size salt IS key. I would * like to store all PW's at salt_len offset in the buffer, and simply overwrite the * first part of each buffer with the salt, never moving the password after the first * time it is written. THEN it is very important this ONLY be allowed when we KNOW * the salt length ahead of time. * * 2. Change regen-salts to be generic. Add the logic to dynamic_fmt.c proper, and change * the fake-salts.c, and options so that 'generic' regen-salts can be done. */ #include <string.h> #include <time.h> #if AC_BUILT #include "autoconfig.h" #endif #include "arch.h" #if !FAST_FORMATS_OMP #ifdef _OPENMP #define FORCE_THREAD_MD5_body #endif #undef _OPENMP #endif #ifndef DYNAMIC_DISABLED #ifdef SIMD_COEF_32 #include "simd-intrinsics.h" #endif #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "md5.h" #include "md4.h" #include "dynamic.h" #include "options.h" #include "config.h" #include "sha.h" #include "sha2.h" #include "gost.h" #include "sph_haval.h" #include "sph_ripemd.h" #include "sph_tiger.h" #include "sph_md2.h" #include "sph_panama.h" #include "sph_skein.h" #include "sph_whirlpool.h" #include "memory.h" #include "unicode.h" #include "johnswap.h" #include "crc32.h" #include "aligned.h" #include "fake_salts.h" #include "base64_convert.h" #if (AC_BUILT && HAVE_WHIRLPOOL) || \ (!AC_BUILT && OPENSSL_VERSION_NUMBER >= 0x10000000 && !HAVE_NO_SSL_WHIRLPOOL) #include <openssl/whrlpool.h> #else // on my 32 bit cygwin builds, this code is about 4x slower than the oSSL code. 
#define WHIRLPOOL_CTX sph_whirlpool_context #define WHIRLPOOL_Init(a) sph_whirlpool_init(a) #define WHIRLPOOL_Update(a,b,c) sph_whirlpool(a,b,c) #define WHIRLPOOL_Final(a,b) sph_whirlpool_close(b,a) #endif #include "KeccakHash.h" #define KECCAK_CTX Keccak_HashInstance #define KECCAK_Update(a,b,c) Keccak_HashUpdate(a,b,(c)*8) #define KECCAK_Final(a,b) Keccak_HashFinal(b,a) #define KECCAK_256_Init(hash) Keccak_HashInitialize(hash, 1088, 512, 256, 0x01) #define KECCAK_512_Init(hash) Keccak_HashInitialize(hash, 576, 1024, 512, 0x01) // FIPS202 complient #define SHA3_224_Init(hash) Keccak_HashInitialize(hash, 1152, 448, 224, 0x06) #define SHA3_256_Init(hash) Keccak_HashInitialize(hash, 1088, 512, 256, 0x06) #define SHA3_384_Init(hash) Keccak_HashInitialize(hash, 832, 768, 384, 0x06) #define SHA3_512_Init(hash) Keccak_HashInitialize(hash, 576, 1024, 512, 0x06) #ifdef _OPENMP #include <omp.h> static unsigned int m_ompt; #endif #include "dynamic_types.h" #include "memdbg.h" #if (defined (_OPENMP)||defined(FORCE_THREAD_MD5_body)) && defined (_MSC_VER) unsigned DES_bs_max_kpc, DES_bs_min_kpc, DES_bs_all_p; #undef MD5_body extern void MD5_body(MD5_word x[15],MD5_word out[4]); #endif #define STRINGIZE2(s) #s #define STRINGIZE(s) STRINGIZE2(s) static struct fmt_main fmt_Dynamic; static struct fmt_main *pFmts; static int nFmts; static int nLocalFmts; static struct fmt_main *pLocalFmts; static int force_md5_ctx; static void dynamic_RESET(struct fmt_main *fmt); #define eLargeOut dyna_eLargeOut eLargeOut_t *eLargeOut; #define nLargeOff dyna_nLargeOff unsigned *nLargeOff; #if ARCH_LITTLE_ENDIAN #define MD5_swap(x, y, count) #define MD5_swap2(a,b,c,d,e) #else extern char *MD5_DumpHexStr(void *p); static void MD5_swap(MD5_word *x, MD5_word *y, int count) { do { *y++ = JOHNSWAP(*x++); } while (--count); } #if MD5_X2 static void MD5_swap2(MD5_word *x, MD5_word *x2, MD5_word *y, MD5_word *y2, int count) { do { *y++ = JOHNSWAP(*x++); *y2++ = JOHNSWAP(*x2++); } while (--count); } #endif 
#endif #define FORMAT_LABEL "dynamic" #define FORMAT_NAME "Generic MD5" #ifdef SIMD_COEF_32 #define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3) )*SIMD_COEF_32 + ((i)&3) ) #define SHAGETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3) )*SIMD_COEF_32 + (3-((i)&3)) ) //for endianity conversion #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define CIPHERTEXT_LENGTH 32 #define BINARY_SIZE 16 #define BINARY_SIZE_SHA 20 #define BINARY_ALIGN MEM_ALIGN_WORD // Computation for 'salt_size' The salt (and salt2) is appended to the end of the hash entry. // The format of a salted entry is: $dynamic_#$hash$SALT_VAL[$$2SALT2_VAL] // salt 64 bytes, // salt2 64 bytes, // salt signature $ 1 byte // salt2 signature $$2 3 bytes // null termination 1 byte. This this allows 2 64 byte salt's. // Note, we now have up to 10 of these. #define SALT_SIZE (64*4+1+3+1) #define SALT_ALIGN MEM_ALIGN_WORD // slots to do 24 'tests'. Note, we copy the // same 3 tests over and over again. Simply to validate that // tests use 'multiple' blocks. static struct fmt_tests dynamic_tests[] = { {NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL}, {NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL}, {NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL},{NULL} }; #ifdef SIMD_COEF_32 // SSE2 works only with 54 byte keys. Thus, md5(md5($p).md5($s)) can NOT be used // with the SSE2, since that final md5 will be over a 64 byte block of data. 
static union SIMD_inpup { uint32_t w[(64*SIMD_COEF_32)/sizeof(uint32_t)]; unsigned char c[64*SIMD_COEF_32]; } *input_buf, *input_buf2; static union SIMD_crypt { uint32_t w[(BINARY_SIZE*SIMD_COEF_32)/sizeof(uint32_t)]; unsigned char c[BINARY_SIZE*SIMD_COEF_32]; } *crypt_key, *crypt_key2; static unsigned int (*total_len)[SIMD_COEF_32]; static unsigned int (*total_len2)[SIMD_COEF_32]; #define MMX_INP_BUF_SZ (sizeof(input_buf[0]) *BLOCK_LOOPS) #define MMX_INP_BUF2_SZ (sizeof(input_buf2[0])*BLOCK_LOOPS) #define MMX_TOT_LEN_SZ (sizeof(*total_len) *BLOCK_LOOPS) #define MMX_TOT_LEN2_SZ (sizeof(*total_len2)*BLOCK_LOOPS) #define MMX_INP_BUF_SZ (sizeof(input_buf[0]) *BLOCK_LOOPS) #define MMX_CRYPT_KEY_SZ (sizeof(crypt_key[0]) *BLOCK_LOOPS+sizeof(crypt_key[0])) #define MMX_CRYPT_KEY2_SZ (sizeof(crypt_key2[0])*BLOCK_LOOPS) #endif #define FLAT_INP_BUF_SZ (sizeof(MD5_IN)*(MAX_KEYS_PER_CRYPT_X86>>MD5_X2)) #define FLAT_TOT_LEN_SZ (sizeof(unsigned int)*(MAX_KEYS_PER_CRYPT_X86)) MD5_OUT *crypt_key_X86; MD5_OUT *crypt_key2_X86; MD5_IN *input_buf_X86; MD5_IN *input_buf2_X86; unsigned int *total_len_X86; unsigned int *total_len2_X86; BIG_HASH_OUT dynamic_BHO[4]; static int keys_dirty; // We store the salt here static unsigned char *cursalt; // length of salt (so we don't have to call strlen() all the time. static int saltlen; int get_dynamic_fmt_saltlen() { return saltlen; } // This array is for the 2nd salt in the hash. I know of no hashes with double salts, // but test type dynamic_16 (which is 'fake') has 2 salts, and this is the data/code to // handle double salts. 
static unsigned char *cursalt2; static int saltlen2; static unsigned char *username; static int usernamelen; static unsigned char *flds[10]; static int fld_lens[10]; const char *dynamic_itoa16 = itoa16; #if !defined (_DEBUG) #define itoa16_w2 __Dynamic_itoa_w2 #define itoa16_w2_u __Dynamic_itoa_w2_u #define itoa16_w2_l __Dynamic_itoa_w2_l #endif unsigned short itoa16_w2_u[256], itoa16_w2_l[256]; unsigned short *itoa16_w2=itoa16_w2_l; // array of the keys. Also lengths of the keys. NOTE if store_keys_in_input, then the // key array will NOT be used (but the length array still is). #ifndef MAX_KEYS_PER_CRYPT #define MAX_KEYS_PER_CRYPT MAX_KEYS_PER_CRYPT_X86 #endif #ifndef PLAINTEXT_LENGTH #define PLAINTEXT_LENGTH PLAINTEXT_LENGTH_X86 #endif #define EFFECTIVE_MKPC (MAX_KEYS_PER_CRYPT > MAX_KEYS_PER_CRYPT_X86 ? MAX_KEYS_PER_CRYPT : MAX_KEYS_PER_CRYPT_X86) #define EFFECTIVE_MAX_LENGTH (PLAINTEXT_LENGTH > PLAINTEXT_LENGTH_X86 ? PLAINTEXT_LENGTH : PLAINTEXT_LENGTH_X86) // Used to compute length of each string to clean. This is needed, since we have to clean a little more than // just the length, IF we are cleaning strings that are in different endianity than native for the CPU. // This is seen on SHA224 (etc) on Intel, or MD5 of BE systems. We still try to clean 'only' as much as // we need to, but that is usually MORE than what the length of the stored string is. 8 gives us 7 byte spill // over, plus 1 byte for the 0x80 #define COMPUTE_EX_LEN(a) ( (a) > (sizeof(input_buf_X86[0].x1.b)-8) ) ? sizeof(input_buf_X86[0].x1.b) : ((a)+8) // this new 'ENCODED_EFFECTIVE_MAX_LENGTH' needed, since we grab up to 125 bytes of data WHEN in -encode:utf8 mode for a unicode format. #define ENCODED_EFFECTIVE_MAX_LENGTH (EFFECTIVE_MAX_LENGTH > 125 ? EFFECTIVE_MAX_LENGTH : 125) static char saved_key[EFFECTIVE_MKPC][ENCODED_EFFECTIVE_MAX_LENGTH + 1]; static int saved_key_len[EFFECTIVE_MKPC]; // this is the max generic location we should target. 
This keeps us from having blown MD buffers or overwrite // when in utf8->utf16 mode, where we are handling data that likely is larger than we should handle. We have to // handle this larger data, so that we get as many strings with 1 byte utf8 that would convert to data that would // blow our buffers. But we want as many as possible for the 2 and 3 byte utf data. #define MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE (256-17) // Used in 'get_key' if we are running in store_keys_in_input mode static char out[ENCODED_EFFECTIVE_MAX_LENGTH + 1]; // This is the GLOBAL count of keys. ALL of the primitives which deal with a count // will read from this variable. #if !defined (_DEBUG) #define m_count m_Dynamic_Count #endif unsigned int m_count; // If we are run in 'specific' mode (say, -format=dynamic -subformat=dynamic_0, then we // want to 'allow' bare hashes to be 'valid'. This is how we will do this. We have a boolean // that if set to true, we will perform a 1 time check within the valid function. If at // that time we find out that we are cracking (or showing, etc) that we will accept lines // that are either format of $dynamic_0$hhhhhh...32 or simply in the format of hhhhhhh..32 int dynamic_allow_rawhash_fixup = 0; // this one IS in the private_dat, but since it is accessed SO much, we pull it // out prior to 'internal' processing. The others are accessed right from // the structure, since there are accessed infrequently enough to not matter. static int dynamic_use_sse; // If set to 1, then do unicode conversion is many string setting functions. static int *md5_unicode_convert; #if !defined (_DEBUG) #define curdat Dynamic_curdat #endif private_subformat_data curdat; // Helper function that loads out 256 unsigned short array that does base-16 conversions // This function is called at the 'validation' call that loads our preloads (i.e. only // called one time, pre 'run' (but will be called multiple times when benchmarking, but // will NOT impact benchmark times.) 
// Loading a word at a time (2 bytes), sped up
// the overall run time of dynamic_2 almost 5%, thus this conversion is MUCH faster than
// the fastest byte by byte I could put together.  I tested several ways to  access this
// array of unsigned shorts, and the best way was a 2 step method into an array of long
// integer pointers (thus, load 1/2 the 32 bit word, then the other 1/2, into a 32 bit word).

/*********************************************************************************
 *********************************************************************************
 * Start of the 'normal' *_fmt code for md5-gen
 *********************************************************************************
 *********************************************************************************/

/*
 * Decodes $HEX$... spans of 'input' into raw bytes, writing the result to
 * 'output' (caller-supplied, at least as large as 'input').  Bytes before
 * the first "$HEX$" are copied verbatim, plus the leading '$' of the
 * marker itself.  On any condition we cannot safely decode -- no marker
 * (callers check first), an embedded "00" pair (would produce a NUL), or
 * a non-hex character outside a '$'-started field -- the input is copied
 * through unchanged.  Returns 'output'.
 */
char *RemoveHEX(char *output, char *input) {
	char *cpi = input;
	char *cpo = output;
	char *cpH = strstr(input, "$HEX$");

	if (!cpH) {
		// should never get here, we have a check performed before this function is called.
		strcpy(output, input);
		return output;
	}

	/* Copy everything up to and including the '$' of "$HEX$". */
	while (cpi < cpH)
		*cpo++ = *cpi++;

	*cpo++ = *cpi;
	cpi += 5;	/* skip past the rest of the "$HEX$" marker */

	while (*cpi) {
		if (*cpi == '0' && cpi[1] == '0') {
			/* "00" would decode to an embedded NUL; refuse and pass through. */
			strcpy(output, input);
			return output;
		}
		if (atoi16[ARCH_INDEX(*cpi)] != 0x7f && atoi16[ARCH_INDEX(cpi[1])] != 0x7f) {
			/* a valid hex pair: emit the decoded byte */
			*cpo++ = atoi16[ARCH_INDEX(*cpi)]*16 + atoi16[ARCH_INDEX(cpi[1])];
			cpi += 2;
		} else if (*cpi == '$') {
			/* a new '$'-field: copy verbatim until the next "$HEX$" (if any) */
			while (*cpi && strncmp(cpi, "$HEX$", 5)) {
				*cpo++ = *cpi++;
			}
			if (!strncmp(cpi, "$HEX$", 5)) {
				*cpo++ = *cpi;
				cpi += 5;
			}
		} else {
			/* non-hex garbage inside a hex run: pass input through unchanged */
			strcpy(output, input);
			return output;
		}
	}
	*cpo = 0;
	return output;
}

/*********************************************************************************
 * Detects a 'valid' md5-gen format. This function is NOT locked to anything. It
 * takes its detection logic from the provided fmt_main pointer. Within there,
 * is a 'private' data pointer. When john first loads the md5-gen, it calls a
 * function which builds proper 'private' data for EACH type of md5-gen. Then
 * john will call valid on EACH of those formats, asking each one if a string is
 * valid. Each format has a 'private' properly setup data object.
 *********************************************************************************/
static int valid(char *ciphertext, struct fmt_main *pFmt)
{
	unsigned int i, cipherTextLen;
	char *cp, fixed_ciphertext[1024];
	private_subformat_data *pPriv = pFmt->private.data;

	if (!pPriv)
		return 0;

	/* The $dynamic_N$ signature must match this sub-format exactly. */
	if (strncmp(ciphertext, pPriv->dynamic_WHICH_TYPE_SIG, strlen(pPriv->dynamic_WHICH_TYPE_SIG)))
		return 0;

	/* Quick cancel of huge lines (eg. zip archives) */
	if (strnlen(ciphertext, LINE_BUFFER_SIZE + 1) > LINE_BUFFER_SIZE)
		return 0;

	// this is now simply REMOVED totally, if we detect it.  Doing this solves MANY other problems
	// of leaving it in there. The ONLY problem we still have is NULL bytes.
	if (strstr(ciphertext, "$HEX$")) {
		if (strnlen(ciphertext, sizeof(fixed_ciphertext) + 1) < sizeof(fixed_ciphertext))
			ciphertext = RemoveHEX(fixed_ciphertext, ciphertext);
	}

	cp = &ciphertext[strlen(pPriv->dynamic_WHICH_TYPE_SIG)];

	/* base64-style hash bodies (crypt / MIME variants) */
	if (pPriv->dynamic_base64_inout == 1 || pPriv->dynamic_base64_inout == 3 || pPriv->dynamic_base64_inout == 5)
	{
		// jgypwqm.JsMssPLiS8YQ00$BaaaaaSX
		unsigned int len;
		len = base64_valid_length(cp, pPriv->dynamic_base64_inout==3?e_b64_mime:e_b64_crypt, flg_Base64_MIME_TRAIL_EQ_CNT, 0);
		if (len < 20 || len > pPriv->dynamic_SALT_OFFSET+4)
			return 0;
		if (pPriv->dynamic_FIXED_SALT_SIZE == 0)
			return !cp[len];
		if (pPriv->dynamic_FIXED_SALT_SIZE && cp[len] != '$')
			return 0;
		/* positive FIXED_SALT_SIZE = exact length; < -1 = maximum length */
		if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&cp[len+1]) != pPriv->dynamic_FIXED_SALT_SIZE)
			return 0;
		else if (pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&cp[len+1]) > -(pPriv->dynamic_FIXED_SALT_SIZE))
			return 0;
		return 1;
	}
	/* 16-character base64 body variant */
	if (pPriv->dynamic_base64_inout == 2)
	{
		// h3mJrcH0901pqX/m$alex
		unsigned int i;
		for (i = 0; i < 16; ++i) {
			if (atoi64[ARCH_INDEX(cp[i])] == 0x7F)
				return 0;
		}
		if (pPriv->dynamic_FIXED_SALT_SIZE == 0)
			return !cp[i];
		if (pPriv->dynamic_FIXED_SALT_SIZE && cp[16] != '$')
			return 0;
		if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&cp[17]) != pPriv->dynamic_FIXED_SALT_SIZE)
			return 0;
		else if (pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&cp[17]) > -(pPriv->dynamic_FIXED_SALT_SIZE))
			return 0;
		if (strlen(cp) < 16)
			return 0;
		return 1;
	}

	/* hex hash body: determine expected digest length from sub-format flags */
	if (strlen(cp) < 32)
		return 0;
	cipherTextLen = CIPHERTEXT_LENGTH;
	if (pPriv->dynamic_40_byte_input) {
		cipherTextLen = 40;
	} else if (pPriv->dynamic_48_byte_input) {
		cipherTextLen = 48;
	} else if (pPriv->dynamic_64_byte_input) {
		cipherTextLen = 64;
	} else if (pPriv->dynamic_56_byte_input) {
		cipherTextLen = 56;
	} else if (pPriv->dynamic_80_byte_input) {
		cipherTextLen = 80;
	} else if (pPriv->dynamic_96_byte_input) {
		cipherTextLen = 96;
	} else if (pPriv->dynamic_128_byte_input) {
		cipherTextLen = 128;
	}
	for (i = 0; i < cipherTextLen; i++) {
		if (atoi16[ARCH_INDEX(cp[i])] == 0x7f)
			return 0;
	}
	/* unsalted formats must end exactly at the digest */
	if ((pPriv->pSetup->flags&MGF_SALTED) == 0) {
		if (!cp[cipherTextLen])
			return 1;
		return 0;
	}

	if (cp[cipherTextLen] && cp[cipherTextLen] != '$')
		return 0;
	// NOTE if looking at this in the future, this was not my fix.
	if (strlen(&cp[cipherTextLen]) > SALT_SIZE)
		return 0;
	// end NOTE.
	if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && ciphertext[pPriv->dynamic_SALT_OFFSET-1] != '$')
		return 0;
	if (pPriv->dynamic_FIXED_SALT_SIZE > 0 && strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]) != pPriv->dynamic_FIXED_SALT_SIZE) {
		// first check to see if this salt has left the $HEX$ in the string (i.e. embedded nulls). If so, then
		// validate length with this in mind.
		if (!memcmp(&ciphertext[pPriv->dynamic_SALT_OFFSET], "HEX$", 4)) {
			int len = strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]);
			len = (len-4)>>1;
			if (len != pPriv->dynamic_FIXED_SALT_SIZE)
				return 0;
		} else {
			// check if there is a 'salt-2' or 'username', etc  If that is the case, then this is still valid.
			if (strncmp(&ciphertext[pPriv->dynamic_SALT_OFFSET+pPriv->dynamic_FIXED_SALT_SIZE], "$$", 2))
				return 0;
		}
	}
	else if (!regen_salts_options && pPriv->dynamic_FIXED_SALT_SIZE < -1 && strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]) > -(pPriv->dynamic_FIXED_SALT_SIZE)) {
		char *cpX;
		// first check to see if this salt has left the $HEX$ in the string (i.e. embedded nulls). If so, then
		// validate length with this in mind.
		if (!memcmp(&ciphertext[pPriv->dynamic_SALT_OFFSET], "HEX$", 4)) {
			int len = strlen(&ciphertext[pPriv->dynamic_SALT_OFFSET]);
			len = (len-4)>>1;
			if (len > -(pPriv->dynamic_FIXED_SALT_SIZE))
				return 0;
		} else {
			// check if there is a 'salt-2' or 'username', etc  If that is the case, then this is still 'valid'
			cpX = mem_alloc(-(pPriv->dynamic_FIXED_SALT_SIZE) + 3);
			strnzcpy(cpX, &ciphertext[pPriv->dynamic_SALT_OFFSET], -(pPriv->dynamic_FIXED_SALT_SIZE) + 3);
			if (!strstr(cpX, "$$")) {
				MEM_FREE(cpX);
				return 0;
			}
			MEM_FREE(cpX);
		}
	}
	/* required auxiliary fields for this sub-format: second salt ($$2),
	 * user name ($$U), and any $$F<n> fields demanded by FldMask */
	if (pPriv->b2Salts==1 && !strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], "$$2"))
		return 0;
	if (pPriv->nUserName && !strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], "$$U"))
		return 0;
	if (pPriv->FldMask) {
		for (i = 0; i < 10; ++i) {
			if ((pPriv->FldMask & (MGF_FLDx_BIT<<i)) == (MGF_FLDx_BIT<<i)) {
				char Fld[8];
				sprintf(Fld, "$$F%d", i);
				if (!strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], Fld))
					return 0;
			}
		}
	}
	return 1;
}

static char *FixupIfNeeded(char *ciphertext, private_subformat_data *pPriv);
static struct fmt_main *dynamic_Get_fmt_main(int which);
static char *HandleCase(char *cp, int caseType);

// 'wrapper' functions. These are here, so we can call these functions to work on ALL data (not simply within the
// thead, which ONLY wants to work on a subset of the data.  These functions should NOT be called by threading
// code, EVER.  But this functions KNOW what to do.  Some actually have threads, others do not need them.
#ifdef _OPENMP
/* Per-call batch increments for the OpenMP 'parallel for' dispatch below.
 * Without SIMD they step MD5_X2+1 keys at a time; with SIMD they step one
 * full SIMD lane-group per hash family. */
#ifndef SIMD_COEF_32
const unsigned int OMP_INC = (MD5_X2+1);
const unsigned int OMP_MD5_INC = (MD5_X2+1);
const unsigned int OMP_MD4_INC = (MD5_X2+1);
const unsigned int OMP_SHA1_INC = (MD5_X2+1);
#else
const unsigned int OMP_INC = (MD5_X2+1);
const unsigned int OMP_MD5_INC = (SIMD_PARA_MD5*SIMD_COEF_32);
const unsigned int OMP_MD4_INC = (SIMD_PARA_MD4*SIMD_COEF_32);
const unsigned int OMP_SHA1_INC = (SIMD_PARA_SHA1*SIMD_COEF_32);
#endif // SIMD_COEF_32
#endif // _OPENMP

/* Non-threaded wrapper: runs the primitive over the whole key set [0,m_count)
 * on thread 0. The OMP build of the primitive takes (first,last,tid). */
inline static void __nonMP_DynamicFunc__SSEtoX86_switch_output2() {
#ifdef _OPENMP
	DynamicFunc__SSEtoX86_switch_output2(0,m_count,0);
#else
	DynamicFunc__SSEtoX86_switch_output2();
#endif
}
/* Non-threaded wrapper over the whole key set (see above). */
inline static void __nonMP_DynamicFunc__append_from_last_output2_to_input1_as_base16() {
#ifdef _OPENMP
	DynamicFunc__append_from_last_output2_to_input1_as_base16(0,m_count,0);
#else
	DynamicFunc__append_from_last_output2_to_input1_as_base16();
#endif
}
/* Set the large-hash output encoding for every thread slot (slot 0 only
 * in non-OMP builds). */
void __nonMP_eLargeOut(eLargeOut_t what) {
#ifdef _OPENMP
	unsigned int i;
	for (i = 1; i < m_ompt; ++i)
		eLargeOut[i] = what;
#endif
	eLargeOut[0] = what;
}
/* Set the large-hash output offset for every thread slot. */
void __nonMP_nLargeOff(unsigned val) {
#ifdef _OPENMP
	unsigned int i;
	for (i = 1; i < m_ompt; ++i)
		nLargeOff[i] = val;
#endif
	nLargeOff[0] = val;
}
/* Per-thread unicode-conversion flag accessors (tid indexes the slot). */
inline static void md5_unicode_convert_set(int what, int tid) {
	md5_unicode_convert[tid] = what;
}
inline static int md5_unicode_convert_get(int tid) {
	return md5_unicode_convert[tid];
}
/* Set the unicode-conversion flag for every thread slot. */
void __nonMP_md5_unicode_convert(int what) {
#ifdef _OPENMP
	unsigned int i;
	for (i = 1; i < m_ompt; ++i)
		md5_unicode_convert[i] = what;
#endif
	md5_unicode_convert[0] = what;
}
/* In non-OMP builds there is only slot 0; these shims pin tid to 0.
 * (Non-recursive macro expansion, so the self-reference is well-defined.) */
#if !defined (_OPENMP)
#define md5_unicode_convert_set(what, tid) md5_unicode_convert_set(what, 0)
#define md5_unicode_convert_get(tid) md5_unicode_convert_get(0)
#define eLargeOut_set(what, tid) eLargeOut_set(what, 0)
#define eLargeOut_get(tid) eLargeOut_get(0)
#define nLargeOff_set(val, tid) nLargeOff_set(val, 0)
#define nLargeOff_get(tid) nLargeOff_get(0)
#endif
inline static void
__nonMP_DynamicFunc__append_keys2() {
#ifdef _OPENMP
	DynamicFunc__append_keys2(0,m_count,0);
#else
	DynamicFunc__append_keys2();
#endif
}
/* Runs crypt2_md5 over all keys; in OMP builds the work is split into
 * OMP_MD5_INC-sized batches across threads. */
static void __possMP_DynamicFunc__crypt2_md5() {
#ifdef _OPENMP
	int i;
	unsigned int inc = OMP_MD5_INC;
//	if (dynamic_use_sse!=1)
//		inc = OMP_INC;
#pragma omp parallel for
	for (i = 0; i < m_count; i += inc)
		DynamicFunc__crypt2_md5(i,i+inc,omp_get_thread_num());
#else
	DynamicFunc__crypt2_md5();
#endif
}
/* Zero input buffer #1 and its lengths. SSE path clears the packed SIMD
 * buffers wholesale; flat path clears only the bytes each slot used
 * (COMPUTE_EX_LEN of the recorded length) before zeroing the length. */
static void __nonMP_DynamicFunc__clean_input() {
	unsigned int i=0;
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		memset(input_buf, 0, MMX_INP_BUF_SZ);
		memset(total_len, 0, MMX_TOT_LEN_SZ);
		return;
	}
#endif
	for (; i < MAX_KEYS_PER_CRYPT_X86; ++i) {
		//if (total_len_X86[i]) {
#if MD5_X2
			/* odd slots live in the .x2 half of the paired buffer */
			if (i&1)
				memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));
			else
#endif
			memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));
			total_len_X86[i] = 0;
		//}
	}
	return;
}
/* Zero input buffer #2 and its lengths (same strategy as clean_input;
 * flat-buffer-capable formats only need the lengths cleared). */
static void __nonMP_DynamicFunc__clean_input2() {
	unsigned int i=0;
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		memset(input_buf2, 0, MMX_INP_BUF2_SZ);
		memset(total_len2, 0, MMX_TOT_LEN2_SZ);
		return;
	}
#endif
	if (curdat.using_flat_buffers_sse2_ok) {
		memset(total_len2_X86, 0, sizeof(total_len2_X86[0])*MAX_KEYS_PER_CRYPT_X86);
		return;
	}
	for (; i < MAX_KEYS_PER_CRYPT_X86; ++i) {
		//if (total_len2_X86[i]) {
#if MD5_X2
			if (i&1)
				memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
			else
#endif
			memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
			total_len2_X86[i] = 0;
		//}
	}
	return;
}
/* Unconditionally zero ALL of input buffer #1 (SIMD and flat), regardless
 * of recorded lengths. Slower than clean_input but always safe. */
static void __nonMP_DynamicFunc__clean_input_full() {
#ifdef SIMD_COEF_32
	memset(input_buf, 0, MMX_INP_BUF_SZ);
	memset(total_len, 0, MMX_TOT_LEN_SZ);
#endif
	memset(input_buf_X86, 0, FLAT_INP_BUF_SZ);
	memset(total_len_X86, 0, FLAT_TOT_LEN_SZ);
}
/* Unconditionally zero ALL of input buffer #2 (SIMD and flat). */
static void __nonMP_DynamicFunc__clean_input2_full() {
#ifdef SIMD_COEF_32
	memset(input_buf2, 0, MMX_INP_BUF2_SZ);
	memset(total_len2, 0, MMX_TOT_LEN2_SZ);
#endif
	memset(input_buf2_X86, 0, FLAT_INP_BUF_SZ);
	memset(total_len2_X86, 0, FLAT_TOT_LEN_SZ);
}
/* "Quick" clean: reset lengths only, leaving stale buffer bytes behind
 * (callers overwrite them). Big-endian builds must also clear the buffer
 * because data is stored pre-swapped there. */
static void __nonMP_DynamicFunc__clean_input_kwik() {
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		memset(total_len, 0, MMX_TOT_LEN_SZ);
		return;
	}
#endif
	memset(total_len_X86, 0, FLAT_TOT_LEN_SZ);
#if !ARCH_LITTLE_ENDIAN
	memset(input_buf_X86, 0, FLAT_INP_BUF_SZ);
#endif
}
#ifndef _OPENMP
/* Quick clean for buffer #2; only compiled (and used) in non-OMP builds. */
static void __nonMP_DynamicFunc__clean_input2_kwik() {
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		memset(total_len2, 0, MMX_TOT_LEN2_SZ);
		return;
	}
#endif
	memset(total_len2_X86, 0, FLAT_TOT_LEN_SZ);
#if !ARCH_LITTLE_ENDIAN
	memset(input_buf2_X86, 0, FLAT_INP_BUF_SZ);
#endif
}
#endif

/*********************************************************************************
 * init() here does nothing. NOTE many formats LINKING into us will have a valid
 * that DOES do something, but ours does nothing.
 * (In practice this init allocates the shared working buffers on first call and
 *  then mirrors the selected subformat's parameters/methods into fmt_Dynamic.)
 *********************************************************************************/
static void init(struct fmt_main *pFmt)
{
	private_subformat_data *pPriv = pFmt->private.data;
	unsigned int i;
	//fprintf(stderr, "init(%s)\n", pPriv->dynamic_WHICH_TYPE_SIG);

	/* first off, SAVE the original format structure (owned by JtR).
	   We may need this later */
	pPriv->pFmtMain = pFmt;
#ifdef _OPENMP
	m_ompt = omp_get_max_threads();
	/* one slot per OMP thread for the per-thread state arrays */
	if (!md5_unicode_convert) {
		md5_unicode_convert = (int*)mem_calloc(m_ompt, sizeof(int));
		eLargeOut = (eLargeOut_t*)mem_calloc(m_ompt, sizeof(eLargeOut_t));
		nLargeOff = (unsigned*)mem_calloc(m_ompt, sizeof(unsigned));
		for (i = 0; i < m_ompt; ++i) {
			eLargeOut[i] = eBase16;
			nLargeOff[i] = 0;
		}
	}
#else
	/* single slot (thread 0) in non-OMP builds */
	if (!md5_unicode_convert) {
		md5_unicode_convert = (int*)mem_calloc(1, sizeof(int));
		eLargeOut = (eLargeOut_t*)mem_calloc(1, sizeof(eLargeOut_t));
		eLargeOut[0] = eBase16;
		nLargeOff = (unsigned*)mem_calloc(1, sizeof(unsigned));
		nLargeOff[0] = 0;
	}
#endif
#ifdef SIMD_COEF_32
	/* packed SIMD working buffers, aligned for vector loads/stores */
	if (!input_buf) {
		input_buf = mem_calloc_align(1, MMX_INP_BUF_SZ, MEM_ALIGN_SIMD);
		total_len = mem_calloc_align(1, MMX_TOT_LEN_SZ, MEM_ALIGN_SIMD);
		total_len2 = mem_calloc_align(1, MMX_TOT_LEN2_SZ, MEM_ALIGN_SIMD);
		input_buf2 = mem_calloc_align(1, MMX_INP_BUF2_SZ, MEM_ALIGN_SIMD);
		crypt_key = mem_calloc_align(1, MMX_CRYPT_KEY_SZ, MEM_ALIGN_SIMD);
		crypt_key2 = mem_calloc_align(1, MMX_CRYPT_KEY2_SZ, MEM_ALIGN_SIMD);
	}
#endif
	/* flat (x86) working buffers; MD5_X2 packs two slots per element */
	if (!crypt_key_X86) {
		crypt_key_X86 = (MD5_OUT *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*crypt_key_X86));
		crypt_key2_X86 = (MD5_OUT *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*crypt_key2_X86));
		input_buf_X86 = (MD5_IN *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*input_buf_X86));
		input_buf2_X86 = (MD5_IN *)mem_calloc(((MAX_KEYS_PER_CRYPT_X86>>MD5_X2)+1), sizeof(*input_buf2_X86));
		total_len_X86 = (unsigned int *)mem_calloc((MAX_KEYS_PER_CRYPT_X86+1), sizeof(*total_len_X86));
		total_len2_X86 = (unsigned int *)mem_calloc((MAX_KEYS_PER_CRYPT_X86+1), sizeof(*total_len2_X86));
	}
	/* NOTE(review): dynamic_BHO[].dat is (re)allocated on EVERY init() call,
	 * unlike the guarded allocations above; if init() can run more than once
	 * before done(), earlier blocks leak — TODO confirm intended. */
	for (i = 0; i < 4; ++i)
		dynamic_BHO[i].dat = mem_calloc_align(BLOCK_LOOPS, sizeof(*(dynamic_BHO[0].dat)), MEM_ALIGN_SIMD);
	gost_init_table();
	/* already initialized for this same subformat? then nothing more to do */
	if (!pPriv || (pPriv->init == 1 && !strcmp(curdat.dynamic_WHICH_TYPE_SIG, pPriv->dynamic_WHICH_TYPE_SIG)))
		return;
	__nonMP_DynamicFunc__clean_input_full();
	__nonMP_DynamicFunc__clean_input2_full();
	// Some builds (omp vs non omp, etc) do not call these functions, so to avoid 'unused' warnings, we simply
	// call them here.
	__nonMP_DynamicFunc__clean_input_kwik();
	dynamic_RESET(pFmt);
	if (!pPriv)
		return;
	pPriv->init = 1;
	/* make this subformat the 'current' one; everything below mirrors its
	 * parameters and methods into the shared fmt_Dynamic descriptor */
	memcpy(&curdat, pPriv, sizeof(private_subformat_data));
	dynamic_use_sse = curdat.dynamic_use_sse;
	force_md5_ctx = curdat.force_md5_ctx;
	fmt_Dynamic.params.max_keys_per_crypt = pFmt->params.max_keys_per_crypt;
	/* NOTE(review): min_keys_per_crypt is copied from max_keys_per_crypt here
	 * (then pFmt's own min is capped at 64 below) — looks deliberate, but
	 * verify against upstream before changing. */
	fmt_Dynamic.params.min_keys_per_crypt = pFmt->params.max_keys_per_crypt;
	if (pFmt->params.min_keys_per_crypt > 64)
		pFmt->params.min_keys_per_crypt = 64;
	fmt_Dynamic.params.flags = pFmt->params.flags;
	fmt_Dynamic.params.format_name = pFmt->params.format_name;
	fmt_Dynamic.params.algorithm_name = pFmt->params.algorithm_name;
	fmt_Dynamic.params.benchmark_comment = pFmt->params.benchmark_comment;
	fmt_Dynamic.params.benchmark_length = pFmt->params.benchmark_length;
	// we allow for 3 bytes of utf8 data to make up the number of plaintext_length unicode chars.
	if ( (pFmt->params.flags&FMT_UNICODE) && options.target_enc == UTF_8 ) {
		//printf ("Here pFmt->params.plaintext_length=%d pPriv->pSetup->MaxInputLen=%d\n", pFmt->params.plaintext_length, pPriv->pSetup->MaxInputLen);
		pFmt->params.plaintext_length = MIN(125, pFmt->params.plaintext_length * 3);
	}
	else
		fmt_Dynamic.params.plaintext_length = pFmt->params.plaintext_length;
	fmt_Dynamic.params.salt_size = pFmt->params.salt_size;
	/* NOTE(review): flags already copied above; this duplicate is harmless. */
	fmt_Dynamic.params.flags = pFmt->params.flags;

	fmt_Dynamic.methods.cmp_all = pFmt->methods.cmp_all;
	fmt_Dynamic.methods.cmp_one = pFmt->methods.cmp_one;
	fmt_Dynamic.methods.cmp_exact = pFmt->methods.cmp_exact;
	fmt_Dynamic.methods.set_salt = pFmt->methods.set_salt;
	fmt_Dynamic.methods.salt = pFmt->methods.salt;
	fmt_Dynamic.methods.salt_hash = pFmt->methods.salt_hash;
	fmt_Dynamic.methods.split = pFmt->methods.split;
	fmt_Dynamic.methods.set_key = pFmt->methods.set_key;
	fmt_Dynamic.methods.get_key = pFmt->methods.get_key;
	fmt_Dynamic.methods.clear_keys = pFmt->methods.clear_keys;
	fmt_Dynamic.methods.crypt_all = pFmt->methods.crypt_all;
	for (i = 0; i < PASSWORD_HASH_SIZES; ++i)
	{
		fmt_Dynamic.methods.binary_hash[i] = pFmt->methods.binary_hash[i];
		fmt_Dynamic.methods.get_hash[i] = pFmt->methods.get_hash[i];
	}

#if !MD5_IMM
	{
		extern void MD5_std_init(struct fmt_main *pFmt);
		MD5_std_init(pFmt);
	}
#endif

	/* formats whose input #2 is always a fixed 32-byte base-16 value get the
	 * lengths (and, for SIMD, the 0x80 pad byte + bit length) pre-seeded once */
	if (curdat.input2_set_len32) {
		for (i = 0; i < MAX_KEYS_PER_CRYPT_X86; ++i)
			total_len2_X86[i] = 32;
#ifdef SIMD_COEF_32
		for (i = 0; i < BLOCK_LOOPS; ++i) {
			unsigned int j;
			for (j = 0; j < SIMD_COEF_32; j++) {
				input_buf2[i].c[GETPOS(32, j)] = 0x80;
				input_buf2[i].c[GETPOS(57, j)] = 0x1;
				total_len2[i][j] = 0x20;
			}
		}
#endif
	}
}

/* Release everything init() allocated. */
static void done(void)
{
	int i;
	MEM_FREE(total_len2_X86);
	MEM_FREE(total_len_X86);
	MEM_FREE(input_buf2_X86);
	MEM_FREE(input_buf_X86);
	MEM_FREE(crypt_key2_X86);
	MEM_FREE(crypt_key_X86);
#ifdef SIMD_COEF_32
	MEM_FREE(crypt_key2);
	MEM_FREE(crypt_key);
	MEM_FREE(input_buf2);
	MEM_FREE(total_len2);
	MEM_FREE(total_len);
	MEM_FREE(input_buf);
#endif
	MEM_FREE(nLargeOff);
	MEM_FREE(eLargeOut);
	MEM_FREE(md5_unicode_convert);
	for (i = 0; i < 4; ++i)
		MEM_FREE(dynamic_BHO[i].dat);
}

/*********************************************************************************
 * This function will add a $dynamic_#$ IF there is not one, and if we have a specific
 * format requested. Also, it will add things like UserID, Domain, Fld3, Fld4,
 * Fld5, etc.
 *********************************************************************************/
static char *prepare(char *split_fields[10], struct fmt_main *pFmt)
{
	private_subformat_data *pPriv = pFmt->private.data;
	char Tmp[80];
	int i;
	int trim_u=0;
	char *cpBuilding=split_fields[1];

	if (!pPriv)
		return split_fields[1];

	// ANY field[1] longer than 490 will simply be ignored, and returned 'as is'.
	// the rest of this function makes this assumption.
	if (!cpBuilding || strnlen(cpBuilding, 491) > 490)
		return cpBuilding;

	// mime. We want to strip off ALL trailing '=' characters to 'normalize' them
	if (pPriv->dynamic_base64_inout == 3 && !strncmp(cpBuilding, "$dynamic_", 9)) {
		static char ct[496];
		int len;
		char *cp = strchr(&cpBuilding[9], '$'), *cp2;

		if (!cp)
			return cpBuilding;
		++cp;
		len = base64_valid_length(cp, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0);
		if (len && cp[len-1] == '=') {
			/* copy up to and including the base64 body, then chop '=' padding */
			strnzcpy(ct, cpBuilding, cp-cpBuilding+len+1);
			cp2 = &ct[strlen(ct)-1];
			while (*cp2 == '=')
				*cp2-- = 0;
			if (cp[len])
				strcat(cp2, &cp[len]);
			cpBuilding = ct;
		}
	}
	if (pFmt->params.salt_size && !strchr(split_fields[1], '$')) {
		/* salted format but no '$' separator: only proceed if a username,
		 * field mask or salt-regeneration could still make this valid */
		if (!pPriv->nUserName && !pPriv->FldMask && options.regen_lost_salts == 0)
			return split_fields[1];
	}

	// handle 'older' md5_gen(x) signature, by simply converting to $dynamic_x$ signature
	// Thus older md5_gen() is a valid input (or from john.pot), but ONLY the newer
	// $dynamic_x$ will be written out (into .pot, output lines, etc).
if (!strncmp(cpBuilding, "md5_gen(", 8)) { static char ct[496]; char *cp = &cpBuilding[8], *cpo = &ct[sprintf(ct, "$dynamic_")]; while (*cp >= '0' && *cp <= '9') *cpo++ = *cp++; *cpo++ = '$'; ++cp; strcpy(cpo, cp); cpBuilding = ct; } // At this point, max length of cpBuilding is 491 (if it was a md5_gen signature) // allow a raw hash, if there is a $u but no salt if (pPriv->nUserName && split_fields[0][0] && !strchr(cpBuilding, '$') && strcmp(split_fields[0], "?")) { static char ct[496]; strcpy(ct, cpBuilding); strcat(ct, "$$U"); cpBuilding = ct; trim_u=1; } cpBuilding = FixupIfNeeded(cpBuilding, pPriv); if (trim_u) cpBuilding[strlen(cpBuilding)-3] = 0; // at this point max length is still < 512. 491 + strlen($dynamic_xxxxx$) is 506 if (strncmp(cpBuilding, "$dynamic_", 9)) { // ok, here we add the 'generic' regen salt code if (options.regen_lost_salts && !strchr(cpBuilding, '$')) { char *cp = load_regen_lost_salt_Prepare(cpBuilding); if (cp) return cp; } return split_fields[1]; } if ( (pPriv->pSetup->flags&MGF_SALTED) == 0) return cpBuilding; /* at this point, we want to convert ANY and all $HEX$hex into values */ /* the reason we want to do this, is so that things read from john.pot file will be in proper 'native' format */ /* the ONE exception to this, is if there is a NULL byte in the $HEX$ string, then we MUST leave that $HEX$ string */ /* alone, and let the later calls in dynamic.c handle them. 
*/ if (strstr(cpBuilding, "$HEX$")) { char *cp, *cpo; int bGood=1; static char ct[512]; strcpy(ct, cpBuilding); cp = strstr(ct, "$HEX$"); cpo = cp; *cpo++ = *cp; cp += 5; while (*cp && bGood) { if (*cp == '0' && cp[1] == '0') { bGood = 0; break; } if (atoi16[ARCH_INDEX(*cp)] != 0x7f && atoi16[ARCH_INDEX(cp[1])] != 0x7f) { *cpo++ = atoi16[ARCH_INDEX(*cp)]*16 + atoi16[ARCH_INDEX(cp[1])]; *cpo = 0; cp += 2; } else if (*cp == '$') { while (*cp && strncmp(cp, "$HEX$", 5)) { *cpo++ = *cp++; } *cpo = 0; if (!strncmp(cp, "$HEX$", 5)) { *cpo++ = *cp; cp += 5; } } else { return split_fields[1]; } } if (bGood) cpBuilding = ct; // if we came into $HEX$ removal, then cpBuilding will always be shorter } // at this point max length is still < 512. 491 + strlen($dynamic_xxxxx$) is 506 if (pPriv->nUserName && !strstr(cpBuilding, "$$U")) { if (split_fields[0] && split_fields[0][0] && strcmp(split_fields[0], "?")) { char *userName=split_fields[0], *cp; static char ct[1024]; // assume field[0] is in format: username OR DOMAIN\\username If we find a \\, then use the username 'following' it. cp = strchr(split_fields[0], '\\'); if (cp) userName = &cp[1]; userName = HandleCase(userName, pPriv->nUserName); snprintf (ct, sizeof(ct), "%s$$U%s", cpBuilding, userName); cpBuilding = ct; } } if (pPriv->FldMask) { for (i = 0; i < 10; ++i) { if (pPriv->FldMask&(MGF_FLDx_BIT<<i)) { sprintf(Tmp, "$$F%d", i); if (split_fields[i] && split_fields[i][0] && strcmp(split_fields[i], "/") && !strstr(cpBuilding, Tmp)) { static char ct[1024]; char ct2[1024]; snprintf (ct2, sizeof(ct2), "%s$$F%d%s", cpBuilding, i, split_fields[i]); strcpy(ct, ct2); cpBuilding = ct; } } } } return cpBuilding; } static char *split(char *ciphertext, int index, struct fmt_main *pFmt) { static char out[1024]; private_subformat_data *pPriv = pFmt->private.data; if (strnlen(ciphertext, 951) > 950) return ciphertext; // mime. 
We want to strip off ALL trailing '=' characters to 'normalize' them if (pPriv->dynamic_base64_inout == 3 && !strncmp(ciphertext, "$dynamic_", 9)) { static char ct[496]; unsigned int len; char *cp = strchr(&ciphertext[9], '$'), *cp2; if (cp) { ++cp; len = base64_valid_length(cp, e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0); if (len && cp[len-1] == '=') { strnzcpy(ct, ciphertext, cp-ciphertext+len+1); cp2 = &ct[strlen(ct)-1]; while (*cp2 == '=') *cp2-- = 0; if (cp[len]) strcat(cp2, &cp[len]); ciphertext = ct; } } } if (!strncmp(ciphertext, "$dynamic", 8)) { if (strstr(ciphertext, "$HEX$")) return RemoveHEX(out, ciphertext); return ciphertext; } if (!strncmp(ciphertext, "md5_gen(", 8)) { ciphertext += 8; do ++ciphertext; while (*ciphertext != ')') ; ++ciphertext; } if (strstr(ciphertext, "$HEX$")) { char *cp = out + sprintf(out, "%s", pPriv->dynamic_WHICH_TYPE_SIG); RemoveHEX(cp, ciphertext); } else snprintf(out, sizeof(out), "%s%s", pPriv->dynamic_WHICH_TYPE_SIG, ciphertext); return out; } // This split unifies case. static char *split_UC(char *ciphertext, int index, struct fmt_main *pFmt) { static char out[1024]; private_subformat_data *pPriv = pFmt->private.data; if (!strncmp(ciphertext, "$dynamic", 8)) { if (strstr(ciphertext, "$HEX$")) RemoveHEX(out, ciphertext); else strcpy(out, ciphertext); } else { if (!strncmp(ciphertext, "md5_gen(", 8)) { ciphertext += 8; do ++ciphertext; while (*ciphertext != ')') ; ++ciphertext; } if (strstr(ciphertext, "$HEX$")) { char *cp = out + sprintf(out, "%s", pPriv->dynamic_WHICH_TYPE_SIG); RemoveHEX(cp, ciphertext); } else sprintf(out, "%s%s", pPriv->dynamic_WHICH_TYPE_SIG, ciphertext); } ciphertext = strchr(&out[8], '$')+1; while (*ciphertext && *ciphertext != '$') { if (*ciphertext >= 'A' && *ciphertext <= 'Z') *ciphertext += 0x20; // ASCII specific, but I really do not care. 
			++ciphertext;
	}
//	printf("%s\n", out);
	return out;
}

/*********************************************************************************
 * Stores the new salt provided into our 'working' salt.
 * Salt blob layout: two base-8 digit chars of salt length, four base-8 digit
 * chars of 'todo' bit flags (bit0=salt2, bit1=username, bits2..11=fields),
 * then the salt bytes, then each flagged item as <len byte><data>.
 *********************************************************************************/
static void set_salt(void *salt)
{
	unsigned char *cpsalt;
	unsigned int todo_bits=0, i, bit;
	if (!salt || curdat.dynamic_FIXED_SALT_SIZE == 0) {
		saltlen = 0;
		return;
	}
	cpsalt = *((unsigned char**)salt);
	/* two base-8 digits -> salt length */
	saltlen = *cpsalt++ - '0';
	saltlen <<= 3;
	saltlen += *cpsalt++ - '0';
#if ARCH_ALLOWS_UNALIGNED
	if (*((uint32_t*)cpsalt) != 0x30303030)	/* "0000" as one 32-bit load */
#else
	if (memcmp(cpsalt, "0000", 4))
#endif
	{
		// this is why we used base-8. Takes an extra byte, but there is NO conditional
		// logic, building this number, and no multiplication. We HAVE added one conditional
		// check, to see if we can skip the entire load, if it is 0000.
		todo_bits = *cpsalt++ - '0';
		todo_bits <<= 3;
		todo_bits += *cpsalt++ - '0';
		todo_bits <<= 3;
		todo_bits += *cpsalt++ - '0';
		todo_bits <<= 3;
		todo_bits += *cpsalt++ - '0';
	}
	else
		cpsalt += 4;
	cursalt = cpsalt;
	if (!todo_bits) return;
	cpsalt += saltlen;
	/* bit 0: secondary salt follows */
	if (todo_bits & 1) {
		todo_bits ^= 1; // clear that bit.
		saltlen2 = *cpsalt++;
		cursalt2 = cpsalt;
		if (todo_bits == 0) return;
		cpsalt += saltlen2;
	}
	/* bit 1: username follows */
	if (todo_bits & 2) {
		todo_bits ^= 2; // clear that bit.
		usernamelen = *cpsalt++;
		username = cpsalt;
		if (todo_bits == 0) return;
		cpsalt += usernamelen;
	}
	/* bits 2..11: the ten optional F0..F9 fields */
	bit = 4;
	for (i = 0; i < 10; ++i, bit<<=1) {
		if (todo_bits & bit) {
			todo_bits ^= bit; // clear that bit.
			fld_lens[i] = *cpsalt++;
			flds[i] = cpsalt;
			if (todo_bits == 0) return;
			cpsalt += fld_lens[i];
		}
	}
}

/*********************************************************************************
 * Sets this key. It will either be dropped DIRECTLY into the input buffer
 * number 1, or put into an array of keys. Which one happens depends upon
 * HOW the generic functions were laid out for this type. Not all types can
If not they MUST use the key array. Using the input * buffer is faster, when it can be safely done. *********************************************************************************/ static void set_key(char *key, int index) { unsigned int len; //printf("idx=%d key=%s\n", index, key); #ifdef SIMD_COEF_32 if (curdat.store_keys_in_input==2) dynamic_use_sse = 3; else if (curdat.md5_startup_in_x86) dynamic_use_sse = 2; else if (dynamic_use_sse==2) dynamic_use_sse = 1; #endif if (curdat.nPassCase>1) key = HandleCase(key, curdat.nPassCase); // Ok, if the key is in unicode/utf8, we switch it here one time, and are done with it. if (curdat.store_keys_in_input) { #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { // code derived from rawMD5_fmt_plug.c code from magnum #if ARCH_ALLOWS_UNALIGNED const uint32_t *key32 = (uint32_t*)key; #else char buf_aligned[PLAINTEXT_LENGTH + 1] JTR_ALIGN(sizeof(uint32_t)); const uint32_t *key32 = is_aligned(key, sizeof(uint32_t)) ? (uint32_t*)key : (uint32_t*)strcpy(buf_aligned, key); #endif unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32); uint32_t *keybuffer = &input_buf[idx].w[index&(SIMD_COEF_32-1)]; uint32_t *keybuf_word = keybuffer; unsigned int len; uint32_t temp; len = 0; while((temp = *key32++) & 0xff) { if (!(temp & 0xff00)) { *keybuf_word = (temp & 0xff) | (0x80 << 8); ++len; goto key_cleaning; } if (!(temp & 0xff0000)) { *keybuf_word = (temp & 0xffff) | (0x80 << 16); len+=2; goto key_cleaning; } if (!(temp & 0xff000000)) { *keybuf_word = temp | (0x80U << 24); len+=3; goto key_cleaning; } *keybuf_word = temp; len += 4; keybuf_word += SIMD_COEF_32; } *keybuf_word = 0x80; key_cleaning: keybuf_word += SIMD_COEF_32; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += SIMD_COEF_32; } keybuffer[14*SIMD_COEF_32] = len << 3; return; } #endif len = strlen(key); if (len > 110) // we never do UTF-8 -> UTF-16 in this mode len = 110; // if (index==0) { // we 'have' to use full clean here. 
NOTE 100% sure why, but 10 formats fail if we do not. // __nonMP_DynamicFunc__clean_input_full(); // } #if MD5_X2 if (index & 1) memcpy(input_buf_X86[index>>MD5_X2].x2.b2, key, len); else #endif memcpy(input_buf_X86[index>>MD5_X2].x1.b, key, len); saved_key_len[index] = total_len_X86[index] = len; } else { len = strlen(key); if (len > 110 && !(fmt_Dynamic.params.flags & FMT_UNICODE)) len = 110; // if (index==0) { // __nonMP_DynamicFunc__clean_input_full(); // } keys_dirty = 1; memcpy(((char*)(saved_key[index])), key, len); saved_key_len[index] = len; } } static void clear_keys(void) { #ifdef SIMD_COEF_32 if (curdat.pSetup->flags & MGF_FULL_CLEAN_REQUIRED) { __nonMP_DynamicFunc__clean_input_full(); return; } if (curdat.store_keys_in_input==1 || curdat.store_keys_in_input==3) return; if (curdat.md5_startup_in_x86) __nonMP_DynamicFunc__clean_input_full(); // This clean was causing failures (dirty buffers left) for dyna_51, 61 and formspring. // once commented out, dyna fully passes. I see no reason to keep this here at all. // else // __nonMP_DynamicFunc__clean_input_kwik(); #else __nonMP_DynamicFunc__clean_input_full(); #endif } /********************************************************************************* * Returns the key. NOTE how it gets it depends upon if we are storing * into the array of keys (there we simply return it), or if we are * loading into input buffer #1. If in input buffer, we have to re-create * the key, prior to returning it. 
*********************************************************************************/ static char *get_key(int index) { if (curdat.store_keys_in_input) { unsigned int i; unsigned char *cp; #ifdef SIMD_COEF_32 //if (dynamic_use_sse==1) { // Note, if we are not in if (dynamic_use_sse && !curdat.md5_startup_in_x86) { unsigned int s; unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32); //if (curdat.store_keys_in_input && dynamic_use_sse==1) // s = saved_key_len[index]; // NOTE, we now have to get the length from the buffer, we do NOT store it into a saved_key_len buffer. uint32_t *keybuffer = &input_buf[idx].w[index&(SIMD_COEF_32-1)]; s = keybuffer[14*SIMD_COEF_32] >> 3; for (i=0;i<s;i++) out[i] = input_buf[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))]; out[i] = 0; return (char*)out; } #endif #if MD5_X2 if (index & 1) cp = input_buf_X86[index>>MD5_X2].x2.B2; else #endif cp = input_buf_X86[index>>MD5_X2].x1.B; for (i=0;i<saved_key_len[index];++i) out[i] = cp[i]; out[i] = 0; return (char*)out; } else { saved_key[index][saved_key_len[index]] = '\0'; return saved_key[index]; } } /********************************************************************************* * Looks for ANY key that was cracked. 
 *********************************************************************************/
static int cmp_all(void *binary, int count)
{
	unsigned int i;
#ifdef SIMD_COEF_32
	unsigned int j;
	if (dynamic_use_sse&1) {
		/* scan every SIMD lane; only the first 32 bits of each result are
		 * compared here — cmp_one does the full check */
		unsigned int cnt = ( ((unsigned int)count+SIMD_COEF_32-1)/SIMD_COEF_32);
		for (i = 0; i < cnt; ++i)
		{
			for (j = 0; j < SIMD_COEF_32; ++j)
				if ( *((uint32_t *)binary) == crypt_key[i].w[j])
					return 1;
		}
		return 0;
	}
#endif
	for (i = 0; i < count; i++) {
#if MD5_X2
		if (i&1) {
			if (!(((uint32_t *)binary)[0] - crypt_key_X86[i>>MD5_X2].x2.w2[0]))
				return 1;
		}
		else
#endif
		if (!(((uint32_t *)binary)[0] - crypt_key_X86[i>>MD5_X2].x1.w[0]))
			return 1;
	}
	return 0;
}

/* 4x6 formats only keep 24 bits per 32-bit word; mask selects those bits in
 * the machine's byte order. */
#if ARCH_LITTLE_ENDIAN
#define MASK_4x6 0x00ffffff
#else
#define MASK_4x6 0xffffff00
#endif
/* cmp_all variant for 64-bit 4x6 (24-bits-per-word) output formats. */
static int cmp_all_64_4x6(void *binary, int count)
{
	unsigned int i;
#ifdef SIMD_COEF_32
	unsigned int j;
	if (dynamic_use_sse==1) {
		unsigned int cnt = ( ((unsigned int)count+SIMD_COEF_32-1)/SIMD_COEF_32);
		for (i = 0; i < cnt; ++i)
		{
			for (j = 0; j < SIMD_COEF_32; ++j)
				if ( *((uint32_t *)binary) == (crypt_key[i].w[j] & MASK_4x6))
					return 1;
		}
		return 0;
	}
#endif
	for (i = 0; i < count; i++) {
#if MD5_X2
		if (i&1) {
			if (!(((uint32_t *)binary)[0] - (crypt_key_X86[i>>MD5_X2].x2.w2[0]&MASK_4x6)))
				return 1;
		}
		else
#endif
		if (!(((uint32_t *)binary)[0] - (crypt_key_X86[i>>MD5_X2].x1.w[0]&MASK_4x6)))
			return 1;
	}
	return 0;
}

/*********************************************************************************
 * In this code, we always do exact compare, so if this function is called, it
 * simply returns true.
 *********************************************************************************/
static int cmp_exact(char *binary, int index)
{
	return 1;
}

/*********************************************************************************
 * There was 'something' that was possibly hit. Now john will ask us to check
 * each one of the data items, for an 'exact' match.
 *********************************************************************************/
/* Full 128-bit compare of one candidate's result against the binary hash. */
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		/* SIMD layout: word w of lane L lives at [w*SIMD_COEF_32 + L] */
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		if ( (((uint32_t *)binary)[0] == ((uint32_t *)&(crypt_key[idx].c))[0*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]) &&
		     (((uint32_t *)binary)[1] == ((uint32_t *)&(crypt_key[idx].c))[1*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]) &&
		     (((uint32_t *)binary)[2] == ((uint32_t *)&(crypt_key[idx].c))[2*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]) &&
		     (((uint32_t *)binary)[3] == ((uint32_t *)&(crypt_key[idx].c))[3*SIMD_COEF_32+(index&(SIMD_COEF_32-1))]))
			return 1;
		return 0;
	}
#endif
#if MD5_X2
	if (index & 1) {
		if ( (((uint32_t *)binary)[0] == crypt_key_X86[index>>MD5_X2].x2.w2[0] ) &&
		     (((uint32_t *)binary)[1] == crypt_key_X86[index>>MD5_X2].x2.w2[1] ) &&
		     (((uint32_t *)binary)[2] == crypt_key_X86[index>>MD5_X2].x2.w2[2] ) &&
		     (((uint32_t *)binary)[3] == crypt_key_X86[index>>MD5_X2].x2.w2[3] ) )
			return 1;
		return 0;
	}
#endif
	if ( (((uint32_t *)binary)[0] == crypt_key_X86[index>>MD5_X2].x1.w[0] ) &&
	     (((uint32_t *)binary)[1] == crypt_key_X86[index>>MD5_X2].x1.w[1] ) &&
	     (((uint32_t *)binary)[2] == crypt_key_X86[index>>MD5_X2].x1.w[2] ) &&
	     (((uint32_t *)binary)[3] == crypt_key_X86[index>>MD5_X2].x1.w[3] ) )
		return 1;
	return 0;
}
/* cmp_one variant for 64-bit 4x6 formats: mask each word to its 24 live bits. */
static int cmp_one_64_4x6(void *binary, int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		if ( (((uint32_t *)binary)[0] == (((uint32_t *)&(crypt_key[idx].c))[0*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)) &&
		     (((uint32_t *)binary)[1] == (((uint32_t *)&(crypt_key[idx].c))[1*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)) &&
		     (((uint32_t *)binary)[2] == (((uint32_t *)&(crypt_key[idx].c))[2*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)) &&
		     (((uint32_t *)binary)[3] == (((uint32_t *)&(crypt_key[idx].c))[3*SIMD_COEF_32+(index&(SIMD_COEF_32-1))] & MASK_4x6)))
			return 1;
		return
0; } #endif #if MD5_X2 if (index & 1) { if ( (((uint32_t*)binary)[0] == (crypt_key_X86[index>>MD5_X2].x2.w2[0] & MASK_4x6)) && (((uint32_t*)binary)[1] == (crypt_key_X86[index>>MD5_X2].x2.w2[1] & MASK_4x6)) && (((uint32_t*)binary)[2] == (crypt_key_X86[index>>MD5_X2].x2.w2[2] & MASK_4x6)) && (((uint32_t*)binary)[3] == (crypt_key_X86[index>>MD5_X2].x2.w2[3] & MASK_4x6)) ) return 1; return 0; } #endif if ( (((uint32_t*)binary)[0] == (crypt_key_X86[index>>MD5_X2].x1.w[0] & MASK_4x6)) && (((uint32_t*)binary)[1] == (crypt_key_X86[index>>MD5_X2].x1.w[1] & MASK_4x6)) && (((uint32_t*)binary)[2] == (crypt_key_X86[index>>MD5_X2].x1.w[2] & MASK_4x6)) && (((uint32_t*)binary)[3] == (crypt_key_X86[index>>MD5_X2].x1.w[3] & MASK_4x6)) ) return 1; return 0; } /********************************************************************************* ********************************************************************************* * This is the real 'engine'. It simply calls functions one * at a time from the array of functions. ********************************************************************************* *********************************************************************************/ static int crypt_all(int *pcount, struct db_salt *salt) { // set m_count. This is our GLOBAL value, used by ALL of the script functions to know how // many keys are loaded, and how much work we do. m_count = *pcount; __nonMP_eLargeOut(eBase16); __nonMP_nLargeOff(0); #ifdef SIMD_COEF_32 // If this format is MMX built, but is supposed to start in X86 (but be switchable), then we // set that value here. if (curdat.store_keys_in_input==2) dynamic_use_sse = 3; else if (curdat.md5_startup_in_x86) dynamic_use_sse = 2; else if (dynamic_use_sse==2) dynamic_use_sse = 1; #endif __nonMP_md5_unicode_convert(0); if (curdat.dynamic_base16_upcase) { dynamic_itoa16 = itoa16u; itoa16_w2 = itoa16_w2_u; } else { dynamic_itoa16 = itoa16; itoa16_w2 = itoa16_w2_l; } // There may have to be some 'prelim' work done with the keys. 
This is so that if we 'know' that keys were // loaded into the keys[] array, but that we should do something like md5 and base-16 put them into an // input slot, then we do that FIRST, prior to calling the script functions. Thus for a format such as // md5(md5($p).$s) we could md5 the pass, and base-16 put it into a input buffer. Then when john sets salt // and calls crypt all, the crypt script would simply set the input len to 32, append the salt and call a // single crypt. That eliminates almost 1/2 of the calls to md5_crypt() for the format show in this example. if (keys_dirty) { if (curdat.store_keys_normal_but_precompute_hash_to_output2) { keys_dirty = 0; if (curdat.pSetup->flags & MGF_FULL_CLEAN_REQUIRED2) __nonMP_DynamicFunc__clean_input2_full(); else __nonMP_DynamicFunc__clean_input2(); if (curdat.store_keys_in_input_unicode_convert) __nonMP_md5_unicode_convert(1); __nonMP_DynamicFunc__append_keys2(); __nonMP_md5_unicode_convert(0); //if (curdat.using_flat_buffers_sse2_ok) { if (curdat.dynamic_use_sse == 0) { if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1) { #ifdef _OPENMP #define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_overwrite_input1(0,m_count,0); break #else #define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_overwrite_input1(); break #endif switch(curdat.store_keys_normal_but_precompute_hash_to_output2_base16_type) { CASE(MD5); CASE(MD4); CASE(SHA1); CASE(SHA224); CASE(SHA256); CASE(SHA384); CASE(SHA512); CASE(GOST); CASE(WHIRLPOOL); CASE(Tiger); CASE(RIPEMD128); CASE(RIPEMD160); CASE(RIPEMD256); CASE(RIPEMD320); CASE(HAVAL128_3); CASE(HAVAL128_4); CASE(HAVAL128_5); CASE(HAVAL160_3); CASE(HAVAL160_4); CASE(HAVAL160_5); CASE(HAVAL192_3); CASE(HAVAL192_4); CASE(HAVAL192_5); CASE(HAVAL224_3); CASE(HAVAL224_4); CASE(HAVAL224_5); CASE(HAVAL256_3); CASE(HAVAL256_4); CASE(HAVAL256_5); CASE(MD2); CASE(PANAMA); CASE(SKEIN224); CASE(SKEIN256); CASE(SKEIN384); CASE(SKEIN512); CASE(SHA3_224); CASE(SHA3_256); 
CASE(SHA3_384); CASE(SHA3_512); CASE(KECCAK_256); CASE(KECCAK_512); // LARGE_HASH_EDIT_POINT } } else if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX) { unsigned int i; for (i = 0; i < m_count; ++i) total_len_X86[i] = curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX; #undef CASE #ifdef _OPENMP #define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_append_input1(0,m_count,0); break #else #define CASE(H) case MGF__##H: DynamicFunc__##H##_crypt_input2_append_input1(); break #endif switch(curdat.store_keys_normal_but_precompute_hash_to_output2_base16_type) { CASE(MD5); CASE(MD4); CASE(SHA1); CASE(SHA224); CASE(SHA256); CASE(SHA384); CASE(SHA512); CASE(GOST); CASE(WHIRLPOOL); CASE(Tiger); CASE(RIPEMD128); CASE(RIPEMD160); CASE(RIPEMD256); CASE(RIPEMD320); CASE(HAVAL128_3); CASE(HAVAL128_4); CASE(HAVAL128_5); CASE(HAVAL160_3); CASE(HAVAL160_4); CASE(HAVAL160_5); CASE(HAVAL192_3); CASE(HAVAL192_4); CASE(HAVAL192_5); CASE(HAVAL224_3); CASE(HAVAL224_4); CASE(HAVAL224_5); CASE(HAVAL256_3); CASE(HAVAL256_4); CASE(HAVAL256_5); CASE(MD2); CASE(PANAMA); CASE(SKEIN224); CASE(SKEIN256); CASE(SKEIN384); CASE(SKEIN512); CASE(SHA3_224); CASE(SHA3_256); CASE(SHA3_384); CASE(SHA3_512); CASE(KECCAK_256); CASE(KECCAK_512); // LARGE_HASH_EDIT_POINT } } else { // calls 'old' code (ossl, sorry :( We should FIND and remove any format // written this way, if it is __possMP_DynamicFunc__crypt2_md5(); } } else { __possMP_DynamicFunc__crypt2_md5(); if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1) { if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1==2) __nonMP_DynamicFunc__SSEtoX86_switch_output2(); __nonMP_DynamicFunc__clean_input(); __nonMP_DynamicFunc__append_from_last_output2_to_input1_as_base16(); } } } } // Ok, now we 'run' the script. We simply call 1 function right after the other. // ALL functions are void f(void). 
They use the globals: // input_buf1[] input_buf2[] (requires thread safety) // total_len1[] total_len2[] (requires thread safety) // crypt1[] crypt2[] (requires thread safety) // md5_unicode_convert (requires thread safety, had to change to array) // saved_key[] (const?) // saved_key_len[] (const) // cursalt, cursalt2 (const) // saltlen, saltlen2 (const) // m_count (const) // nConsts (const) // Consts[], ConstsLen[] (const) // Since this array is in a structure, we assign a simple pointer to it // before walking. Trivial improvement, but every cycle counts :) { #ifdef _OPENMP if ((curdat.pFmtMain->params.flags & FMT_OMP) == FMT_OMP) { int j; unsigned int inc = (m_count+m_ompt-1) / m_ompt; //printf ("maxkeys=%d m_count=%d inc1=%d granularity=%d inc2=%d\n", curdat.pFmtMain->params.max_keys_per_crypt, m_count, inc, curdat.omp_granularity, ((inc + curdat.omp_granularity-1)/curdat.omp_granularity)*curdat.omp_granularity); inc = ((inc + curdat.omp_granularity-1)/curdat.omp_granularity)*curdat.omp_granularity; #pragma omp parallel for shared(curdat, inc, m_count) for (j = 0; j < m_count; j += inc) { unsigned int i; unsigned int top=j+inc; /* The last block may 'appear' to have more keys than we have in the entire buffer space. This is due to the granularity. If so, reduce that last one to stop at end of our buffers. NOT doing this is causes a huge buffer overflow. */ if (top > curdat.pFmtMain->params.max_keys_per_crypt) top = curdat.pFmtMain->params.max_keys_per_crypt; // we now run a full script in this thread, using only a subset of // the data, from [j,top) The next thread will run from [top,top+inc) // each thread will take the next inc values, until we get to m_count for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i) (*(curdat.dynamic_FUNCTIONS[i]))(j,top,omp_get_thread_num()); } } else { unsigned int i; // same code (almost), but without the threads. 
for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i) (*(curdat.dynamic_FUNCTIONS[i]))(0,m_count,0); } #else unsigned int i; for (i = 0; curdat.dynamic_FUNCTIONS[i]; ++i) { (*(curdat.dynamic_FUNCTIONS[i]))(); #if 0 // Dump state (for debugging help) if (i==0) printf("\npassword=%.*s\n", saved_key_len[0], saved_key[0]); printf ("\nState after function: %s\n", dynamic_Find_Function_Name(curdat.dynamic_FUNCTIONS[i])); // dump input 1 #ifdef SIMD_COEF_32 dump_stuff_mmx_msg("input_buf[0]", input_buf[0].c, 64, 0); dump_stuff_mmx_msg("input_buf[1]", input_buf[0].c, 64, 1); dump_stuff_mmx_msg("input_buf[2]", input_buf[0].c, 64, 2); dump_stuff_mmx_msg("input_buf[3]", input_buf[0].c, 64, 3); #endif printf ("input_buf86[0] : %*.*s\n", total_len_X86[0],total_len_X86[0],input_buf_X86[0].x1.b); printf ("input_buf86[1] : %*.*s\n", total_len_X86[1],total_len_X86[1],input_buf_X86[1].x1.b); printf ("input_buf86[2] : %*.*s\n", total_len_X86[2],total_len_X86[2],input_buf_X86[2].x1.b); printf ("input_buf86[3] : %*.*s\n", total_len_X86[3],total_len_X86[3],input_buf_X86[3].x1.b); // dump crypt 1 #ifdef SIMD_COEF_32 dump_stuff_mmx_msg("crypt_key[0]", crypt_key[0].c, 16, 0); dump_stuff_mmx_msg("crypt_key[1]", crypt_key[0].c, 16, 1); dump_stuff_mmx_msg("crypt_key[2]", crypt_key[0].c, 16, 2); dump_stuff_mmx_msg("crypt_key[3]", crypt_key[0].c, 16, 3); #endif dump_stuff_be_msg("crypt_key_X86[0]", crypt_key_X86[0].x1.b, 16); dump_stuff_be_msg("crypt_key_X86[1]", crypt_key_X86[1].x1.b, 16); dump_stuff_be_msg("crypt_key_X86[2]", crypt_key_X86[2].x1.b, 16); dump_stuff_be_msg("crypt_key_X86[3]", crypt_key_X86[3].x1.b, 16); // dump input 2 #ifdef SIMD_COEF_32 dump_stuff_mmx_msg("input_buf2[0]", input_buf2[0].c, 64, 0); dump_stuff_mmx_msg("input_buf2[1]", input_buf2[0].c, 64, 1); dump_stuff_mmx_msg("input_buf2[2]", input_buf2[0].c, 64, 2); dump_stuff_mmx_msg("input_buf2[3]", input_buf2[0].c, 64, 3); #endif printf ("input2_buf86[0] : %*.*s\n", total_len2_X86[0],total_len2_X86[0],input_buf2_X86[0].x1.b); 
printf ("input2_buf86[1] : %*.*s\n", total_len2_X86[1],total_len2_X86[1],input_buf2_X86[1].x1.b); printf ("input2_buf86[2] : %*.*s\n", total_len2_X86[2],total_len2_X86[2],input_buf2_X86[2].x1.b); printf ("input2_buf86[3] : %*.*s\n", total_len2_X86[3],total_len2_X86[3],input_buf2_X86[3].x1.b); // dump crypt 2 #ifdef SIMD_COEF_32 dump_stuff_mmx_msg("crypt_key2[0]", crypt_key2[0].c, 16, 0); dump_stuff_mmx_msg("crypt_key2[1]", crypt_key2[0].c, 16, 1); dump_stuff_mmx_msg("crypt_key2[2]", crypt_key2[0].c, 16, 2); dump_stuff_mmx_msg("crypt_key2[3]", crypt_key2[0].c, 16, 3); #endif dump_stuff_be_msg("crypt_key2_X86[0]", crypt_key2_X86[0].x1.b, 16); dump_stuff_be_msg("crypt_key2_X86[1]", crypt_key2_X86[1].x1.b, 16); dump_stuff_be_msg("crypt_key2_X86[2]", crypt_key2_X86[2].x1.b, 16); dump_stuff_be_msg("crypt_key2_X86[3]", crypt_key2_X86[3].x1.b, 16); #endif } #endif } return m_count; } /********************************************************************************* * 'normal' hashing functions *********************************************************************************/ extern char *MD5_DumpHexStr(void *p); #if !ARCH_LITTLE_ENDIAN // the lower 8 bits is zero on the binary (but filled in on the hash). 
We need to dump the low 8 static int binary_hash_0_64x4(void * binary) { return (((uint32_t *)binary)[0]>>8) & PH_MASK_0; } static int binary_hash_1_64x4(void * binary) { return (((uint32_t *)binary)[0]>>8) & PH_MASK_1; } static int binary_hash_2_64x4(void * binary) { return (((uint32_t *)binary)[0]>>8) & PH_MASK_2; } static int binary_hash_3_64x4(void * binary) { return (((uint32_t *)binary)[0]>>8) & PH_MASK_3; } static int binary_hash_4_64x4(void * binary) { return (((uint32_t *)binary)[0]>>8) & PH_MASK_4; } static int binary_hash_5_64x4(void * binary) { return (((uint32_t *)binary)[0]>>8) & PH_MASK_5; } static int get_hash_0_64x4(int index) { #if MD5_X2 if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_0; #endif return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_0;} static int get_hash_1_64x4(int index) { #if MD5_X2 if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_1; #endif return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_1;} static int get_hash_2_64x4(int index) { #if MD5_X2 if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_2; #endif return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_2;} static int get_hash_3_64x4(int index) { #if MD5_X2 if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_3; #endif return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_3;} static int get_hash_4_64x4(int index) { #if MD5_X2 if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_4; #endif return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_4;} static int get_hash_5_64x4(int index) { #if MD5_X2 if (index & 1) return (crypt_key_X86[index>>MD5_X2].x2.w2[0]>>8) & PH_MASK_5; #endif return (crypt_key_X86[index>>MD5_X2].x1.w[0]>>8) & PH_MASK_5;} #endif static int get_hash_0(int index) { #ifdef SIMD_COEF_32 if (dynamic_use_sse&1) { unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32); return ((uint32_t 
*)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_0; } #endif #if MD5_X2 if (index & 1) return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_0; #endif return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_0; } static int get_hash_1(int index) { #ifdef SIMD_COEF_32 if (dynamic_use_sse&1) { unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32); return ((uint32_t *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_1; } #endif #if MD5_X2 if (index & 1) return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_1; #endif return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_1; } static int get_hash_2(int index) { #ifdef SIMD_COEF_32 if (dynamic_use_sse&1) { unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32); return ((uint32_t *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_2; } #endif #if MD5_X2 if (index & 1) return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_2; #endif return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_2; } static int get_hash_3(int index) { #ifdef SIMD_COEF_32 if (dynamic_use_sse&1) { unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32); return ((uint32_t *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_3; } #endif #if MD5_X2 if (index & 1) return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_3; #endif return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_3; } static int get_hash_4(int index) { #ifdef SIMD_COEF_32 if (dynamic_use_sse&1) { unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32); return ((uint32_t *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_4; } #endif #if MD5_X2 if (index & 1) return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_4; #endif return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_4; } static int get_hash_5(int index) { #ifdef SIMD_COEF_32 if (dynamic_use_sse&1) { unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32); return ((uint32_t *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_5; } #endif #if MD5_X2 if (index & 1) return 
	crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_5;
#endif
	return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_5;
}
/* Return PH_MASK_6-sized bucket of the first crypt-result word for 'index'
   (SIMD lane-aware when SSE buffers are active, X86 buffers otherwise). */
static int get_hash_6(int index)
{
#ifdef SIMD_COEF_32
	if (dynamic_use_sse&1) {
		/* index interleaved across SIMD lanes: block = index/COEF, lane = index%COEF */
		unsigned int idx = ( ((unsigned int)index)/SIMD_COEF_32);
		return ((uint32_t *)&(crypt_key[idx].c))[index&(SIMD_COEF_32-1)] & PH_MASK_6;
	}
#endif
#if MD5_X2
	/* odd indexes live in the second half (x2.w2) of the paired X86 buffer */
	if (index & 1)
		return crypt_key_X86[index>>MD5_X2].x2.w2[0] & PH_MASK_6;
#endif
	return crypt_key_X86[index>>MD5_X2].x1.w[0] & PH_MASK_6;
}

/************************************************************************
 * We now fully handle all hashing of salts, here in the format.  We
 * return a pointer to an allocated salt record.  Thus, we search all
 * of the salt records, looking for the same salt.  If we find it, we
 * want to return THAT pointer, and not allocate a new pointer.
 * This works great, but forces us to do salt comparison here.
 ***********************************************************************/
#define DYNA_SALT_HASH_BITS SALT_HASH_LOG
#define DYNA_SALT_HASH_SIZE (1<<DYNA_SALT_HASH_BITS)
#define DYNA_SALT_HASH_MOD  (DYNA_SALT_HASH_SIZE-1)

/* One stored (deduplicated) salt: its raw bytes and length. */
typedef struct dyna_salt_list_entry {
	struct dyna_salt_list_entry *next;
	unsigned len;
	unsigned char *salt;
} dyna_salt_list_entry;
/* Singly-linked chain of salts that hashed to the same bucket. */
typedef struct {
	dyna_salt_list_entry *head, *tail;
	int count;
} dyna_salt_list_main;
typedef struct {
	dyna_salt_list_main List;
} SaltHashTab_t;
/* Hash table of all salts seen so far, plus two bump allocators:
   one arena for entry nodes, one for raw salt bytes.  Arenas are
   mem_*_tiny allocations and are never freed individually. */
static SaltHashTab_t        *SaltHashTab=NULL;
static dyna_salt_list_entry *pSaltHashData=NULL, *pSaltHashDataNext=NULL;
static int                   dyna_salt_list_count=0;
static unsigned char        *pSaltDataBuf=NULL, *pNextSaltDataBuf=NULL;
static int                   nSaltDataBuf=0;

/* Append a brand-new salt to bucket 'idx', copying its bytes into the
   byte arena.  Returns the pointer to the stored copy (the canonical
   pointer all duplicate salts will share).  Refills either arena when
   it runs dry; a salt longer than the 0x60000 byte arena would
   overflow — NOTE(review): no guard against that here, presumably
   SALT_SIZE bounds it upstream; confirm. */
static unsigned char *AddSaltHash(unsigned char *salt, unsigned int len, unsigned int idx)
{
	unsigned char *pRet;
	if (dyna_salt_list_count == 0) {
		pSaltHashDataNext = pSaltHashData = mem_calloc_tiny(sizeof(dyna_salt_list_entry) * 25000, MEM_ALIGN_WORD);
		dyna_salt_list_count = 25000;
	}
	/* signed/unsigned compare: nSaltDataBuf is int, len unsigned — fine
	   while nSaltDataBuf stays non-negative, which the subtraction below
	   preserves because of this very check */
	if (nSaltDataBuf < len) {
		pSaltDataBuf = pNextSaltDataBuf = mem_alloc_tiny(0x60000, MEM_ALIGN_NONE);
		nSaltDataBuf = 0x60000;
	}
	pRet = pNextSaltDataBuf;
	pSaltHashDataNext->salt = pNextSaltDataBuf;
	memcpy(pSaltHashDataNext->salt, salt, len);
	pSaltHashDataNext->len = len;
	pNextSaltDataBuf += len;
	nSaltDataBuf -= len;
	/* link the node at the tail of its bucket's chain */
	if (SaltHashTab[idx].List.count == 0)
		SaltHashTab[idx].List.tail = SaltHashTab[idx].List.head = pSaltHashDataNext;
	else {
		SaltHashTab[idx].List.tail->next = pSaltHashDataNext;
		SaltHashTab[idx].List.tail = pSaltHashDataNext;
	}
	++SaltHashTab[idx].List.count;
	++pSaltHashDataNext;
	--dyna_salt_list_count;
	return pRet;
}

/* Look up a salt by its CRC bucket; return the canonical stored copy,
   adding it first if this exact salt has never been seen. */
static unsigned char *FindSaltHash(unsigned char *salt, unsigned int len, CRC32_t crc)
{
	unsigned int idx = crc & DYNA_SALT_HASH_MOD;
	dyna_salt_list_entry *p;
	if (!SaltHashTab)
		SaltHashTab = mem_calloc_tiny(sizeof(SaltHashTab_t) * DYNA_SALT_HASH_SIZE, MEM_ALIGN_WORD);

	if (!SaltHashTab[idx].List.count) {
		return AddSaltHash(salt, len, idx);
	}
	// Ok, we have some salts in this hash list.  Now walk the list, searching for an EQUAL salt.
	p = SaltHashTab[idx].List.head;
	while (p) {
		if (len == p->len && !memcmp((char*)salt, (char*)p->salt, len)) {
			return p->salt;  // found it!  return this one, so we do not allocate another.
		}
		p = p->next;
	}
	return AddSaltHash(salt, len, idx);
}

/* Deduplicating store: CRC-32 the salt bytes to pick a bucket, then
   find-or-insert.  The returned pointer is what the format hands back
   to john as "the salt". */
static unsigned char *HashSalt(unsigned char *salt, unsigned int len)
{
	CRC32_t crc = 0xffffffff, i;   /* note: loop index i shares the CRC32_t type */
	unsigned char *ret_hash;

	// compute the hash.
	for (i = 0; i < len; ++i)
		crc = jtr_crc32(crc,salt[i]);
	crc = ~crc;              /* final inversion, standard CRC-32 post-condition */
	ret_hash = FindSaltHash(salt, len, crc);
	return ret_hash;
}

/* If 'p' starts with the literal "HEX$" marker, decode the hex payload
   in place (overwriting from the front of the buffer) and return the
   decoded length; otherwise return 'len' untouched. */
static int ConvertFromHex(unsigned char *p, int len)
{
	unsigned char *cp;
	unsigned int i, x;
	if (!p || memcmp(p, "HEX$", 4))
		return len;
	// Ok, do a convert, and return 'new' len.
len -= 4; len >>= 1; cp = p; x = len; for (i=4; x; --x, i+= 2) { *cp++ = atoi16[ARCH_INDEX(p[i])]*16 + atoi16[ARCH_INDEX(p[i+1])]; } *cp = 0; return len; } static unsigned int salt_external_to_internal_convert(unsigned char *extern_salt, unsigned char *Buffer) { // Ok, we get this: extern_salt = salt_data$$2salt2$$Uuser ... where anything can be missing or in any order // the any order has 1 exception of salt_data MUST be first. So if we get $$2salt2, then we know there is no salt-1 value. unsigned char *salt2=0, *userid=0, *Flds[10]; int i, nsalt2=0, nuserid=0, nFlds[10]={0,0,0,0,0,0,0,0,0,0}; unsigned int len = strlen((char*)extern_salt), bit; unsigned int bit_array=0; unsigned int the_real_len = 6; // 2 bytes base-8 length, and 4 bytes base-8 bitmap. // work from back of string to front, looking for the $$X signatures. for (i = len-3; i >= 0; --i) { if (extern_salt[i] == '$' && extern_salt[i+1] == '$') { // a 'likely' extra salt value. switch(extern_salt[i+2]) { case '2': if (curdat.b2Salts) { salt2 = &extern_salt[i+3]; nsalt2 = strlen((char*)salt2); nsalt2 = ConvertFromHex(salt2, nsalt2); extern_salt[i] = 0; bit_array |= 1; the_real_len += (nsalt2+1); } break; case 'U': if (curdat.nUserName) { userid = &extern_salt[i+3]; nuserid = strlen((char*)userid); nuserid = ConvertFromHex(userid, nuserid); extern_salt[i] = 0; bit_array |= 2; the_real_len += (nuserid+1); } break; case 'F': { if (extern_salt[i+3] >= '0' && extern_salt[i+3] <= '9') { if (curdat.FldMask && (curdat.FldMask & (MGF_FLDx_BIT<<(extern_salt[i+3]-'0'))) == (MGF_FLDx_BIT<<(extern_salt[i+3]-'0'))) { Flds[extern_salt[i+3]-'0'] = &extern_salt[i+4]; nFlds[extern_salt[i+3]-'0'] = strlen((char*)(Flds[extern_salt[i+3]-'0'])); nFlds[extern_salt[i+3]-'0'] = ConvertFromHex(Flds[extern_salt[i+3]-'0'], nFlds[extern_salt[i+3]-'0']); extern_salt[i] = 0; bit_array |= (1<<(2+extern_salt[i+3]-'0')); the_real_len += (nFlds[extern_salt[i+3]-'0']+1); } break; } } } } } // We have now ripped the data apart. 
Now put it into Buffer, in proper ORDER // Length of salt (salt1) These 2 are stored as base-8 numbers. len = strlen((char*)extern_salt); len = ConvertFromHex(extern_salt, len); the_real_len += len; *Buffer++ = (len>>3) + '0'; *Buffer++ = (len&7) + '0'; // bit array *Buffer++ = (bit_array>>9) + '0'; *Buffer++ = ((bit_array>>6)&7) + '0'; *Buffer++ = ((bit_array>>3)&7) + '0'; *Buffer++ = (bit_array&7) + '0'; memcpy((char*)Buffer, (char*)extern_salt, len); Buffer += len; if (!bit_array) return the_real_len; if (nsalt2) { *Buffer++ = nsalt2; memcpy((char*)Buffer, (char*)salt2, nsalt2); Buffer += nsalt2; bit_array &= ~1; if (!bit_array) return the_real_len; } if (nuserid) { *Buffer++ = nuserid; memcpy((char*)Buffer, (char*)userid, nuserid); if (curdat.nUserName==2) { Buffer[nuserid] = 0; strupr((char*)Buffer); } else if (curdat.nUserName==2) { Buffer[nuserid] = 0; strlwr((char*)Buffer); } Buffer += nuserid; bit_array &= ~2; if (!bit_array) return the_real_len; } bit = 4; for (i = 0; i < 10; ++i, bit<<=1) { if (nFlds[i]) { *Buffer++ = nFlds[i]; memcpy((char*)Buffer, (char*)(Flds[i]), nFlds[i]); Buffer += nFlds[i]; bit_array &= ~bit; if (!bit_array) return the_real_len; } } return the_real_len; } /********************************************************************************* * This salt function has been TOTALLY re-written. Now, we do these things: * 1. convert from external format ($salt$$Uuser$$2HEX$salt2_in_hex, etc, into * our internal format. Our internal format is 2 base-8 numbers (2 digit and 4 * digit), followed by the 'raw' salt bytes, followed by pascal strings of any * other special salt values (salt2, user, flields 0 to 9). The first 2 digit * base 8 number is the length of the binary bytes of the 'real' salt. The * 2nd base-8 4 digit number, is a bit mask of what 'extra' salt types are * contained. * 2. We allocate and 'own' the salt buffers here, so that: * 3. We detect duplicate salts. 
NOTE, we have normalized the salts, so 2 salts that * appear different (external format), appear exactly the same on internal format. * Thus, we dupe remove them here. * 4. We allocation storage for the salts. The ONLY thing we return to john, is * a 4 (or 8 byte in 64 bit builds) pointer to the salt. Thus, when we find * a dupe, we do not have to allocate ANY memory, and simply return the pointer * to the original salt (which is the same as the one we are working on now). * * this is much more complex, however, it allows us to use much less memory, to * have the set_salt function operate VERY quickly (all processing is done here). * It also allows john load time to happen FASTER (yes faster), that it was happening * due to smaller memory footprint, and john's external salt collision to have * less work to do. The memory footprint was also reduced, because now we store * JUST the require memory, and a pointer. Before, often we stored a LOT of memory * for many format types. For a few types, we do use more memory with this method * than before, but for more the memory usage is way down. *********************************************************************************/ static void *get_salt(char *ciphertext) { char Salt[SALT_SIZE+1], saltIntBuf[SALT_SIZE+1]; int off, possible_neg_one=0; unsigned char *saltp; unsigned int the_real_len; static union x { unsigned char salt_p[sizeof(unsigned char*)]; ARCH_WORD p[1]; } union_x; if ( (curdat.pSetup->flags&MGF_SALTED) == 0) { memset(union_x.salt_p, 0, sizeof(union_x.salt_p)); return union_x.salt_p; } memset(Salt, 0, SALT_SIZE+1); // Ok, see if the wrong dynamic type is loaded (such as the 'last' dynamic type). 
if (!strncmp(ciphertext, "$dynamic_", 9)) { char *cp1 = &ciphertext[9]; char *cp2 = &curdat.dynamic_WHICH_TYPE_SIG[9]; while (*cp2 && *cp2 == *cp1) { ++cp1; ++cp2; } if (*cp2) { char subformat[17]; struct fmt_main *pFmtLocal; int nFmtNum; memcpy(subformat, ciphertext, 16); subformat[16] = 0; cp2 = &subformat[9]; while (*cp2 && *cp2 != '$') ++cp2; *cp2 = 0; nFmtNum = -1; sscanf(subformat, "$dynamic_%d", &nFmtNum); if (nFmtNum==-1) return union_x.salt_p; pFmtLocal = dynamic_Get_fmt_main(nFmtNum); memcpy(&curdat, pFmtLocal->private.data, sizeof(private_subformat_data)); } } if (curdat.dynamic_FIXED_SALT_SIZE==0 && !curdat.nUserName && !curdat.FldMask) return union_x.salt_p; if (!strncmp(ciphertext, "$dynamic_", 9)) off=curdat.dynamic_SALT_OFFSET; else off=curdat.dynamic_SALT_OFFSET-strlen(curdat.dynamic_WHICH_TYPE_SIG); if (ciphertext[off] == '$') { if (ciphertext[off+1]=='U' && curdat.nUserName) possible_neg_one = -1; else if (ciphertext[off+1]=='2' && curdat.b2Salts) possible_neg_one = -1; else if (ciphertext[off+1]=='F' && ciphertext[off+2]>='0' && ciphertext[off+2]<='9' && curdat.FldMask) { if ((curdat.FldMask & (MGF_FLDx_BIT<<(ciphertext[off+2]-'0'))) == (MGF_FLDx_BIT<<(ciphertext[off+2]-'0'))) possible_neg_one = -1; } } strnzcpy(Salt, &ciphertext[off + possible_neg_one], SALT_SIZE); if (curdat.dynamic_salt_as_hex) { unsigned char Buf[128]; unsigned int slen=strlen(Salt); switch (curdat.dynamic_salt_as_hex_format_type) { // TODO: Come up with some way to put these into a CASE(HASH) #define #define SPH_CASE(H,F,S) case MGF__##H: {sph_##F##_context c;sph_##F##_init(&c);sph_##F(&c,(const unsigned char*)Salt,slen);sph_##F##_close(&c,Buf); \ memset(Salt,0,SALT_SIZE+1);base64_convert(Buf,e_b64_raw,S,Salt,e_b64_hex,SALT_SIZE, 0, 0);break; } #define OSSL_CASE(H,C,S) case MGF__##H: {C##_CTX c;H##_Init(&c);H##_Update(&c,Salt,slen);H##_Final(Buf,&c); \ memset(Salt,0,SALT_SIZE+1);base64_convert(Buf,e_b64_raw,S,Salt,e_b64_hex,SALT_SIZE, 0, 0);break; } #define KECCAK_CASE(H,S) 
case MGF__##H: {KECCAK_CTX c;H##_Init(&c);KECCAK_Update(&c,(BitSequence*)Salt,slen);KECCAK_Final(Buf,&c); \ memset(Salt,0,SALT_SIZE+1);base64_convert(Buf,e_b64_raw,S,Salt,e_b64_hex,SALT_SIZE, 0, 0);break; } case MGF__MD5: { // Do not 'worry' about SSE/MMX, Only do 'generic' md5. This is ONLY done // at the start of the run. We will NEVER see this run, once john starts. MD5_CTX ctx; int i; char *cpo; MD5_Init(&ctx); if (curdat.dynamic_salt_as_hex & 0x100) { char *s2 = mem_alloc(slen*2+1); for (i = 0; i < slen; ++i) { s2[i<<1] = Salt[i]; s2[(i<<1)+1] = 0; } MD5_Update(&ctx, s2, slen*2); MEM_FREE(s2); } else MD5_Update(&ctx, Salt, slen); MD5_Final(Buf, &ctx); if ( (curdat.dynamic_salt_as_hex&3) == 2) { strcat(Salt, "$$2"); cpo = &Salt[slen+3]; } else { cpo = Salt; memset(Salt, 0, SALT_SIZE+1); } base64_convert(Buf, e_b64_raw, 16, cpo, e_b64_hex, SALT_SIZE, 0, 0); break; } OSSL_CASE(MD4,MD4,16) OSSL_CASE(SHA1,SHA,20) OSSL_CASE(SHA224,SHA256,28) OSSL_CASE(SHA256,SHA256,32) OSSL_CASE(SHA384,SHA512,48) OSSL_CASE(SHA512,SHA512,64) OSSL_CASE(WHIRLPOOL,WHIRLPOOL,64) case MGF__GOST: { gost_ctx ctx; john_gost_init(&ctx); john_gost_update(&ctx, (const unsigned char*)Salt, slen); john_gost_final(&ctx, (unsigned char*)Buf); memset(Salt, 0, SALT_SIZE+1); base64_convert(Buf, e_b64_raw, 32, Salt, e_b64_hex, SALT_SIZE, 0, 0); break; } SPH_CASE(Tiger,tiger,24) SPH_CASE(RIPEMD128,ripemd128,16) SPH_CASE(RIPEMD160,ripemd160,20) SPH_CASE(RIPEMD256,ripemd256,32) SPH_CASE(RIPEMD320,ripemd320,40) SPH_CASE(HAVAL128_3,haval128_3,16) SPH_CASE(HAVAL128_4,haval128_4,16) SPH_CASE(HAVAL128_5,haval128_5,16) SPH_CASE(HAVAL160_3,haval160_3,20) SPH_CASE(HAVAL160_4,haval160_4,20) SPH_CASE(HAVAL160_5,haval160_5,20) SPH_CASE(HAVAL192_3,haval192_3,24) SPH_CASE(HAVAL192_4,haval192_4,24) SPH_CASE(HAVAL192_5,haval192_5,24) SPH_CASE(HAVAL224_3,haval224_3,28) SPH_CASE(HAVAL224_4,haval224_4,28) SPH_CASE(HAVAL224_5,haval224_5,28) SPH_CASE(HAVAL256_3,haval256_3,32) SPH_CASE(HAVAL256_4,haval256_4,32) 
SPH_CASE(HAVAL256_5,haval256_5,32) SPH_CASE(MD2,md2,16) SPH_CASE(PANAMA,panama,32) SPH_CASE(SKEIN224,skein224,28) SPH_CASE(SKEIN256,skein256,32) SPH_CASE(SKEIN384,skein384,48) SPH_CASE(SKEIN512,skein512,64) KECCAK_CASE(SHA3_224,28) KECCAK_CASE(SHA3_256,32) KECCAK_CASE(SHA3_384,48) KECCAK_CASE(SHA3_512,64) KECCAK_CASE(KECCAK_256,32) KECCAK_CASE(KECCAK_512,64) // LARGE_HASH_EDIT_POINT default: { error_msg("Invalid dynamic flags seen. Data type not yet defined\n"); } } } the_real_len = salt_external_to_internal_convert((unsigned char*)Salt, (unsigned char*)saltIntBuf); // Now convert this into a stored salt, or find the 'already' stored same salt. saltp = HashSalt((unsigned char*)saltIntBuf, the_real_len); memcpy(union_x.salt_p, &saltp, sizeof(saltp)); return union_x.salt_p; } /********************************************************************************* * Now our salt is returned only as a pointer. We *********************************************************************************/ static int salt_hash(void *salt) { unsigned long H; if (!salt) return 0; if ( (curdat.pSetup->flags&MGF_SALTED) == 0) return 0; // salt is now a pointer, but WORD aligned. We remove that word alingment, and simply use the next bits H = *((unsigned long*)salt); // Mix up the pointer value (H^(H>>9)) so that if we have a fixed sized allocation // that things do get 'stirred' up better. 
	return ( (H^(H>>9)) & (SALT_HASH_SIZE-1) );
}

/* Total length of an internal-format salt record starting at 'v':
   2 base-8 digits of salt1 length, 4 base-8 digits of extras bitmap,
   salt1 bytes, then for each set bitmap bit a pascal string (length
   byte + data).  Returns the byte count of everything after the 6
   header digits. */
static unsigned dynamic_this_salt_length(const void *v)
{
	const unsigned char *s = (unsigned char*)v;
	unsigned l = *s++ - '0';   /* salt1 length, 2 base-8 digits */
	unsigned bits;
	l <<= 3;
	l += *s++ - '0';
#if ARCH_ALLOWS_UNALIGNED
	/* "0000" == 0x30303030: no extras, salt1 length is the whole story */
	if (*((uint32_t*)s) == 0x30303030)
#else
	if (!memcmp(s, "0000", 4))
#endif
		return l;
	/* extras bitmap, 4 base-8 digits */
	bits = *s++ - '0';
	bits <<= 3;
	bits += *s++ - '0';
	bits <<= 3;
	bits += *s++ - '0';
	bits <<= 3;
	bits += *s++ - '0';
	s += l;
	/* each set bit owns a pascal string: add its length byte's value and skip it */
	while(bits) {
		if (bits & 1) {
			l += *s;
			s += *s;
			++s;
		}
		bits >>= 1;
	}
	return l;
}

/*
 * dyna compare is required, to get all the shortest
 * salt strings first, then the next longer, then the
 * next, and finally the longest.  Without this change
 * there are many dyna formats which will miss finding
 * hashes, because old dirty salt information gets left
 * over, blowing the next runs.  There are many formats
 * which try to not clear buffers if they do not need
 * to, BUT this only works if salts are taken shortest
 * to longest.  This sort builds the list of salts that way
 */
static int salt_compare(const void *x, const void *y)
{
	/* this is all that is needed in dyna salt_compare().
	   Dyna is a pointer to a string, NOT the actual string.
	   The first 2 bytes of string are length (base 8 ascii) */
	const char *X = *((const char**)x);
	const char *Y = *((const char**)y);
	int l1, l2, l;
	/* primary key: the 2 leading base-8 length digits, compared as characters,
	   which orders records shortest-salt1-first */
	if (*X<*Y) return -1;
	if (*X>*Y) return 1;
	if (X[1]<Y[1]) return -1;
	if (X[1]>Y[1]) return 1;

	// we had to make the salt order 100% deterministic, so that intersalt-restore
	// works; tie-break on full content, then on total length.
	l = l1 = dynamic_this_salt_length(X);
	l2 = dynamic_this_salt_length(Y);
	if (l2 < l) l = l2;
	l = memcmp(&X[6], &Y[6], l);   /* skip the 6 header digits */
	if (l) return l;
	if (l1==l2) return 0;
	if (l1 > l2) return 1;
	return -1;
}

/* Fill s->salt_md5 with the MD5 of this record's salt payload (header
   digits excluded), used by john for inter-salt bookkeeping. */
void dynamic_salt_md5(struct db_salt *s)
{
	MD5_CTX ctx;
	int len;
	const char *S = *((const char**)s->salt);

	MD5_Init(&ctx);
	len = dynamic_this_salt_length(S);
	MD5_Update(&ctx, S + 6, len);
	MD5_Final((unsigned char*)(s->salt_md5), &ctx);
}

/*********************************************************************************
 * Gets the binary value from a base-16 hash.
 *********************************************************************************/
static void *get_binary(char *_ciphertext)
{
	/* shared static buffer, valid until the next call (standard format-method
	   contract); sized for the largest (SHA-class) binary */
	static char *realcipher;
	unsigned int i;
	char *ciphertext = _ciphertext;

	if (!realcipher)
		realcipher = mem_alloc_tiny(BINARY_SIZE_SHA, MEM_ALIGN_WORD);

	/* skip a leading "$dynamic_N$" signature, if present */
	if (!strncmp(_ciphertext, "$dynamic_", 9)) {
		ciphertext += 9;
		while (*ciphertext++ != '$')
			;
	}
	for (i=0;i<BINARY_SIZE;i++) {
		realcipher[i] = atoi16[ARCH_INDEX(ciphertext[i*2])]*16 + atoi16[ARCH_INDEX(ciphertext[i*2+1])];
	}
	return (void *)realcipher;
}

// NOTE NOTE NOTE, we have currently ONLY implemented a non-salted function!!!
static char *source(char *source, void *binary) { static char Buf[256]; unsigned char *cpi= (unsigned char*)(binary); char *cpo = Buf; unsigned int i; cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG); for (i = 0; i < 16; ++i) { *cpo++ = itoa16[(*cpi)>>4]; *cpo++ = itoa16[*cpi&0xF]; ++cpi; } *cpo = 0; return Buf; } static char *source_20_hex(char *source, void *binary) { static char Buf[256]; unsigned char *cpi= (unsigned char*)(binary); char *cpo = Buf; unsigned int i; cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG); for (i = 0; i < 20; ++i) { *cpo++ = itoa16[(*cpi)>>4]; *cpo++ = itoa16[*cpi&0xF]; ++cpi; } *cpo = 0; return Buf; } static char *source_28_hex(char *source, void *binary) { static char Buf[256]; unsigned char *cpi= (unsigned char*)(binary); char *cpo = Buf; unsigned int i; cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG); for (i = 0; i < 28; ++i) { *cpo++ = itoa16[(*cpi)>>4]; *cpo++ = itoa16[*cpi&0xF]; ++cpi; } *cpo = 0; return Buf; } static char *source_32_hex(char *source, void *binary) { static char Buf[256]; unsigned char *cpi= (unsigned char*)(binary); char *cpo = Buf; unsigned int i; cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG); for (i = 0; i < 32; ++i) { *cpo++ = itoa16[(*cpi)>>4]; *cpo++ = itoa16[*cpi&0xF]; ++cpi; } *cpo = 0; return Buf; } static char *source_40_hex(char *source, void *binary) { static char Buf[256]; unsigned char *cpi= (unsigned char*)(binary); char *cpo = Buf; unsigned int i; cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG); for (i = 0; i < 40; ++i) { *cpo++ = itoa16[(*cpi)>>4]; *cpo++ = itoa16[*cpi&0xF]; ++cpi; } *cpo = 0; return Buf; } static char *source_48_hex(char *source, void *binary) { static char Buf[256]; unsigned char *cpi= (unsigned char*)(binary); char *cpo = Buf; unsigned int i; cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG); for (i = 0; i < 48; ++i) { *cpo++ = itoa16[(*cpi)>>4]; *cpo++ = itoa16[*cpi&0xF]; ++cpi; } *cpo = 0; return Buf; } static char 
*source_64_hex(char *source, void *binary) { static char Buf[256]; unsigned char *cpi= (unsigned char*)(binary); char *cpo = Buf; unsigned int i; cpo += sprintf(Buf, "%s", curdat.dynamic_WHICH_TYPE_SIG); for (i = 0; i < 64; ++i) { *cpo++ = itoa16[(*cpi)>>4]; *cpo++ = itoa16[*cpi&0xF]; ++cpi; } *cpo = 0; return Buf; } /********************************************************************************* * Gets the binary value from a base-64 hash *********************************************************************************/ static void * binary_b64m(char *ciphertext) { unsigned int i; static unsigned char *b; char *pos; if (!b) b = mem_alloc_tiny(64+3, MEM_ALIGN_WORD); pos = ciphertext; if (!strncmp(pos, "$dynamic_", 9)) { pos += 9; while (*pos++ != '$') ; } i = base64_valid_length(pos, e_b64_mime, 0, 0); base64_convert(pos, e_b64_mime, i, b, e_b64_raw, 64+3, 0, 0); //printf("\nciphertext=%s\n", ciphertext); //dump_stuff_msg("binary", b, 16); return b; } static void * binary_b64(char *ciphertext) { unsigned int i; static unsigned char *b; char *pos; if (!b) b = mem_alloc_tiny(64+3, MEM_ALIGN_WORD); pos = ciphertext; if (!strncmp(pos, "$dynamic_", 9)) { pos += 9; while (*pos++ != '$') ; } i = base64_valid_length(pos, e_b64_crypt, 0, 0); base64_convert(pos, e_b64_cryptBS, i, b, e_b64_raw, 64+3, 0, 0); //printf("\nciphertext=%s\n", ciphertext); //dump_stuff_msg("binary", b, 16); return b; } static void * binary_b64b(char *ciphertext) { unsigned int i; static unsigned char *b; char *pos; if (!b) b = mem_alloc_tiny(64+3, MEM_ALIGN_WORD); pos = ciphertext; if (!strncmp(pos, "$dynamic_", 9)) { pos += 9; while (*pos++ != '$') ; } i = base64_valid_length(pos, e_b64_crypt, 0, 0); base64_convert(pos, e_b64_crypt, i, b, e_b64_raw, 64+3, 0, 0); //printf("\nciphertext=%s\n", ciphertext); //dump_stuff_msg("binary", b, 16); return b; } #define TO_BINARY(b1, b2, b3) \ value = \ (MD5_word)atoi64[ARCH_INDEX(pos[0])] | \ ((MD5_word)atoi64[ARCH_INDEX(pos[1])] << 6) | \ 
((MD5_word)atoi64[ARCH_INDEX(pos[2])] << 12) | \ ((MD5_word)atoi64[ARCH_INDEX(pos[3])] << 18); \ pos += 4; \ b[b1] = value >> 16; \ b[b2] = value >> 8; \ b[b3] = value; static void * binary_b64a(char *ciphertext) { static unsigned char *b; char *pos; MD5_word value; if (!b) b = mem_alloc_tiny(16, MEM_ALIGN_WORD); pos = ciphertext; if (!strncmp(pos, "$dynamic_", 9)) { pos += 9; while (*pos++ != '$') ; } TO_BINARY(0, 6, 12); TO_BINARY(1, 7, 13); TO_BINARY(2, 8, 14); TO_BINARY(3, 9, 15); TO_BINARY(4, 10, 5); b[11] = (MD5_word)atoi64[ARCH_INDEX(pos[0])] | ((MD5_word)atoi64[ARCH_INDEX(pos[1])] << 6); MD5_swap((MD5_word*)b,(MD5_word*)b, 4); return b; } /********************************************************************************* * Gets the binary value from a base-64 hash (such as cisco PIX) *********************************************************************************/ static void * binary_b64_4x6(char *ciphertext) { static uint32_t *b; unsigned int i; char *pos; if (!b) b = mem_alloc_tiny(16, MEM_ALIGN_WORD); pos = ciphertext; if (!strncmp(pos, "$dynamic_", 9)) { pos += 9; while (*pos++ != '$') ; } for (i = 0; i < 4; i++) { b[i] = atoi64[ARCH_INDEX(pos[i*4 + 0])] + (atoi64[ARCH_INDEX(pos[i*4 + 1])] << 6) + (atoi64[ARCH_INDEX(pos[i*4 + 2])] << 12) + (atoi64[ARCH_INDEX(pos[i*4 + 3])] << 18); } MD5_swap(b,b, 4); return (void *)b; } /********************************************************************************* * Here is the main mdg_generic fmt_main. NOTE in its default settings, it is * ready to handle base-16 hashes. 
*********************************************************************************/ static struct fmt_main fmt_Dynamic = { { FORMAT_LABEL, FORMAT_NAME, #ifdef SIMD_COEF_32 ALGORITHM_NAME, #else ALGORITHM_NAME_X86, #endif BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, #ifdef SIMD_COEF_32 PLAINTEXT_LENGTH, #else PLAINTEXT_LENGTH_X86, #endif BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, #ifdef SIMD_COEF_32 MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #else MIN_KEYS_PER_CRYPT_X86, MAX_KEYS_PER_CRYPT_X86, #endif #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT, { NULL }, { NULL }, dynamic_tests }, { init, done, fmt_default_reset, prepare, valid, split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, salt_hash, salt_compare, set_salt, set_key, get_key, clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; /************************************************************** ************************************************************** ************************************************************** ************************************************************** * These are the md5 'primitive' functions that are used by * the build-in expressions, and by the expression generator * They load passwords, salts, user ids, do crypts, convert * crypts into base-16, etc. They are pretty encompassing, * and have been found to be able to do most anything with * a standard 'base-16' md5 hash, salted or unsalted that * fits a 'simple' php style expression. 
 **************************************************************
 **************************************************************
 **************************************************************
 *************************************************************/

/* Builds the 256-entry byte -> two-hex-chars lookup tables (upper-case in
 * itoa16_w2_u, lower-case in itoa16_w2_l); used by the base-16 converters. */
static void Dynamic_Load_itoa16_w2()
{
	char buf[3];
	unsigned int i;
	for (i = 0; i < 256; ++i) {
		sprintf(buf, "%X%X", i>>4, i&0xF);
		memcpy(&(itoa16_w2_u[i]), buf, 2);
		sprintf(buf, "%x%x", i>>4, i&0xF);
		memcpy(&(itoa16_w2_l[i]), buf, 2);
	}
}

#ifdef SIMD_COEF_32

/**************************************************************
 **************************************************************
 * Here are some 'helpers' to our helpers, when it comes to
 * loading data into the mmx/sse buffers. We have several
 * of these common helper functions, and use them in 'most'
 * of the helper primitives, instead of having the same
 * code being inlined in each of them.
 **************************************************************
 *************************************************************/

/* Converts 16 raw crypt bytes (CRY, lane idx_mod of an interleaved SIMD crypt
 * buffer) into 32 hex chars written at the START of lane idx_mod of the
 * interleaved input buffer IPBdw, then terminates with the 0x80 pad byte.
 * Writes 2 hex chars (one 16-bit store) at a time; the `inc` hops skip over
 * the other interleaved lanes.  Fully unrolled for speed. */
static void __SSE_append_output_base16_to_input(uint32_t *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
	// #3
	// 5955K (core2, $dynamic_2$)
	// 1565K (core2, $dynamic_1006$)
	// 3381K (ath64, $dynamic_2$)
	// 824.7k (ath64, $dynamic_1006$)
#undef inc
#define inc ((SIMD_COEF_32-1) * 2)
	unsigned short *IPBw = (unsigned short*)IPBdw;
	IPBw += (idx_mod<<1);
	CRY += (idx_mod<<2);

	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;

	/* append the SSE markup 0x80 terminator right after the 32 hex chars */
	*IPBw = 0x80;
#undef inc
}

/* Same as __SSE_append_output_base16_to_input() but does NOT write the 0x80
 * terminator: used when overwriting hex in place within a longer buffer. */
static void __SSE_overwrite_output_base16_to_input(uint32_t *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
	// #3
	// 5955K (core2, $dynamic_2$)
	// 1565K (core2, $dynamic_1006$)
	// 3381K (ath64, $dynamic_2$)
	// 824.7k (ath64, $dynamic_1006$)
#undef inc
#define inc ((SIMD_COEF_32-1) * 2)
	unsigned short *IPBw = (unsigned short *)IPBdw;
	IPBw += (idx_mod<<1);
	CRY += (idx_mod<<2);

	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	CRY += (inc<<1);
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
	*IPBw++ = itoa16_w2[*CRY++];
	*IPBw++ = itoa16_w2[*CRY++];
	IPBw += inc;
#undef inc
}

/* Appends 32 hex chars of a crypt result at byte offset `ip` into lane
 * idx_mod of the interleaved input buffer, for the case where ip is 2 mod 4
 * (starts in the middle of a DWORD).  Ends by OR-ing the 0x80 pad into the
 * upper half of the final DWORD. */
static void __SSE_append_output_base16_to_input_semi_aligned_2(unsigned int ip, uint32_t *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
	// #1
	// 9586k/4740k (core2, $dynamic_9$)
	// 5113k/4382k (core2,$dynamic_10$)
	//             (ath64, $dynamic_9$)
	//             (ath64, $dynamic_10$)
#define inc SIMD_COEF_32
#define incCRY ((SIMD_COEF_32 - 1) * 4)
	// Ok, here we are 1/2 off. We are starting in the 'middle' of a DWORD (and end
	// in the middle of the last one).

	// start our pointers out at the right 32 bit offset into the first MMX/SSE buffer
	IPBdw += idx_mod;
	IPBdw += (ip>>2)*SIMD_COEF_32;

	CRY += (idx_mod<<2);

	// first byte handled here.
	*IPBdw &= 0xFFFF;
	*IPBdw |= (((uint32_t)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;

	*IPBdw = (itoa16_w2[*CRY++]);
	*IPBdw |= (((uint32_t)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);
	CRY += incCRY;
	*IPBdw |= (((uint32_t)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;

	*IPBdw = (itoa16_w2[*CRY++]);
	*IPBdw |= (((uint32_t)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);
	CRY += incCRY;
	*IPBdw |= (((uint32_t)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;

	*IPBdw = (itoa16_w2[*CRY++]);
	*IPBdw |= (((uint32_t)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);
	CRY += incCRY;
	*IPBdw |= (((uint32_t)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;

	*IPBdw = (itoa16_w2[*CRY++]);
	*IPBdw |= (((uint32_t)(itoa16_w2[*CRY++]))<<16);
	IPBdw += inc;
	*IPBdw = (itoa16_w2[*CRY++]);

	// Add the 0x80 at the proper location (offset 0x21)
	*IPBdw |= 0x800000;
#undef inc
#undef incCRY
}

/* Appends 32 hex chars of a crypt result at byte offset `ip` into lane
 * idx_mod of the interleaved input buffer, for the DWORD-aligned case
 * (ip is 0 mod 4).  Writes a full DWORD (4 hex chars worth = 2 source
 * bytes) per store, then places the 0x80 pad in the following DWORD. */
static void __SSE_append_output_base16_to_input_semi_aligned_0(unsigned int ip, uint32_t *IPBdw, unsigned char *CRY, unsigned int idx_mod)
{
	// #2
	// 6083k (core2, $dynamic_2$)
	// 1590K (core2, $dynamic_1006$)
	// 3537K (ath64, $dynamic_2$)
	// 890.3K (ath64, $dynamic_1006$)
#undef inc
#define inc SIMD_COEF_32
#define incCRY (4*SIMD_COEF_32-2)

	// start our pointers out at the right 32 bit offset into the first MMX/SSE buffer
	IPBdw += idx_mod;
	IPBdw += (ip>>2)*SIMD_COEF_32;
	CRY += (idx_mod<<2);

	*IPBdw = (((uint32_t)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	CRY += 2;
	*IPBdw = (((uint32_t)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
//	CRY += (inc*3)+2;
	CRY += incCRY;

	*IPBdw = (((uint32_t)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	CRY += 2;
	*IPBdw = (((uint32_t)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
//	CRY += (inc*3)+2;
	CRY += incCRY;

	*IPBdw = (((uint32_t)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	CRY += 2;
	*IPBdw = (((uint32_t)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
//	CRY += (inc*3)+2;
	CRY += incCRY;

	*IPBdw = (((uint32_t)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);
	IPBdw += inc;
	CRY += 2;
	*IPBdw = (((uint32_t)(itoa16_w2[*(CRY+1)]))<<16)|(itoa16_w2[*CRY]);

	// Add the 0x80 at the proper location (offset 0x21)
	IPBdw += inc;
	*IPBdw = 0x80;
#undef inc
#undef incCRY
}

/* Appends `len` 8-bit chars from cp to lane idx_mod of the interleaved SIMD
 * buffer IPB starting at byte offset bf_ptr, widening each char to UTF-16LE
 * (char, 0) pairs.  On little-endian it bulk-copies two chars per 32-bit
 * store when alignment allows.  If bUpdate0x80 is set, writes the 0x80 SSE
 * pad byte after the appended data. */
static void __SSE_append_string_to_input_unicode(unsigned char *IPB, unsigned int idx_mod, unsigned char *cp, unsigned int len, unsigned int bf_ptr, unsigned int bUpdate0x80)
{
	unsigned char *cpO;
#if ARCH_LITTLE_ENDIAN
	// if big-endian, we gain nothing from this function (since we would have to byte swap)
	if (len>1&&!(bf_ptr&1))
	{
		unsigned int w32_cnt;
		if (bf_ptr&2) {
			cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
			bf_ptr += 2;
			*cpO = *cp++;
			cpO[1] = 0;
			--len;
		}
		w32_cnt = len>>1;
		if (w32_cnt)
		{
			uint32_t *wpO;
			wpO = (uint32_t*)&IPB[GETPOS(bf_ptr, idx_mod)];
			len -= (w32_cnt<<1);
			bf_ptr += (w32_cnt<<2);
			do
			{
				uint32_t x = 0;
				x = cp[1];
				x <<= 16;
				x += cp[0];
				*wpO = x;
				cp += 2;
				wpO += SIMD_COEF_32;
			}
			while (--w32_cnt);
		}
	}
#endif
	/* byte-at-a-time tail (and the whole copy on big-endian); every 4th byte
	 * written means we crossed a DWORD, so hop over the other lanes */
	cpO = &IPB[GETPOS(bf_ptr, idx_mod)];
	while (len--)
	{
		*cpO++ = *cp++;
		if ( ((++bf_ptr)&3) == 0)
			cpO += ((SIMD_COEF_32-1)*4);
		*cpO++ = 0;
		if ( ((++bf_ptr)&3) == 0)
			cpO += ((SIMD_COEF_32-1)*4);
	}
	if (bUpdate0x80)
		*cpO = 0x80;
}

/* Appends `len` raw bytes from cp to lane idx_mod of the interleaved SIMD
 * buffer IPB starting at byte offset bf_ptr; optionally writes the trailing
 * 0x80 pad byte. */
static void __SSE_append_string_to_input(unsigned char *IPB, unsigned int idx_mod, unsigned char *cp, unsigned int len, unsigned int bf_ptr, unsigned int bUpdate0x80)
{
	unsigned char *cpO;
	// if our insertion point is on an 'even' DWORD, then we use DWORD * copying, as long as we can
	// This provides quite a nice speedup.
#if ARCH_LITTLE_ENDIAN // if big-endian, we gain nothing from this function (since we would have to byte swap) if (len>3&&(bf_ptr&3)) { cpO = &IPB[GETPOS(bf_ptr, idx_mod)]; while (len--) { *cpO++ = *cp++; if ( ((++bf_ptr)&3) == 0) { if (!len) { if (bUpdate0x80) *cpO = 0x80; return; } break; } } } if (len>3&&!(bf_ptr&3)) { unsigned int w32_cnt = len>>2; if (w32_cnt) { uint32_t *wpO; wpO = (uint32_t*)&IPB[GETPOS(bf_ptr, idx_mod)]; len -= (w32_cnt<<2); bf_ptr += (w32_cnt<<2); do { *wpO = *((uint32_t*)cp); cp += 4; wpO += SIMD_COEF_32; } while (--w32_cnt); } if (!len) { if (bUpdate0x80) IPB[GETPOS(bf_ptr, idx_mod)] = 0x80; return; } } #endif cpO = &IPB[GETPOS(bf_ptr, idx_mod)]; while (len--) { *cpO++ = *cp++; if ( ((++bf_ptr)&3) == 0) cpO += ((SIMD_COEF_32-1)*4); } if (bUpdate0x80) *cpO = 0x80; } #endif // #ifdef SIMD_COEF_32 from way above. inline static void __append_string(DYNA_OMP_PARAMSm unsigned char *Str, unsigned int len) { unsigned int j; unsigned int til; int utf16 = md5_unicode_convert_get(tid); #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { if (!utf16) { for (; j < til; ++j) { unsigned int idx = j/SIMD_COEF_32; unsigned int idx_mod = j&(SIMD_COEF_32-1); unsigned int bf_ptr = total_len[idx][idx_mod]; total_len[idx][idx_mod] += len; __SSE_append_string_to_input(input_buf[idx].c,idx_mod,Str,len,bf_ptr,1); } } else { if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) { UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now int outlen; if (utf16 == 1) outlen = enc_to_utf16(utf16Str, 27, Str, len) * sizeof(UTF16); else outlen = enc_to_utf16_be(utf16Str, 27, Str, len) * sizeof(UTF16); if (outlen < 0) outlen = strlen16(utf16Str) * sizeof(UTF16); for (; j < til; ++j) { unsigned int idx = j/SIMD_COEF_32; unsigned int idx_mod = j&(SIMD_COEF_32-1); unsigned int bf_ptr = total_len[idx][idx_mod]; 
total_len[idx][idx_mod] += outlen; // note we use the 'non' unicode variant, since we have already computed the unicode, and length properly __SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1); } } else { for (; j < til; ++j) { unsigned int idx = j/SIMD_COEF_32; unsigned int idx_mod = j&(SIMD_COEF_32-1); unsigned int bf_ptr = total_len[idx][idx_mod]; total_len[idx][idx_mod] += len << 1; __SSE_append_string_to_input_unicode(input_buf[idx].c,idx_mod,Str,len,bf_ptr,1); } } } return; } #endif if (utf16) { if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) { UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1]; int outlen; if (utf16 == 1) outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, Str, len) * sizeof(UTF16); else outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, Str, len) * sizeof(UTF16); if (outlen < 0) outlen = strlen16(utf16Str) * sizeof(UTF16); for (; j < til; ++j) { unsigned int z; unsigned char *cp; unsigned char *cpi = (unsigned char*)utf16Str; if (total_len_X86[j] + outlen <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) { #if MD5_X2 if (j&1) cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]); else #endif cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]); for (z = 0; z < outlen; ++z) { *cp++ = *cpi++; } total_len_X86[j] += outlen; } } } else { for (; j < til; ++j) { unsigned int z; unsigned char *cp; unsigned char *cpi = Str; if (total_len_X86[j] + (len<<1) <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) { #if MD5_X2 if (j&1) cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]); else #endif cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]); for (z = 0; z < len; ++z) { *cp++ = *cpi++; *cp++ = 0; } total_len_X86[j] += (len<<1); } } } } else { for (; j < til; ++j) { #if MD5_X2 if (j&1) memcpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), Str, len); else #endif memcpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), Str, len); total_len_X86[j] 
+= len; } } } inline static void __append2_string(DYNA_OMP_PARAMSm unsigned char *Str, unsigned int len) { unsigned int j; unsigned int til; int utf16 = md5_unicode_convert_get(tid); #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { if (!utf16) { for (; j < til; ++j) { unsigned int idx = j/SIMD_COEF_32; unsigned int idx_mod = j&(SIMD_COEF_32-1); unsigned int bf_ptr = total_len2[idx][idx_mod]; total_len2[idx][idx_mod] += len; __SSE_append_string_to_input(input_buf2[idx].c,idx_mod,Str,len,bf_ptr,1); } } else { if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) { UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now int outlen; if (utf16 == 1) outlen = enc_to_utf16(utf16Str, 27, Str, len) * sizeof(UTF16); else outlen = enc_to_utf16_be(utf16Str, 27, Str, len) * sizeof(UTF16); if (outlen < 0) outlen = strlen16(utf16Str) * sizeof(UTF16); for (; j < til; ++j) { unsigned int idx = j/SIMD_COEF_32; unsigned int idx_mod = j&(SIMD_COEF_32-1); unsigned int bf_ptr = total_len2[idx][idx_mod]; total_len2[idx][idx_mod] += outlen; // note we use the 'non' unicode variant of __SSE_append_string_to_input(), since it's already unicode, and length properly __SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1); } } else { for (; j < til; ++j) { unsigned int idx = j/SIMD_COEF_32; unsigned int idx_mod = j&(SIMD_COEF_32-1); unsigned int bf_ptr = total_len2[idx][idx_mod]; total_len2[idx][idx_mod] += len << 1; __SSE_append_string_to_input_unicode(input_buf2[idx].c,idx_mod,Str,len,bf_ptr,1); } } } return; } #endif if (utf16) { if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) { UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1]; int outlen; if (utf16 == 1) outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, Str, len) * sizeof(UTF16); else outlen = 
enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, Str, len) * sizeof(UTF16); if (outlen < 0) outlen = strlen16(utf16Str) * sizeof(UTF16); for (; j < til; ++j) { unsigned int z; unsigned char *cp; unsigned char *cpi = (unsigned char*)utf16Str; if (total_len2_X86[j] + outlen <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) { #if MD5_X2 if (j&1) cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]); else #endif cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]); for (z = 0; z < outlen; ++z) { *cp++ = *cpi++; } total_len2_X86[j] += outlen; } } } else { for (; j < til; ++j) { unsigned int z; unsigned char *cp; unsigned char *cpi = Str; if (total_len2_X86[j] + (len<<1) <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) { #if MD5_X2 if (j&1) cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]); else #endif cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]); for (z = 0; z < len; ++z) { *cp++ = *cpi++; *cp++ = 0; } total_len2_X86[j] += (len<<1); } } } } else { for (; j < til; ++j) { #if MD5_X2 if (j&1) memcpy(&(input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]]), Str, len); else #endif memcpy(&(input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]]), Str, len); total_len2_X86[j] += len; } } } void DynamicFunc__setmode_unicodeBE(DYNA_OMP_PARAMS) // DYNA_OMP_PARAMS not used. We use omp_thread_num() instead. { md5_unicode_convert_set(2,tid); } void DynamicFunc__setmode_unicode(DYNA_OMP_PARAMS) // DYNA_OMP_PARAMS not used. We use omp_thread_num() instead. { md5_unicode_convert_set(1,tid); } void DynamicFunc__setmode_normal (DYNA_OMP_PARAMS) // DYNA_OMP_PARAMS not used. We use omp_thread_num() instead. 
{
	md5_unicode_convert_set(0,tid);
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Clears the input variable, and input 'lengths'
 *************************************************************/
void DynamicFunc__clean_input(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	__nonMP_DynamicFunc__clean_input();
#else
	unsigned int i=0;
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		/* SIMD path: zero whole interleaved blocks covering [first, last) */
		unsigned int x = first / SIMD_COEF_32;
		unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
		while (x < y) {
			memset(input_buf[x].c, 0, sizeof(input_buf[0]));
			memset(total_len[x], 0, SIMD_COEF_32 * sizeof(total_len[0][0]));
			++x;
		}
		return;
	}
#endif
	for (i = first; i < last; ++i) {
#if MD5_X2
		if (i&1)
			memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));
		else
#endif
		memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));
		total_len_X86[i] = 0;
	}
#endif
}

/* Same as DynamicFunc__clean_input(), but clears input buffer 2. */
void DynamicFunc__clean_input2(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	__nonMP_DynamicFunc__clean_input2();
#else
	unsigned int i=0;
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int x = first / SIMD_COEF_32;
		unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
		while (x < y) {
			memset(input_buf2[x].c, 0, sizeof(input_buf2[0]));
			memset(total_len2[x], 0, SIMD_COEF_32 * sizeof(total_len2[0][0]));
			++x;
		}
		return;
	}
#endif
	for (i = first; i < last; ++i) {
#if MD5_X2
		if (i&1)
			memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
		else
#endif
		memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
		total_len2_X86[i] = 0;
	}
#endif
}

/* 'Full' clean: clears BOTH the SIMD and scalar copies of input buffer 1
 * unconditionally (no dynamic_use_sse test), for formats that may use
 * either path. */
void DynamicFunc__clean_input_full(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	__nonMP_DynamicFunc__clean_input_full();
#else
	unsigned int i;
#ifdef SIMD_COEF_32
	unsigned int x = first / SIMD_COEF_32;
	unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
	while (x < y) {
		memset(input_buf[x].c, 0, sizeof(input_buf[0]));
		memset(total_len[x], 0, SIMD_COEF_32 * sizeof(total_len[0][0]));
		++x;
	}
#endif
	for (i = first; i < last; ++i) {
#if MD5_X2
		if (i&1)
			memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len_X86[i]));
		else
#endif
		memset(input_buf_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len_X86[i]));
		total_len_X86[i] = 0;
	}
#endif
}

/* 'Full' clean of input buffer 2; see DynamicFunc__clean_input_full(). */
void DynamicFunc__clean_input2_full(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	__nonMP_DynamicFunc__clean_input2_full();
#else
	unsigned int i;
#ifdef SIMD_COEF_32
	unsigned int x = first / SIMD_COEF_32;
	unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
	while (x < y) {
		memset(input_buf2[x].c, 0, sizeof(input_buf2[0]));
		memset(total_len2[x], 0, SIMD_COEF_32 * sizeof(total_len2[0][0]));
		++x;
	}
#endif
	for (i = first; i < last; ++i) {
#if MD5_X2
		if (i&1)
			memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
		else
#endif
		memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, COMPUTE_EX_LEN(total_len2_X86[i]));
		total_len2_X86[i] = 0;
	}
#endif
}

/* 'Kwik' clean: only resets the lengths (data bytes are left in place and
 * simply overwritten later).  On big-endian, the used bytes must also be
 * zeroed since the buffers are kept byte-swapped there. */
void DynamicFunc__clean_input_kwik(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	__nonMP_DynamicFunc__clean_input_kwik();
#else
#ifdef SIMD_COEF_32
	unsigned int i;
	if (dynamic_use_sse==1) {
		unsigned int x = first / SIMD_COEF_32;
		unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
		while (x < y)
			memset(total_len[x++], 0, SIMD_COEF_32 * sizeof(total_len[0][0]));
		return;
	}
#else
	unsigned int i;
#endif
	for (i = first; i < last; ++i) {
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
		if (i&1)
			memset(input_buf_X86[i>>MD5_X2].x2.b2, 0, total_len_X86[i]+5);
		else
#endif
		memset(input_buf_X86[i>>MD5_X2].x1.b, 0, total_len_X86[i]+5);
#endif
		total_len_X86[i] = 0;
	}
#endif
}

/* 'Kwik' clean of input buffer 2; see DynamicFunc__clean_input_kwik(). */
void DynamicFunc__clean_input2_kwik(DYNA_OMP_PARAMS)
{
#ifndef _OPENMP
	__nonMP_DynamicFunc__clean_input2_kwik();
#else
#ifdef SIMD_COEF_32
	unsigned int i;
	if (dynamic_use_sse==1) {
		unsigned int x = first / SIMD_COEF_32;
		unsigned int y = (last+SIMD_COEF_32-1) / SIMD_COEF_32;
		while (x < y)
			memset(total_len2[x++], 0, SIMD_COEF_32 * sizeof(total_len2[0][0]));
		return;
	}
#else
	unsigned int i;
#endif
	for (i = first; i < last; ++i) {
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
		if (i&1)
			memset(input_buf2_X86[i>>MD5_X2].x2.b2, 0, total_len2_X86[i]+5);
		else
#endif
		memset(input_buf2_X86[i>>MD5_X2].x1.b, 0, total_len2_X86[i]+5);
#endif
		total_len2_X86[i] = 0;
	}
#endif
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Appends all keys to the end of the input variables, and
 * updates lengths
 *************************************************************/
void DynamicFunc__append_keys(DYNA_OMP_PARAMS)
{
	unsigned int j;
	unsigned int til;
	int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		for (; j < til; ++j) {
			unsigned int idx = j/SIMD_COEF_32;
			unsigned int idx_mod = j&(SIMD_COEF_32-1);
			unsigned int bf_ptr = total_len[idx][idx_mod];
			if (utf16) {
				if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
					UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
					int outlen;
					int maxlen=27;
					if (curdat.pSetup->MaxInputLen < maxlen)
						maxlen = curdat.pSetup->MaxInputLen;
					if (utf16 == 1)
						outlen = enc_to_utf16(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
					else
						outlen = enc_to_utf16_be(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
					/* conversion was truncated or failed: shrink the stored key
					 * length to what actually converted */
					if (outlen <= 0) {
						saved_key_len[j] = -outlen / sizeof(UTF16);
						if (outlen < 0)
							outlen = strlen16(utf16Str) * sizeof(UTF16);
					}
					total_len[idx][idx_mod] += outlen;
					__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1);
				} else {
					total_len[idx][idx_mod] += (saved_key_len[j] << 1);
					__SSE_append_string_to_input_unicode(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
				}
			} else {
				total_len[idx][idx_mod] += saved_key_len[j];
				__SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1);
			}
		}
		return;
	}
#endif
	if (utf16) {
		if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp, *cpi;
				UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
				int outlen;
				if (utf16 == 1)
					outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
				else
					outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16);
				if (outlen <= 0) {
					saved_key_len[j] = -outlen / sizeof(UTF16);
					if (outlen < 0)
						outlen = strlen16(utf16Str) * sizeof(UTF16);
				}
				// only copy data if it will NOT trash the buffer
				if (total_len_X86[j] + outlen <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
					if (j&1)
						cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
					else
#endif
					cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
					for (cpi = (unsigned char*)utf16Str, z = 0; z < outlen; ++z)
						*cp++ = *cpi++;
					total_len_X86[j] += outlen;
				}
			}
		} else {
			for (; j < til; ++j) {
				unsigned int z;
				unsigned char *cp, *cpi = (unsigned char*)saved_key[j];
				if (total_len_X86[j] + (saved_key_len[j]<<1) <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) {
#if MD5_X2
					if (j&1)
						cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]);
					else
#endif
					cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]);
					for (z = 0; z < saved_key_len[j]; ++z) {
						*cp++ = *cpi++;
						*cp++ = 0;
					}
					total_len_X86[j] += (saved_key_len[j]<<1);
				}
			}
		}
	} else {
		for (; j < til; ++j) {
#if MD5_X2
			if (j&1)
				memcpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], saved_key_len[j]);
			else
#endif
			memcpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], saved_key_len[j]);
			total_len_X86[j] += saved_key_len[j];
		}
	}
}

// DynamicFunc__append_keys_pad16
//   append the array of keys to the array input1[], padding with nulls to 16 bytes, if input shorter.
//   Needed for net-md5 and net-sha1 formats.
void DynamicFunc__append_keys_pad16(DYNA_OMP_PARAMS) { unsigned int j; unsigned int til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { for (; j < til; ++j) { unsigned int idx = j/SIMD_COEF_32; unsigned int idx_mod = j&(SIMD_COEF_32-1); unsigned int bf_ptr = total_len[idx][idx_mod]; saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works' if (saved_key_len[j] < 16) { char buf[24]; strncpy(buf, saved_key[j], 18); total_len[idx][idx_mod] += 16; __SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)buf,16,bf_ptr,1); } else { total_len[idx][idx_mod] += saved_key_len[j]; __SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1); } } return; } #endif for (; j < til; ++j) { saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works' #if MD5_X2 if (j&1) strncpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], 17); else #endif strncpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], 17); total_len_X86[j] += 16; } } void DynamicFunc__append_keys_pad20(DYNA_OMP_PARAMS) { unsigned int j; unsigned int til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { for (; j < til; ++j) { unsigned int idx = j/SIMD_COEF_32; unsigned int idx_mod = j&(SIMD_COEF_32-1); unsigned int bf_ptr = total_len[idx][idx_mod]; saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works' if (saved_key_len[j] < 20) { char buf[28]; strncpy(buf, saved_key[j], 22); total_len[idx][idx_mod] += 20; __SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)buf,20,bf_ptr,1); } else { total_len[idx][idx_mod] += saved_key_len[j]; __SSE_append_string_to_input(input_buf[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1); } } return; } #endif for (; j < til; ++j) { saved_key[j][saved_key_len[j]] = 0; // so strncpy 'works' #if MD5_X2 if (j&1) 
strncpy(&(input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]]), saved_key[j], 21); else #endif strncpy(&(input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]]), saved_key[j], 21); total_len_X86[j] += 20; } } /************************************************************** * DYNAMIC primitive helper function * Appends all keys to the end of the 2nd input variables, and * updates lengths *************************************************************/ void DynamicFunc__append_keys2(DYNA_OMP_PARAMS) { unsigned int j, til; int utf16 = md5_unicode_convert_get(tid); #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { for (; j < til; ++j) { unsigned int idx = j/SIMD_COEF_32; unsigned int idx_mod = j&(SIMD_COEF_32-1); unsigned int bf_ptr = total_len2[idx][idx_mod]; if (utf16) { if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) { UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now int outlen; int maxlen=27; if (curdat.pSetup->MaxInputLen < maxlen) maxlen = curdat.pSetup->MaxInputLen; if (utf16 == 1) outlen = enc_to_utf16(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16); else outlen = enc_to_utf16_be(utf16Str, maxlen, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16); if (outlen <= 0) { saved_key_len[j] = -outlen / sizeof(UTF16); if (outlen < 0) outlen = strlen16(utf16Str) * sizeof(UTF16); } total_len2[idx][idx_mod] += outlen; __SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)utf16Str,outlen,bf_ptr,1); } else { total_len2[idx][idx_mod] += (saved_key_len[j] << 1); __SSE_append_string_to_input_unicode(input_buf2[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1); } } else { total_len2[idx][idx_mod] += saved_key_len[j]; __SSE_append_string_to_input(input_buf2[idx].c,idx_mod,(unsigned char*)saved_key[j],saved_key_len[j],bf_ptr,1); } } return; 
} #endif if (utf16) { if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) { for (; j < til; ++j) { unsigned int z; unsigned char *cp, *cpi; UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1]; int outlen; if (utf16 == 1) outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16); else outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)saved_key[j], saved_key_len[j]) * sizeof(UTF16); if (outlen <= 0) { saved_key_len[j] = -outlen / sizeof(UTF16); if (outlen < 0) outlen = strlen16(utf16Str) * sizeof(UTF16); } // only copy data if it will NOT trash the buffer if (total_len_X86[j] + outlen <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) { #if MD5_X2 if (j&1) cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]); else #endif cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]); for (cpi = (unsigned char*)utf16Str, z = 0; z < outlen; ++z) *cp++ = *cpi++; total_len2_X86[j] += outlen; } } } else { for (; j < til; ++j) { unsigned int z; unsigned char *cp, *cpi = (unsigned char*)saved_key[j]; if (total_len2_X86[j] + (saved_key_len[j]<<1) <= MAX_BUFFER_OFFSET_AVOIDING_OVERWRITE) { #if MD5_X2 if (j&1) cp = &(input_buf2_X86[j>>MD5_X2].x2.B2[total_len2_X86[j]]); else #endif cp = &(input_buf2_X86[j>>MD5_X2].x1.B[total_len2_X86[j]]); for (z = 0; z < saved_key_len[j]; ++z) { *cp++ = *cpi++; *cp++ = 0; } total_len2_X86[j] += (saved_key_len[j]<<1); } } } } else { for (; j < til; ++j) { #if MD5_X2 if (j&1) memcpy(&(input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]]), saved_key[j], saved_key_len[j]); else #endif memcpy(&(input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]]), saved_key[j], saved_key_len[j]); total_len2_X86[j] += saved_key_len[j]; } } } void DynamicFunc__set_input_len_16(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { unsigned int k; j /= 
SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j) {
			// If length is < 16, then remove existing end of buffer marker, and then set
			// one at offset 16
			for (k = 0; k < SIMD_COEF_32; ++k) {
				unsigned int this_item_len = total_len[j][k];
				if (this_item_len < 16)
					input_buf[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
				input_buf[j].c[GETPOS(16, k&(SIMD_COEF_32-1))] = 0x80;
				total_len[j][k] = 16;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j) {
		// TODO: this code MAY need buffer cleaned up if we are using md5_go code!!!
#if MD5_X2
		if (j&1) {
			while (total_len_X86[j] < 16)
				input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]++] = 0;
		}
		else
#endif
		{while (total_len_X86[j] < 16)
			input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]++] = 0;}
		total_len_X86[j] = 16;
	}
}

/* Same as DynamicFunc__set_input_len_16(), but for input buffer 2. */
void DynamicFunc__set_input2_len_16(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int k;
		j /= SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j) {
			// If length is < 16, then remove existing end of buffer marker, and then set
			// one at offset 16
			for (k = 0; k < SIMD_COEF_32; ++k) {
				unsigned int this_item_len = total_len2[j][k];
				if (this_item_len < 16)
					input_buf2[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
				input_buf2[j].c[GETPOS(16, k&(SIMD_COEF_32-1))] = 0x80;
				total_len2[j][k] = 16;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j) {
		// TODO: this code MAY need buffer cleaned up if we are using md5_go code!!!
#if MD5_X2
		if (j&1) {
			while (total_len2_X86[j] < 16)
				input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]++] = 0;
		}
		else
#endif
		{while (total_len2_X86[j] < 16)
			input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]++] = 0;}
		total_len2_X86[j] = 16;
	}
}

/* Forces input buffer 1 length to exactly 20 bytes (NUL-pads shorter input;
 * moves the SIMD 0x80 marker to offset 20). */
void DynamicFunc__set_input_len_20(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int k;
		j /= SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j) {
			// If length is < 20, then remove existing end of buffer marker, and then set
			// one at offset 20
			for (k = 0; k < SIMD_COEF_32; ++k) {
				unsigned int this_item_len = total_len[j][k];
				if (this_item_len < 20)
					input_buf[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
				input_buf[j].c[GETPOS(20, k&(SIMD_COEF_32-1))] = 0x80;
				total_len[j][k] = 20;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j) {
#if MD5_X2
		if (j&1) {
			while (total_len_X86[j] < 20)
				input_buf_X86[j>>MD5_X2].x2.b2[total_len_X86[j]++] = 0;
		}
		else
#endif
		{while (total_len_X86[j] < 20)
			input_buf_X86[j>>MD5_X2].x1.b[total_len_X86[j]++] = 0;}
		total_len_X86[j] = 20;
	}
}

/* Same as DynamicFunc__set_input_len_20(), but for input buffer 2. */
void DynamicFunc__set_input2_len_20(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int k;
		j /= SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j) {
			// If length is < 20, then remove existing end of buffer marker, and then set
			// one at offset 20
			for (k = 0; k < SIMD_COEF_32; ++k) {
				unsigned int this_item_len = total_len2[j][k];
				if (this_item_len < 20)
					input_buf2[j].c[GETPOS(this_item_len, k&(SIMD_COEF_32-1))] = 0x00;
				input_buf2[j].c[GETPOS(20, k&(SIMD_COEF_32-1))] = 0x80;
				total_len2[j][k] = 20;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j) {
#if MD5_X2
		if (j&1) {
			while (total_len2_X86[j] < 20)
				input_buf2_X86[j>>MD5_X2].x2.b2[total_len2_X86[j]++] = 0;
		}
		else
#endif
		{while (total_len2_X86[j] < 20)
			input_buf2_X86[j>>MD5_X2].x1.b[total_len2_X86[j]++] = 0;}
		total_len2_X86[j] = 20;
	}
}

/* Forces the scalar length of input buffer 1 to 32 (length only; buffer
 * contents are assumed to already hold 32 bytes, e.g. a base-16 crypt). */
void DynamicFunc__set_input_len_32(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
	for (; j < til; ++j)
		total_len_X86[j] = 32;
}

/* Sets input buffer 1 length to 32 and also clears the bytes just past
 * offset 32 (SIMD: plants the 0x80 marker at offset 32; big-endian scalar:
 * zeroes the 24 bytes after the data). */
void DynamicFunc__set_input_len_32_cleartop(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		j /= SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j) {
			unsigned int k;
			for (k = 0; k < SIMD_COEF_32; ++k) {
				input_buf[j].c[GETPOS(32, k&(SIMD_COEF_32-1))] = 0x80;
				total_len[j][k] = 32;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j) {
		total_len_X86[j] = 32;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
		if (j&1) {
			//MD5_swap(input_buf_X86[j>>MD5_X2].x2.w2, input_buf2_X86[j>>MD5_X2].x2.w2, 8);
			memset(&(input_buf_X86[j>>MD5_X2].x2.B2[32]), 0, 24);
		}
		else
#endif
		{
			//MD5_swap(input_buf_X86[j>>MD5_X2].x1.w, input_buf2_X86[j>>MD5_X2].x1.w, 8);
			memset(&(input_buf_X86[j>>MD5_X2].x1.B[32]), 0, 24);
		}
#endif
	}
}

/* Same as DynamicFunc__set_input_len_32(), but for input buffer 2. */
void DynamicFunc__set_input2_len_32(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
	for (; j < til; ++j)
		total_len2_X86[j] = 32;
}

/* Same as DynamicFunc__set_input_len_32_cleartop(), but for input buffer 2. */
void DynamicFunc__set_input2_len_32_cleartop(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		j /= SIMD_COEF_32;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		for (; j < til; ++j) {
			unsigned int k;
			for (k = 0; k < SIMD_COEF_32; ++k) {
				input_buf2[j].c[GETPOS(32, k&(SIMD_COEF_32-1))] = 0x80;
				total_len2[j][k] = 32;
			}
		}
		return;
	}
#endif
	for (; j < til; ++j) {
		total_len2_X86[j] = 32;
#if !ARCH_LITTLE_ENDIAN
#if MD5_X2
		if (j&1) {
			//MD5_swap(input_buf2_X86[j>>MD5_X2].x2.w2, input_buf2_X86[j>>MD5_X2].x2.w2, 8);
			memset(&(input_buf2_X86[j>>MD5_X2].x2.B2[32]), 0, 24);
		}
		else
#endif { //MD5_swap(input_buf2_X86[j>>MD5_X2].x1.w, input_buf2_X86[j>>MD5_X2].x1.w, 8); memset(&(input_buf2_X86[j>>MD5_X2].x1.B[32]), 0, 24); } #endif } } void DynamicFunc__set_input_len_40(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif for (; j < til; ++j) total_len_X86[j] = 40; } void DynamicFunc__set_input2_len_40(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif for (; j < til; ++j) total_len2_X86[j] = 40; } void DynamicFunc__set_input2_len_40_cleartop(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { j /= SIMD_COEF_32; til = (til+SIMD_COEF_32-1)/SIMD_COEF_32; for (; j < til; ++j) { unsigned int k; for (k = 0; k < SIMD_COEF_32; ++k) { input_buf2[j].c[GETPOS(40, k&(SIMD_COEF_32-1))] = 0x80; total_len2[j][k] = 40; } } return; } #endif for (; j < til; ++j) { total_len2_X86[j] = 40; #if !ARCH_LITTLE_ENDIAN #if MD5_X2 if (j&1) { memset(&(input_buf2_X86[j>>MD5_X2].x2.B2[40]), 0, 16); } else #endif { memset(&(input_buf2_X86[j>>MD5_X2].x1.B[40]), 0, 16); } #endif } } void DynamicFunc__set_input_len_64(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_64 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len_X86[j] = 64; } void DynamicFunc__set_input2_len_64(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_64 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len2_X86[j] = 64; } void 
DynamicFunc__set_input_len_100(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_100 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) { unsigned char *cp; #if MD5_X2 if (j&1) cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]); else #endif cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]); while (*cp) *cp++ = 0; total_len_X86[j] = 100; } } void DynamicFunc__set_input_len_24(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_24 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len_X86[j] = 24; } void DynamicFunc__set_input_len_28(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_28 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len_X86[j] = 28; } void DynamicFunc__set_input_len_48(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_48 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len_X86[j] = 48; } void DynamicFunc__set_input_len_56(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_56 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) 
total_len_X86[j] = 56; } void DynamicFunc__set_input_len_80(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_80 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len_X86[j] = 80; } void DynamicFunc__set_input_len_96(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_96 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len_X86[j] = 96; } void DynamicFunc__set_input_len_112(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_112 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len_X86[j] = 112; } void DynamicFunc__set_input_len_128(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_128 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len_X86[j] = 128; } void DynamicFunc__set_input_len_160(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_160 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len_X86[j] = 160; } void DynamicFunc__set_input_len_192(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = 
m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_192 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len_X86[j] = 192; } void DynamicFunc__set_input_len_256(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input_len_256 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len_X86[j] = 256; } void DynamicFunc__set_input2_len_24(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_24 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len2_X86[j] = 24; } void DynamicFunc__set_input2_len_28(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_28 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len2_X86[j] = 28; } void DynamicFunc__set_input2_len_48(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_48 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len2_X86[j] = 48; } void DynamicFunc__set_input2_len_56(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call 
DynamicFunc__set_input2_len_56 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len2_X86[j] = 56; } void DynamicFunc__set_input2_len_80(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_80 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len2_X86[j] = 80; } void DynamicFunc__set_input2_len_96(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_96 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len2_X86[j] = 96; } void DynamicFunc__set_input2_len_112(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_112 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len2_X86[j] = 112; } void DynamicFunc__set_input2_len_128(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_128 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len2_X86[j] = 128; } void DynamicFunc__set_input2_len_160(DYNA_OMP_PARAMS) { unsigned int j, til; #ifdef _OPENMP til = last; j = first; #else j = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse == 1) error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_160 in SSE2/MMX mode\n"); #endif for (; j < til; ++j) total_len2_X86[j] = 160; } void 
/* Force input-2 length to 192.  X86 buffers only; rejected in SIMD mode. */
DynamicFunc__set_input2_len_192(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_192 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len2_X86[j] = 192;
}

/* Force input-2 length to 256.  X86 buffers only; rejected in SIMD mode. */
void DynamicFunc__set_input2_len_256(DYNA_OMP_PARAMS)
{
	unsigned int j, til;
#ifdef _OPENMP
	til = last;
	j = first;
#else
	j = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse == 1)
		error_msg("Error, in your DYNAMIC script.\nIt is NOT valid to call DynamicFunc__set_input2_len_256 in SSE2/MMX mode\n");
#endif
	for (; j < til; ++j)
		total_len2_X86[j] = 256;
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Appends the salt to the end of the input variables, and
 * updates lengths
 *************************************************************/
void DynamicFunc__append_salt(DYNA_OMP_PARAMS)
{
	__append_string(DYNA_OMP_PARAMSdm cursalt, saltlen);
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Appends the salt to the end of the 2nd input variables, and
 * updates lengths
 *************************************************************/
void DynamicFunc__append_salt2(DYNA_OMP_PARAMS)
{
	__append2_string(DYNA_OMP_PARAMSdm cursalt, saltlen);
}

/* Append the current contents of input-2 onto the end of input-1 for every
 * candidate, updating input-1 lengths.  SIMD path copies byte-by-byte per
 * lane via GETPOS and re-places the 0x80 end-of-buffer marker. */
void DynamicFunc__append_input_from_input2(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	til = last;
	i = first;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int j, k;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		for (; i < til; ++i) {
			for (j = 0; j < SIMD_COEF_32; ++j) {
				unsigned int start_len = total_len[i][j];
				unsigned int len1 = total_len2[i][j];
				for (k = 0; k < len1; ++k)
					input_buf[i].c[GETPOS((k+start_len), j)] = input_buf2[i].c[GETPOS(k,j)];
				input_buf[i].c[GETPOS((len1+start_len), j)] = 0x80;
				total_len[i][j] += len1;
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		if (i&1)
			memcpy(&(input_buf_X86[i>>MD5_X2].x2.b2[total_len_X86[i]]), input_buf2_X86[i>>MD5_X2].x2.b2, total_len2_X86[i]);
		else
#endif
		memcpy(&(input_buf_X86[i>>MD5_X2].x1.b[total_len_X86[i]]), input_buf2_X86[i>>MD5_X2].x1.b, total_len2_X86[i]);
		total_len_X86[i] += total_len2_X86[i];
	}
}

/* Mirror of the above: append input-1 onto the end of input-2. */
void DynamicFunc__append_input2_from_input(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	til = last;
	i = first;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int j, k;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		for (; i < til; ++i) {
			for (j = 0; j < SIMD_COEF_32; ++j) {
				unsigned int start_len = total_len2[i][j];
				unsigned int len1 = total_len[i][j];
				for (k = 0; k < len1; ++k)
					input_buf2[i].c[GETPOS((k+start_len), j)] = input_buf[i].c[GETPOS(k,j)];
				input_buf2[i].c[GETPOS((len1+start_len), j)] = 0x80;
				total_len2[i][j] += len1;
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		if (i&1)
			memcpy(&(input_buf2_X86[i>>MD5_X2].x2.b2[total_len2_X86[i]]), input_buf_X86[i>>MD5_X2].x2.b2, total_len_X86[i]);
		else
#endif
		memcpy(&(input_buf2_X86[i>>MD5_X2].x1.b[total_len2_X86[i]]), input_buf_X86[i>>MD5_X2].x1.b, total_len_X86[i]);
		total_len2_X86[i] += total_len_X86[i];
	}
}

/* Append input-1 onto itself (doubling its contents and its length). */
void DynamicFunc__append_input_from_input(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	til = last;
	i = first;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int j, k;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		for (; i < til; ++i) {
			for (j = 0; j < SIMD_COEF_32; ++j) {
				unsigned int start_len = total_len[i][j];
				for (k = 0; k < start_len; ++k)
					input_buf[i].c[GETPOS((k+start_len), j)] = input_buf[i].c[GETPOS(k,j)];
				input_buf[i].c[GETPOS((start_len+start_len), j)] = 0x80;
				total_len[i][j] += start_len;
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		if (i&1)
			memcpy(&(input_buf_X86[i>>MD5_X2].x2.b2[total_len_X86[i]]), input_buf_X86[i>>MD5_X2].x2.b2, total_len_X86[i]);
		else
#endif
		memcpy(&(input_buf_X86[i>>MD5_X2].x1.b[total_len_X86[i]]), input_buf_X86[i>>MD5_X2].x1.b, total_len_X86[i]);
		total_len_X86[i] <<= 1;
	}
}

/* Append input-2 onto itself (doubling its contents and its length). */
void DynamicFunc__append_input2_from_input2(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	til = last;
	i = first;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		unsigned int j, k;
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		for (; i < til; ++i) {
			for (j = 0; j < SIMD_COEF_32; ++j) {
				unsigned int start_len = total_len2[i][j];
				for (k = 0; k < start_len; ++k)
					input_buf2[i].c[GETPOS((k+start_len), j)] = input_buf2[i].c[GETPOS(k,j)];
				input_buf2[i].c[GETPOS((start_len+start_len), j)] = 0x80;
				total_len2[i][j] += start_len;
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		if (i&1)
			memcpy(&(input_buf2_X86[i>>MD5_X2].x2.b2[total_len2_X86[i]]), input_buf2_X86[i>>MD5_X2].x2.b2, total_len2_X86[i]);
		else
#endif
		memcpy(&(input_buf2_X86[i>>MD5_X2].x1.b[total_len2_X86[i]]), input_buf2_X86[i>>MD5_X2].x1.b, total_len2_X86[i]);
		total_len2_X86[i] <<= 1;
	}
}

#ifdef SIMD_PARA_MD5
/* Store the MD5 bit lengths (len << 3) into word 14 of each lane's SIMD
 * block for SIMD_PARA_MD5 interleaved blocks starting at index i.
 * side 0 = input_buf/total_len, side 1 = input_buf2/total_len2. */
static void SSE_Intrinsics_LoadLens_md5(int side, int i)
{
	uint32_t *p;
	unsigned int j, k;
	if (side == 0) {
		for (j = 0; j < SIMD_PARA_MD5; j++) {
			p = input_buf[i+j].w;
			for (k = 0; k < SIMD_COEF_32; k++)
				p[14*SIMD_COEF_32+k] = total_len[i+j][k] << 3;
		}
	} else {
		for (j = 0; j < SIMD_PARA_MD5; j++) {
			p = input_buf2[i+j].w;
			for (k = 0; k < SIMD_COEF_32; k++)
				p[14*SIMD_COEF_32+k] = total_len2[i+j][k] << 3;
		}
	}
}
#endif

#ifdef SIMD_PARA_MD4
/* MD4 twin of SSE_Intrinsics_LoadLens_md5 (same length-word layout). */
static void SSE_Intrinsics_LoadLens_md4(int side, int i)
{
	uint32_t *p;
	unsigned int j, k;
	if (side == 0) {
		for (j = 0; j < SIMD_PARA_MD4; j++) {
			p = input_buf[i+j].w;
			for (k = 0; k < SIMD_COEF_32; k++)
				p[14*SIMD_COEF_32+k] = total_len[i+j][k] << 3;
		}
	} else {
		for (j = 0; j < SIMD_PARA_MD4; j++) {
			p = input_buf2[i+j].w;
			for (k = 0; k < SIMD_COEF_32; k++)
				p[14*SIMD_COEF_32+k] = total_len2[i+j][k] << 3;
		}
	}
}
#endif

/**************************************************************
 * DYNAMIC primitive helper function
 * Encrypts the data in the first input field. The data is
 * still in the binary encrypted format, in the crypt_key.
 * we do not yet convert to base-16. This is so we can output
 * as base-16, or later, if we add base-64, we can output to
 * that format instead.
 *************************************************************/
void DynamicFunc__crypt_md5(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	til = last;
	i = first;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		if (curdat.store_keys_in_input) {
			/* Lengths were already written at key-set time. */
			for (; i < til; i += SIMD_PARA_MD5) {
				SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			}
		} else {
			for (; i < til; i += SIMD_PARA_MD5) {
				SSE_Intrinsics_LoadLens_md5(0, i);
				SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
#if MD5_X2
		/* X2 builds hash two candidates per DoMD5 call; lens for the pair. */
		unsigned int len[2];
		len[0] = total_len_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len_X86[i];
#else
		unsigned int len = total_len_X86[i];
#endif
		DoMD5(input_buf_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
	}
}

/* MD4 twin of DynamicFunc__crypt_md5 (input-1 -> crypt_key). */
void DynamicFunc__crypt_md4(DYNA_OMP_PARAMS)
{
	unsigned int i, til;
#ifdef _OPENMP
	til = last;
	i = first;
#else
	i = 0;
	til = m_count;
#endif
#ifdef SIMD_COEF_32
	if (dynamic_use_sse==1) {
		til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
		i /= SIMD_COEF_32;
		if (curdat.store_keys_in_input) {
			for (; i < til; i += SIMD_PARA_MD4) {
				SIMDmd4body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			}
		} else {
			for (; i < til; i += SIMD_PARA_MD4) {
				SSE_Intrinsics_LoadLens_md4(0, i);
				SIMDmd4body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
			}
		}
		return;
	}
#endif
	for (; i < til; ++i) {
		// MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE
		// to treat them just like we do in MD5. The macro hides the details.
#if MD5_X2
		unsigned int len[2];
		len[0] = total_len_X86[i++];
		if (i == m_count)
			len[1] = 0;
		else
			len[1] = total_len_X86[i];
#else
		unsigned int len = total_len_X86[i];
#endif
		DoMD4(input_buf_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
	}
}

/* Hard-wired Post.Office (PO) crypt: builds salt.'Y'.key.0xf7.salt in one
 * reused input slot and MD5s it per candidate key.  The commented function
 * list below documents the script sequence this replaces. */
void DynamicFunc__POCrypt(DYNA_OMP_PARAMS)
{
	unsigned int i, j;
	unsigned int til, len;
	unsigned char *pBuf;
#if MD5_X2
	unsigned char *pBuf2;
	unsigned int lens[2];
#endif
#ifdef _OPENMP
	til = last;
	i = first;
#else
	i = 0;
	til = m_count;
#endif
	//DynamicFunc__clean_input_kwik();
	//DynamicFunc__append_salt,
	//DynamicFunc__append_input1_from_CONST1,
	//DynamicFunc__append_keys,
	//DynamicFunc__append_input1_from_CONST2,
	//DynamicFunc__append_salt,
	//DynamicFunc__crypt_md5,
	pBuf = input_buf_X86[i>>MD5_X2].x1.B;
#if MD5_X2
	pBuf2 = input_buf_X86[i>>MD5_X2].x2.B2;
	memset(pBuf2, 0, sizeof(input_buf_X86[i>>MD5_X2].x2.B2));
	memcpy(pBuf2, cursalt, 32);
	pBuf2[32] = 'Y';
#endif
	memset(pBuf, 0, sizeof(input_buf_X86[i>>MD5_X2].x1.b));
	memcpy(pBuf, cursalt, 32);
	pBuf[32] = 'Y';
	for (j = i; j < til; ++j) {
		len = saved_key_len[j];
		memcpy(&pBuf[33], saved_key[j], len);
		pBuf[33+len] = 0xf7;
		memcpy(&pBuf[34+len], cursalt, 32);
#if MD5_X2
		lens[0] = len+66; // len from the 'first'
		++j;
		if (j < m_count) {
			len = saved_key_len[j];
			memcpy(&pBuf2[33], saved_key[j], len);
			pBuf2[33+len] = 0xf7;
			memcpy(&pBuf2[34+len], cursalt, 32);
			lens[1] = len+66;
		} else {
			lens[1] = 0;
		}
		DoMD5(input_buf_X86[i>>MD5_X2], lens, crypt_key_X86[j>>MD5_X2]);
#else
		DoMD5(input_buf_X86[i>>MD5_X2], (len+66), crypt_key_X86[j]);
#endif
	}
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Encrypts the data in the 2nd input field into crypt_keys2.
*************************************************************/ void DynamicFunc__crypt2_md5(DYNA_OMP_PARAMS) { unsigned int i, til; #ifdef _OPENMP i = first; til = last; #else i = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { til = (til+SIMD_COEF_32-1)/SIMD_COEF_32; i /= SIMD_COEF_32; for (; i < til; i += SIMD_PARA_MD5) { SSE_Intrinsics_LoadLens_md5(1, i); SIMDmd5body(input_buf2[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN); } return; } #endif for (; i < til; ++i) { #if MD5_X2 unsigned int len[2]; len[0] = total_len2_X86[i++]; if (i < m_count) len[1] = total_len2_X86[i]; else len[1] = 0; #else unsigned int len = total_len2_X86[i]; #endif DoMD5(input_buf2_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]); } } void DynamicFunc__crypt2_md4(DYNA_OMP_PARAMS) { unsigned int i, til; #ifdef _OPENMP i = first; til = last; #else i = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { til = (til+SIMD_COEF_32-1)/SIMD_COEF_32; i /= SIMD_COEF_32; for (; i < til; i += SIMD_PARA_MD4) { SSE_Intrinsics_LoadLens_md4(1, i); SIMDmd4body(input_buf2[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN); } return; } #endif for (; i < til; ++i) { // MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE // to treat them just like we do in MD5. The macro hides the details. #if MD5_X2 unsigned int len[2]; len[0] = total_len2_X86[i++]; if (i == m_count) len[1] = 0; else len[1] = total_len2_X86[i]; #else unsigned int len = total_len2_X86[i]; #endif DoMD4(input_buf2_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]); } } /************************************************************** * DYNAMIC primitive helper function * Encrypts the data in the 1st input field crypt_keys2. 
*************************************************************/ void DynamicFunc__crypt_md5_in1_to_out2(DYNA_OMP_PARAMS) { unsigned int i, til; #ifdef _OPENMP i = first; til = last; #else i = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { til = (til+SIMD_COEF_32-1)/SIMD_COEF_32; i /= SIMD_COEF_32; if (curdat.store_keys_in_input) { for (; i < til; i += SIMD_PARA_MD5) { SIMDmd5body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN); } } else { for (; i < til; i += SIMD_PARA_MD5) { SSE_Intrinsics_LoadLens_md5(0, i); SIMDmd5body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN); } } return; } #endif for (; i < til; ++i) { #if MD5_X2 unsigned int len[2]; len[0] = total_len_X86[i++]; if (i == m_count) len[1] = 0; else len[1] = total_len_X86[i]; #else unsigned int len = total_len_X86[i]; #endif DoMD5(input_buf_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]); } } void DynamicFunc__crypt_md4_in1_to_out2(DYNA_OMP_PARAMS) { unsigned int i, til; #ifdef _OPENMP i = first; til = last; #else i = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { til = (til+SIMD_COEF_32-1)/SIMD_COEF_32; i /= SIMD_COEF_32; if (curdat.store_keys_in_input) { for (; i < til; i += SIMD_PARA_MD4) { SIMDmd4body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN); } } else { for (; i < til; i += SIMD_PARA_MD4) { SSE_Intrinsics_LoadLens_md4(0, i); SIMDmd4body(input_buf[i].c, crypt_key2[i].w, NULL, SSEi_MIXED_IN); } } return; } #endif for (; i < til; ++i) { // MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE // to treat them just like we do in MD5. The macro hides the details. 
#if MD5_X2 unsigned int len[2]; len[0] = total_len_X86[i++]; if (i == m_count) len[1] = 0; else len[1] = total_len_X86[i]; #else unsigned int len = total_len_X86[i]; #endif DoMD4(input_buf_X86[i>>MD5_X2], len, crypt_key2_X86[i>>MD5_X2]); } } /************************************************************** * DYNAMIC primitive helper function * Encrypts the data in the 2nd input field into crypt_keys. *************************************************************/ void DynamicFunc__crypt_md5_in2_to_out1(DYNA_OMP_PARAMS) { unsigned int i, til; #ifdef _OPENMP i = first; til = last; #else i = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { til = (til+SIMD_COEF_32-1)/SIMD_COEF_32; i /= SIMD_COEF_32; for (; i < til; i += SIMD_PARA_MD5) { SSE_Intrinsics_LoadLens_md5(1, i); SIMDmd5body(input_buf2[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN); //dump_stuff_mmx_msg("DynamicFunc__crypt_md5_in2_to_out1", input_buf2[i].c,64,m_count-1); } return; } #endif for (; i < til; ++i) { #if MD5_X2 unsigned int len[2]; len[0] = total_len2_X86[i++]; if (i == m_count) len[1] = 0; else len[1] = total_len2_X86[i]; #else unsigned int len = total_len2_X86[i]; #endif DoMD5(input_buf2_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]); } } void DynamicFunc__crypt_md4_in2_to_out1(DYNA_OMP_PARAMS) { unsigned int i, til; #ifdef _OPENMP i = first; til = last; #else i = 0; til = m_count; #endif #ifdef SIMD_COEF_32 if (dynamic_use_sse==1) { til = (til+SIMD_COEF_32-1)/SIMD_COEF_32; i /= SIMD_COEF_32; for (; i < til; i += SIMD_PARA_MD4) { SSE_Intrinsics_LoadLens_md4(1, i); SIMDmd4body(input_buf2[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN); } return; } #endif for (; i < til; ++i) { // MD5_X2 sets our input buffers and crypt keys up in 'double' format. Thus, we HAVE // to treat them just like we do in MD5. The macro hides the details. 
#if MD5_X2
        unsigned int len[2];
        len[0] = total_len2_X86[i++];
        if (i == m_count)
            len[1] = 0;
        else
            len[1] = total_len2_X86[i];
#else
        unsigned int len = total_len2_X86[i];
#endif
        DoMD4(input_buf2_X86[i>>MD5_X2], len, crypt_key_X86[i>>MD5_X2]);
    }
}

// MD5 of input #1 written back into input #1 as 16 raw digest bytes;
// lengths are reset to 16 (0x10) for the next round.
void DynamicFunc__crypt_md5_to_input_raw(DYNA_OMP_PARAMS)
{
    unsigned int i, til;
#ifdef _OPENMP
    i = first;
    til = last;
#else
    i = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
        i /= SIMD_COEF_32;
        for (; i < til; i += SIMD_PARA_MD5) {
            unsigned int j, k;
            SSE_Intrinsics_LoadLens_md5(0, i);
            // NOTE, since crypt_key array is 16 bytes each, and input_buf is 64 bytes
            // each, and we are doing 3 at a time, we can NOT directly write to the
            // input buff, but have to use the crypt_key buffer, and then memcpy when done.
            SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
            for (j = 0; j < SIMD_PARA_MD5; ++j) {
                memset(input_buf[i+j].c, 0, sizeof(input_buf[0]));
                memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*SIMD_COEF_32);
                for (k = 0; k < SIMD_COEF_32; k++)
                    total_len[i+j][k] = 16;
            }
        }
        return;
    }
#endif
    for (; i < til; ++i) {
#if MD5_X2
        unsigned int len[2];
        len[0] = total_len_X86[i];
        total_len_X86[i++] = 0x10; // digest is 16 bytes; fix length for slot 0
        if (i == m_count)
            len[1] = 0;
        else
            len[1] = total_len_X86[i];
#else
        unsigned int len = total_len_X86[i];
#endif
        DoMD5(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
        total_len_X86[i] = 0x10;
    }
}

// Same as above but input lengths are NOT updated on the X86 side;
// in the SSE path lengths are still loaded into the SIMD buffers.
void DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen_but_setlen_in_SSE(DYNA_OMP_PARAMS)
{
    unsigned int i, til;
#ifdef _OPENMP
    i = first;
    til = last;
#else
    i = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
        i /= SIMD_COEF_32;
        for (; i < til; i += SIMD_PARA_MD5) {
            unsigned int j;
            SSE_Intrinsics_LoadLens_md5(0, i);
            // NOTE, since crypt_key array is 16 bytes each, and input_buf is 64 bytes
            // each, and we are doing 3 at a time, we can NOT directly write to the
            // input buff, but have to use the crypt_key buffer, and then memcpy when done.
            // (cont'd) ...use the crypt_key buffer, then memcpy when done.
            SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
            for (j = 0; j < SIMD_PARA_MD5; ++j)
                memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*SIMD_COEF_32);
        }
        return;
    }
#endif
    for (; i < til; ++i) {
#if MD5_X2
        unsigned int len[2];
        len[0] = total_len_X86[i++];
        if (i == m_count)
            len[1] = 0;
        else
            len[1] = total_len_X86[i];
#else
        unsigned int len = total_len_X86[i];
#endif
        // Note: lengths intentionally left unchanged here.
        DoMD5(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
    }
}

// MD5 of input #1 overwritten into input #1; neither X86 lengths nor
// SSE lane lengths are rewritten (the 'NoLen' variant).
void DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen(DYNA_OMP_PARAMS)
{
    unsigned int i, til;
#ifdef _OPENMP
    i = first;
    til = last;
#else
    i = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        til = (til+SIMD_COEF_32-1)/SIMD_COEF_32;
        i /= SIMD_COEF_32;
        for (; i < til; i += SIMD_PARA_MD5) {
            unsigned int j;
            // NOTE, since crypt_key array is 16 bytes each, and input_buf is 64 bytes
            // each, and we are doing 3 at a time, we can NOT directly write to the
            // input buff, but have to use the crypt_key buffer, and then memcpy when done.
            SIMDmd5body(input_buf[i].c, crypt_key[i].w, NULL, SSEi_MIXED_IN);
            for (j = 0; j < SIMD_PARA_MD5; ++j)
                memcpy(input_buf[i+j].c, crypt_key[i+j].c, 16*SIMD_COEF_32);
        }
        return;
    }
#endif
    for (; i < til; ++i) {
#if MD5_X2
        unsigned int len[2];
        len[0] = total_len_X86[i++];
        if (i == m_count)
            len[1] = 0;
        else
            len[1] = total_len_X86[i];
#else
        unsigned int len = total_len_X86[i];
#endif
        // we call DoMD5o so as to 'not' change then length (it was already set)
        DoMD5o(input_buf_X86[i>>MD5_X2], len, input_buf_X86[i>>MD5_X2]);
    }
}

// Overwrites the start of input #1 with the current salt; lengths are NOT
// adjusted ('no_size_fix'). Handles UTF-16 LE/BE conversion when requested.
void DynamicFunc__overwrite_salt_to_input1_no_size_fix(DYNA_OMP_PARAMS)
{
    unsigned int j, til;
    int utf16 = md5_unicode_convert_get(tid); // tid presumably from DYNA_OMP_PARAMS
#ifdef _OPENMP
    j = first;
    til = last;
#else
    j = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        if (utf16) {
            if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
                UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
                int outlen;
                if (utf16 == 1)
                    outlen = enc_to_utf16(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
                else
                    outlen = enc_to_utf16_be(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
                if (outlen < 0)
                    outlen = strlen16(utf16Str) * sizeof(UTF16);
                for (; j < til; ++j) {
                    __SSE_append_string_to_input(input_buf[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)utf16Str,outlen,0,0);
                }
            } else {
                // Plain 8-bit data: widen in place lane by lane.
                for (; j < til; ++j)
                    __SSE_append_string_to_input_unicode(input_buf[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)cursalt,saltlen,0,0);
            }
            return;
        }
        for (; j < til; ++j)
            __SSE_append_string_to_input(input_buf[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),cursalt,saltlen,0,0);
        return;
    }
#endif
    if (utf16) {
        if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
            UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
            int outlen;
            if (utf16 == 1)
                outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
            else
                outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
            if (outlen < 0)
                outlen = strlen16(utf16Str) * sizeof(UTF16);
            for (; j < til; ++j) {
                unsigned int z;
                unsigned char *cp, *cpi = (unsigned char*)utf16Str;
#if MD5_X2
                if (j&1)
                    cp = input_buf_X86[j>>MD5_X2].x2.B2;
                else
#endif
                cp = input_buf_X86[j>>MD5_X2].x1.B;
                for (z = 0; z < outlen; ++z)
                    *cp++ = *cpi++;
            }
        } else {
            for (; j < til; ++j) {
                unsigned int z;
                unsigned char *cp, *cpi = (unsigned char*)cursalt;
#if MD5_X2
                if (j&1)
                    cp = input_buf_X86[j>>MD5_X2].x2.B2;
                else
#endif
                cp = input_buf_X86[j>>MD5_X2].x1.B;
                // Widen each 8-bit salt byte to UTF-16 with a zero high byte.
                for (z = 0; z < saltlen; ++z) {
                    *cp++ = *cpi++;
                    *cp++ = 0;
                }
            }
        }
        return;
    }
    for (; j < til; ++j) {
#if MD5_X2
        if (j&1)
            memcpy(input_buf_X86[j>>MD5_X2].x2.b2, cursalt, saltlen);
        else
#endif
        memcpy(input_buf_X86[j>>MD5_X2].x1.b, cursalt, saltlen);
    }
}

// Same as the input-1 variant above, but targets input buffer #2.
void DynamicFunc__overwrite_salt_to_input2_no_size_fix(DYNA_OMP_PARAMS)
{
    unsigned int j, til;
    int utf16 = md5_unicode_convert_get(tid);
#ifdef _OPENMP
    j = first;
    til = last;
#else
    j = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        if (utf16) {
            if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
                UTF16 utf16Str[27+1]; // 27 chars is 'max' that fits in SSE without overflow, so that is where we limit it at now
                int outlen;
                if (utf16 == 1)
                    outlen = enc_to_utf16(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
                else
                    outlen = enc_to_utf16_be(utf16Str, 27, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
                if (outlen < 0)
                    outlen = strlen16(utf16Str) * sizeof(UTF16);
                for (; j < til; ++j) {
                    __SSE_append_string_to_input(input_buf2[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)utf16Str,outlen,0,0);
                }
            } else {
                for (; j < til; ++j)
                    __SSE_append_string_to_input_unicode(input_buf2[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),(unsigned char*)cursalt,saltlen,0,0);
            }
            return;
        }
        for (; j < til; ++j)
            __SSE_append_string_to_input(input_buf2[j/SIMD_COEF_32].c,j&(SIMD_COEF_32-1),cursalt,saltlen,0,0);
        return;
    }
#endif
    if (utf16) {
        if (utf16 == 2 || (options.target_enc != ASCII && options.target_enc != ISO_8859_1)) {
            UTF16 utf16Str[ENCODED_EFFECTIVE_MAX_LENGTH + 1];
            int outlen;
            if (utf16 == 1)
                outlen = enc_to_utf16(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
            else
                outlen = enc_to_utf16_be(utf16Str, ENCODED_EFFECTIVE_MAX_LENGTH, (unsigned char*)cursalt, saltlen) * sizeof(UTF16);
            if (outlen < 0)
                outlen = strlen16(utf16Str) * sizeof(UTF16);
            for (; j < til; ++j) {
                unsigned int z;
                unsigned char *cp, *cpi = (unsigned char*)utf16Str;
#if MD5_X2
                if (j&1)
                    cp = input_buf2_X86[j>>MD5_X2].x2.B2;
                else
#endif
                cp = input_buf2_X86[j>>MD5_X2].x1.B;
                for (z = 0; z < outlen; ++z)
                    *cp++ = *cpi++;
            }
        } else {
            for (; j < til; ++j) {
                unsigned int z;
                unsigned char *cp, *cpi = (unsigned char*)cursalt;
#if MD5_X2
                if (j&1)
                    cp = input_buf2_X86[j>>MD5_X2].x2.B2;
                else
#endif
                cp = input_buf2_X86[j>>MD5_X2].x1.B;
                for (z = 0; z < saltlen; ++z) {
                    *cp++ = *cpi++;
                    *cp++ = 0;
                }
            }
        }
        return;
    }
    for (; j < til; ++j) {
#if MD5_X2
        if (j&1)
            memcpy(input_buf2_X86[j>>MD5_X2].x2.b2, cursalt, saltlen);
        else
#endif
        memcpy(input_buf2_X86[j>>MD5_X2].x1.b, cursalt, saltlen);
    }
}

/**************************************************************
 * DYNAMIC primitive helper function
 * overwrites start of input1 from the output2 data using base-16
 *************************************************************/
void DynamicFunc__overwrite_from_last_output2_to_input1_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
    unsigned int j, til;
#ifdef _OPENMP
    j = first;
    til = last;
#else
    j = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        unsigned int idx;
        for (; j < til; ++j) {
            idx = ( ((unsigned int)j)/SIMD_COEF_32);
            __SSE_overwrite_output_base16_to_input(input_buf[idx].w, crypt_key2[idx].c, j&(SIMD_COEF_32-1));
        }
        return;
    }
#endif
    for (; j < til; ++j) {
        unsigned char *cpo,
                      *cpi;
        unsigned int i;
        /* MD5_word *w; */
#if MD5_X2
        if (j&1) {cpo = input_buf_X86[j>>MD5_X2].x2.B2; cpi = crypt_key2_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
        else
#endif
        {cpo = input_buf_X86[j>>MD5_X2].x1.B; cpi = crypt_key2_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
        // Expand each of the 16 digest bytes into two hex chars.
        for (i = 0; i < 16; ++i, ++cpi) {
            *cpo++ = dynamic_itoa16[*cpi>>4];
            *cpo++ = dynamic_itoa16[*cpi&0xF];
        }
        //MD5_swap(w,w,4);
    }
}

/**************************************************************
 * DYNAMIC primitive helper function
 * overwrites start of input1 from the output1 data using base-16
 *************************************************************/
void DynamicFunc__overwrite_from_last_output_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
    unsigned int j, til;
#ifdef _OPENMP
    j = first;
    til = last;
#else
    j = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        unsigned int idx;
        for (; j < til; ++j) {
            idx = ( ((unsigned int)j)/SIMD_COEF_32);
            __SSE_overwrite_output_base16_to_input(input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1));
        }
        return;
    }
#endif
    for (; j < til; ++j) {
        unsigned char *cpo, *cpi;
        unsigned int i;
        /* MD5_word *w; */
#if MD5_X2
        if (j&1) {cpo = input_buf_X86[j>>MD5_X2].x2.B2; cpi = crypt_key_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
        else
#endif
        {cpo = input_buf_X86[j>>MD5_X2].x1.B; cpi = crypt_key_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
        for (i = 0; i < 16; ++i, ++cpi) {
            *cpo++ = dynamic_itoa16[*cpi>>4];
            *cpo++ = dynamic_itoa16[*cpi&0xF];
        }
        //MD5_swap(w,w,4);
    }
}

/**************************************************************
 * DYNAMIC primitive helper function
 * This will take the data stored in the crypt_keys (the encrypted
 * 'first' key variable), and use a base-16 text formatting, and
 * append this to the first input buffer (adjusting the lengths)
 *************************************************************/
void DynamicFunc__append_from_last_output_as_base16(DYNA_OMP_PARAMS)
{
    unsigned int j, til;
#ifdef _OPENMP
    j = first;
    til = last;
#else
    j = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        unsigned int idx;
        for (; j < til; ++j) {
            unsigned int ip;
            idx = ( ((unsigned int)j)/SIMD_COEF_32);
            // This is the 'actual' work.
            ip = total_len[idx][j & (SIMD_COEF_32 - 1)];
            total_len[idx][j & (SIMD_COEF_32 - 1)] += 32;
            if (!ip)
                __SSE_append_output_base16_to_input(input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1));
            else if (ip&1) {
                // Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
                unsigned int k;
                for (k = 0; k < 16; ++k) {
                    unsigned char v = crypt_key[idx].c[GETPOS(k, j&(SIMD_COEF_32-1))];
                    input_buf[idx].c[GETPOS(ip+(k<<1), j&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
                    input_buf[idx].c[GETPOS(ip+(k<<1)+1, j&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
                }
                // Terminate with the MD5 padding byte.
                input_buf[idx].c[GETPOS(ip+32, j&(SIMD_COEF_32-1))] = 0x80;
            }
            else if ((ip&3)==0)
                __SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1));
            else
                __SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf[idx].w, crypt_key[idx].c, j&(SIMD_COEF_32-1));
        }
        return;
    }
#endif
    for (; j < til; ++j) {
        unsigned char *cp, *cpi;
        unsigned int i;
#if MD5_X2
        if (j&1) {cp = &(input_buf_X86[j>>MD5_X2].x2.B2[total_len_X86[j]]); cpi = crypt_key_X86[j>>MD5_X2].x2.B2; }
        else
#endif
        {cp = &(input_buf_X86[j>>MD5_X2].x1.B[total_len_X86[j]]); cpi = crypt_key_X86[j>>MD5_X2].x1.B; }
        for (i = 0; i < 16; ++i) {
#if ARCH_ALLOWS_UNALIGNED
            // Two hex chars per byte via the precomputed 16-bit table.
            *((unsigned short*)cp) = itoa16_w2[*cpi++];
            cp += 2;
#else
            unsigned char b = *cpi++;
            *cp++ = dynamic_itoa16[b>>4];
            *cp++ = dynamic_itoa16[b&0xF];
#endif
        }
        *cp = 0;
        total_len_X86[j] += 32;
    }
}

/**************************************************************
 * DYNAMIC primitive helper function
 * This will take the data stored in the crypt_keys2 (the encrypted
 * 'second' key variable), and base-16 appends to the 2nd input
 *************************************************************/
void DynamicFunc__append_from_last_output2_as_base16(DYNA_OMP_PARAMS)
{
    unsigned int i, til;
#ifdef _OPENMP
    i = first;
    til = last;
#else
    i = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        unsigned int idx;
        for (; i < til; ++i) {
            unsigned int ip, j;
            idx = ( ((unsigned int)i)/SIMD_COEF_32);
            // This is the 'actual' work.
            ip = total_len2[idx][i&(SIMD_COEF_32-1)];
            total_len2[idx][i&(SIMD_COEF_32-1)] += 32;
            if (!ip)
                __SSE_append_output_base16_to_input(input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
            else if (ip&1) {
                // Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
                for (j = 0; j < 16; ++j) {
                    unsigned char v = crypt_key2[idx].c[GETPOS(j, i&(SIMD_COEF_32-1))];
                    input_buf2[idx].c[GETPOS(ip+(j<<1), i&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
                    input_buf2[idx].c[GETPOS(ip+(j<<1)+1, i&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
                }
                input_buf2[idx].c[GETPOS(ip+32, i&(SIMD_COEF_32-1))] = 0x80;
            }
            else if ((ip&3)==0)
                __SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
            else
                __SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
        }
        return;
    }
#endif
    for (; i < til; ++i) {
        unsigned int j;
        unsigned char *cp, *cpi;
#if MD5_X2
        if (i&1) {cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
        else
#endif
        {cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
        for (j = 0; j < 16; ++j) {
#if ARCH_ALLOWS_UNALIGNED
            *((unsigned short*)cp) = itoa16_w2[*cpi++];
            cp += 2;
#else
            unsigned char b = *cpi++;
            *cp++ = dynamic_itoa16[b>>4];
            *cp++ = dynamic_itoa16[b&0xF];
#endif
        }
        *cp = 0;
        total_len2_X86[i] += 32;
    }
}

/**************************************************************
 * DYNAMIC primitive helper function
 * overwrites start of input2 from the output1 data using
 * base-16
 * an optimization, if the same thing is done over and over
 * again, such as md5(md5(md5(md5($p)))) There, we would only
 * call the copy and set length once, then simply call copy.
 *************************************************************/
void DynamicFunc__overwrite_from_last_output_to_input2_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
    unsigned int i, til,j;
#ifdef _OPENMP
    i = first;
    til = last;
#else
    i = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        unsigned int idx;
        for (; i < til; ++i) {
            idx = ( ((unsigned int)i)/SIMD_COEF_32);
            __SSE_overwrite_output_base16_to_input(input_buf2[idx].w, crypt_key[idx].c, i&(SIMD_COEF_32-1));
        }
        return;
    }
#endif
    j = i; // iterate with j; i is reused as the byte counter below
    for (; j < til; ++j) {
        unsigned char *cpo, *cpi;
        /* MD5_word *w; */
#if MD5_X2
        if (j&1) {cpo = input_buf2_X86[j>>MD5_X2].x2.B2; cpi = crypt_key_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
        else
#endif
        {cpo = input_buf2_X86[j>>MD5_X2].x1.B; cpi = crypt_key_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
        for (i = 0; i < 16; ++i, ++cpi) {
            *cpo++ = dynamic_itoa16[*cpi>>4];
            *cpo++ = dynamic_itoa16[*cpi&0xF];
        }
        //MD5_swap(w,w,4);
    }
}

// Overwrites the start of input #2 with hex of output #2 (crypt_key2);
// lengths are NOT adjusted.
void DynamicFunc__overwrite_from_last_output2_to_input2_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
    unsigned int i, til,j;
#ifdef _OPENMP
    i = first;
    til = last;
#else
    i = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        unsigned int idx;
        for (; i < til; ++i) {
            idx = ( ((unsigned int)i)/SIMD_COEF_32);
            __SSE_overwrite_output_base16_to_input(input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
        }
        return;
    }
#endif
    j = i;
    for (; j < til; ++j) {
        unsigned char *cpo, *cpi;
        /* MD5_word *w; */
#if MD5_X2
        if (j&1) {cpo = input_buf2_X86[j>>MD5_X2].x2.B2; cpi = crypt_key2_X86[j>>MD5_X2].x2.B2; /* w=input_buf2_X86[j>>MD5_X2].x2.w2; */}
        else
#endif
        {cpo = input_buf2_X86[j>>MD5_X2].x1.B; cpi = crypt_key2_X86[j>>MD5_X2].x1.B; /* w=input_buf2_X86[j>>MD5_X2].x1.w; */ }
        for (i = 0; i < 16; ++i, ++cpi) {
            *cpo++ =
                dynamic_itoa16[*cpi>>4];
            *cpo++ = dynamic_itoa16[*cpi&0xF];
        }
        //MD5_swap(w,w,4);
    }
}

/**************************************************************
 * DYNAMIC primitive helper function
 * overwrites start of input2 from the output2 data using base-16
 *************************************************************/
void DynamicFunc__overwrite_from_last_output2_as_base16_no_size_fix(DYNA_OMP_PARAMS)
{
    unsigned int i, til,j;
#ifdef _OPENMP
    i = first;
    til = last;
#else
    i = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        unsigned int idx;
        for (; i < til; ++i) {
            idx = ( ((unsigned int)i)/SIMD_COEF_32);
            __SSE_overwrite_output_base16_to_input(input_buf2[idx].w, crypt_key2[idx].c, i&(SIMD_COEF_32-1));
        }
        return;
    }
#endif
    j=i;
    for (; j < til; ++j) {
        unsigned char *cpo, *cpi;
        /* MD5_word *w; */
#if MD5_X2
        if (j&1) {cpo = input_buf2_X86[j>>MD5_X2].x2.B2; cpi = crypt_key2_X86[j>>MD5_X2].x2.B2; /* w=input_buf_X86[j>>MD5_X2].x2.w2; */}
        else
#endif
        {cpo = input_buf2_X86[j>>MD5_X2].x1.B; cpi = crypt_key2_X86[j>>MD5_X2].x1.B; /* w=input_buf_X86[j>>MD5_X2].x1.w; */ }
        for (i = 0; i < 16; ++i, ++cpi) {
            *cpo++ = dynamic_itoa16[*cpi>>4];
            *cpo++ = dynamic_itoa16[*cpi&0xF];
        }
        //MD5_swap(w,w,4);
    }
}

/**************************************************************
 * DYNAMIC primitive helper function
 * This will take the data stored in the crypt_keys1 (the encrypted
 * 'first' key variable), and base-16 appends to the 2nd input
 *************************************************************/
void DynamicFunc__append_from_last_output_to_input2_as_base16(DYNA_OMP_PARAMS)
{
    unsigned int i, til;
#ifdef _OPENMP
    i = first;
    til = last;
#else
    i = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        unsigned int index=i, idx;
        for (; index < til; ++index) {
            unsigned int ip;
            idx = ( ((unsigned int)index)/SIMD_COEF_32);
            // This is the 'actual' work.
            ip = total_len2[idx][index&(SIMD_COEF_32-1)];
            total_len2[idx][index&(SIMD_COEF_32-1)] += 32;
            if (!ip)
                __SSE_append_output_base16_to_input(input_buf2[idx].w, crypt_key[idx].c, index&(SIMD_COEF_32-1));
            else if (ip&1) {
                // Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
                for (i = 0; i < 16; ++i) {
                    unsigned char v = crypt_key[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
                    input_buf2[idx].c[GETPOS(ip+(i<<1), index&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
                    input_buf2[idx].c[GETPOS(ip+(i<<1)+1, index&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
                }
                input_buf2[idx].c[GETPOS(ip+32, index&(SIMD_COEF_32-1))] = 0x80;
            }
            else if ((ip&3)==0)
                __SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf2[idx].w, crypt_key[idx].c, index&(SIMD_COEF_32-1));
            else
                __SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf2[idx].w, crypt_key[idx].c, index&(SIMD_COEF_32-1));
        }
        return;
    }
#endif
    for (; i < til; ++i) {
        unsigned int j;
        unsigned char *cp, *cpi;
#if MD5_X2
        if (i&1) {cpi = crypt_key_X86[i>>MD5_X2].x2.B2; cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); }
        else
#endif
        {cpi = crypt_key_X86[i>>MD5_X2].x1.B; cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]);}
        for (j = 0; j < 16; ++j) {
#if ARCH_ALLOWS_UNALIGNED
            *((unsigned short*)cp) = itoa16_w2[*cpi++];
            cp += 2;
#else
            unsigned char b = *cpi++;
            *cp++ = dynamic_itoa16[b>>4];
            *cp++ = dynamic_itoa16[b&0xF];
#endif
        }
        *cp = 0;
        total_len2_X86[i] += 32;
    }
}

/**************************************************************
 * DYNAMIC primitive helper function
 * This will take the data stored in the crypt_keys2 (the encrypted
 * 'second' key variable), and base-16 appends to the 1st input
 *************************************************************/
void DynamicFunc__append_from_last_output2_to_input1_as_base16(DYNA_OMP_PARAMS)
{
    unsigned int i, til;
#ifdef _OPENMP
    i = first;
    til = last;
#else
    i = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        unsigned int
                     index=i, idx;
        for (; index < til; ++index) {
            unsigned int ip;
            idx = ( ((unsigned int)index)/SIMD_COEF_32);
            // This is the 'actual' work.
            ip = total_len[idx][index&(SIMD_COEF_32-1)];
            total_len[idx][index&(SIMD_COEF_32-1)] += 32;
            if (!ip)
                __SSE_append_output_base16_to_input(input_buf[idx].w, crypt_key2[idx].c, index&(SIMD_COEF_32-1));
            else if (ip&1) {
                // Note we are 100% unaligned, and it seems fastest to handle byte/byte (at this time).
                for (i = 0; i < 16; ++i) {
                    unsigned char v = crypt_key2[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
                    input_buf[idx].c[GETPOS(ip+(i<<1), index&(SIMD_COEF_32-1))] = dynamic_itoa16[v>>4];
                    input_buf[idx].c[GETPOS(ip+(i<<1)+1, index&(SIMD_COEF_32-1))] = dynamic_itoa16[v&0xF];
                }
                input_buf[idx].c[GETPOS(ip+32, index&(SIMD_COEF_32-1))] = 0x80;
            }
            else if ((ip&3)==0)
                __SSE_append_output_base16_to_input_semi_aligned_0(ip, input_buf[idx].w, crypt_key2[idx].c, index&(SIMD_COEF_32-1));
            else
                __SSE_append_output_base16_to_input_semi_aligned_2(ip, input_buf[idx].w, crypt_key2[idx].c, index&(SIMD_COEF_32-1));
        }
        return;
    }
#endif
    for (; i < til; ++i) {
        unsigned int j;
        unsigned char *cp, *cpi;
#if MD5_X2
        if (i&1) {cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
        else
#endif
        {cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
        for (j = 0; j < 16; ++j) {
#if ARCH_ALLOWS_UNALIGNED
            *((unsigned short*)cp) = itoa16_w2[*cpi++];
            cp += 2;
#else
            unsigned char b = *cpi++;
            *cp++ = dynamic_itoa16[b>>4];
            *cp++ = dynamic_itoa16[b&0xF];
#endif
        }
        *cp = 0;
        total_len_X86[i] += 32;
    }
}

// Appends the 16 raw bytes of output #2 (crypt_key2) to input #1.
void DynamicFunc__append_from_last_output2_as_raw(DYNA_OMP_PARAMS)
{
    unsigned int i, til;
#ifdef _OPENMP
    i = first;
    til = last;
#else
    i = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        unsigned int index=i, idx;
        for (; index < til; ++index) {
            unsigned int ip;
            idx = ( ((unsigned int)index)/SIMD_COEF_32);
            // This is the 'actual' work.
            ip = total_len[idx][index&(SIMD_COEF_32-1)];
            if (!ip) {
                // Empty buffer: copy the 4 digest words directly, lane-strided.
                uint32_t *po = input_buf[idx].w;
                uint32_t *pi = crypt_key2[idx].w;
                po += (index&(SIMD_COEF_32-1));
                pi += (index&(SIMD_COEF_32-1));
                for (i = 0; i < 4; i++) {
                    *po = *pi;
                    po += SIMD_COEF_32;
                    pi += SIMD_COEF_32;
                }
                input_buf[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
            } else {
                for (i = 0; i < 16; ++i)
                    input_buf[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key2[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
                input_buf[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
            }
            total_len[idx][index&(SIMD_COEF_32-1)] += 16;
        }
        return;
    }
#endif
    for (; i < til; ++i) {
        unsigned int j;
        unsigned char *cp, *cpi;
#if MD5_X2
        if (i&1) {cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
        else
#endif
        {cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
        for (j = 0; j < 16; ++j)
            *cp++ = *cpi++;
        *cp = 0;
        total_len_X86[i] += 16;
    }
}

// Appends the 16 raw bytes of output #2 (crypt_key2) to input #2.
void DynamicFunc__append2_from_last_output2_as_raw(DYNA_OMP_PARAMS)
{
    unsigned int i, til;
#ifdef _OPENMP
    i = first;
    til = last;
#else
    i = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        unsigned int index=i, idx;
        for (; index < til; ++index) {
            unsigned int ip;
            idx = ( ((unsigned int)index)/SIMD_COEF_32);
            // This is the 'actual' work.
            ip = total_len2[idx][index&(SIMD_COEF_32-1)];
            if (!ip) {
                uint32_t *po = input_buf2[idx].w;
                uint32_t *pi = crypt_key2[idx].w;
                po += (index&(SIMD_COEF_32-1));
                pi += (index&(SIMD_COEF_32-1));
                for (i = 0; i < 4; i++) {
                    *po = *pi;
                    po += SIMD_COEF_32;
                    pi += SIMD_COEF_32;
                }
                input_buf2[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
            } else {
                for (i = 0; i < 16; ++i)
                    input_buf2[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key2[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
                input_buf2[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
            }
            total_len2[idx][index&(SIMD_COEF_32-1)] += 16;
        }
        return;
    }
#endif
    for (; i < til; ++i) {
        unsigned int j;
        unsigned char *cp, *cpi;
#if MD5_X2
        if (i&1) {cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x2.B2; }
        else
#endif
        {cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key2_X86[i>>MD5_X2].x1.B; }
        for (j = 0; j < 16; ++j)
            *cp++ = *cpi++;
        *cp = 0;
        total_len2_X86[i] += 16;
    }
}

// Appends the 16 raw bytes of output #1 (crypt_key) to input #1.
void DynamicFunc__append_from_last_output1_as_raw(DYNA_OMP_PARAMS)
{
    unsigned int i, til;
#ifdef _OPENMP
    i = first;
    til = last;
#else
    i = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        unsigned int index, idx;
        for (index = i; index < til; ++index) {
            unsigned int ip;
            idx = ( ((unsigned int)index)/SIMD_COEF_32);
            // This is the 'actual' work.
            ip = total_len[idx][index&(SIMD_COEF_32-1)];
            if (!ip) {
                uint32_t *po = input_buf[idx].w;
                uint32_t *pi = crypt_key[idx].w;
                po += (index&(SIMD_COEF_32-1));
                pi += (index&(SIMD_COEF_32-1));
                for (i = 0; i < 4; i++) {
                    *po = *pi;
                    po += SIMD_COEF_32;
                    pi += SIMD_COEF_32;
                }
                input_buf[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
            } else {
                for (i = 0; i < 16; ++i)
                    input_buf[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
                input_buf[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
            }
            total_len[idx][index&(SIMD_COEF_32-1)] += 16;
        }
        return;
    }
#endif
    for (; i < til; ++i) {
        unsigned int j;
        unsigned char *cp, *cpi;
#if MD5_X2
        if (i&1) {cp = &(input_buf_X86[i>>MD5_X2].x2.B2[total_len_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x2.B2; }
        else
#endif
        {cp = &(input_buf_X86[i>>MD5_X2].x1.B[total_len_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x1.B; }
        for (j = 0; j < 16; ++j)
            *cp++ = *cpi++;
        *cp = 0;
        total_len_X86[i] += 16;
    }
}

// Appends the 16 raw bytes of output #1 (crypt_key) to input #2.
void DynamicFunc__append2_from_last_output1_as_raw(DYNA_OMP_PARAMS)
{
    unsigned int i, til;
#ifdef _OPENMP
    i = first;
    til = last;
#else
    i = 0;
    til = m_count;
#endif
#ifdef SIMD_COEF_32
    if (dynamic_use_sse==1) {
        unsigned int index, idx;
        for (index = i; index < til; ++index) {
            unsigned int ip;
            idx = ( ((unsigned int)index)/SIMD_COEF_32);
            // This is the 'actual' work.
            ip = total_len2[idx][index&(SIMD_COEF_32-1)];
            if (!ip) {
                uint32_t *po = input_buf2[idx].w;
                uint32_t *pi = crypt_key[idx].w;
                po += (index&(SIMD_COEF_32-1));
                pi += (index&(SIMD_COEF_32-1));
                for (i = 0; i < 4; i++) {
                    *po = *pi;
                    po += SIMD_COEF_32;
                    pi += SIMD_COEF_32;
                }
                input_buf2[idx].c[GETPOS(16, index&(SIMD_COEF_32-1))] = 0x80;
            } else {
                for (i = 0; i < 16; ++i)
                    input_buf2[idx].c[GETPOS(ip+i, index&(SIMD_COEF_32-1))] = crypt_key[idx].c[GETPOS(i, index&(SIMD_COEF_32-1))];
                input_buf2[idx].c[GETPOS(ip+16, index&(SIMD_COEF_32-1))] = 0x80;
            }
            total_len2[idx][index&(SIMD_COEF_32-1)] += 16;
        }
        return;
    }
#endif
    for (; i < til; ++i) {
        unsigned int j;
        unsigned char *cp, *cpi;
#if MD5_X2
        if (i&1) {cp = &(input_buf2_X86[i>>MD5_X2].x2.B2[total_len2_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x2.B2; }
        else
#endif
        {cp = &(input_buf2_X86[i>>MD5_X2].x1.B[total_len2_X86[i]]); cpi = crypt_key_X86[i>>MD5_X2].x1.B; }
        for (j = 0; j < 16; ++j)
            *cp++ = *cpi++;
        *cp = 0;
        total_len2_X86[i] += 16;
    }
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Append salt #2 into input 1
 *************************************************************/
void DynamicFunc__append_2nd_salt(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm cursalt2, saltlen2);
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Append salt #2 into input 2
 *************************************************************/
void DynamicFunc__append_2nd_salt2(DYNA_OMP_PARAMS)
{
    __append2_string(DYNA_OMP_PARAMSdm cursalt2, saltlen2);
}

/**************************************************************
 * DYNAMIC primitive helper function
 * Append UserID into input 1
 *************************************************************/
void DynamicFunc__append_userid(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm username, usernamelen);
}

/**************************************************************
 * DYNAMIC primitive helper function
 *
 * Append UserID into input 2
 *************************************************************/
void DynamicFunc__append_userid2(DYNA_OMP_PARAMS)
{
    __append2_string(DYNA_OMP_PARAMSdm username, usernamelen);
}

// Thin wrappers: append format constant N (from curdat) into input 1.
void DynamicFunc__append_input1_from_CONST1(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm curdat.Consts[0], curdat.ConstsLen[0]);
}
void DynamicFunc__append_input1_from_CONST2(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm curdat.Consts[1], curdat.ConstsLen[1]);
}
void DynamicFunc__append_input1_from_CONST3(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm curdat.Consts[2], curdat.ConstsLen[2]);
}
void DynamicFunc__append_input1_from_CONST4(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm curdat.Consts[3], curdat.ConstsLen[3]);
}
void DynamicFunc__append_input1_from_CONST5(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm curdat.Consts[4], curdat.ConstsLen[4]);
}
void DynamicFunc__append_input1_from_CONST6(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm curdat.Consts[5], curdat.ConstsLen[5]);
}
void DynamicFunc__append_input1_from_CONST7(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm curdat.Consts[6], curdat.ConstsLen[6]);
}
void DynamicFunc__append_input1_from_CONST8(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm curdat.Consts[7], curdat.ConstsLen[7]);
}

// Thin wrappers: append format constant N (from curdat) into input 2.
void DynamicFunc__append_input2_from_CONST1(DYNA_OMP_PARAMS)
{
    __append2_string(DYNA_OMP_PARAMSdm curdat.Consts[0], curdat.ConstsLen[0]);
}
void DynamicFunc__append_input2_from_CONST2(DYNA_OMP_PARAMS)
{
    __append2_string(DYNA_OMP_PARAMSdm curdat.Consts[1], curdat.ConstsLen[1]);
}
void DynamicFunc__append_input2_from_CONST3(DYNA_OMP_PARAMS)
{
    __append2_string(DYNA_OMP_PARAMSdm curdat.Consts[2], curdat.ConstsLen[2]);
}
void DynamicFunc__append_input2_from_CONST4(DYNA_OMP_PARAMS)
{
    __append2_string(DYNA_OMP_PARAMSdm curdat.Consts[3], curdat.ConstsLen[3]);
}
void DynamicFunc__append_input2_from_CONST5(DYNA_OMP_PARAMS)
{
    __append2_string(DYNA_OMP_PARAMSdm curdat.Consts[4],
                     curdat.ConstsLen[4]);
}
void DynamicFunc__append_input2_from_CONST6(DYNA_OMP_PARAMS)
{
    __append2_string(DYNA_OMP_PARAMSdm curdat.Consts[5], curdat.ConstsLen[5]);
}
void DynamicFunc__append_input2_from_CONST7(DYNA_OMP_PARAMS)
{
    __append2_string(DYNA_OMP_PARAMSdm curdat.Consts[6], curdat.ConstsLen[6]);
}
void DynamicFunc__append_input2_from_CONST8(DYNA_OMP_PARAMS)
{
    __append2_string(DYNA_OMP_PARAMSdm curdat.Consts[7], curdat.ConstsLen[7]);
}

// Thin wrappers: append ciphertext field N into input 1.
void DynamicFunc__append_fld0(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm flds[0], fld_lens[0]);
}
void DynamicFunc__append_fld1(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm flds[1], fld_lens[1]);
}
void DynamicFunc__append_fld2(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm flds[2], fld_lens[2]);
}
void DynamicFunc__append_fld3(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm flds[3], fld_lens[3]);
}
void DynamicFunc__append_fld4(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm flds[4], fld_lens[4]);
}
void DynamicFunc__append_fld5(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm flds[5], fld_lens[5]);
}
void DynamicFunc__append_fld6(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm flds[6], fld_lens[6]);
}
void DynamicFunc__append_fld7(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm flds[7], fld_lens[7]);
}
void DynamicFunc__append_fld8(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm flds[8], fld_lens[8]);
}
void DynamicFunc__append_fld9(DYNA_OMP_PARAMS)
{
    __append_string(DYNA_OMP_PARAMSdm flds[9], fld_lens[9]);
}

// Thin wrappers: append ciphertext field N into input 2.
void DynamicFunc__append2_fld0(DYNA_OMP_PARAMS)
{
    __append2_string(DYNA_OMP_PARAMSdm flds[0], fld_lens[0]);
}
void DynamicFunc__append2_fld1(DYNA_OMP_PARAMS)
{
    __append2_string(DYNA_OMP_PARAMSdm flds[1], fld_lens[1]);
}
void DynamicFunc__append2_fld2(DYNA_OMP_PARAMS)
{
    __append2_string(DYNA_OMP_PARAMSdm flds[2], fld_lens[2]);
}
void DynamicFunc__append2_fld3(DYNA_OMP_PARAMS)
{
    __append2_string(DYNA_OMP_PARAMSdm flds[3], fld_lens[3]);
}
void
DynamicFunc__append2_fld4(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm flds[4], fld_lens[4]); } void DynamicFunc__append2_fld5(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm flds[5], fld_lens[5]); } void DynamicFunc__append2_fld6(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm flds[6], fld_lens[6]); } void DynamicFunc__append2_fld7(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm flds[7], fld_lens[7]); } void DynamicFunc__append2_fld8(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm flds[8], fld_lens[8]); } void DynamicFunc__append2_fld9(DYNA_OMP_PARAMS) { __append2_string(DYNA_OMP_PARAMSdm flds[9], fld_lens[9]); } void DynamicFunc__SSEtoX86_switch_input1(DYNA_OMP_PARAMS) { #ifdef SIMD_COEF_32 unsigned int i, j, k, idx, max; if (dynamic_use_sse == 0) return; dynamic_use_sse = 2; for (j = 0; j < m_count; j += SIMD_COEF_32) { uint32_t *cpi; uint32_t *cpo[SIMD_COEF_32]; #if (MD5_X2) for (i = 0; i < SIMD_COEF_32; i += 2) { cpo[i ] = input_buf_X86[(j>>1)+(i>>1)].x1.w; cpo[i+1] = input_buf_X86[(j>>1)+(i>>1)].x2.w2; } #else for (i = 0; i < SIMD_COEF_32; i++) cpo[i] = input_buf_X86[j+i].x1.w; #endif idx = j / SIMD_COEF_32; cpi = input_buf[idx].w; max = total_len_X86[j] = (total_len[idx][0]); for (i = 1; i < SIMD_COEF_32; i++) if (max < (total_len_X86[j+i] = total_len[idx][j])) max = total_len_X86[j+i]; max = (max+3)>>2; for (k = 0; k < max; ++k) { for (i = 0; i < SIMD_COEF_32; i++) *cpo[i]++ = *cpi++; } #if (MD5_X2) for (i = 0; i < SIMD_COEF_32; i += 2) { input_buf_X86[(j>>1)+(i>>1)].x1.b[total_len_X86[j+i]] = 0; input_buf_X86[(j>>1)+(i>>1)].x2.b2[total_len_X86[j+i+1]] = 0; } #else for (i = 0; i < SIMD_COEF_32; i++) input_buf_X86[j+i].x1.b[total_len_X86[j+i]] = 0; #endif } #endif } void DynamicFunc__SSEtoX86_switch_input2(DYNA_OMP_PARAMS) { #ifdef SIMD_COEF_32 unsigned int i, j, k, idx, max; if (dynamic_use_sse == 0) return; dynamic_use_sse = 2; for (j = 0; j < m_count; j += SIMD_COEF_32) { uint32_t *cpi; uint32_t *cpo[SIMD_COEF_32]; #if 
(MD5_X2) for (i = 0; i < SIMD_COEF_32; i += 2) { cpo[i ] = input_buf2_X86[(j>>1)+(i>>1)].x1.w; cpo[i+1] = input_buf2_X86[(j>>1)+(i>>1)].x2.w2; } #else for (i = 0; i < SIMD_COEF_32; i++) cpo[i] = input_buf2_X86[j+i].x1.w; #endif idx = j / SIMD_COEF_32; cpi = input_buf2[idx].w; max = total_len2_X86[j] = (total_len2[idx][0]); for (i = 1; i < SIMD_COEF_32; i++) if (max < (total_len2_X86[j+i] = total_len2[idx][i])) max = total_len2_X86[j+i]; max = (max+3)>>2; for (k = 0; k < max; ++k) { for (i = 0; i < SIMD_COEF_32; i++) *cpo[i]++ = *cpi++; } // get rid of the 0x80 #if (MD5_X2) for (i = 0; i < SIMD_COEF_32; i += 2) { input_buf2_X86[(j>>1)+(i>>1)].x1.b[total_len_X86[j+i]] = 0; input_buf2_X86[(j>>1)+(i>>1)].x2.b2[total_len_X86[j+i+1]] = 0; } #else for (i = 0; i < SIMD_COEF_32; i++) input_buf2_X86[j+i].x1.b[total_len2_X86[j+i]] = 0; #endif } #endif } void DynamicFunc__SSEtoX86_switch_output1(DYNA_OMP_PARAMS) { #ifdef SIMD_COEF_32 unsigned int i, j, k, idx; if (dynamic_use_sse == 0) return; dynamic_use_sse = 2; for (j = 0; j < m_count; j += SIMD_COEF_32) { uint32_t *cpi; uint32_t *cpo[SIMD_COEF_32]; #if MD5_X2 for (i = 0; i < SIMD_COEF_32; i += 2) { cpo[i ] = crypt_key_X86[(j>>1)+(i>>1)].x1.w; cpo[i+1] = crypt_key_X86[(j>>1)+(i>>1)].x2.w2; } #else for (i = 0; i < SIMD_COEF_32; i++) cpo[i] = crypt_key_X86[j+i].x1.w; #endif idx = j/SIMD_COEF_32; cpi = (void*)crypt_key[idx].c; for (k = 0; k < 4; ++k) { for (i = 0; i < SIMD_COEF_32; i++) *cpo[i]++ = *cpi++; } } #endif } void DynamicFunc__SSEtoX86_switch_output2(DYNA_OMP_PARAMS) { #ifdef SIMD_COEF_32 unsigned int i, j, k, idx; if (dynamic_use_sse == 0) return; dynamic_use_sse = 2; for (j = 0; j < m_count; j += SIMD_COEF_32) { uint32_t *cpi; uint32_t *cpo[SIMD_COEF_32]; #if (MD5_X2) for (i = 0; i < SIMD_COEF_32; i += 2) { cpo[i ] = crypt_key2_X86[(j>>1)+(i>>1)].x1.w; cpo[i+1] = crypt_key2_X86[(j>>1)+(i>>1)].x2.w2; } #else for (i = 0; i < SIMD_COEF_32; i++) cpo[i] = crypt_key2_X86[j+i].x1.w; #endif idx = j / SIMD_COEF_32; cpi = 
crypt_key2[idx].w; for (k = 0; k < 4; ++k) { for (i = 0; i < SIMD_COEF_32; i++) *cpo[i]++ = *cpi++; } } #endif } void DynamicFunc__X86toSSE_switch_input1(DYNA_OMP_PARAMS) { #ifdef SIMD_COEF_32 unsigned int j, idx, idx_mod; if (dynamic_use_sse == 0) return; dynamic_use_sse = 1; __nonMP_DynamicFunc__clean_input(); for (j = 0; j < m_count; ++j) { idx = j/SIMD_COEF_32; idx_mod = j&(SIMD_COEF_32-1); total_len[idx][idx_mod] += total_len_X86[j]; #if (MD5_X2) if (j & 1) __SSE_append_string_to_input(input_buf[idx].c,idx_mod,input_buf_X86[j>>1].x2.B2,total_len_X86[j],0,1); else #endif __SSE_append_string_to_input(input_buf[idx].c,idx_mod,input_buf_X86[j>>MD5_X2].x1.B,total_len_X86[j],0,1); } #endif } void DynamicFunc__X86toSSE_switch_input2(DYNA_OMP_PARAMS) { #ifdef SIMD_COEF_32 unsigned int j, idx, idx_mod; if (dynamic_use_sse == 0) return; dynamic_use_sse = 1; __nonMP_DynamicFunc__clean_input2(); for (j = 0; j < m_count; ++j) { idx = j/SIMD_COEF_32; idx_mod = j&(SIMD_COEF_32-1); total_len2[idx][idx_mod] += total_len2_X86[j]; #if (MD5_X2) if (j & 1) __SSE_append_string_to_input(input_buf2[idx].c,idx_mod,input_buf2_X86[j>>1].x2.B2,total_len2_X86[j],0,1); else #endif __SSE_append_string_to_input(input_buf2[idx].c,idx_mod,input_buf2_X86[j>>MD5_X2].x1.B,total_len2_X86[j],0,1); } #endif } void DynamicFunc__X86toSSE_switch_output1(DYNA_OMP_PARAMS) { #ifdef SIMD_COEF_32 unsigned int i, j, k, idx; if (dynamic_use_sse == 0) return; dynamic_use_sse = 1; for (j = 0; j < m_count; j += SIMD_COEF_32) { uint32_t *cpi; uint32_t *cpo[SIMD_COEF_32]; #if (MD5_X2) for (i = 0; i < SIMD_COEF_32; i += 2) { cpo[i ] = crypt_key_X86[(j>>1)+(i>>1)].x1.w; cpo[i+1] = crypt_key_X86[(j>>1)+(i>>1)].x2.w2; } #else for (i = 0; i < SIMD_COEF_32; i++) cpo[i] = crypt_key_X86[j+i].x1.w; #endif idx = j / SIMD_COEF_32; cpi = (void*)crypt_key[idx].c; for (k = 0; k < 4; ++k) { for (i = 0; i < SIMD_COEF_32; i++) *cpi++ = *cpo[i]++; } } #endif } void DynamicFunc__X86toSSE_switch_output2(DYNA_OMP_PARAMS) { #ifdef 
SIMD_COEF_32 unsigned int i, j, k, idx; if (dynamic_use_sse == 0) return; dynamic_use_sse = 1; for (j = 0; j < m_count; j += SIMD_COEF_32) { uint32_t *cpi; uint32_t *cpo[SIMD_COEF_32]; #if (MD5_X2) for (i = 0; i < SIMD_COEF_32; i += 2) { cpo[i ] = crypt_key2_X86[(j>>1)+(i>>1)].x1.w; cpo[i+1] = crypt_key2_X86[(j>>1)+(i>>1)].x2.w2; } #else for (i = 0; i < SIMD_COEF_32; i++) cpo[i] = crypt_key2_X86[j+i].x1.w; #endif idx = j / SIMD_COEF_32; cpi = crypt_key2[idx].w; for (k = 0; k < 4; ++k) { for (i = 0; i < SIMD_COEF_32; i++) *cpi++ = *cpo[i]++; } } #endif } // This function, simply 'switches' back to SSE It does NOT copy any data from X86 to SSE void DynamicFunc__ToSSE(DYNA_OMP_PARAMS) { if (dynamic_use_sse == 0) return; dynamic_use_sse = 1; } // This function, simply 'switches' to X86 It does NOT copy any data from SSE to X86 void DynamicFunc__ToX86(DYNA_OMP_PARAMS) { if (dynamic_use_sse == 0) return; dynamic_use_sse = 2; } void DynamicFunc__base16_convert_locase(DYNA_OMP_PARAMS) { dynamic_itoa16 = itoa16; itoa16_w2=itoa16_w2_l; } void DynamicFunc__base16_convert_upcase(DYNA_OMP_PARAMS) { dynamic_itoa16 = itoa16u; itoa16_w2=itoa16_w2_u; } /************************************************************** * DEPRICATED functions. These are the older pseudo functions * which we now have flags for. We keep them, so that we can * add the proper flags, even if the user is running an older * script. 
*************************************************************/ void DynamicFunc__InitialLoadKeysToInput(DYNA_OMP_PARAMS) {} void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2(DYNA_OMP_PARAMS) {} void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1(DYNA_OMP_PARAMS) {} void DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32(DYNA_OMP_PARAMS) {} /************************************************************** ************************************************************** ************************************************************** ************************************************************** * DYNAMIC primitive helper function * This is the END of the primitives. ************************************************************** ************************************************************** ************************************************************** *************************************************************/ static DYNAMIC_primitive_funcp *ConvertFuncs(DYNAMIC_primitive_funcp p, unsigned int *count) { static DYNAMIC_primitive_funcp fncs[20]; *count = 0; if (p==DynamicFunc__InitialLoadKeysToInput || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32) return fncs; // ignore these #ifndef SIMD_COEF_32 if (p==DynamicFunc__SSEtoX86_switch_input1 || p==DynamicFunc__SSEtoX86_switch_input2 || p==DynamicFunc__SSEtoX86_switch_output1 || p==DynamicFunc__SSEtoX86_switch_output2 || p==DynamicFunc__X86toSSE_switch_input1 || p==DynamicFunc__X86toSSE_switch_input2 || p==DynamicFunc__X86toSSE_switch_output1 || p==DynamicFunc__X86toSSE_switch_output2 || p==DynamicFunc__ToSSE || p==DynamicFunc__ToX86) return fncs; // we ignore these functions 100% in x86 mode. 
#endif // if (p==DynamicFunc__append_input2_from_CONST1) { // fncs[0] = DynamicFunc__set_input2; // fncs[1] = DynamicFunc__set_CONST1; // fncs[2] = DynamicFunc__append_CONST; // *count = 3; // } /* LOOK INTO THIS!!!!! This may not be valid, now that SHA1 is handled 100% outside of the SSE2 code. But I am not sure just WTF this is supposed to do anyway, since not LE should be using CTX only??? */ #if !ARCH_LITTLE_ENDIAN if (/*p==DynamicFunc__SHA1_crypt_input1_append_input2_base16 ||*/ p==DynamicFunc__SHA1_crypt_input1_append_input2 || /*p==DynamicFunc__SHA1_crypt_input2_append_input1_base16 ||*/ p==DynamicFunc__SHA1_crypt_input2_append_input1 || /*p==DynamicFunc__SHA1_crypt_input1_overwrite_input1_base16 ||*/ p==DynamicFunc__SHA1_crypt_input1_overwrite_input1 || /*p==DynamicFunc__SHA1_crypt_input2_overwrite_input2_base16 ||*/ p==DynamicFunc__SHA1_crypt_input2_overwrite_input2 || /*p==DynamicFunc__SHA1_crypt_input1_overwrite_input2_base16 ||*/ p==DynamicFunc__SHA1_crypt_input1_overwrite_input2 || /*p==DynamicFunc__SHA1_crypt_input2_overwrite_input1_base16 ||*/ p==DynamicFunc__SHA1_crypt_input2_overwrite_input1 || p==DynamicFunc__SHA1_crypt_input1_to_output1_FINAL || p==DynamicFunc__SHA1_crypt_input2_to_output1_FINAL) curdat.force_md5_ctx = 0; #endif *count = 1; fncs[0] = p; return fncs; } #ifdef _OPENMP static int isBadOMPFunc(DYNAMIC_primitive_funcp p) { // If ANY of these functions are seen, we can NOT use OMP for this single format. 
#if SIMD_COEF_32 if (p==DynamicFunc__SSEtoX86_switch_input1 || p==DynamicFunc__SSEtoX86_switch_input2 || p==DynamicFunc__SSEtoX86_switch_output1 || p==DynamicFunc__SSEtoX86_switch_output2 || p==DynamicFunc__X86toSSE_switch_input1 || p==DynamicFunc__X86toSSE_switch_input2 || p==DynamicFunc__X86toSSE_switch_output1 || p==DynamicFunc__X86toSSE_switch_output2 || p==DynamicFunc__ToSSE || p==DynamicFunc__ToX86) return 1; #endif if (p==DynamicFunc__base16_convert_locase || p==DynamicFunc__base16_convert_upcase) return 1; return 0; } #endif #define RETURN_TRUE_IF_BIG_FUNC(H) if (p==DynamicFunc__##H##_crypt_input1_append_input2 || \ p==DynamicFunc__##H##_crypt_input2_append_input1 || \ p==DynamicFunc__##H##_crypt_input1_overwrite_input1 || \ p==DynamicFunc__##H##_crypt_input2_overwrite_input2 || \ p==DynamicFunc__##H##_crypt_input1_overwrite_input2 || \ p==DynamicFunc__##H##_crypt_input2_overwrite_input1 || \ p==DynamicFunc__##H##_crypt_input1_to_output1_FINAL || \ p==DynamicFunc__##H##_crypt_input2_to_output1_FINAL) \ return 1 static int isMD4Func(DYNAMIC_primitive_funcp p) { // handle flats RETURN_TRUE_IF_BIG_FUNC(MD4); // handle older mmx_coef variants if (p==DynamicFunc__crypt_md4 || p==DynamicFunc__crypt_md4_in1_to_out2 || p==DynamicFunc__crypt2_md4 || p==DynamicFunc__crypt_md4_in2_to_out1) return 1; return 0; } #ifdef _OPENMP // Only used in OMP code, to compute LCM granularity. So we #ifdef it out to avoid compiler warnings. 
#ifdef SIMD_COEF_32 // otherwise unused static int isMD5Func(DYNAMIC_primitive_funcp p) { // handle flats RETURN_TRUE_IF_BIG_FUNC(MD5); // handle older mmx_coef variants if (p==DynamicFunc__crypt_md5 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1 || p==DynamicFunc__crypt_md5_in1_to_out2 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2 || p==DynamicFunc__crypt_md5_to_input_raw || p==DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen || p==DynamicFunc__crypt_md5_in2_to_out1 || p==DynamicFunc__crypt_md5_to_input_raw_Overwrite_NoLen_but_setlen_in_SSE || p==DynamicFunc__crypt2_md5 || p==DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32) return 1; return 0; } #endif #endif static int isSHA1Func(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(SHA1); return 0; } static int isSHA2_256Func(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(SHA224); RETURN_TRUE_IF_BIG_FUNC(SHA256); return 0; } static int isSHA2_512Func(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(SHA384); RETURN_TRUE_IF_BIG_FUNC(SHA512); return 0; } static int isGOSTFunc(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(GOST); return 0; } static int isTigerFunc(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(Tiger); return 0; } static int isWHIRLFunc(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(WHIRLPOOL); return 0; } static int isRIPEMDFunc(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(RIPEMD128); RETURN_TRUE_IF_BIG_FUNC(RIPEMD160); RETURN_TRUE_IF_BIG_FUNC(RIPEMD256); RETURN_TRUE_IF_BIG_FUNC(RIPEMD320); return 0; } static int isHAVALFunc(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(HAVAL128_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL128_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL128_5); RETURN_TRUE_IF_BIG_FUNC(HAVAL160_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL160_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL160_5); RETURN_TRUE_IF_BIG_FUNC(HAVAL192_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL192_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL192_5); 
RETURN_TRUE_IF_BIG_FUNC(HAVAL224_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL224_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL224_5); RETURN_TRUE_IF_BIG_FUNC(HAVAL256_3); RETURN_TRUE_IF_BIG_FUNC(HAVAL256_4); RETURN_TRUE_IF_BIG_FUNC(HAVAL256_5); return 0; } static int isMD2Func(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(MD2); return 0; } static int isPANAMAFunc(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(PANAMA); return 0; } static int isSKEINFunc(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(SKEIN224); RETURN_TRUE_IF_BIG_FUNC(SKEIN256); RETURN_TRUE_IF_BIG_FUNC(SKEIN384); RETURN_TRUE_IF_BIG_FUNC(SKEIN512); return 0; } static int isKECCAKFunc(DYNAMIC_primitive_funcp p) { RETURN_TRUE_IF_BIG_FUNC(SHA3_224); RETURN_TRUE_IF_BIG_FUNC(SHA3_256); RETURN_TRUE_IF_BIG_FUNC(SHA3_384); RETURN_TRUE_IF_BIG_FUNC(SHA3_512); RETURN_TRUE_IF_BIG_FUNC(KECCAK_256); RETURN_TRUE_IF_BIG_FUNC(KECCAK_512); return 0; } // LARGE_HASH_EDIT_POINT (Add a new IsXXXFunc() type function) static int isLargeHashFinalFunc(DYNAMIC_primitive_funcp p) { #undef IF #define IF(H) p==DynamicFunc__##H##_crypt_input1_to_output1_FINAL||p==DynamicFunc__##H##_crypt_input2_to_output1_FINAL if (IF(SHA1)||IF(SHA224)||IF(SHA256)||IF(SHA384)||IF(SHA512)||IF(GOST)||IF(WHIRLPOOL)||IF(Tiger)||IF(RIPEMD128)|| IF(RIPEMD160)||IF(RIPEMD256)||IF(RIPEMD320)|| IF(HAVAL128_3)||IF(HAVAL128_4)||IF(HAVAL128_5)||IF(HAVAL160_3)||IF(HAVAL160_4)||IF(HAVAL160_5)|| IF(HAVAL192_3)||IF(HAVAL192_4)||IF(HAVAL192_5)||IF(HAVAL224_3)||IF(HAVAL224_4)||IF(HAVAL224_5)|| IF(HAVAL256_3)||IF(HAVAL256_4)||IF(HAVAL256_5)||IF(MD2)||IF(PANAMA)||IF(SKEIN224)||IF(SKEIN256)|| IF(SKEIN384)||IF(SKEIN512)||IF(SHA3_224)||IF(SHA3_256)||IF(SHA3_384)||IF(SHA3_512)|| IF(KECCAK_256)||IF(KECCAK_512)) // LARGE_HASH_EDIT_POINT return 1; return 0; } #ifdef _OPENMP #ifdef SIMD_COEF_32 // Simple euclid algorithm for GCD static int GCD (int a, int b) { while (b) { int t = b; b = a % b; a = t; } return a; } // simple algorithm for LCM is (a*b)/GCD(a,b) static int LCM(int 
a, int b) { a/=GCD(a,b); return a*b; } #endif static void dyna_setupOMP(DYNAMIC_Setup *Setup, struct fmt_main *pFmt) { unsigned int i; #ifndef SIMD_COEF_32 curdat.omp_granularity=OMP_INC; #else if ((curdat.pSetup->flags& MGF_NOTSSE2Safe) == MGF_NOTSSE2Safe) curdat.omp_granularity=OMP_INC; else { curdat.omp_granularity = 1; for (i=0; Setup->pFuncs[i]; ++i) { if (isMD5Func(Setup->pFuncs[i])) curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_MD5*SIMD_COEF_32); else if (isMD4Func(Setup->pFuncs[i])) curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_MD4*SIMD_COEF_32); else if (isSHA1Func(Setup->pFuncs[i])) curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_SHA1*SIMD_COEF_32); else if (isSHA2_256Func(Setup->pFuncs[i])) #if SIMD_COEF_32 #if SIMD_PARA_SHA256 curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_SHA256*SIMD_COEF_32); #else curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_COEF_32); #endif #else curdat.omp_granularity=LCM(curdat.omp_granularity, OMP_INC); #endif else if (isSHA2_512Func(Setup->pFuncs[i])) #if SIMD_COEF_64 #if SIMD_PARA_SHA512 curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_PARA_SHA512*SIMD_COEF_64); #else curdat.omp_granularity = LCM(curdat.omp_granularity, SIMD_COEF_64); #endif #else curdat.omp_granularity=LCM(curdat.omp_granularity, OMP_INC); #endif } } #endif for (i=0; Setup->pFuncs[i]; ++i) { if (isBadOMPFunc(Setup->pFuncs[i])) pFmt->params.flags &= (~(FMT_OMP|FMT_OMP_BAD)); } if ((pFmt->params.flags&FMT_OMP)==FMT_OMP && (curdat.pSetup->startFlags&MGF_POOR_OMP)==MGF_POOR_OMP) pFmt->params.flags |= FMT_OMP_BAD; } #endif int dynamic_SETUP(DYNAMIC_Setup *Setup, struct fmt_main *pFmt) { unsigned int i, j, cnt, cnt2, x; DYNAMIC_primitive_funcp *pFuncs; if (Setup->flags & MGF_ColonNOTValid) { extern struct options_main options; if (options.loader.field_sep_char == ':') { return 0; } } // Deal with depricated 1st functions. 
Convert them to proper 'flags' if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeysToInput) Setup->startFlags |= MGF_KEYS_INPUT; if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2) Setup->startFlags |= MGF_KEYS_CRYPT_IN2; if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1) Setup->startFlags |= MGF_KEYS_BASE16_IN1; if (Setup->pFuncs[0] == DynamicFunc__InitialLoadKeys_md5crypt_ToOutput2_Base16_to_Input1_offset32) Setup->startFlags |= MGF_KEYS_BASE16_IN1_Offset32; curdat.dynamic_40_byte_input = ((Setup->startFlags&MGF_INPUT_20_BYTE)==MGF_INPUT_20_BYTE) ? 1 : 0; curdat.dynamic_48_byte_input = ((Setup->startFlags&MGF_INPUT_24_BYTE)==MGF_INPUT_24_BYTE) ? 1 : 0; curdat.dynamic_64_byte_input = ((Setup->startFlags&MGF_INPUT_32_BYTE)==MGF_INPUT_32_BYTE) ? 1 : 0; curdat.dynamic_56_byte_input = ((Setup->startFlags&MGF_INPUT_28_BYTE)==MGF_INPUT_28_BYTE) ? 1 : 0; curdat.dynamic_80_byte_input = ((Setup->startFlags&MGF_INPUT_40_BYTE)==MGF_INPUT_40_BYTE) ? 1 : 0; curdat.dynamic_96_byte_input = ((Setup->startFlags&MGF_INPUT_48_BYTE)==MGF_INPUT_48_BYTE) ? 1 : 0; curdat.dynamic_128_byte_input= ((Setup->startFlags&MGF_INPUT_64_BYTE)==MGF_INPUT_64_BYTE) ? 1 : 0; curdat.FldMask = 0; curdat.b2Salts = ((Setup->flags&MGF_SALTED2)==MGF_SALTED2) ? 1 : 0; curdat.dynamic_base16_upcase = ((Setup->flags&MGF_BASE_16_OUTPUT_UPCASE)==MGF_BASE_16_OUTPUT_UPCASE) ? 1 : 0; curdat.FldMask |= ((Setup->flags&MGF_FLD0)==MGF_FLD0) ? MGF_FLD0 : 0; curdat.FldMask |= ((Setup->flags&MGF_FLD1)==MGF_FLD1) ? MGF_FLD1 : 0; curdat.FldMask |= ((Setup->flags&MGF_FLD2)==MGF_FLD2) ? MGF_FLD2 : 0; curdat.FldMask |= ((Setup->flags&MGF_FLD3)==MGF_FLD3) ? MGF_FLD3 : 0; curdat.FldMask |= ((Setup->flags&MGF_FLD4)==MGF_FLD4) ? MGF_FLD4 : 0; curdat.FldMask |= ((Setup->flags&MGF_FLD5)==MGF_FLD5) ? MGF_FLD5 : 0; curdat.FldMask |= ((Setup->flags&MGF_FLD6)==MGF_FLD6) ? MGF_FLD6 : 0; curdat.FldMask |= ((Setup->flags&MGF_FLD7)==MGF_FLD7) ? 
MGF_FLD7 : 0; curdat.FldMask |= ((Setup->flags&MGF_FLD8)==MGF_FLD8) ? MGF_FLD8 : 0; curdat.FldMask |= ((Setup->flags&MGF_FLD9)==MGF_FLD9) ? MGF_FLD9 : 0; curdat.dynamic_base64_inout = 0; curdat.dynamic_salt_as_hex = 0; curdat.dynamic_salt_as_hex_format_type = 0; curdat.force_md5_ctx = 0; curdat.nUserName = 0; curdat.nPassCase = 1; curdat.md5_startup_in_x86 = curdat.dynamic_use_sse = 0; // if 0, then never use SSE2 curdat.init = 0; curdat.pSetup = Setup; pFmt->methods.binary = get_binary; pFmt->methods.cmp_all=cmp_all; pFmt->methods.cmp_one=cmp_one; pFmt->methods.source=fmt_default_source; pFmt->methods.salt = get_salt; pFmt->methods.done = done; pFmt->methods.set_salt = set_salt; pFmt->methods.salt_hash = salt_hash; //pFmt->params.format_name = str_alloc_copy(Setup->szFORMAT_NAME); pFmt->params.format_name = ""; pFmt->params.benchmark_length = 0; // NOTE 0 'assumes' salted. If unsalted, we set back to -1 pFmt->params.salt_size = 0; curdat.using_flat_buffers_sse2_ok = 0; // used to distingish MGF_NOTSSE2Safe from MGF_FLAT_BUFFERS if ((Setup->flags & MGF_FLAT_BUFFERS) == MGF_FLAT_BUFFERS) curdat.using_flat_buffers_sse2_ok = 1; #ifdef SIMD_COEF_32 curdat.dynamic_use_sse = 1; // if 1, then we are in SSE2 mode (but can switch out) if ((Setup->flags & MGF_NOTSSE2Safe) == MGF_NOTSSE2Safe) { curdat.dynamic_use_sse = 0; // Do not use SSE code at all. } else if ((Setup->flags & MGF_FLAT_BUFFERS) == MGF_FLAT_BUFFERS) { curdat.dynamic_use_sse = 0; // uses flat buffers but will use SSE code (large formats use the flat buffers, and the SSE2 code 'mixes' them). curdat.using_flat_buffers_sse2_ok = 1; } else if ((Setup->flags & MGF_StartInX86Mode) == MGF_StartInX86Mode) { curdat.dynamic_use_sse = 2; // if 2, then we are in SSE2 mode, but currently using X86 (and can switch back to SSE2). 
curdat.md5_startup_in_x86 = 1; } if (curdat.dynamic_use_sse || curdat.using_flat_buffers_sse2_ok) { pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT; pFmt->params.algorithm_name = ALGORITHM_NAME; } else { pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT_X86; pFmt->params.algorithm_name = ALGORITHM_NAME_X86; } #else pFmt->params.max_keys_per_crypt = MAX_KEYS_PER_CRYPT_X86; pFmt->params.algorithm_name = ALGORITHM_NAME_X86; #endif pFmt->params.min_keys_per_crypt = pFmt->params.max_keys_per_crypt; if (pFmt->params.min_keys_per_crypt > 64) pFmt->params.min_keys_per_crypt = 64; dynamic_use_sse = curdat.dynamic_use_sse; // Ok, set the new 'constants' data memset(curdat.Consts, 0, sizeof(curdat.Consts)); memset(curdat.ConstsLen, 0, sizeof(curdat.ConstsLen)); for (curdat.nConsts = 0; curdat.nConsts < 8; ++curdat.nConsts) { if (Setup->pConstants[curdat.nConsts].Const == NULL) break; //curdat.Consts[curdat.nConsts] = (unsigned char*)str_alloc_copy(Setup->pConstants[curdat.nConsts].Const); //curdat.ConstsLen[curdat.nConsts] = strlen(Setup->pConstants[curdat.nConsts].Const); // we really do not 'have' to null terminate, but do just to be on the 'safe' side. 
curdat.Consts[curdat.nConsts] = mem_alloc_tiny(Setup->pConstants[curdat.nConsts].len+1, MEM_ALIGN_NONE); memcpy(curdat.Consts[curdat.nConsts], Setup->pConstants[curdat.nConsts].Const, Setup->pConstants[curdat.nConsts].len); curdat.Consts[curdat.nConsts][Setup->pConstants[curdat.nConsts].len] = 0; curdat.ConstsLen[curdat.nConsts] = Setup->pConstants[curdat.nConsts].len; } if ( (Setup->flags & MGF_INPBASE64) == MGF_INPBASE64) { curdat.dynamic_base64_inout = 1; pFmt->methods.binary = binary_b64; } if ( (Setup->flags & MGF_INPBASE64m) == MGF_INPBASE64m) { curdat.dynamic_base64_inout = 3; pFmt->methods.binary = binary_b64m; } if ( (Setup->flags & MGF_INPBASE64b) == MGF_INPBASE64b) { curdat.dynamic_base64_inout = 5; pFmt->methods.binary = binary_b64b; } if ( (Setup->flags & MGF_INPBASE64_4x6) == MGF_INPBASE64_4x6) { curdat.dynamic_base64_inout = 2; pFmt->methods.binary = binary_b64_4x6; pFmt->methods.cmp_all = cmp_all_64_4x6; pFmt->methods.cmp_one = cmp_one_64_4x6; #if !ARCH_LITTLE_ENDIAN pFmt->methods.binary_hash[0] = binary_hash_0_64x4; pFmt->methods.binary_hash[1] = binary_hash_1_64x4; pFmt->methods.binary_hash[2] = binary_hash_2_64x4; pFmt->methods.binary_hash[3] = binary_hash_3_64x4; pFmt->methods.binary_hash[4] = binary_hash_4_64x4; pFmt->methods.binary_hash[5] = binary_hash_5_64x4; pFmt->methods.get_hash[0] = get_hash_0_64x4; pFmt->methods.get_hash[1] = get_hash_1_64x4; pFmt->methods.get_hash[2] = get_hash_2_64x4; pFmt->methods.get_hash[3] = get_hash_3_64x4; pFmt->methods.get_hash[4] = get_hash_4_64x4; pFmt->methods.get_hash[5] = get_hash_5_64x4; #endif // Not enough bits in a single WORD to do the 7th one. 
pFmt->methods.binary_hash[6] = NULL; pFmt->methods.get_hash[6] = NULL; } // printf ("%.13s",Setup->szFORMAT_NAME); if ( (Setup->flags & (MGF_INPBASE64|MGF_INPBASE64_4x6|MGF_INPBASE64a|MGF_INPBASE64m|MGF_INPBASE64b)) == 0) { pFmt->params.flags |= FMT_SPLIT_UNIFIES_CASE; // printf (" Setting FMT_SPLIT_UNIFIES_CASE"); if (pFmt->methods.split == split) { pFmt->methods.split = split_UC; // printf (" split set to split_UC()\n"); } } // else printf (" split set to split()\n"); if (Setup->flags & MGF_UTF8) pFmt->params.flags |= FMT_UTF8; if (Setup->flags & MGF_INPBASE64a) { curdat.dynamic_base64_inout = 1; pFmt->methods.binary = binary_b64a; } if ( (Setup->flags & MGF_USERNAME) == MGF_USERNAME) curdat.nUserName = 1; if ( (Setup->flags & MGF_USERNAME_UPCASE) == MGF_USERNAME_UPCASE) curdat.nUserName = 2; if ( (Setup->flags & MGF_USERNAME_LOCASE) == MGF_USERNAME_LOCASE) curdat.nUserName = 3; // Ok, what 'flag' in the format struct, do we clear??? if ( (Setup->flags & MGF_PASSWORD_UPCASE) == MGF_PASSWORD_UPCASE) { curdat.nPassCase = 2; pFmt->params.flags &= (~FMT_CASE); } if ( (Setup->flags & MGF_PASSWORD_LOCASE) == MGF_PASSWORD_LOCASE) { curdat.nPassCase = 3; pFmt->params.flags &= (~FMT_CASE); } if ( (Setup->flags & MGF_SALT_AS_HEX) == MGF_SALT_AS_HEX) { curdat.dynamic_salt_as_hex = 1; curdat.dynamic_salt_as_hex_format_type = Setup->flags >> 56; } if ( (Setup->flags & MGF_SALT_AS_HEX_TO_SALT2) == MGF_SALT_AS_HEX_TO_SALT2) { curdat.dynamic_salt_as_hex = 2; if (curdat.b2Salts) return !fprintf(stderr, "Error invalid format %s: MGF_SALT_AS_HEX_TO_SALT2 and MGF_SALTED2 are not valid to use in same format\n", Setup->szFORMAT_NAME); curdat.b2Salts = 2; } if ( (Setup->flags & MGF_SALT_UNICODE_B4_CRYPT) == MGF_SALT_UNICODE_B4_CRYPT && curdat.dynamic_salt_as_hex) curdat.dynamic_salt_as_hex |= 0x100; if ( (Setup->flags & MGF_SALTED) == 0) { curdat.dynamic_FIXED_SALT_SIZE = 0; pFmt->params.benchmark_length = -1; pFmt->params.salt_size = 0; } else { pFmt->params.salt_size = sizeof(void 
*); if (Setup->SaltLen > 0) curdat.dynamic_FIXED_SALT_SIZE = Setup->SaltLen; else { // says we have a salt, but NOT a fixed sized one that we 'know' about. // if the SaltLen is -1, then there is NO constraints. If the SaltLen // is -12 (or any other neg number other than -1), then there is no // fixed salt length, but the 'max' salt size is -SaltLen. So, -12 // means any salt from 1 to 12 is 'valid'. if (Setup->SaltLen > -2) curdat.dynamic_FIXED_SALT_SIZE = -1; else { curdat.dynamic_FIXED_SALT_SIZE = Setup->SaltLen; #if !defined (SIMD_COEF_32) // for non-sse, we limit ourselves to 110 bytes, not 55. So, we can add 55 to this value curdat.dynamic_FIXED_SALT_SIZE -= 55; #endif } } } if (Setup->MaxInputLen) pFmt->params.plaintext_length = Setup->MaxInputLen; else { if ( ((Setup->flags&MGF_FLAT_BUFFERS)==MGF_FLAT_BUFFERS) || ((Setup->flags&MGF_NOTSSE2Safe)==MGF_NOTSSE2Safe)) { pFmt->params.plaintext_length = 110 - abs(Setup->SaltLen); if (pFmt->params.plaintext_length < 32) pFmt->params.plaintext_length = 32; } else { pFmt->params.plaintext_length = 55 - abs(Setup->SaltLen); if (pFmt->params.plaintext_length < 1) { pFmt->params.plaintext_length = 1; fprintf(stderr, "\nError, for format %s, MMX build, is not valid due to TOO long of a SaltLength\n", Setup->szFORMAT_NAME); } } } #ifndef SIMD_COEF_32 if (Setup->MaxInputLenX86) { pFmt->params.plaintext_length = Setup->MaxInputLenX86; } else { if (Setup->SaltLenX86) pFmt->params.plaintext_length = 110 - abs(Setup->SaltLenX86); else pFmt->params.plaintext_length = 110 - abs(Setup->SaltLen); if (pFmt->params.plaintext_length < 32) pFmt->params.plaintext_length = 32; } #endif curdat.store_keys_in_input = !!(Setup->startFlags&MGF_KEYS_INPUT ); curdat.input2_set_len32 = !!(Setup->startFlags&MGF_SET_INP2LEN32); if (Setup->startFlags&MGF_SOURCE) { if (Setup->startFlags&MGF_INPUT_20_BYTE) pFmt->methods.source = source_20_hex; else if (Setup->startFlags&MGF_INPUT_28_BYTE) pFmt->methods.source = source_28_hex; else if 
(Setup->startFlags&MGF_INPUT_32_BYTE) pFmt->methods.source = source_32_hex; else if (Setup->startFlags&MGF_INPUT_40_BYTE) pFmt->methods.source = source_40_hex; else if (Setup->startFlags&MGF_INPUT_48_BYTE) pFmt->methods.source = source_48_hex; else if (Setup->startFlags&MGF_INPUT_64_BYTE) pFmt->methods.source = source_64_hex; else pFmt->methods.source = source; } if (!curdat.store_keys_in_input && Setup->startFlags&MGF_KEYS_INPUT_BE_SAFE) curdat.store_keys_in_input = 3; curdat.store_keys_in_input_unicode_convert = !!(Setup->startFlags&MGF_KEYS_UNICODE_B4_CRYPT); if (curdat.store_keys_in_input_unicode_convert && curdat.store_keys_in_input) return !fprintf(stderr, "Error invalid format %s: Using MGF_KEYS_INPUT and MGF_KEYS_UNICODE_B4_CRYPT in same format is NOT valid\n", Setup->szFORMAT_NAME); curdat.store_keys_normal_but_precompute_hash_to_output2 = !!(Setup->startFlags&MGF_KEYS_CRYPT_IN2); curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1 = !!(Setup->startFlags&MGF_KEYS_BASE16_IN1); if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1) curdat.store_keys_normal_but_precompute_hash_to_output2 = 1; #define IF_CDOFF32(F,L) if (!curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX) \ curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX = \ (!!((Setup->startFlags&MGF_KEYS_BASE16_IN1_Offset_TYPE)==MGF_KEYS_BASE16_IN1_Offset_ ## F))*L curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX = 0; IF_CDOFF32(MD5,32); IF_CDOFF32(MD4,32); IF_CDOFF32(SHA1,40); IF_CDOFF32(SHA224,56); IF_CDOFF32(SHA256,64); IF_CDOFF32(SHA384,96); IF_CDOFF32(SHA512,128); IF_CDOFF32(GOST,64); IF_CDOFF32(WHIRLPOOL,128); IF_CDOFF32(Tiger,48); IF_CDOFF32(RIPEMD128,32); IF_CDOFF32(RIPEMD160,40); IF_CDOFF32(RIPEMD256,64); IF_CDOFF32(RIPEMD320,80); IF_CDOFF32(MD2,32); IF_CDOFF32(PANAMA,64); IF_CDOFF32(HAVAL128_3,32); IF_CDOFF32(HAVAL160_3,40); IF_CDOFF32(HAVAL192_3,48); 
IF_CDOFF32(HAVAL224_3,56); IF_CDOFF32(HAVAL256_3,64); IF_CDOFF32(HAVAL128_4,32); IF_CDOFF32(HAVAL160_4,40); IF_CDOFF32(HAVAL192_4,48); IF_CDOFF32(HAVAL224_4,56); IF_CDOFF32(HAVAL256_4,64); IF_CDOFF32(HAVAL128_5,32); IF_CDOFF32(HAVAL160_5,40); IF_CDOFF32(HAVAL192_5,48); IF_CDOFF32(HAVAL224_5,56); IF_CDOFF32(HAVAL256_5,64); IF_CDOFF32(SKEIN224,56); IF_CDOFF32(SKEIN256,64); IF_CDOFF32(SKEIN384,96); IF_CDOFF32(SKEIN512,128); IF_CDOFF32(SHA3_224,56); IF_CDOFF32(SHA3_256,64); IF_CDOFF32(SHA3_384,96); IF_CDOFF32(SHA3_512,128); IF_CDOFF32(KECCAK_256,64); IF_CDOFF32(KECCAK_512,128); // LARGE_HASH_EDIT_POINT if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1_offsetX) { curdat.store_keys_normal_but_precompute_hash_to_output2 = 1; } curdat.store_keys_normal_but_precompute_hash_to_output2_base16_type = Setup->startFlags>>56; if ((Setup->startFlags) == 0) { // Ok, if we do not have some 'special' loader function, we MUST first clean some // input. If that is not done, there is NO WAY this is a valid format. This is // NOT an intelligent check, but more like the dummy lights on newer automobiles. // You know it will not work, but do not know 'why', nor should you care. if (Setup->pFuncs[0] != DynamicFunc__clean_input && Setup->pFuncs[0] != DynamicFunc__clean_input2 && Setup->pFuncs[0] != DynamicFunc__clean_input_kwik && Setup->pFuncs[0] != DynamicFunc__clean_input2_kwik && Setup->pFuncs[0] != DynamicFunc__clean_input_full) return !fprintf(stderr, "Error invalid format %s: The first command MUST be a clean of input 1 or input 2 OR a special key 2 input loader function\n", Setup->szFORMAT_NAME); } if ( (Setup->flags&MGF_SALTED2)==MGF_SALTED2 && (Setup->flags&MGF_SALT_AS_HEX) == MGF_SALT_AS_HEX) { // if the user wants salt_as_hex, then here can NOT be 2 salts. 
return !fprintf(stderr, "Error invalid format %s: If using MGF_SALT_AS_HEX flag, then you can NOT have a 2nd salt.\n", Setup->szFORMAT_NAME); } if (Setup->pFuncs && Setup->pFuncs[0]) { unsigned int z; for (z = 0; Setup->pFuncs[z]; ++z) ; z += 50; curdat.dynamic_FUNCTIONS = mem_alloc_tiny(z*sizeof(DYNAMIC_primitive_funcp), MEM_ALIGN_WORD); j = 0; #if !ARCH_LITTLE_ENDIAN // for bigendian, we do NOT store into keys, since we byte swap them. if (curdat.store_keys_in_input==1) { // this is only a minor speed hit, so simply fix by doing this. There is an // extra memcpy, that is it. curdat.store_keys_in_input = 0; curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input; curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__append_keys; } // NOTE NOTE NOTE, FIXME. These are 'hacks' which slow stuff way down. We should look at // building preloads that CAN do this. Store key input to input 1, but then do not use // input 1. Put a copy to input 2, then append, etc. In that way, we cut the number of // MD5's down by at least 1. // // But for now, just get it working. Get it working faster later. // NOTE, these are commented out now. I am not sure why they were there // I think the thought was for SIMD, BUT SIMD is not used on Sparc // I am leaving this code for now, BUT I think it should NOT be here. // I was getting failures on the 16 byte sph formats, for any // hash(hash($p).$s) such as md2(md2($p).$s) However, the modifications // where curdat.store_keys_in_input==1 is absolutely needed, or we have // get_key() failures all over the place. // note, with Setup->pFuncs[0]==DynamicFunc__set_input_len_32, we only will handle type 6 and 7 // for now we have this 'turned' off. It is fixed for type 6, 7 and 14. It is left on for the // john.ini stuff. Thus, if someone builds the intel version type 6, it will work (but slower). 
// if (curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1==1 && Setup->pFuncs[0]==DynamicFunc__set_input_len_32) { // curdat.store_keys_normal_but_precompute_hash_to_output2_base16_to_input1 = 0; // curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input; // curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__append_keys; // curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__crypt_md5; // curdat.dynamic_FUNCTIONS[j++] = DynamicFunc__clean_input; // Setup->pFuncs[0] = DynamicFunc__append_from_last_output_as_base16; // } #endif for (i=0; Setup->pFuncs[i]; ++i) { if (j > z-10) { unsigned int k; z += 100; curdat.dynamic_FUNCTIONS = mem_alloc_tiny(z*sizeof(DYNAMIC_primitive_funcp), MEM_ALIGN_WORD); for (k = 0; k <= j; ++k) curdat.dynamic_FUNCTIONS[k] = curdat.dynamic_FUNCTIONS[k]; } if (curdat.store_keys_in_input) { if (Setup->pFuncs[i] == DynamicFunc__append_keys) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_keys called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__append_keys2) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_keys2 called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__clean_input) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but clean_input called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__append_salt) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_salt called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__append_from_last_output2_to_input1_as_base16) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_from_last_output2_to_input1_as_base16 called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__overwrite_from_last_output2_to_input1_as_base16_no_size_fix) return !fprintf(stderr, "Error invalid format 
%s: MGF_KEYS_INPUT used, but overwrite_from_last_output2_to_input1_as_base16_no_size_fix called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__append_from_last_output_as_base16) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_from_last_output_as_base16s called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__overwrite_from_last_output_as_base16_no_size_fix) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but overwrite_from_last_output_as_base16_no_size_fix called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__append_2nd_salt) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but append_2nd_salt called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__set_input_len_32) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_32 called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__set_input_len_64) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_32 called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__overwrite_salt_to_input1_no_size_fix) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_32 called and that is invalid\n", Setup->szFORMAT_NAME); if (Setup->pFuncs[i] == DynamicFunc__append_input_from_input2) return !fprintf(stderr, "Error invalid format %s: MGF_KEYS_INPUT used, but DynamicFunc__set_input_len_32 called and that is invalid\n", Setup->szFORMAT_NAME); } // Ok if copy constants are set, make SURE we have that many constants. 
if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST1 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST1) && curdat.nConsts == 0) return !fprintf(stderr, "Error invalid format %s: Append Constant function called, but NO constants in the format\n", Setup->szFORMAT_NAME); if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST2 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST2) && curdat.nConsts < 2) return !fprintf(stderr, "Error invalid format %s: Append Constant #2 function called, but NO constants, or less than 2 constants in the format\n", Setup->szFORMAT_NAME); if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST3 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST3) && curdat.nConsts < 3) return !fprintf(stderr, "Error invalid format %s: Append Constant #3 function called, but NO constants, or less than 3 constants in the format\n", Setup->szFORMAT_NAME); if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST4 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST4) && curdat.nConsts < 4) return !fprintf(stderr, "Error invalid format %s: Append Constant #4 function called, but NO constants, or less than 4 constants in the format\n", Setup->szFORMAT_NAME); if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST5 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST5) && curdat.nConsts < 5) return !fprintf(stderr, "Error invalid format %s: Append Constant #5 function called, but NO constants, or less than 5 constants in the format\n", Setup->szFORMAT_NAME); if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST6 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST6) && curdat.nConsts < 6) return !fprintf(stderr, "Error invalid format %s: Append Constant #6 function called, but NO constants, or less than 6 constants in the format\n", Setup->szFORMAT_NAME); if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST7 || Setup->pFuncs[i] == 
DynamicFunc__append_input2_from_CONST7) && curdat.nConsts < 7) return !fprintf(stderr, "Error invalid format %s: Append Constant #7 function called, but NO constants, or less than 7 constants in the format\n", Setup->szFORMAT_NAME); if ( (Setup->pFuncs[i] == DynamicFunc__append_input1_from_CONST8 || Setup->pFuncs[i] == DynamicFunc__append_input2_from_CONST8) && curdat.nConsts < 8) return !fprintf(stderr, "Error invalid format %s: Append Constant #8 function called, but NO constants, or less than 8 constants in the format\n", Setup->szFORMAT_NAME); if ( (Setup->pFuncs[i] == DynamicFunc__append_2nd_salt || Setup->pFuncs[i] == DynamicFunc__append_2nd_salt2) && curdat.b2Salts == 0) return !fprintf(stderr, "Error invalid format %s: A call to one of the 'salt-2' functions, but this format does not have MFG_SALT2 flag set\n", Setup->szFORMAT_NAME); // Ok, if we have made it here, the function is 'currently' still valid. Load this pointer into our array of pointers. pFuncs = ConvertFuncs(Setup->pFuncs[i], &cnt2); #define IS_FUNC_NAME(H,N) if (is##H##Func(pFuncs[x])){ if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME)) pFmt->params.algorithm_name = ALGORITHM_NAME_##N; \ else if (!strcmp(pFmt->params.algorithm_name, ALGORITHM_NAME_X86)) pFmt->params.algorithm_name = ALGORITHM_NAME_X86_##N; } for (x = 0; x < cnt2; ++x) { curdat.dynamic_FUNCTIONS[j++] = pFuncs[x]; if (pFuncs[x] == DynamicFunc__setmode_unicode || pFuncs[x] == DynamicFunc__setmode_unicodeBE) pFmt->params.flags |= FMT_UNICODE; IS_FUNC_NAME(SHA1,S) if (isSHA2_256Func(pFuncs[x])) { #ifdef SIMD_COEF_32 if (curdat.using_flat_buffers_sse2_ok) pFmt->params.algorithm_name = ALGORITHM_NAME_S2_256; else #endif pFmt->params.algorithm_name = ALGORITHM_NAME_X86_S2_256; } if (isSHA2_512Func(pFuncs[x])) { #ifdef SIMD_COEF_64 if (curdat.using_flat_buffers_sse2_ok) pFmt->params.algorithm_name = ALGORITHM_NAME_S2_512; else #endif pFmt->params.algorithm_name = ALGORITHM_NAME_X86_S2_512; } IS_FUNC_NAME(MD4,4) 
IS_FUNC_NAME(WHIRL,WP2) IS_FUNC_NAME(GOST,GST2) IS_FUNC_NAME(Tiger,TGR) IS_FUNC_NAME(RIPEMD,RIPEMD) IS_FUNC_NAME(HAVAL,HAVAL) IS_FUNC_NAME(MD2,MD2) IS_FUNC_NAME(PANAMA,PANAMA) IS_FUNC_NAME(SKEIN,SKEIN) // Note, until we add SIMD keccak, one algoithm is all we 'need' IS_FUNC_NAME(KECCAK,KECCAK) // IS_FUNC_NAME(KECCAK,SHA3_256) // IS_FUNC_NAME(KECCAK,SHA3_384) // IS_FUNC_NAME(KECCAK,SHA3_512) // IS_FUNC_NAME(KECCAK,KECCAK_256) // IS_FUNC_NAME(KECCAK,KECCAK_512) // LARGE_HASH_EDIT_POINT (MUST match the just added a new IsXXXFunc() type function) } if (isLargeHashFinalFunc(curdat.dynamic_FUNCTIONS[j-1])) { if (Setup->pFuncs[i+1]) return !fprintf(stderr, "Error invalid format %s: DynamicFunc__LARGE_HASH_crypt_inputX_to_output1_FINAL, can ONLY be used as the last function in a script\n", Setup->szFORMAT_NAME); } } curdat.dynamic_FUNCTIONS[j] = NULL; } if (!Setup->pPreloads || Setup->pPreloads[0].ciphertext == NULL) { return !fprintf(stderr, "Error invalid format %s: Error, no validation hash(s) for this format\n", Setup->szFORMAT_NAME); } cnt = 0; #ifdef _OPENMP dyna_setupOMP(Setup, pFmt); #endif { struct fmt_tests *pfx = mem_alloc_tiny(ARRAY_COUNT(dynamic_tests) * sizeof (struct fmt_tests), MEM_ALIGN_WORD); memset(pfx, 0, ARRAY_COUNT(dynamic_tests) * sizeof (struct fmt_tests)); for (i = 0; cnt < ARRAY_COUNT(dynamic_tests) -1; ++i) { if (Setup->pPreloads[i].ciphertext == NULL) { i = 0; } if (Setup->pPreloads[i].ciphertext[0] == 'A' && Setup->pPreloads[i].ciphertext[1] == '=') { if (options.target_enc != ASCII && options.target_enc != ISO_8859_1) continue; pfx[cnt].ciphertext = str_alloc_copy(&Setup->pPreloads[i].ciphertext[2]); } else if (Setup->pPreloads[i].ciphertext[0] == 'U' && Setup->pPreloads[i].ciphertext[1] == '=') { if (options.target_enc != UTF_8) continue; pfx[cnt].ciphertext = str_alloc_copy(&Setup->pPreloads[i].ciphertext[2]); } else pfx[cnt].ciphertext = str_alloc_copy(Setup->pPreloads[i].ciphertext); pfx[cnt].plaintext = 
str_alloc_copy(Setup->pPreloads[i].plaintext); pfx[cnt].fields[0] = Setup->pPreloads[i].fields[0] ? str_alloc_copy(Setup->pPreloads[i].fields[0]) : ""; pfx[cnt].fields[1] = pfx[cnt].ciphertext; for (j = 2; j < 10; ++j) pfx[cnt].fields[j] = Setup->pPreloads[i].fields[j] ? str_alloc_copy(Setup->pPreloads[i].fields[j]) : ""; ++cnt; } pfx[cnt].ciphertext = NULL; pfx[cnt].plaintext = NULL; pFmt->params.tests = pfx; } if (curdat.dynamic_base16_upcase) dynamic_itoa16 = itoa16u; else dynamic_itoa16 = itoa16; { char s[512], *cp; cp = Setup->szFORMAT_NAME; cp = strchr(Setup->szFORMAT_NAME, ' '); ++cp; sprintf(s, "%s %s", cp, pFmt->params.algorithm_name); pFmt->params.algorithm_name = str_alloc_copy(s); } if ((Setup->flags & MGF_SALTED) && !Setup->SaltLen) return !fprintf(stderr, "Error invalid format %s\n\tIt is required to add SaltLen= to the script, for this format\n", Setup->szFORMAT_NAME); return 1; } static int LoadOneFormat(int idx, struct fmt_main *pFmt) { extern struct options_main options; char label[16] = { 0 }, label_id[16] = { 0 }, *cp = NULL; memcpy(pFmt, &fmt_Dynamic, sizeof(struct fmt_main)); // TODO: // NOTE, this was commented out, because the late binding @dynamic=expr@ // hashes were killing out possibly pre-setup input buffers. NOTE, that // things worked fine after this, all self tests do pass, and I am 99% // sure that all of this 'required' cleaning happens in init(). but I am // putting this comment in here, so that if at a later time, there are // problems and are tracked down to this, we will know why. // dynamic_RESET(pFmt); // Ok we need to list this as a dynamic format (even for the 'thin' formats) pFmt->params.flags |= FMT_DYNAMIC; if (idx < 1000) { if (dynamic_RESERVED_PRELOAD_SETUP(idx, pFmt) != 1) return 0; } else { if (dynamic_LOAD_PARSER_FUNCTIONS(idx, pFmt) != 1) return 0; } /* we 'have' to take the sig from the test array. If we do not have */ /* our preload array 'solid', then the idx will not be the proper */ /* number. 
So we simply grab the label from the test cyphertext string */ strncpy(label, pFmt->params.tests[0].ciphertext, 15); cp = strchr(&label[1], '$'); if (NULL != cp) cp[1] = 0; strcpy(label_id, &label[1]); cp = strchr(label_id, '$'); if (NULL != cp) *cp = 0; // if (!options.format || strncmp(options.format, "dynamic_", 8)) // pFmt->params.label = str_alloc_copy("dynamic"); // else pFmt->params.label = str_alloc_copy(label_id); strcpy(curdat.dynamic_WHICH_TYPE_SIG, label); curdat.dynamic_HASH_OFFSET = strlen(label); if (curdat.dynamic_base64_inout == 1 || curdat.dynamic_base64_inout == 3) { // we have to compute 'proper' offset const char *cp = pFmt->params.tests[0].ciphertext; size_t len = base64_valid_length(&cp[curdat.dynamic_HASH_OFFSET], curdat.dynamic_base64_inout == 1 ? e_b64_crypt : e_b64_mime, flg_Base64_MIME_TRAIL_EQ_CNT, 0); curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + len + 1; } else if (curdat.dynamic_base64_inout == 2) curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 16 + 1; else if (curdat.dynamic_40_byte_input) curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 40 + 1; else if (curdat.dynamic_48_byte_input) curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 48 + 1; else if (curdat.dynamic_64_byte_input) curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 64 + 1; else if (curdat.dynamic_56_byte_input) curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 56 + 1; else if (curdat.dynamic_80_byte_input) curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 80 + 1; else if (curdat.dynamic_96_byte_input) curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 96 + 1; else if (curdat.dynamic_128_byte_input) curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 128 + 1; else curdat.dynamic_SALT_OFFSET = curdat.dynamic_HASH_OFFSET + 32 + 1; pFmt->private.data = mem_alloc_tiny(sizeof(private_subformat_data), MEM_ALIGN_WORD); memcpy(pFmt->private.data, &curdat, sizeof(private_subformat_data)); if 
(strncmp(curdat.dynamic_WHICH_TYPE_SIG, pFmt->params.tests[0].ciphertext, strlen(curdat.dynamic_WHICH_TYPE_SIG))) { fprintf(stderr, "ERROR, when loading dynamic formats, the wrong curdat item was linked to this type:\nTYPE_SIG=%s\nTest_Dat=%s\n", curdat.dynamic_WHICH_TYPE_SIG, pFmt->params.tests[0].ciphertext); return 0; } return 1; } struct fmt_main *dynamic_Register_local_format(int *type) { int num=nLocalFmts++; private_subformat_data keep; if (!pLocalFmts) pLocalFmts = mem_calloc_tiny(1000*sizeof(struct fmt_main), 16); /* since these are loaded LATE in the process, init() has been called * and we HAVE to preserve the already loaded setup. This will happen * if we run a crack, but do not specify a specific dyna format */ memcpy(&keep, &curdat, sizeof(private_subformat_data)); LoadOneFormat(num+6000, &(pLocalFmts[num])); memcpy(&curdat, &keep, sizeof(private_subformat_data)); dynamic_use_sse = curdat.dynamic_use_sse; force_md5_ctx = curdat.force_md5_ctx; *type = num+6000; return &(pLocalFmts[num]); } int dynamic_Register_formats(struct fmt_main **ptr) { int count, i, idx, single=-1, wildcard = 0, pop[5000]; extern struct options_main options; if (options.format && strstr(options.format, "*")) wildcard = 1; Dynamic_Load_itoa16_w2(); if (!wildcard && options.format && !strncmp(options.format, "dynamic_", 8)) sscanf(options.format, "dynamic_%d", &single); if (options.format && options.subformat && !strcmp(options.format, "dynamic") && !strncmp(options.subformat, "dynamic_", 8)) sscanf(options.subformat, "dynamic_%d", &single); if (options.dynamic_bare_hashes_always_valid == 'Y') dynamic_allow_rawhash_fixup = 1; else if (options.dynamic_bare_hashes_always_valid != 'N' && cfg_get_bool(SECTION_OPTIONS, NULL, "DynamicAlwaysUseBareHashes", 1)) dynamic_allow_rawhash_fixup = 1; if (single != -1) { // user wanted only a 'specific' format. Simply load that one. 
dynamic_allow_rawhash_fixup = 1; if (dynamic_IS_VALID(single, 1) == 0) return 0; pFmts = mem_alloc_tiny(sizeof(pFmts[0]), MEM_ALIGN_WORD); if (!LoadOneFormat(single, pFmts)) return 0; *ptr = pFmts; return (nFmts = 1); } for (count = i = 0; i < 5000; ++i) { if ((pop[i] = (dynamic_IS_VALID(i, 0) == 1))) ++count; } // Ok, now we know how many formats we have. Load them pFmts = mem_alloc_tiny(sizeof(pFmts[0])*count, MEM_ALIGN_WORD); for (idx = i = 0; i < 5000; ++i) { if (pop[i]) { if (LoadOneFormat(i, &pFmts[idx]) == 0) --count; else ++idx; } } *ptr = pFmts; return (nFmts = count); } /* * finds the 'proper' sub format from the allocated formats, IFF that format 'exists' */ static struct fmt_main *dynamic_Get_fmt_main(int which) { char label[40]; int i; sprintf(label, "$dynamic_%d$", which); for (i = 0; i < nFmts; ++i) { private_subformat_data *pPriv = pFmts[i].private.data; if (!strcmp(pPriv->dynamic_WHICH_TYPE_SIG, label)) return &pFmts[i]; } for (i = 0; i < nLocalFmts; ++i) { private_subformat_data *pPriv = pLocalFmts[i].private.data; if (!strcmp(pPriv->dynamic_WHICH_TYPE_SIG, label)) return &pLocalFmts[i]; } return NULL; } /* * This function will 'forget' which md5-gen subtype we are working with. It will allow * a different type to be used. Very useful for things like -test (benchmarking). */ static void dynamic_RESET(struct fmt_main *fmt) { memset(&curdat, 0, sizeof(curdat)); m_count = 0; keys_dirty = 0; cursalt=cursalt2=username=0; saltlen=saltlen2=usernamelen=0; // make 'sure' we startout with blank inputs. m_count = 0; #ifdef SIMD_COEF_32 if (input_buf) { #else if (input_buf_X86) { #endif __nonMP_DynamicFunc__clean_input_full(); __nonMP_DynamicFunc__clean_input2_full(); } } /* * This will LINK our functions into some other fmt_main struction. That way * that struction can use our code. 
The other *_fmt.c file will need to * 'override' the valid, the binary and the salt functions, and make changes * to the hash, BEFORE calling into the dynamic valid/binary/salt functions. * Other than those functions (and calling into this linkage function at init time) * that is about all that needs to be in that 'other' *_fmt.c file, as long as the * format is part of the md5-generic 'class' of functions. */ struct fmt_main *dynamic_THIN_FORMAT_LINK(struct fmt_main *pFmt, char *ciphertext, char *orig_sig, int bInitAlso) { int i, valid, nFmtNum; struct fmt_main *pFmtLocal; static char subformat[17], *cp; dynamic_allow_rawhash_fixup = 0; strncpy(subformat, ciphertext, 16); subformat[16] = 0; cp = strchr(&subformat[9], '$'); if (cp) cp[1] = 0; nFmtNum = -1; sscanf(subformat, "$dynamic_%d", &nFmtNum); if (nFmtNum == -1) error_msg("Error, Invalid signature line trying to link to dynamic format.\nOriginal format=%s\nSignature line=%s\n", orig_sig, ciphertext); pFmtLocal = dynamic_Get_fmt_main(nFmtNum); if (pFmtLocal == NULL) error_msg("Error, Invalid signature line trying to link to dynamic format.\nOriginal format=%s\nSignature line=%s\n", orig_sig, ciphertext); valid = pFmtLocal->methods.valid(ciphertext, pFmtLocal); if (!valid) error_msg("Error, trying to link to %s using ciphertext=%s FAILED\n", subformat, ciphertext); pFmt->params.algorithm_name = pFmtLocal->params.algorithm_name; if (pFmt->params.plaintext_length == 0 || pFmt->params.plaintext_length > pFmtLocal->params.plaintext_length) { pFmt->params.plaintext_length = pFmtLocal->params.plaintext_length; pFmt->params.plaintext_min_length = pFmtLocal->params.plaintext_min_length; } pFmt->params.max_keys_per_crypt = pFmtLocal->params.max_keys_per_crypt; pFmt->params.min_keys_per_crypt = pFmtLocal->params.max_keys_per_crypt; if (pFmt->params.min_keys_per_crypt > 64) pFmt->params.min_keys_per_crypt = 64; pFmt->params.flags = pFmtLocal->params.flags; if (pFmtLocal->params.salt_size) pFmt->params.salt_size = 
sizeof(void*); else pFmt->params.salt_size = 0; pFmt->methods.cmp_all = pFmtLocal->methods.cmp_all; pFmt->methods.cmp_one = pFmtLocal->methods.cmp_one; pFmt->methods.cmp_exact = pFmtLocal->methods.cmp_exact; for (i = 0; i < FMT_TUNABLE_COSTS; ++i) { pFmt->methods.tunable_cost_value[i] = pFmtLocal->methods.tunable_cost_value[i]; pFmt->params.tunable_cost_name[i] = pFmtLocal->params.tunable_cost_name[i]; } pFmt->methods.source = pFmtLocal->methods.source; pFmt->methods.set_salt = pFmtLocal->methods.set_salt; pFmt->methods.salt = pFmtLocal->methods.salt; pFmt->methods.done = pFmtLocal->methods.done; pFmt->methods.salt_hash = pFmtLocal->methods.salt_hash; pFmt->methods.split = pFmtLocal->methods.split; pFmt->methods.set_key = pFmtLocal->methods.set_key; pFmt->methods.get_key = pFmtLocal->methods.get_key; pFmt->methods.clear_keys = pFmtLocal->methods.clear_keys; pFmt->methods.crypt_all = pFmtLocal->methods.crypt_all; pFmt->methods.prepare = pFmtLocal->methods.prepare; pFmt->methods.salt_compare = pFmtLocal->methods.salt_compare; for (i = 0; i < PASSWORD_HASH_SIZES; ++i) { pFmt->methods.binary_hash[i] = pFmtLocal->methods.binary_hash[i]; pFmt->methods.get_hash[i] = pFmtLocal->methods.get_hash[i]; } if (bInitAlso) { //fprintf(stderr, "dynamic_THIN_FORMAT_LINK() calling init(%s)\n", subformat); init(pFmtLocal); } pFmt->private.data = mem_alloc_tiny(sizeof(private_subformat_data), MEM_ALIGN_WORD); memcpy(pFmt->private.data, pFmtLocal->private.data, sizeof(private_subformat_data)); return pFmtLocal; } // We ONLY deal with hex hashes at this time. Is we later have to deal with // base-64, this will become harder. Before this function we had bugs where // many things were loaded as 'being' valid, even if not. 
/*
 * Decide whether 'ciphertext' looks like a bare (unprefixed) hex hash of the
 * exact byte length this sub-format expects.  Returns 1 when every byte up to
 * the expected length is a hex digit AND the character that follows is
 * consistent with the format's salt flags; 0 otherwise.
 * NOTE(review): only hex inputs are recognized here; base-64 style inputs
 * never match (see the comment preceding this function in the file).
 */
static int looks_like_raw_hash(char *ciphertext, private_subformat_data *pPriv)
{
	int i, cipherTextLen = CIPHERTEXT_LENGTH;  /* default: 32-byte (MD5-sized) hex input */

	/* Select the expected raw-hash length from the sub-format's input-size flags. */
	if (pPriv->dynamic_40_byte_input) {
		cipherTextLen = 40;
	} else if (pPriv->dynamic_48_byte_input) {
		cipherTextLen = 48;
	} else if (pPriv->dynamic_64_byte_input) {
		cipherTextLen = 64;
	} else if (pPriv->dynamic_56_byte_input) {
		cipherTextLen = 56;
	} else if (pPriv->dynamic_80_byte_input) {
		cipherTextLen = 80;
	} else if (pPriv->dynamic_96_byte_input) {
		cipherTextLen = 96;
	} else if (pPriv->dynamic_128_byte_input) {
		cipherTextLen = 128;
	}
	/* Every leading byte must be a hex digit (atoi16 maps non-hex bytes to 0x7f). */
	for (i = 0; i < cipherTextLen; i++) {
		if (atoi16[ARCH_INDEX(ciphertext[i])] == 0x7f)
			return 0;
	}
	if ((pPriv->pSetup->flags&MGF_SALTED) == 0) {
		/* Unsalted format: the string must end exactly at the hash length. */
		if (!ciphertext[cipherTextLen])
			return 1;
		return 0;
	}
	/* Salted format: the hash must be followed by the '$' salt separator. */
	return ciphertext[cipherTextLen] == '$';
}

/*
 * If raw-hash fixup is enabled and 'ciphertext' looks like a bare hash for
 * this sub-format (no "$dynamic_" prefix), prepend the format's type
 * signature so downstream code sees a fully-qualified dynamic hash.
 * Returns the input pointer unchanged whenever any required component
 * (salt, 2nd salt, username, Fld fields) appears to be missing.
 * NOTE(review): on fixup this returns a pointer to a static buffer, so the
 * result is overwritten by the next call and the function is not reentrant.
 */
static char *FixupIfNeeded(char *ciphertext, private_subformat_data *pPriv)
{
	if (!ciphertext || *ciphertext == 0 || *ciphertext == '*')
		return ciphertext;
	if (dynamic_allow_rawhash_fixup && strncmp(ciphertext, "$dynamic_", 9) && looks_like_raw_hash(ciphertext, pPriv))
	{
		/* NOTE(review): '__'-prefixed identifiers are reserved for the implementation. */
		static char __ciphertext[512+24];
		if (pPriv->pSetup->flags & MGF_SALTED) {
			/* A salted hash must carry at least one '$'-separated component. */
			if (!strchr(ciphertext, '$'))
				return ciphertext;
		}
		if ( (pPriv->pSetup->flags & MGF_SALTED2) == MGF_SALTED2) {
			/* Formats with a second salt require the "$$2" marker. */
			if (!strstr(ciphertext, "$$2"))
				return ciphertext;
		}
		if ( (pPriv->pSetup->flags & MGF_USERNAME) == MGF_USERNAME) {
			/* Username-consuming formats require the "$$U" marker. */
			if (!strstr(ciphertext, "$$U"))
				return ciphertext;
		}
		if (pPriv->FldMask) {
			/* Each declared field N must appear as "$$FN" after the salt offset. */
			int i;
			for (i = 0; i < 10; ++i) {
				if ((pPriv->FldMask & (MGF_FLDx_BIT<<i)) == (MGF_FLDx_BIT<<i)) {
					char Fld[8];
					sprintf(Fld, "$$F%d", i);
					if (!strstr(&ciphertext[pPriv->dynamic_SALT_OFFSET-1], Fld))
						return ciphertext;
				}
			}
		}
		/* All checks passed: build "<type-sig><original ciphertext>". */
		strcpy(__ciphertext, pPriv->dynamic_WHICH_TYPE_SIG);
		strnzcpy(&__ciphertext[strlen(__ciphertext)], ciphertext, 512);
		return __ciphertext;
	}
	return ciphertext;
}

/*
 * Returns nonzero when 'ciphertext' already starts with this format's
 * "$dynamic_N$" type signature (i.e. no fixup/prefixing is needed).
 */
int text_in_dynamic_format_already(struct fmt_main *pFmt, char *ciphertext)
{
	private_subformat_data *pPriv;
	if (!pFmt) return 0;
	/* NOTE, it 'is' possible to get called here, without the private stuff
	   being setup properly (in valid, etc). So, we simply grab the static
	   private stuff each time */
	pPriv = pFmt->private.data;
	if (!ciphertext || !pPriv) return 0;
	return !strncmp(ciphertext, pPriv->dynamic_WHICH_TYPE_SIG, strlen(pPriv->dynamic_WHICH_TYPE_SIG));
}

// if caseType == 1, return cp
// if caseType == 2, return upcase(cp)
// if caseType == 3, return locase(cp)
// if caseType == 4, return upcaseFirstChar(locase(cp))
/*
 * Case-normalize 'cp' per caseType.  When the converted string is identical
 * to the input, the original pointer is returned; otherwise the result lives
 * in a static buffer (overwritten on the next call -- not reentrant).
 */
static char *HandleCase(char *cp, int caseType)
{
	static UTF8 dest[256];

	switch(caseType) {
		case 1:
			/* caseType 1: identity -- return the input untouched. */
			return cp;
		case 2:
			enc_uc(dest, sizeof(dest), (unsigned char*)cp, strlen(cp));
			if (!strcmp((char*)dest, cp))
				return cp;
			break;
		case 3:
		case 4:
			enc_lc(dest, sizeof(dest), (unsigned char*)cp, strlen(cp));
			if (caseType == 4)
				/* caseType 4: lowercase everything, then upcase the first char. */
				dest[0] = low2up_ansi(dest[0]);
			if (!strcmp((char*)dest, cp))
				return cp;
			break;
		default:
			return cp;
	}
	return (char*)dest;
}

/*
 * Return the salt length (always positive) for a dynamic format, or -1 when
 * pFmt is not a dynamic format or its private setup has not been loaded yet.
 */
int dynamic_real_salt_length(struct fmt_main *pFmt)
{
	if (pFmt->params.flags & FMT_DYNAMIC) {
		private_subformat_data *pPriv = pFmt->private.data;
		if (pPriv == NULL || pPriv->pSetup == NULL)
			return -1;  // not a dynamic format, or called before we have loaded them!!
		/* SaltLen may be stored negative (flag meaning defined elsewhere); report magnitude. */
		return abs(pPriv->pSetup->SaltLen);
	}
	// NOT a dynamic format
	return -1;
}

#else
#warning Notice: Dynamic format disabled from build.
#endif /* DYNAMIC_DISABLED */
/* ==== file: core_clantr.c ==== */
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlantr.c, normal z -> c, Fri Sep 28 17:38:21 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

#include <math.h>

/******************************************************************************/
/*
 * Thin wrapper around LAPACK clantr: computes the norm selected by 'norm' of
 * the m-by-n triangular/trapezoidal single-complex matrix A (leading
 * dimension lda) and stores the scalar result in *value.
 * 'work' is LAPACK scratch space -- required size depends on 'norm'
 * (see the LAPACK lantr documentation; TODO confirm caller sizing).
 */
__attribute__((weak))
void plasma_core_clantr(plasma_enum_t norm, plasma_enum_t uplo, plasma_enum_t diag,
                        int m, int n,
                        const plasma_complex32_t *A, int lda,
                        float *work, float *value)
{
    // Due to a bug in LAPACKE < 3.6.1, this function always returns zero.
    // *value = LAPACKE_clantr_work(LAPACK_COL_MAJOR,
    //                              lapack_const(norm), lapack_const(uplo),
    //                              lapack_const(diag),
    //                              m, n, A, lda, work);
    // Calling LAPACK directly instead.
    char nrm = lapack_const(norm);
    char upl = lapack_const(uplo);
    char dia = lapack_const(diag);
    *value = LAPACK_clantr(&nrm, &upl, &dia, &m, &n, A, &lda, work);
}

/******************************************************************************/
/*
 * OpenMP-task wrapper: runs plasma_core_clantr as a task that reads A and
 * writes value[0].  The body is skipped when the sequence already failed.
 */
void plasma_core_omp_clantr(plasma_enum_t norm, plasma_enum_t uplo, plasma_enum_t diag,
                            int m, int n,
                            const plasma_complex32_t *A, int lda,
                            float *work, float *value,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:value[0:1])
    {
        if (sequence->status == PlasmaSuccess)
            plasma_core_clantr(norm, uplo, diag, m, n, A, lda, work, value);
    }
}

/******************************************************************************/
/*
 * Partial-norm kernel for a triangular/trapezoidal tile: instead of a single
 * scalar, it accumulates
 *   - per-column absolute sums into value[0:n] for PlasmaOneNorm, or
 *   - per-row    absolute sums into value[0:m] for PlasmaInfNorm,
 * so a later reduction over tiles can form the full norm.  A unit diagonal
 * (diag == PlasmaUnit) contributes 1.0 in place of the stored diagonal entry.
 * NOTE(review): the switch has no default -- any other 'norm' silently leaves
 * 'value' untouched; callers must not pass other norms here.
 */
void plasma_core_omp_clantr_aux(plasma_enum_t norm, plasma_enum_t uplo,
                                plasma_enum_t diag,
                                int m, int n,
                                const plasma_complex32_t *A, int lda,
                                float *value,
                                plasma_sequence_t *sequence, plasma_request_t *request)
{
    switch (norm) {
    case PlasmaOneNorm:
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:value[0:n])
        {
            if (sequence->status == PlasmaSuccess) {
                if (uplo == PlasmaUpper) {
                    if (diag == PlasmaNonUnit) {
                        /* Column j of an upper tile holds rows 0..min(j, m-1). */
                        for (int j = 0; j < n; j++) {
                            value[j] = cabsf(A[lda*j]);
                            for (int i = 1; i < imin(j+1, m); i++) {
                                value[j] += cabsf(A[lda*j+i]);
                            }
                        }
                    }
                    else { // PlasmaUnit
                        int j;
                        /* Columns that intersect the diagonal: count 1.0 for it. */
                        for (j = 0; j < imin(n, m); j++) {
                            value[j] = 1.0;
                            for (int i = 0; i < j; i++) {
                                value[j] += cabsf(A[lda*j+i]);
                            }
                        }
                        /* Remaining columns (j >= m) have no diagonal entry. */
                        for (; j < n; j++) {
                            value[j] = cabsf(A[lda*j]);
                            for (int i = 1; i < m; i++) {
                                value[j] += cabsf(A[lda*j+i]);
                            }
                        }
                    }
                }
                else { // PlasmaLower
                    if (diag == PlasmaNonUnit) {
                        int j;
                        /* Column j of a lower tile holds rows j..m-1. */
                        for (j = 0; j < imin(n, m); j++) {
                            value[j] = cabsf(A[lda*j+j]);
                            for (int i = j+1; i < m; i++) {
                                value[j] += cabsf(A[lda*j+i]);
                            }
                        }
                        /* Columns past the diagonal block are entirely zero. */
                        for (; j < n; j++)
                            value[j] = 0.0;
                    }
                    else { // PlasmaUnit
                        int j;
                        for (j = 0; j < imin(n, m); j++) {
                            value[j] = 1.0;
                            for (int i = j+1; i < m; i++) {
                                value[j] += cabsf(A[lda*j+i]);
                            }
                        }
                        for (; j < n; j++)
                            value[j] = 0.0;
                    }
                }
            }
        }
        break;
    case PlasmaInfNorm:
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:value[0:m])
        {
            if (sequence->status == PlasmaSuccess) {
                if (uplo == PlasmaUpper) {
                    if (diag == PlasmaNonUnit) {
                        for (int i = 0; i < m; i++)
                            value[i] = 0.0;
                        /* Row sums: column j touches rows 0..min(j, m-1). */
                        for (int j = 0; j < n; j++) {
                            for (int i = 0; i < imin(j+1, m); i++) {
                                value[i] += cabsf(A[lda*j+i]);
                            }
                        }
                    }
                    else { // PlasmaUnit
                        int i;
                        /* Rows intersecting the diagonal start from 1.0. */
                        for (i = 0; i < imin(m, n); i++)
                            value[i] = 1.0;
                        for (; i < m; i++)
                            value[i] = 0.0;
                        int j;
                        for (j = 0; j < imin(n, m); j++) {
                            for (i = 0; i < j; i++) {
                                value[i] += cabsf(A[lda*j+i]);
                            }
                        }
                        /* Columns past the diagonal block contribute to all rows. */
                        for (; j < n; j++) {
                            for (i = 0; i < m; i++) {
                                value[i] += cabsf(A[lda*j+i]);
                            }
                        }
                    }
                }
                else { // PlasmaLower
                    if (diag == PlasmaNonUnit) {
                        for (int i = 0; i < m; i++)
                            value[i] = 0.0;
                        /* Row sums: column j touches rows j..m-1. */
                        for (int j = 0; j < imin(n, m); j++) {
                            for (int i = j; i < m; i++) {
                                value[i] += cabsf(A[lda*j+i]);
                            }
                        }
                    }
                    else { // PlasmaUnit
                        int i;
                        for (i = 0; i < imin(m, n); i++)
                            value[i] = 1.0;
                        for (; i < m; i++)
                            value[i] = 0.0;
                        for (int j = 0; j < imin(n, m); j++) {
                            for (i = j+1; i < m; i++) {
                                value[i] += cabsf(A[lda*j+i]);
                            }
                        }
                    }
                }
            }
        }
        break;
    }
}
/* ==== file: mm-omp.c ==== */
/**
 *
 * Matrix Multiplication - Shared-memory (OpenMP)
 *
 * CS3210
 *
 **/

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
#include <omp.h>
#include <xmmintrin.h>

int size;     /* matrix dimension (size x size), set from argv[1] */
int threads;  /* requested OpenMP thread count, set from argv[2]  */

typedef struct {
	float **element;  /* row-major: element[i] points at row i */
} matrix;

/**
 * Returns the current wall-clock time in nanoseconds.
 */
long long wall_clock_time()
{
#ifdef LINUX
	struct timespec tp;
	clock_gettime(CLOCK_REALTIME, &tp);
	return (long long)(tp.tv_nsec + (long long)tp.tv_sec * 1000000000ll);
#else
	struct timeval tv;
	gettimeofday(&tv, NULL);
	/* microseconds -> nanoseconds */
	return (long long)(tv.tv_usec * 1000 + (long long)tv.tv_sec * 1000000000ll);
#endif
}

/**
 * Allocates memory for a matrix of size SIZE.
 * Each row is a separate contiguous allocation; rows themselves are
 * not contiguous with one another.  Exits on allocation failure.
 */
void allocate_matrix(matrix* m)
{
	int i;

	// allocate array for all the rows
	m->element = (float**)malloc(sizeof(float*) * size);
	if (m->element == NULL) {
		fprintf(stderr, "Out of memory\n");
		exit(1);
	}

	// allocate an array for each row of the matrix
	for (i = 0; i < size; i++) {
		m->element[i] = (float*)malloc(sizeof(float) * size);
		if (m->element[i] == NULL) {
			fprintf(stderr, "Out of memory\n");
			exit(1);
		}
	}
}

/**
 * Free the memory allocated to a matrix (rows first, then the row table).
 */
void free_matrix(matrix* m)
{
	int i;
	for (i = 0; i < size; i++) {
		free(m->element[i]);
	}
	free(m->element);
}

/**
 * Initializes the elements of the matrix with
 * random values between 0 and 9.
 */
void init_matrix(matrix m)
{
	int i, j;

	for (i = 0; i < size; i++)
		for (j = 0; j < size; j++) {
			m.element[i][j] = rand() % 10;
		}
}

/**
 * Initializes every element of the matrix to 0.
 */
void init_matrix_zero(matrix m)
{
	int i, j;

	for (i = 0; i < size; i++)
		for (j = 0; j < size; j++) {
			m.element[i][j] = 0.0;
		}
}

/**
 * Multiplies matrix @a with matrix @b storing
 * the result in matrix @result.
 *
 * The multiplication algorithm is the O(n^3) algorithm.
 * @result must be zero-initialized by the caller, since this
 * routine accumulates with "+=".
 */
void mm(matrix a, matrix b, matrix result)
{
	int i, j, k;

	// Parallelize over the outer-most loop: each thread owns a set of
	// result rows, so no two threads write the same element.
	// a, b, result are shared; loop indices are private per thread.
	#pragma omp parallel for shared(a, b, result) private (i, j, k)
	for (i = 0; i < size; i++)
		for (j = 0; j < size; j++)
			for (k = 0; k < size; k++)
				result.element[i][j] += a.element[i][k] * b.element[k][j];
}

/**
 * Prints the matrix, one row per line.
 */
void print_matrix(matrix m)
{
	int i, j;

	for (i = 0; i < size; i++) {
		printf("row %4d: ", i);
		for (j = 0; j < size; j++)
			printf("%6.2f  ", m.element[i][j]);
		printf("\n");
	}
}

/**
 * Allocates, initializes and multiplies two random matrices,
 * reporting the elapsed multiplication time.
 */
void work()
{
	matrix a, b, result;
	long long before, after;

	// Allocate memory for matrices
	allocate_matrix(&a);
	allocate_matrix(&b);
	allocate_matrix(&result);

	// Initialize matrix elements
	init_matrix(a);
	init_matrix(b);
	// BUG FIX: mm() accumulates with "+=", but malloc() does not zero
	// memory, so result previously held garbage.  Zero it explicitly.
	init_matrix_zero(result);

	// Perform parallel matrix multiplication
	before = wall_clock_time();
	mm(a, b, result);
	after = wall_clock_time();
	fprintf(stderr, "Matrix multiplication took %1.2f seconds\n", ((float)(after - before))/1000000000);

	// Print the result matrix
	// print_matrix(result);

	// Release the matrices (previously leaked; free_matrix was unused).
	free_matrix(&a);
	free_matrix(&b);
	free_matrix(&result);
}

int main(int argc, char ** argv)
{
	srand(0);

	printf("Usage: %s <size> <threads>\n", argv[0]);
	if (argc >= 2)
		size = atoi(argv[1]);
	else
		size = 1024;
	if (argc >= 3)
		threads = atoi(argv[2]);
	else
		threads = -1;

	// Multiply the matrices
	if (threads != -1) {
		omp_set_num_threads(threads);
	}
	#pragma omp parallel
	{
		threads = omp_get_num_threads();
	}
	printf("Matrix multiplication of size %d using %d threads\n", size, threads);
	work();

	return 0;
}
/* ==== file: elemwise_binary_op.h ==== */
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2016 by Contributors * \file elemwise_binary_op.h * \brief Function definition of elementwise binary operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_ #include <mxnet/operator_util.h> #include <mxnet/op_attr_types.h> #include <vector> #include <string> #include <utility> #include <typeinfo> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../../engine/openmp.h" #include "elemwise_unary_op.h" #include "../../common/utils.h" #include "./init_op.h" namespace mxnet { namespace op { /*! Gather binary operator functions into ElemwiseBinaryOp class */ class ElemwiseBinaryOp : public OpBase { public: /*! \brief For sparse, assume missing rvalue is 0 */ template<typename OP, int Req> struct MissingRValueOp { typedef OP Operation; template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0))); } }; /*! 
\brief For sparse, assume missing lvalue is 0 */ template<typename OP, int Req> struct MissingLValueOp { typedef OP Operation; template<typename DType> MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) { KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i])); } }; private: /*! * \brief CSR operation requires temp space */ enum ResourceRequestType { kTempSpace }; /*! * \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input * CPU-Only version */ template<typename DType, typename OP, typename xpu> static inline size_t FillDense(mshadow::Stream<xpu> *s, const size_t idx_l, const size_t idx_r, const OpReqType req, mshadow::Tensor<xpu, 2, DType> *out, const size_t iter_out) { const int index_out_min = static_cast<int>(std::min(idx_l, idx_r)); if (static_cast<size_t>(index_out_min) > iter_out) { const DType zero_input_val = OP::Map(DType(0), DType(0)); #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) { Fill<false>(s, (*out)[i], req, zero_input_val); } } return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int' } static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) { return a1.var() == a2.var(); } /*! \brief Minimum of three */ static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) { return a < b ? (a < c ? a : c) : (b < c ? 
b : c); } template<typename xpu, typename LOP, typename ROP, typename DType> static void BackwardUseNone_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; Stream<xpu> *s = ctx.get_stream<xpu>(); const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes); const DType *ograd_dptr = inputs[0].dptr<DType>(); if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>()); } else if (req[0] != kNullOp) { DType *lgrad_dptr = outputs[0].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr); }); } if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) { CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>()); } else if (req[1] != kNullOp) { DType *rgrad_dptr = outputs[1].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr); }); } } template<typename xpu, typename LOP, typename ROP, typename DType> static void BackwardUseIn_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { DCHECK_EQ(outputs.size(), 2U); DCHECK_EQ(inputs.size(), 3U); mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>(); const DType *ograd_dptr = inputs[0].dptr<DType>(); const DType *lhs_dptr = inputs[1].dptr<DType>(); const DType *rhs_dptr = inputs[2].dptr<DType>(); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { const int size = static_cast<int>( (outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * lgrad_dptr = outputs[0].dptr<DType>(); mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, xpu>::Launch( 
s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);}); MXNET_ASSIGN_REQ_SWITCH(req[1], Req, { const int size = static_cast<int>( (outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1) / mxnet_op::DataType<DType>::kLanes); DType * rgrad_dptr = outputs[1].dptr<DType>(); mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>, xpu>::Launch( s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);}); } template< typename xpu, typename LOP, typename ROP, typename DType, bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false, typename BackupCompute> static inline void BackwardUseInEx_(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs, BackupCompute backup_compute) { mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); // lhs grad if (req[0] != kNullOp) { // RspRspOp can handle dense outputs so long as OP(0, 0) == 0 MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, LOP>( s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0], false, false, false, false); }); // lhs in-place MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, op::mshadow_op::mul>( s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0], false, false, true, false); }); } // rhs grad if (req[1] != kNullOp) { MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, ROP>( s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1], false, false, false, false); }); // rhs in-place MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, op::mshadow_op::mul>( s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1], false, false, true, false); }); } } protected: /*! 
\brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */ template<typename DType, typename IType, typename OP> static void RspRspOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, bool lhs_may_be_dense, bool rhs_may_be_dense, bool allow_inplace, bool scatter); /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */ template<typename DType, typename IType, typename CType, typename OP> static inline void CsrCsrOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output); /*! \brief DNS -op- CSR binary operator for non-canonical NDArray */ template<typename OP> static inline void DnsCsrDnsOp(mshadow::Stream<cpu> *s, const nnvm::NodeAttrs &attrs, const OpContext &ctx, const NDArray &lhs, const NDArray &rhs, OpReqType req, const NDArray &output, const bool reverse); public: /*! * \brief Rsp-op-Rsp operation which produces a dense result * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs); /*! * \brief Allow one of the binary inputs to be dense and still produce a sparse output. * Typically used for sparse * dense = sparse. 
* Note: for csr, it dispatches to fallback other than csr, csr -> csr * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { using namespace common; CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name; CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name; const auto& lhs_stype = in_attrs->at(0); const auto& rhs_stype = in_attrs->at(1); auto& out_stype = out_attrs->at(0); bool dispatched = false; const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask; const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback : DispatchMode::kFComputeEx; if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) { // dns, dns -> dns dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); } if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) { // rsp, rsp -> rsp dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex); } if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) { // csr, csr -> csr dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, dispatch_ex); } if (!dispatched && ((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) { // rsp, dns -> rsp // dns, rsp -> rsp dispatched = storage_type_assign(&out_stype, kRowSparseStorage, dispatch_mode, dispatch_ex); } if (!dispatched) { dispatched = dispatch_fallback(out_attrs, dispatch_mode); } return dispatched; } /*! * \brief Allow one of the inputs to be dense and produce a dense output, * for rsp inputs only support when both inputs are rsp type. 
* \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ template<bool cpu_only, bool rsp, bool csr> static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs, const int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { using namespace common; CHECK_EQ(in_attrs->size(), 2); CHECK_EQ(out_attrs->size(), 1); const auto lhs_stype = (*in_attrs)[0]; const auto rhs_stype = (*in_attrs)[1]; bool dispatched = false; const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask; const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback : DispatchMode::kFComputeEx; if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) { // dns, dns ... -> dns dispatched = storage_type_assign(out_attrs, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); } if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) { // rsp, rsp, ... -> rsp dispatched = storage_type_assign(out_attrs, kRowSparseStorage, dispatch_mode, dispatch_ex); } if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) { // csr, csr, ... -> csr dispatched = storage_type_assign(out_attrs, kCSRStorage, dispatch_mode, dispatch_ex); } if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) || (lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) { // dense, csr -> dense / csr, dense -> dense dispatched = storage_type_assign(out_attrs, kDefaultStorage, dispatch_mode, dispatch_ex); } if (!dispatched) { dispatch_fallback(out_attrs, dispatch_mode); } return true; } /*! 
* \brief Backward pass computing input gradient using forward inputs * \param attrs Attributes * \param dev_mask Device mask * \param dispatch_mode Dispatch Mode * \param in_attrs Input storage attributes * \param out_attrs Output storage attributes * \return true if handled */ static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs, int dev_mask, DispatchMode* dispatch_mode, std::vector<int> *in_attrs, std::vector<int> *out_attrs); template<typename xpu, typename OP> static void Compute(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] != kNullOp) { Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); }); }); } } template<typename xpu, typename OP> static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mxnet_op; if (req[0] != kNullOp) { Stream<xpu> *s = ctx.get_stream<xpu>(); CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size()) + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes; Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size, outputs[0].dptr<DType>(), inputs[0].dptr<DType>(), inputs[1].dptr<DType>()); }); }); } } template<typename xpu, typename OP> 
static void ComputeEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace common; CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if (req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto rhs_stype = inputs[1].storage_type(); const auto out_stype = outputs[0].storage_type(); mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); if ((ContainsOnlyStorage(inputs, kRowSparseStorage)) && (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) { // rsp, rsp -> rsp // rsp, rsp -> dns const int rsp_input_idx = lhs_stype == kRowSparseStorage ? 0 : 1; MSHADOW_IDX_TYPE_SWITCH(inputs[rsp_input_idx].aux_type(rowsparse::kIdx), IType, { MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { RspRspOp<DType, IType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false); }); }); } else if (ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) { // csr, csr -> csr MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIdx), IType, { MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIndPtr), CType, { MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { CsrCsrOp<DType, IType, CType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]); }); }); }); } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) && out_stype == kDefaultStorage) { const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1]; const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1]; const bool reverse = (lhs_stype == kCSRStorage); DnsCsrDnsOp<OP>(s, attrs, ctx, dns, csr, req[0], outputs[0], reverse); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } /*! 
\brief ComputeEx allowing dense lvalue and/or rvalue */ template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense> static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace mshadow; using namespace mshadow::expr; CHECK_EQ(inputs.size(), 2); CHECK_EQ(outputs.size(), 1); if (req[0] == kNullOp) return; const auto lhs_stype = inputs[0].storage_type(); const auto rhs_stype = inputs[1].storage_type(); const auto out_stype = outputs[0].storage_type(); if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) && ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) || (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) || (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) && lhs_may_be_dense && rhs_may_be_dense) { // rsp, rsp -> rsp // rsp, rsp -> dns // rsp, dns -> rsp // dns, rsp -> rsp // More than once dense not allowed (this will be checked in RspRspOp): // rsp, dns -> dns <-- NOT ALLOWED // dns, rsp -> dns <-- NOT ALLOWED mshadow::Stream<xpu> *s = ctx.get_stream<xpu>(); MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(outputs[0].aux_type(rowsparse::kIdx), IType, { RspRspOp<DType, IType, OP>( s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false); }); }); } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) { ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { BackwardUseNone_<xpu, LOP, ROP, 
DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { CHECK_EQ(inputs.size(), 1U); // output grad CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto in_stype = inputs[0].storage_type(); const auto lhs_stype = outputs[0].storage_type(); const auto rhs_stype = outputs[1].storage_type(); // lhs grad if (req[0] != kNullOp) { if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> rsp, _. op requires 0-input returns 0-output DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]}); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } // rhs grad if (req[1] != kNullOp) { if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) { CHECK_EQ(outputs[0].storage_type(), in_stype); // rsp -> _, rsp. 
op requires 0-input returns 0-output DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f); UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]}); } else { LogUnimplementedOp(attrs, ctx, inputs, req, outputs); } } } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template<typename xpu, typename LOP, typename ROP> static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, { BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs); }); } template< typename xpu, typename LOP, typename ROP, bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false> static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<NDArray> &inputs, const std::vector<OpReqType> &req, const std::vector<NDArray> &outputs) { using namespace common; CHECK_EQ(inputs.size(), 3U); CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad const auto lhs_grad_stype = outputs[0].storage_type(); const auto rhs_grad_stype = outputs[1].storage_type(); if (ContainsOnlyStorage(inputs, kRowSparseStorage) && (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) && (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) { // rsp, rsp, rsp -> [dns, rsp], [dns, rsp] MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, { BackwardUseInEx_<xpu, LOP, ROP, DType, in0_ok_dense, in1_ok_dense, in2_ok_dense>( attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, 
ROP>); }); } } }; // class ElemwiseBinaryOp /*! \brief Binary launch */ #define MXNET_OPERATOR_REGISTER_BINARY(name) \ NNVM_REGISTER_OP(name) \ .set_num_inputs(2) \ .set_num_outputs(1) \ .set_attr<nnvm::FListInputNames>("FListInputNames", \ [](const NodeAttrs& attrs) { \ return std::vector<std::string>{"lhs", "rhs"}; \ }) \ .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \ .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \ .set_attr<nnvm::FInplaceOption>("FInplaceOption", \ [](const NodeAttrs& attrs){ \ return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \ }) \ .add_argument("lhs", "NDArray-or-Symbol", "first input") \ .add_argument("rhs", "NDArray-or-Symbol", "second input") /*! \brief Binary launch, with FComputeEx for csr and rsp available */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseStorageType<2, 1, true, true, true>) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) /*! \brief Binary launch, with FComputeEx for csr and rsp available. when inputs contain both sparse and dense, sparse output is preferred. 
*/ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::PreferSparseStorageType) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) /*! \brief Binary launch, dense result * FInferStorageType attr is not set using this macro. * By default DefaultStorageType is used. */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::SparseSparseWithDenseResult) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) /*! \brief Binary launch, with FComputeEx for prefer dense */ #define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$) \ MXNET_OPERATOR_REGISTER_BINARY(__name$) \ .set_attr<FInferStorageType>("FInferStorageType", \ ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>) \ .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \ .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \ .set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};}) } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
ex3.c
#include <stdio.h>
#include <omp.h>

static long num_steps = 100000;
double step;

/*
 * Approximates pi by midpoint-rule integration of 4/(1+x^2) over [0,1],
 * parallelized with an OpenMP reduction, and prints the elapsed time
 * followed by the result.
 */
int main()
{
	double pi, sum = 0.0;  /* BUG FIX: the reduction combines partial sums
	                          into the ORIGINAL value of sum, which was
	                          previously read uninitialized (UB). */
	int i;

	step = 1.0/(double)num_steps;

	double start = omp_get_wtime();
	#pragma omp parallel for reduction(+:sum)
	for (i = 0; i < num_steps; i++) {
		/* BUG FIX: x was a shared variable written by every thread
		   (data race); declaring it inside the loop makes it private. */
		double x = (i + 0.5) * step;
		sum += 4.0/(1.0 + x*x);
	}  // end of OMP PARALLEL
	printf("%f", omp_get_wtime() - start);

	pi = sum*step;
	printf("pi is %f\n", pi);

	return 0;
}
Example_mem_model.1.c
/*
 * @@name: mem_model.1c
 * @@type: C
 * @@compilable: yes
 * @@linkable: yes
 * @@expect: rt-error
 * @@version: omp_3.1
 */
/*
 * OpenMP memory-model demonstration: before the barrier, a thread may
 * observe either the old or the new value of the shared variable x;
 * after the barrier (which implies a flush), both threads see the update.
 */
#include <stdio.h>
#include <omp.h>

int main(){
 int x;

 x = 2;
 #pragma omp parallel num_threads(2) shared(x)
 {

   if (omp_get_thread_num() == 0) {
      /* Atomic write: thread 0 updates x without tearing, but the write
         is not ordered relative to thread 1's read. */
      #pragma omp atomic write
      x = 5;
   } else {
      int xval;
      /* Atomic read: avoids a data race on x, yet may observe either
         the value before (2) or after (5) thread 0's write. */
      #pragma omp atomic read
      xval = x;

      /* Print 1: xval can be 2 or 5 */
      printf("1: Thread# %d: x = %d\n", omp_get_thread_num(), xval);
   }

   /* Barrier implies a flush: the write to x is now visible to all. */
   #pragma omp barrier

   if (omp_get_thread_num() == 0) {

      /* Print 2 */
      printf("2: Thread# %d: x = %d\n", omp_get_thread_num(), x);
   } else {

      /* Print 3 */
      printf("3: Thread# %d: x = %d\n", omp_get_thread_num(), x);
   }
 }
 return 0;
}
BKTree.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #ifndef _SPTAG_COMMON_BKTREE_H_ #define _SPTAG_COMMON_BKTREE_H_ #include <stack> #include <string> #include <vector> #include <shared_mutex> #include "../VectorIndex.h" #include "CommonUtils.h" #include "QueryResultSet.h" #include "WorkSpace.h" #include "Dataset.h" #include "DistanceUtils.h" namespace SPTAG { namespace COMMON { // node type for storing BKT struct BKTNode { SizeType centerid; SizeType childStart; SizeType childEnd; BKTNode(SizeType cid = -1) : centerid(cid), childStart(-1), childEnd(-1) {} }; template <typename T> struct KmeansArgs { int _K; int _DK; DimensionType _D; int _T; DistCalcMethod _M; T* centers; T* newTCenters; SizeType* counts; float* newCenters; SizeType* newCounts; int* label; SizeType* clusterIdx; float* clusterDist; float* weightedCounts; float* newWeightedCounts; float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length); KmeansArgs(int k, DimensionType dim, SizeType datasize, int threadnum, DistCalcMethod distMethod) : _K(k), _DK(k), _D(dim), _T(threadnum), _M(distMethod) { centers = (T*)_mm_malloc(sizeof(T) * k * dim, ALIGN_SPTAG); newTCenters = (T*)_mm_malloc(sizeof(T) * k * dim, ALIGN_SPTAG); counts = new SizeType[k]; newCenters = new float[threadnum * k * dim]; newCounts = new SizeType[threadnum * k]; label = new int[datasize]; clusterIdx = new SizeType[threadnum * k]; clusterDist = new float[threadnum * k]; weightedCounts = new float[k]; newWeightedCounts = new float[threadnum * k]; fComputeDistance = COMMON::DistanceCalcSelector<T>(distMethod); } ~KmeansArgs() { _mm_free(centers); _mm_free(newTCenters); delete[] counts; delete[] newCenters; delete[] newCounts; delete[] label; delete[] clusterIdx; delete[] clusterDist; delete[] weightedCounts; delete[] newWeightedCounts; } inline void ClearCounts() { memset(newCounts, 0, sizeof(SizeType) * _T * _K); memset(newWeightedCounts, 0, sizeof(float) * _T * _K); } inline void 
ClearCenters() { memset(newCenters, 0, sizeof(float) * _T * _K * _D); } inline void ClearDists(float dist) { for (int i = 0; i < _T * _K; i++) { clusterIdx[i] = -1; clusterDist[i] = dist; } } void Shuffle(std::vector<SizeType>& indices, SizeType first, SizeType last) { SizeType* pos = new SizeType[_K]; pos[0] = first; for (int k = 1; k < _K; k++) pos[k] = pos[k - 1] + newCounts[k - 1]; for (int k = 0; k < _K; k++) { if (newCounts[k] == 0) continue; SizeType i = pos[k]; while (newCounts[k] > 0) { SizeType swapid = pos[label[i]] + newCounts[label[i]] - 1; newCounts[label[i]]--; std::swap(indices[i], indices[swapid]); std::swap(label[i], label[swapid]); } while (indices[i] != clusterIdx[k]) i++; std::swap(indices[i], indices[pos[k] + counts[k] - 1]); } delete[] pos; } }; template <typename T> float RefineCenters(const Dataset<T>& data, KmeansArgs<T>& args) { int maxcluster = -1; SizeType maxCount = 0; for (int k = 0; k < args._DK; k++) { if (args.counts[k] > maxCount && args.newCounts[k] > 0 && DistanceUtils::ComputeDistance((T*)data[args.clusterIdx[k]], args.centers + k * args._D, args._D, DistCalcMethod::L2) > 1e-6) { maxcluster = k; maxCount = args.counts[k]; } } if (maxcluster != -1 && (args.clusterIdx[maxcluster] < 0 || args.clusterIdx[maxcluster] >= data.R())) LOG(Helper::LogLevel::LL_Debug, "maxcluster:%d(%d) Error dist:%f\n", maxcluster, args.newCounts[maxcluster], args.clusterDist[maxcluster]); float diff = 0; for (int k = 0; k < args._DK; k++) { T* TCenter = args.newTCenters + k * args._D; if (args.counts[k] == 0) { if (maxcluster != -1) { //int nextid = Utils::rand_int(last, first); //while (args.label[nextid] != maxcluster) nextid = Utils::rand_int(last, first); SizeType nextid = args.clusterIdx[maxcluster]; std::memcpy(TCenter, data[nextid], sizeof(T)*args._D); } else { std::memcpy(TCenter, args.centers + k * args._D, sizeof(T)*args._D); } } else { float* currCenters = args.newCenters + k * args._D; for (DimensionType j = 0; j < args._D; j++) 
currCenters[j] /= args.counts[k]; if (args._M == DistCalcMethod::Cosine) { COMMON::Utils::Normalize(currCenters, args._D, COMMON::Utils::GetBase<T>()); } for (DimensionType j = 0; j < args._D; j++) TCenter[j] = (T)(currCenters[j]); } diff += args.fComputeDistance(args.centers + k*args._D, TCenter, args._D); } return diff; } template <typename T> inline float KmeansAssign(const Dataset<T>& data, std::vector<SizeType>& indices, const SizeType first, const SizeType last, KmeansArgs<T>& args, const bool updateCenters, float lambda) { float currDist = 0; SizeType subsize = (last - first - 1) / args._T + 1; #pragma omp parallel for num_threads(args._T) shared(data, indices) reduction(+:currDist) for (int tid = 0; tid < args._T; tid++) { SizeType istart = first + tid * subsize; SizeType iend = min(first + (tid + 1) * subsize, last); SizeType *inewCounts = args.newCounts + tid * args._K; float *inewCenters = args.newCenters + tid * args._K * args._D; SizeType * iclusterIdx = args.clusterIdx + tid * args._K; float * iclusterDist = args.clusterDist + tid * args._K; float * iweightedCounts = args.newWeightedCounts + tid * args._K; float idist = 0; for (SizeType i = istart; i < iend; i++) { int clusterid = 0; float smallestDist = MaxDist; for (int k = 0; k < args._DK; k++) { float dist = args.fComputeDistance(data[indices[i]], args.centers + k*args._D, args._D) + lambda*args.counts[k]; if (dist > -MaxDist && dist < smallestDist) { clusterid = k; smallestDist = dist; } } args.label[i] = clusterid; inewCounts[clusterid]++; iweightedCounts[clusterid] += smallestDist; idist += smallestDist; if (updateCenters) { const T* v = (const T*)data[indices[i]]; float* center = inewCenters + clusterid*args._D; for (DimensionType j = 0; j < args._D; j++) center[j] += v[j]; if (smallestDist > iclusterDist[clusterid]) { iclusterDist[clusterid] = smallestDist; iclusterIdx[clusterid] = indices[i]; } } else { if (smallestDist <= iclusterDist[clusterid]) { iclusterDist[clusterid] = smallestDist; 
iclusterIdx[clusterid] = indices[i]; } } } currDist += idist; } for (int i = 1; i < args._T; i++) { for (int k = 0; k < args._DK; k++) { args.newCounts[k] += args.newCounts[i * args._K + k]; args.newWeightedCounts[k] += args.newWeightedCounts[i * args._K + k]; } } if (updateCenters) { for (int i = 1; i < args._T; i++) { float* currCenter = args.newCenters + i*args._K*args._D; for (size_t j = 0; j < ((size_t)args._DK) * args._D; j++) args.newCenters[j] += currCenter[j]; for (int k = 0; k < args._DK; k++) { if (args.clusterIdx[i*args._K + k] != -1 && args.clusterDist[i*args._K + k] > args.clusterDist[k]) { args.clusterDist[k] = args.clusterDist[i*args._K + k]; args.clusterIdx[k] = args.clusterIdx[i*args._K + k]; } } } } else { for (int i = 1; i < args._T; i++) { for (int k = 0; k < args._DK; k++) { if (args.clusterIdx[i*args._K + k] != -1 && args.clusterDist[i*args._K + k] <= args.clusterDist[k]) { args.clusterDist[k] = args.clusterDist[i*args._K + k]; args.clusterIdx[k] = args.clusterIdx[i*args._K + k]; } } } } return currDist; } template <typename T> inline float InitCenters(const Dataset<T>& data, std::vector<SizeType>& indices, const SizeType first, const SizeType last, KmeansArgs<T>& args, int samples, int tryIters) { SizeType batchEnd = min(first + samples, last); float lambda, currDist, minClusterDist = MaxDist; for (int numKmeans = 0; numKmeans < tryIters; numKmeans++) { for (int k = 0; k < args._DK; k++) { SizeType randid = COMMON::Utils::rand(last, first); std::memcpy(args.centers + k*args._D, data[indices[randid]], sizeof(T)*args._D); } args.ClearCounts(); args.ClearDists(MaxDist); currDist = KmeansAssign(data, indices, first, batchEnd, args, false, 0); if (currDist < minClusterDist) { minClusterDist = currDist; memcpy(args.newTCenters, args.centers, sizeof(T)*args._K*args._D); memcpy(args.counts, args.newCounts, sizeof(SizeType) * args._K); SizeType maxCluster = 0; for (int k = 1; k < args._DK; k++) if (args.counts[k] > args.counts[maxCluster]) maxCluster 
= k; float avgDist = args.newWeightedCounts[maxCluster] / args.counts[maxCluster]; lambda = (avgDist - args.clusterDist[maxCluster]) / args.counts[maxCluster]; if (lambda < 0) lambda = 0; } } return lambda; } template <typename T> float TryClustering(const Dataset<T>& data, std::vector<SizeType>& indices, const SizeType first, const SizeType last, KmeansArgs<T>& args, int samples = 1000, float lambdaFactor = 100.0f, bool debug = false, IAbortOperation* abort = nullptr) { float adjustedLambda = InitCenters(data, indices, first, last, args, samples, 3); if (abort && abort->ShouldAbort()) return 0; SizeType batchEnd = min(first + samples, last); float currDiff, currDist, minClusterDist = MaxDist; int noImprovement = 0; float originalLambda = COMMON::Utils::GetBase<T>() * COMMON::Utils::GetBase<T>() / lambdaFactor / (batchEnd - first); for (int iter = 0; iter < 100; iter++) { std::memcpy(args.centers, args.newTCenters, sizeof(T)*args._K*args._D); std::random_shuffle(indices.begin() + first, indices.begin() + last); args.ClearCenters(); args.ClearCounts(); args.ClearDists(-MaxDist); currDist = KmeansAssign(data, indices, first, batchEnd, args, true, min(adjustedLambda, originalLambda)); std::memcpy(args.counts, args.newCounts, sizeof(SizeType) * args._K); if (currDist < minClusterDist) { noImprovement = 0; minClusterDist = currDist; } else { noImprovement++; } currDiff = RefineCenters(data, args); //if (debug) LOG(Helper::LogLevel::LL_Info, "iter %d dist:%f diff:%f\n", iter, currDist, currDiff); if (abort && abort->ShouldAbort()) return 0; if (currDiff < 1e-3 || noImprovement >= 5) break; } args.ClearCounts(); args.ClearDists(MaxDist); currDist = KmeansAssign(data, indices, first, last, args, false, 0); std::memcpy(args.counts, args.newCounts, sizeof(SizeType) * args._K); SizeType maxCount = 0, minCount = (std::numeric_limits<SizeType>::max)(), availableClusters = 0; float CountStd = 0.0, CountAvg = (last - first) * 1.0f / args._DK; for (int i = 0; i < args._DK; i++) { 
if (args.counts[i] > maxCount) maxCount = args.counts[i]; if (args.counts[i] < minCount) minCount = args.counts[i]; CountStd += (args.counts[i] - CountAvg) * (args.counts[i] - CountAvg); if (args.counts[i] > 0) availableClusters++; } CountStd = sqrt(CountStd / args._DK) / CountAvg; if (debug) LOG(Helper::LogLevel::LL_Info, "Lambda:min(%g,%g) Max:%d Min:%d Avg:%f Std/Avg:%f Dist:%f NonZero/Total:%d/%d\n", originalLambda, adjustedLambda, maxCount, minCount, CountAvg, CountStd, currDist, availableClusters, args._DK); return CountStd; } template <typename T> float DynamicFactorSelect(const Dataset<T> & data, std::vector<SizeType> & indices, const SizeType first, const SizeType last, KmeansArgs<T> & args, int samples = 1000) { float bestLambdaFactor = 100.0f, bestCountStd = (std::numeric_limits<float>::max)(); for (float lambdaFactor = 0.001f; lambdaFactor <= 1000.0f + 1e-3; lambdaFactor *= 10) { float CountStd = TryClustering(data, indices, first, last, args, samples, lambdaFactor, true); if (CountStd < bestCountStd) { bestLambdaFactor = lambdaFactor; bestCountStd = CountStd; } } /* std::vector<float> tries(16, 0); for (int i = 0; i < 8; i++) { tries[i] = bestLambdaFactor * (i + 2) / 10; tries[8 + i] = bestLambdaFactor * (i + 2); } for (float lambdaFactor : tries) { float CountStd = TryClustering(data, indices, first, last, args, samples, lambdaFactor, true); if (CountStd < bestCountStd) { bestLambdaFactor = lambdaFactor; bestCountStd = CountStd; } } */ LOG(Helper::LogLevel::LL_Info, "Best Lambda Factor:%f\n", bestLambdaFactor); return bestLambdaFactor; } template <typename T> int KmeansClustering(const Dataset<T>& data, std::vector<SizeType>& indices, const SizeType first, const SizeType last, KmeansArgs<T>& args, int samples = 1000, float lambdaFactor = 100.0f, bool debug = false, IAbortOperation* abort = nullptr) { TryClustering(data, indices, first, last, args, samples, lambdaFactor, debug, abort); if (abort && abort->ShouldAbort()) return 1; int numClusters = 0; 
for (int i = 0; i < args._K; i++) if (args.counts[i] > 0) numClusters++; if (numClusters <= 1) return numClusters; args.Shuffle(indices, first, last); return numClusters; } class BKTree { public: BKTree(): m_iTreeNumber(1), m_iBKTKmeansK(32), m_iBKTLeafSize(8), m_iSamples(1000), m_fBalanceFactor(-1.0f), m_lock(new std::shared_timed_mutex) {} BKTree(const BKTree& other): m_iTreeNumber(other.m_iTreeNumber), m_iBKTKmeansK(other.m_iBKTKmeansK), m_iBKTLeafSize(other.m_iBKTLeafSize), m_iSamples(other.m_iSamples), m_fBalanceFactor(other.m_fBalanceFactor), m_lock(new std::shared_timed_mutex) {} ~BKTree() {} inline const BKTNode& operator[](SizeType index) const { return m_pTreeRoots[index]; } inline BKTNode& operator[](SizeType index) { return m_pTreeRoots[index]; } inline SizeType size() const { return (SizeType)m_pTreeRoots.size(); } inline SizeType sizePerTree() const { std::shared_lock<std::shared_timed_mutex> lock(*m_lock); return (SizeType)m_pTreeRoots.size() - m_pTreeStart.back(); } inline const std::unordered_map<SizeType, SizeType>& GetSampleMap() const { return m_pSampleCenterMap; } template <typename T> void Rebuild(const Dataset<T>& data, DistCalcMethod distMethod, IAbortOperation* abort) { BKTree newTrees(*this); newTrees.BuildTrees<T>(data, distMethod, 1, nullptr, nullptr, false, abort); std::unique_lock<std::shared_timed_mutex> lock(*m_lock); m_pTreeRoots.swap(newTrees.m_pTreeRoots); m_pTreeStart.swap(newTrees.m_pTreeStart); m_pSampleCenterMap.swap(newTrees.m_pSampleCenterMap); } template <typename T> void BuildTrees(const Dataset<T>& data, DistCalcMethod distMethod, int numOfThreads, std::vector<SizeType>* indices = nullptr, std::vector<SizeType>* reverseIndices = nullptr, bool dynamicK = false, IAbortOperation* abort = nullptr) { struct BKTStackItem { SizeType index, first, last; bool debug; BKTStackItem(SizeType index_, SizeType first_, SizeType last_, bool debug_ = false) : index(index_), first(first_), last(last_), debug(debug_) {} }; 
std::stack<BKTStackItem> ss; std::vector<SizeType> localindices; if (indices == nullptr) { localindices.resize(data.R()); for (SizeType i = 0; i < localindices.size(); i++) localindices[i] = i; } else { localindices.assign(indices->begin(), indices->end()); } KmeansArgs<T> args(m_iBKTKmeansK, data.C(), (SizeType)localindices.size(), numOfThreads, distMethod); if (m_fBalanceFactor < 0) m_fBalanceFactor = DynamicFactorSelect(data, localindices, 0, (SizeType)localindices.size(), args, m_iSamples); m_pSampleCenterMap.clear(); for (char i = 0; i < m_iTreeNumber; i++) { std::random_shuffle(localindices.begin(), localindices.end()); m_pTreeStart.push_back((SizeType)m_pTreeRoots.size()); m_pTreeRoots.emplace_back((SizeType)localindices.size()); LOG(Helper::LogLevel::LL_Info, "Start to build BKTree %d\n", i + 1); ss.push(BKTStackItem(m_pTreeStart[i], 0, (SizeType)localindices.size(), true)); while (!ss.empty()) { if (abort && abort->ShouldAbort()) return; BKTStackItem item = ss.top(); ss.pop(); SizeType newBKTid = (SizeType)m_pTreeRoots.size(); m_pTreeRoots[item.index].childStart = newBKTid; if (item.last - item.first <= m_iBKTLeafSize) { for (SizeType j = item.first; j < item.last; j++) { SizeType cid = (reverseIndices == nullptr)? localindices[j]: reverseIndices->at(localindices[j]); m_pTreeRoots.emplace_back(cid); } } else { // clustering the data into BKTKmeansK clusters if (dynamicK) { args._DK = std::min<int>((item.last - item.first) / m_iBKTLeafSize + 1, m_iBKTKmeansK); args._DK = std::max<int>(args._DK, 2); } int numClusters = KmeansClustering(data, localindices, item.first, item.last, args, m_iSamples, m_fBalanceFactor, item.debug, abort); if (numClusters <= 1) { SizeType end = min(item.last + 1, (SizeType)localindices.size()); std::sort(localindices.begin() + item.first, localindices.begin() + end); m_pTreeRoots[item.index].centerid = (reverseIndices == nullptr) ? 
localindices[item.first] : reverseIndices->at(localindices[item.first]); m_pTreeRoots[item.index].childStart = -m_pTreeRoots[item.index].childStart; for (SizeType j = item.first + 1; j < end; j++) { SizeType cid = (reverseIndices == nullptr) ? localindices[j] : reverseIndices->at(localindices[j]); m_pTreeRoots.emplace_back(cid); m_pSampleCenterMap[cid] = m_pTreeRoots[item.index].centerid; } m_pSampleCenterMap[-1 - m_pTreeRoots[item.index].centerid] = item.index; } else { SizeType maxCount = 0; for (int k = 0; k < m_iBKTKmeansK; k++) if (args.counts[k] > maxCount) maxCount = args.counts[k]; for (int k = 0; k < m_iBKTKmeansK; k++) { if (args.counts[k] == 0) continue; SizeType cid = (reverseIndices == nullptr) ? localindices[item.first + args.counts[k] - 1] : reverseIndices->at(localindices[item.first + args.counts[k] - 1]); m_pTreeRoots.emplace_back(cid); if (args.counts[k] > 1) ss.push(BKTStackItem(newBKTid++, item.first, item.first + args.counts[k] - 1, item.debug && (args.counts[k] == maxCount))); item.first += args.counts[k]; } } } m_pTreeRoots[item.index].childEnd = (SizeType)m_pTreeRoots.size(); } m_pTreeRoots.emplace_back(-1); LOG(Helper::LogLevel::LL_Info, "%d BKTree built, %zu %zu\n", i + 1, m_pTreeRoots.size() - m_pTreeStart[i], localindices.size()); } } inline std::uint64_t BufferSize() const { return sizeof(int) + sizeof(SizeType) * m_iTreeNumber + sizeof(SizeType) + sizeof(BKTNode) * m_pTreeRoots.size(); } ErrorCode SaveTrees(std::shared_ptr<Helper::DiskPriorityIO> p_out) const { std::shared_lock<std::shared_timed_mutex> lock(*m_lock); IOBINARY(p_out, WriteBinary, sizeof(m_iTreeNumber), (char*)&m_iTreeNumber); IOBINARY(p_out, WriteBinary, sizeof(SizeType) * m_iTreeNumber, (char*)m_pTreeStart.data()); SizeType treeNodeSize = (SizeType)m_pTreeRoots.size(); IOBINARY(p_out, WriteBinary, sizeof(treeNodeSize), (char*)&treeNodeSize); IOBINARY(p_out, WriteBinary, sizeof(BKTNode) * treeNodeSize, (char*)m_pTreeRoots.data()); LOG(Helper::LogLevel::LL_Info, "Save 
BKT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize); return ErrorCode::Success; } ErrorCode SaveTrees(std::string sTreeFileName) const { LOG(Helper::LogLevel::LL_Info, "Save BKT to %s\n", sTreeFileName.c_str()); auto ptr = f_createIO(); if (ptr == nullptr || !ptr->Initialize(sTreeFileName.c_str(), std::ios::binary | std::ios::out)) return ErrorCode::FailedCreateFile; return SaveTrees(ptr); } ErrorCode LoadTrees(char* pBKTMemFile) { m_iTreeNumber = *((int*)pBKTMemFile); pBKTMemFile += sizeof(int); m_pTreeStart.resize(m_iTreeNumber); memcpy(m_pTreeStart.data(), pBKTMemFile, sizeof(SizeType) * m_iTreeNumber); pBKTMemFile += sizeof(SizeType)*m_iTreeNumber; SizeType treeNodeSize = *((SizeType*)pBKTMemFile); pBKTMemFile += sizeof(SizeType); m_pTreeRoots.resize(treeNodeSize); memcpy(m_pTreeRoots.data(), pBKTMemFile, sizeof(BKTNode) * treeNodeSize); if (m_pTreeRoots.size() > 0 && m_pTreeRoots.back().centerid != -1) m_pTreeRoots.emplace_back(-1); LOG(Helper::LogLevel::LL_Info, "Load BKT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize); return ErrorCode::Success; } ErrorCode LoadTrees(std::shared_ptr<Helper::DiskPriorityIO> p_input) { IOBINARY(p_input, ReadBinary, sizeof(m_iTreeNumber), (char*)&m_iTreeNumber); m_pTreeStart.resize(m_iTreeNumber); IOBINARY(p_input, ReadBinary, sizeof(SizeType) * m_iTreeNumber, (char*)m_pTreeStart.data()); SizeType treeNodeSize; IOBINARY(p_input, ReadBinary, sizeof(treeNodeSize), (char*)&treeNodeSize); m_pTreeRoots.resize(treeNodeSize); IOBINARY(p_input, ReadBinary, sizeof(BKTNode) * treeNodeSize, (char*)m_pTreeRoots.data()); if (m_pTreeRoots.size() > 0 && m_pTreeRoots.back().centerid != -1) m_pTreeRoots.emplace_back(-1); LOG(Helper::LogLevel::LL_Info, "Load BKT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize); return ErrorCode::Success; } ErrorCode LoadTrees(std::string sTreeFileName) { LOG(Helper::LogLevel::LL_Info, "Load BKT From %s\n", sTreeFileName.c_str()); auto ptr = f_createIO(); if (ptr == nullptr || 
!ptr->Initialize(sTreeFileName.c_str(), std::ios::binary | std::ios::in)) return ErrorCode::FailedOpenFile; return LoadTrees(ptr); } template <typename T> void InitSearchTrees(const Dataset<T>& data, float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length), COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space) const { for (char i = 0; i < m_iTreeNumber; i++) { const BKTNode& node = m_pTreeRoots[m_pTreeStart[i]]; if (node.childStart < 0) { p_space.m_SPTQueue.insert(NodeDistPair(m_pTreeStart[i], fComputeDistance(p_query.GetQuantizedTarget(), data[node.centerid], data.C()))); } else { for (SizeType begin = node.childStart; begin < node.childEnd; begin++) { SizeType index = m_pTreeRoots[begin].centerid; p_space.m_SPTQueue.insert(NodeDistPair(begin, fComputeDistance(p_query.GetQuantizedTarget(), data[index], data.C()))); } } } } template <typename T> void SearchTrees(const Dataset<T>& data, float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length), COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space, const int p_limits) const { while (!p_space.m_SPTQueue.empty()) { NodeDistPair bcell = p_space.m_SPTQueue.pop(); const BKTNode& tnode = m_pTreeRoots[bcell.node]; if (tnode.childStart < 0) { if (!p_space.CheckAndSet(tnode.centerid)) { p_space.m_iNumberOfCheckedLeaves++; p_space.m_NGQueue.insert(NodeDistPair(tnode.centerid, bcell.distance)); } if (p_space.m_iNumberOfCheckedLeaves >= p_limits) break; } else { if (!p_space.CheckAndSet(tnode.centerid)) { p_space.m_NGQueue.insert(NodeDistPair(tnode.centerid, bcell.distance)); } for (SizeType begin = tnode.childStart; begin < tnode.childEnd; begin++) { SizeType index = m_pTreeRoots[begin].centerid; p_space.m_SPTQueue.insert(NodeDistPair(begin, fComputeDistance(p_query.GetQuantizedTarget(), data[index], data.C()))); } } } } private: std::vector<SizeType> m_pTreeStart; std::vector<BKTNode> m_pTreeRoots; std::unordered_map<SizeType, SizeType> m_pSampleCenterMap; public: 
std::unique_ptr<std::shared_timed_mutex> m_lock; int m_iTreeNumber, m_iBKTKmeansK, m_iBKTLeafSize, m_iSamples; float m_fBalanceFactor; }; } } #endif
dmml.c
/*! @copyright (c) 2017 King Abdullah University of Science and
 *                      Technology (KAUST). All rights reserved.
 *
 * STARS-H is a software package, provided by King Abdullah
 *             University of Science and Technology (KAUST)
 *
 * @file src/backends/mpi/blrm/dmml.c
 * @version 0.3.0
 * @author Aleksandr Mikhalev
 * @date 2017-11-07
 * */

#include "common.h"
#include "starsh.h"
#include "starsh-mpi.h"

int starsh_blrm__dmml_mpi(STARSH_blrm *matrix, int nrhs, double alpha,
        double *A, int lda, double beta, double *B, int ldb)
//! Multiply blr-matrix by dense matrix on MPI nodes.
/*! Performs `C=alpha*A*B+beta*C` with @ref STARSH_blrm `A` and dense matrices
 * `B` and `C`. All the integer types are int, since they are used in BLAS
 * calls.
 *
 * @param[in] matrix: Pointer to @ref STARSH_blrm object.
 * @param[in] nrhs: Number of right hand sides.
 * @param[in] alpha: Scalar multiplier.
 * @param[in] A: Dense matrix, right hand side (valid on MPI rank 0 only).
 * @param[in] lda: Leading dimension of `A`.
 * @param[in] beta: Scalar multiplier.
 * @param[in,out] B: Resulting dense matrix (result is kept on rank 0 only).
 * @param[in] ldb: Leading dimension of `B`.
 * @return Error code @ref STARSH_ERRNO.
 * @ingroup blrm
 * */
{
    STARSH_blrm *M = matrix;
    STARSH_blrf *F = M->format;
    STARSH_problem *P = F->problem;
    STARSH_kernel *kernel = P->kernel;
    STARSH_int nrows = P->shape[0];
    STARSH_int ncols = P->shape[P->ndim-1];
    // Shortcuts to information about clusters
    STARSH_cluster *R = F->row_cluster;
    STARSH_cluster *C = F->col_cluster;
    void *RD = R->data, *CD = C->data;
    // Number of far-field and near-field blocks
    STARSH_int nblocks_far_local = F->nblocks_far_local;
    STARSH_int nblocks_near_local = F->nblocks_near_local;
    STARSH_int lbi;
    char symm = F->symm;
    // Maximal rank over local far-field blocks sizes the per-thread scratch
    int maxrank = 0;
    for(lbi = 0; lbi < nblocks_far_local; lbi++)
        if(maxrank < M->far_rank[lbi])
            maxrank = M->far_rank[lbi];
    STARSH_int maxnb = nrows/F->nbrows;
    int mpi_size, mpi_rank;
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    // Input A is valid on root only, so broadcast it column by column
    for(int i = 0; i < nrhs; i++)
        MPI_Bcast(A+i*lda, ncols, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    double *temp_D, *temp_B;
    int num_threads;
#ifdef OPENMP
    #pragma omp parallel
    #pragma omp master
    num_threads = omp_get_num_threads();
#else
    num_threads = 1;
#endif
    // Per-thread scratch: rank-sized for precomputed low-rank factors,
    // tile-sized when near-field blocks are generated on the fly
    if(M->onfly == 0)
    {
        STARSH_MALLOC(temp_D, num_threads*nrhs*maxrank);
    }
    else
    {
        STARSH_MALLOC(temp_D, num_threads*maxnb*maxnb);
    }
    STARSH_MALLOC(temp_B, num_threads*nrhs*nrows);
    // Zero out every thread's accumulator
    #pragma omp parallel
    {
#ifdef OPENMP
        double *out = temp_B+omp_get_thread_num()*nrhs*nrows;
#else
        double *out = temp_B;
#endif
        for(size_t j = 0; j < nrhs*(size_t)nrows; j++)
            out[j] = 0.;
    }
    // Seed master-thread accumulator of the root node with beta*B.
    // NOTE: temp_B has leading dimension nrows, while B has leading
    // dimension ldb; mixing the two strides (as the previous code did by
    // indexing temp_B with ldb) corrupts results whenever ldb != nrows.
    if(beta != 0. && mpi_rank == 0)
        #pragma omp parallel for schedule(static)
        for(STARSH_int i = 0; i < nrows; i++)
            for(STARSH_int j = 0; j < nrhs; j++)
                temp_B[j*(size_t)nrows+i] = beta*B[j*(size_t)ldb+i];
    int ldout = nrows;
    // Simple cycle over all far-field admissible blocks
    #pragma omp parallel for schedule(dynamic, 1)
    for(lbi = 0; lbi < nblocks_far_local; lbi++)
    {
        STARSH_int bi = F->block_far_local[lbi];
        // Get indexes of corresponding block row and block column
        STARSH_int i = F->block_far[2*bi];
        STARSH_int j = F->block_far[2*bi+1];
        // Get sizes and rank
        int nrows = R->size[i];
        int ncols = C->size[j];
        int rank = M->far_rank[lbi];
        if(rank == 0)
            continue;
        // Get pointers to data buffers
        double *U = M->far_U[lbi]->data, *V = M->far_V[lbi]->data;
#ifdef OPENMP
        double *D = temp_D+omp_get_thread_num()*nrhs*maxrank;
        double *out = temp_B+omp_get_thread_num()*nrhs*ldout;
#else
        double *D = temp_D;
        double *out = temp_B;
#endif
        // Multiply low-rank matrix in U*V^T format by a dense matrix
        cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs,
                ncols, 1.0, V, ncols, A+C->start[j], lda, 0.0, D, rank);
        cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows, nrhs,
                rank, alpha, U, nrows, D, rank, 1.0, out+R->start[i], ldout);
        if(i != j && symm == 'S')
        {
            // Multiply low-rank matrix in V*U^T format by a dense matrix;
            // U and V are simply swapped in case of symmetric block
            cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs,
                    nrows, 1.0, U, nrows, A+R->start[i], lda, 0.0, D, rank);
            cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, ncols,
                    nrhs, rank, alpha, V, ncols, D, rank, 1.0,
                    out+C->start[j], ldout);
        }
    }
    if(M->onfly == 1)
        // Near-field blocks are generated on the fly into scratch buffer
        #pragma omp parallel for schedule(dynamic, 1)
        for(lbi = 0; lbi < nblocks_near_local; lbi++)
        {
            STARSH_int bi = F->block_near_local[lbi];
            // Get indexes and sizes of corresponding block row and column
            STARSH_int i = F->block_near[2*bi];
            STARSH_int j = F->block_near[2*bi+1];
            int nrows = R->size[i];
            int ncols = C->size[j];
#ifdef OPENMP
            double *D = temp_D+omp_get_thread_num()*maxnb*maxnb;
            double *out = temp_B+omp_get_thread_num()*nrhs*ldout;
#else
            double *D = temp_D;
            double *out = temp_B;
#endif
            // Fill temporary buffer with elements of corresponding block
            kernel(nrows, ncols, R->pivot+R->start[i], C->pivot+C->start[j],
                    RD, CD, D, nrows);
            // Multiply 2 dense matrices
            cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,
                    nrhs, ncols, alpha, D, nrows, A+C->start[j], lda, 1.0,
                    out+R->start[i], ldout);
            if(i != j && symm == 'S')
            {
                // Repeat in case of symmetric matrix
                cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, ncols,
                        nrhs, nrows, alpha, D, nrows, A+R->start[i], lda,
                        1.0, out+C->start[j], ldout);
            }
        }
    else
        // Near-field blocks are stored in dense format
        #pragma omp parallel for schedule(dynamic, 1)
        for(lbi = 0; lbi < nblocks_near_local; lbi++)
        {
            STARSH_int bi = F->block_near_local[lbi];
            // Get indexes and sizes of corresponding block row and column
            STARSH_int i = F->block_near[2*bi];
            STARSH_int j = F->block_near[2*bi+1];
            int nrows = R->size[i];
            int ncols = C->size[j];
            // Get pointers to data buffers
            double *D = M->near_D[lbi]->data;
#ifdef OPENMP
            double *out = temp_B+omp_get_thread_num()*nrhs*ldout;
#else
            double *out = temp_B;
#endif
            // Multiply 2 dense matrices
            cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,
                    nrhs, ncols, alpha, D, nrows, A+C->start[j], lda, 1.0,
                    out+R->start[i], ldout);
            if(i != j && symm == 'S')
            {
                // Repeat in case of symmetric matrix
                cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, ncols,
                        nrhs, nrows, alpha, D, nrows, A+R->start[i], lda,
                        1.0, out+C->start[j], ldout);
            }
        }
    // Reduce per-thread results into the master thread's buffer
    #pragma omp parallel for schedule(static)
    for(int i = 0; i < ldout; i++)
        for(int j = 0; j < nrhs; j++)
            for(int k = 1; k < num_threads; k++)
                temp_B[j*(size_t)ldout+i] += temp_B[(k*(size_t)nrhs+j)*ldout+i];
    // Result is kept only on the root node
    for(int i = 0; i < nrhs; i++)
        MPI_Reduce(temp_B+i*ldout, B+i*ldb, ldout, MPI_DOUBLE, MPI_SUM, 0,
                MPI_COMM_WORLD);
    free(temp_B);
    free(temp_D);
    return STARSH_SUCCESS;
}

int starsh_blrm__dmml_mpi_tlr(STARSH_blrm *matrix, int nrhs, double alpha,
        double *A, int lda, double beta, double *B, int ldb)
//! Multiply blr-matrix by dense matrix on MPI nodes.
/*! Performs `C=alpha*A*B+beta*C` with @ref STARSH_blrm `A` and dense matrices
 * `B` and `C`. All the integer types are int, since they are used in BLAS
 * calls. Block-wise low-rank matrix `A` is in TLR format. Computations run
 * on a 2D process grid, so the number of MPI processes must be a perfect
 * square.
 *
 * @param[in] matrix: Pointer to @ref STARSH_blrm object.
 * @param[in] nrhs: Number of right hand sides.
 * @param[in] alpha: Scalar multiplier.
 * @param[in] A: Dense matrix, right hand side (valid on MPI rank 0 only).
 * @param[in] lda: Leading dimension of `A`.
 * @param[in] beta: Scalar multiplier.
 * @param[in,out] B: Resulting dense matrix (result is kept on rank 0 only).
 * @param[in] ldb: Leading dimension of `B`.
 * @return Error code @ref STARSH_ERRNO.
 * @ingroup blrm
 * */
{
    STARSH_blrm *M = matrix;
    STARSH_blrf *F = M->format;
    STARSH_problem *P = F->problem;
    STARSH_kernel *kernel = P->kernel;
    STARSH_int nrows = P->shape[0];
    STARSH_int ncols = P->shape[P->ndim-1];
    // Shortcuts to information about clusters
    STARSH_cluster *R = F->row_cluster;
    STARSH_cluster *C = F->col_cluster;
    void *RD = R->data, *CD = C->data;
    // Number of far-field and near-field blocks
    STARSH_int nblocks_far_local = F->nblocks_far_local;
    STARSH_int nblocks_near_local = F->nblocks_near_local;
    STARSH_int lbi;
    char symm = F->symm;
    // Maximal rank over local far-field blocks sizes the per-thread scratch
    int maxrank = 0;
    for(lbi = 0; lbi < nblocks_far_local; lbi++)
        if(maxrank < M->far_rank[lbi])
            maxrank = M->far_rank[lbi];
    STARSH_int maxnb = nrows/F->nbrows;
    int mpi_rank, mpi_size;
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    // 2D process grid of grid_nx by grid_ny processes
    int grid_nx = sqrt(mpi_size), grid_ny = grid_nx, grid_x, grid_y;
    if(grid_nx*grid_ny != mpi_size)
        STARSH_ERROR("MPI SIZE MUST BE SQUARE OF INTEGER!");
    grid_ny = mpi_size / grid_nx;
    grid_x = mpi_rank / grid_nx;
    grid_y = mpi_rank % grid_nx;
    MPI_Group mpi_leadingx_group, mpi_leadingy_group, mpi_world_group;
    MPI_Comm mpi_splitx, mpi_splity, mpi_leadingx, mpi_leadingy;
    MPI_Comm_group(MPI_COMM_WORLD, &mpi_world_group);
    // Sized by the larger grid dimension to stay in bounds even if the
    // square-grid check above does not abort execution
    int group_rank[grid_nx > grid_ny ? grid_nx : grid_ny];
    for(int i = 0; i < grid_ny; i++)
        group_rank[i] = i;
    MPI_Group_incl(mpi_world_group, grid_ny, group_rank, &mpi_leadingy_group);
    MPI_Comm_create_group(MPI_COMM_WORLD, mpi_leadingy_group, 0,
            &mpi_leadingy);
    for(int i = 0; i < grid_nx; i++)
        group_rank[i] = i*grid_ny;
    MPI_Group_incl(mpi_world_group, grid_nx, group_rank, &mpi_leadingx_group);
    MPI_Comm_create_group(MPI_COMM_WORLD, mpi_leadingx_group, 0,
            &mpi_leadingx);
    // Groups are not needed once the communicators exist; free them to
    // avoid leaking MPI resources (previous code never freed them)
    MPI_Group_free(&mpi_leadingx_group);
    MPI_Group_free(&mpi_leadingy_group);
    MPI_Group_free(&mpi_world_group);
    MPI_Comm_split(MPI_COMM_WORLD, grid_x, mpi_rank, &mpi_splitx);
    MPI_Comm_split(MPI_COMM_WORLD, grid_y, mpi_rank, &mpi_splity);
    // Ranks/sizes of the derived communicators (kept for debugging)
    int mpi_leadingx_rank=-1, mpi_leadingx_size=-1;
    int mpi_leadingy_rank=-1, mpi_leadingy_size=-1;
    int mpi_splitx_rank, mpi_splitx_size;
    int mpi_splity_rank, mpi_splity_size;
    if(mpi_leadingx != MPI_COMM_NULL)
    {
        MPI_Comm_rank(mpi_leadingx, &mpi_leadingx_rank);
        MPI_Comm_size(mpi_leadingx, &mpi_leadingx_size);
    }
    if(mpi_leadingy != MPI_COMM_NULL)
    {
        MPI_Comm_rank(mpi_leadingy, &mpi_leadingy_rank);
        MPI_Comm_size(mpi_leadingy, &mpi_leadingy_size);
    }
    MPI_Comm_rank(mpi_splitx, &mpi_splitx_rank);
    MPI_Comm_size(mpi_splitx, &mpi_splitx_size);
    MPI_Comm_rank(mpi_splity, &mpi_splity_rank);
    MPI_Comm_size(mpi_splity, &mpi_splity_size);
    // Distribute block columns of A over the leading row of the grid,
    // then broadcast down each grid column
    int grid_block_size = maxnb*grid_nx;
    int ld_temp_A = (F->nbcols+grid_nx-1-grid_x)/grid_nx*maxnb;
    double *temp_A;
    STARSH_MALLOC(temp_A, nrhs*(size_t)ld_temp_A);
    if(mpi_leadingx != MPI_COMM_NULL)
    {
        for(STARSH_int i = 0; i < F->nbcols/grid_nx; i++)
        {
            double *src = A+i*grid_block_size;
            double *recv = temp_A+i*maxnb;
            for(int j = 0; j < nrhs; j++)
            {
                MPI_Scatter(src+j*(size_t)lda, maxnb, MPI_DOUBLE,
                        recv+j*(size_t)ld_temp_A, maxnb, MPI_DOUBLE, 0,
                        mpi_leadingx);
            }
        }
        // Remaining block columns (when nbcols is not a multiple of grid_nx)
        STARSH_int i = F->nbcols/grid_nx;
        int remain = F->nbcols-i*grid_nx;
        if(remain > 0)
        {
            double *src = A+i*(size_t)grid_block_size;
            double *recv = temp_A+i*(size_t)maxnb;
            if(mpi_rank == 0)
            {
                int sendcounts[grid_nx], displs[grid_nx];
                for(int j = 0; j < remain; j++)
                    sendcounts[j] = maxnb;
                for(int j = remain; j < grid_nx; j++)
                    sendcounts[j] = 0;
                displs[0] = 0;
                for(int j = 1; j < grid_nx; j++)
                    displs[j] = displs[j-1]+sendcounts[j-1];
                for(int j = 0; j < nrhs; j++)
                    MPI_Scatterv(src+j*(size_t)lda, sendcounts, displs,
                            MPI_DOUBLE, recv+j*(size_t)ld_temp_A, maxnb,
                            MPI_DOUBLE, 0, mpi_leadingx);
            }
            else
            {
                int recvcount = 0;
                if(grid_x < remain)
                    recvcount = maxnb;
                for(int j = 0; j < nrhs; j++)
                    MPI_Scatterv(NULL, NULL, NULL, MPI_DOUBLE,
                            recv+j*(size_t)ld_temp_A, recvcount, MPI_DOUBLE,
                            0, mpi_leadingx);
            }
        }
    }
    MPI_Bcast(temp_A, nrhs*(size_t)ld_temp_A, MPI_DOUBLE, 0, mpi_splitx);
    double *temp_D, *temp_B;
    int num_threads;
#ifdef OPENMP
    #pragma omp parallel
    #pragma omp master
    num_threads = omp_get_num_threads();
#else
    num_threads = 1;
#endif
    // Per-thread scratch: rank-sized for precomputed low-rank factors,
    // tile-sized when near-field blocks are generated on the fly
    if(M->onfly == 0)
    {
        STARSH_MALLOC(temp_D, num_threads*nrhs*maxrank);
    }
    else
    {
        STARSH_MALLOC(temp_D, num_threads*maxnb*maxnb);
    }
    int ldout = (F->nbrows+grid_ny-1-grid_y)/grid_ny*maxnb;
    STARSH_MALLOC(temp_B, num_threads*(size_t)nrhs*(size_t)ldout);
    // Zero out every thread's accumulator
    #pragma omp parallel
    {
#ifdef OPENMP
        double *out = temp_B+omp_get_thread_num()*nrhs*ldout;
#else
        double *out = temp_B;
#endif
        for(size_t j = 0; j < nrhs*(size_t)ldout; j++)
            out[j] = 0.;
    }
    if(beta != 0. && mpi_leadingy != MPI_COMM_NULL)
    {
        // Distribute block rows of B over the leading column of the grid
        for(STARSH_int i = 0; i < F->nbrows/grid_ny; i++)
        {
            double *src = B+i*maxnb*grid_ny;
            double *recv = temp_B+i*maxnb;
            for(int j = 0; j < nrhs; j++)
                MPI_Scatter(src+j*(size_t)ldb, maxnb, MPI_DOUBLE,
                        recv+j*(size_t)ldout, maxnb, MPI_DOUBLE, 0,
                        mpi_leadingy);
        }
        // Scale the local part of B by beta.
        // NOTE: temp_B has leading dimension ldout, not ldb (indexing with
        // ldb here was a bug that corrupted results whenever ldb != ldout).
        #pragma omp parallel for schedule(static)
        for(int i = 0; i < ldout; i++)
            for(int j = 0; j < nrhs; j++)
                temp_B[j*(size_t)ldout+i] *= beta;
    }
    // Simple cycle over all far-field admissible blocks
    #pragma omp parallel for schedule(dynamic, 1)
    for(lbi = 0; lbi < nblocks_far_local; lbi++)
    {
        STARSH_int bi = F->block_far_local[lbi];
        // Get indexes of corresponding block row and block column
        STARSH_int i = F->block_far[2*bi];
        STARSH_int j = F->block_far[2*bi+1];
        // Get sizes and rank
        int nrows = R->size[i];
        int ncols = C->size[j];
        int rank = M->far_rank[lbi];
        if(rank == 0)
            continue;
        // Get pointers to data buffers
        double *U = M->far_U[lbi]->data, *V = M->far_V[lbi]->data;
#ifdef OPENMP
        double *D = temp_D+omp_get_thread_num()*(size_t)nrhs*(size_t)maxrank;
        double *out = temp_B+omp_get_thread_num()*(size_t)nrhs*(size_t)ldout;
#else
        double *D = temp_D;
        double *out = temp_B;
#endif
        // Multiply low-rank matrix in U*V^T format by a dense matrix,
        // reading the locally stored slice of A
        cblas_dgemm(CblasColMajor, CblasTrans, CblasNoTrans, rank, nrhs,
                ncols, 1.0, V, ncols, temp_A+(j/grid_nx)*maxnb, ld_temp_A,
                0.0, D, rank);
        cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows, nrhs,
                rank, alpha, U, nrows, D, rank, 1.0, out+i/grid_ny*maxnb,
                ldout);
    }
    if(M->onfly == 1)
        // Near-field blocks are generated on the fly into scratch buffer
        #pragma omp parallel for schedule(dynamic, 1)
        for(lbi = 0; lbi < nblocks_near_local; lbi++)
        {
            STARSH_int bi = F->block_near_local[lbi];
            // Get indexes and sizes of corresponding block row and column
            STARSH_int i = F->block_near[2*bi];
            STARSH_int j = F->block_near[2*bi+1];
            int nrows = R->size[i];
            int ncols = C->size[j];
#ifdef OPENMP
            double *D = temp_D+omp_get_thread_num()*(size_t)maxnb*
                (size_t)maxnb;
            double *out = temp_B+omp_get_thread_num()*(size_t)nrhs*
                (size_t)ldout;
#else
            double *D = temp_D;
            double *out = temp_B;
#endif
            // Fill temporary buffer with elements of corresponding block
            kernel(nrows, ncols, R->pivot+R->start[i], C->pivot+C->start[j],
                    RD, CD, D, nrows);
            // Multiply 2 dense matrices
            cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,
                    nrhs, ncols, alpha, D, nrows,
                    temp_A+(j/grid_nx)*(size_t)maxnb, ld_temp_A, 1.0,
                    out+i/grid_ny*(size_t)maxnb, ldout);
        }
    else
        // Near-field blocks are stored in dense format
        #pragma omp parallel for schedule(dynamic, 1)
        for(lbi = 0; lbi < nblocks_near_local; lbi++)
        {
            STARSH_int bi = F->block_near_local[lbi];
            // Get indexes and sizes of corresponding block row and column
            STARSH_int i = F->block_near[2*bi];
            STARSH_int j = F->block_near[2*bi+1];
            int nrows = R->size[i];
            int ncols = C->size[j];
            // Get pointers to data buffers
            double *D = M->near_D[lbi]->data;
#ifdef OPENMP
            double *out = temp_B+omp_get_thread_num()*(size_t)nrhs*
                (size_t)ldout;
#else
            double *out = temp_B;
#endif
            // Multiply 2 dense matrices
            cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, nrows,
                    nrhs, ncols, alpha, D, nrows,
                    temp_A+(j/grid_nx)*(size_t)maxnb, ld_temp_A, 1.0,
                    out+i/grid_ny*(size_t)maxnb, ldout);
        }
    // Reduce per-thread results into the master thread's buffer
    #pragma omp parallel for schedule(static)
    for(int i = 0; i < ldout; i++)
        for(int j = 0; j < nrhs; j++)
            for(int k = 1; k < num_threads; k++)
                temp_B[j*(size_t)ldout+i] += temp_B[(k*(size_t)nrhs+j)*ldout+i];
    MPI_Barrier(MPI_COMM_WORLD);
    // Sum partial results along grid columns onto the leading column
    double *final_B = NULL;
    if(mpi_leadingy != MPI_COMM_NULL)
    {
        STARSH_MALLOC(final_B, nrhs*(size_t)ldout);
        #pragma omp parallel for schedule(static)
        for(size_t i = 0; i < nrhs*(size_t)ldout; i++)
            final_B[i] = 0.0;
    }
    MPI_Reduce(temp_B, final_B, nrhs*(size_t)ldout, MPI_DOUBLE, MPI_SUM, 0,
            mpi_splity);
    // Gather the distributed result back into B on the root node
    if(mpi_leadingy != MPI_COMM_NULL)
    {
        for(STARSH_int i = 0; i < F->nbrows/grid_ny; i++)
        {
            double *src = final_B+i*(size_t)maxnb;
            double *recv = B+i*(size_t)maxnb*(size_t)grid_ny;
            for(int j = 0; j < nrhs; j++)
                MPI_Gather(src+j*(size_t)ldout, maxnb, MPI_DOUBLE,
                        recv+j*(size_t)ldb, maxnb, MPI_DOUBLE, 0,
                        mpi_leadingy);
        }
        // Remaining block rows (when nbrows is not a multiple of grid_ny)
        STARSH_int i = F->nbrows/grid_ny;
        int remain = F->nbrows-i*grid_ny;
        if(remain > 0)
        {
            double *src = final_B+i*(size_t)maxnb;
            double *recv = B+i*(size_t)maxnb*(size_t)grid_ny;
            if(mpi_rank == 0)
            {
                int recvcounts[grid_ny], displs[grid_ny];
                for(int j = 0; j < remain; j++)
                    recvcounts[j] = maxnb;
                for(int j = remain; j < grid_ny; j++)
                    recvcounts[j] = 0;
                displs[0] = 0;
                for(int j = 1; j < grid_ny; j++)
                    displs[j] = displs[j-1]+recvcounts[j-1];
                for(int j = 0; j < nrhs; j++)
                    MPI_Gatherv(src+j*(size_t)ldout, maxnb, MPI_DOUBLE,
                            recv+j*(size_t)ldb, recvcounts, displs,
                            MPI_DOUBLE, 0, mpi_leadingy);
            }
            else
            {
                int sendcount = 0;
                if(grid_y < remain)
                    sendcount = maxnb;
                for(int j = 0; j < nrhs; j++)
                    MPI_Gatherv(src+j*(size_t)ldout, sendcount, MPI_DOUBLE,
                            NULL, NULL, NULL, MPI_DOUBLE, 0, mpi_leadingy);
            }
        }
        MPI_Comm_free(&mpi_leadingy);
        free(final_B);
    }
    if(mpi_leadingx != MPI_COMM_NULL)
        MPI_Comm_free(&mpi_leadingx);
    MPI_Comm_free(&mpi_splitx);
    MPI_Comm_free(&mpi_splity);
    free(temp_A);
    free(temp_B);
    free(temp_D);
    return STARSH_SUCCESS;
}
irbuilder_unroll_partial_heuristic.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK-LABEL: define {{.*}}@unroll_partial_heuristic( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8 // CHECK-NEXT: %[[I:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8 // CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4 // CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8 // CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8 // CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8 // CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8 // CHECK-NEXT: store i32 0, i32* %[[I]], align 4 // CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0 // CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4 // CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]]) // CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_PREHEADER]]: // CHECK-NEXT: br label %[[OMP_LOOP_HEADER:.+]] // CHECK-EMPTY: // 
CHECK-NEXT: [[OMP_LOOP_HEADER]]: // CHECK-NEXT: %[[OMP_LOOP_IV:.+]] = phi i32 [ 0, %[[OMP_LOOP_PREHEADER]] ], [ %[[OMP_LOOP_NEXT:.+]], %[[OMP_LOOP_INC:.+]] ] // CHECK-NEXT: br label %[[OMP_LOOP_COND:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_COND]]: // CHECK-NEXT: %[[OMP_LOOP_CMP:.+]] = icmp ult i32 %[[OMP_LOOP_IV]], %[[DOTCOUNT]] // CHECK-NEXT: br i1 %[[OMP_LOOP_CMP]], label %[[OMP_LOOP_BODY:.+]], label %[[OMP_LOOP_EXIT:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_BODY]]: // CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[OMP_LOOP_IV]], %struct.anon.0* %[[AGG_CAPTURED1]]) // CHECK-NEXT: %[[TMP3:.+]] = load float*, float** %[[B_ADDR]], align 8 // CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP4]] to i64 // CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP3]], i64 %[[IDXPROM]] // CHECK-NEXT: %[[TMP5:.+]] = load float, float* %[[ARRAYIDX]], align 4 // CHECK-NEXT: %[[TMP6:.+]] = load float*, float** %[[C_ADDR]], align 8 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP7]] to i64 // CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP6]], i64 %[[IDXPROM2]] // CHECK-NEXT: %[[TMP8:.+]] = load float, float* %[[ARRAYIDX3]], align 4 // CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP5]], %[[TMP8]] // CHECK-NEXT: %[[TMP9:.+]] = load float*, float** %[[D_ADDR]], align 8 // CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP10]] to i64 // CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP9]], i64 %[[IDXPROM4]] // CHECK-NEXT: %[[TMP11:.+]] = load float, float* %[[ARRAYIDX5]], align 4 // CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP11]] // CHECK-NEXT: %[[TMP12:.+]] = load float*, float** %[[A_ADDR]], align 8 // CHECK-NEXT: %[[TMP13:.+]] = load i32, i32* %[[I]], align 4 // CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 
%[[TMP13]] to i64 // CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP12]], i64 %[[IDXPROM7]] // CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4 // CHECK-NEXT: br label %[[OMP_LOOP_INC]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_INC]]: // CHECK-NEXT: %[[OMP_LOOP_NEXT]] = add nuw i32 %[[OMP_LOOP_IV]], 1 // CHECK-NEXT: br label %[[OMP_LOOP_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_EXIT]]: // CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[OMP_LOOP_AFTER]]: // CHECK-NEXT: ret void // CHECK-NEXT: } void unroll_partial_heuristic(float *a, float *b, float *c, float *d) { #pragma omp unroll partial for (int i = 0; i < 2; i++) { a[i] = b[i] * c[i] * d[i]; } } #endif // HEADER // CHECK-LABEL: define {{.*}}@__captured_stmt( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8 // CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4 // CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4 // CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4 // CHECK-NEXT: store i32 2, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[CMP:.+]] = 
icmp slt i32 %[[TMP4]], %[[TMP5]] // CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_TRUE]]: // CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4 // CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4 // CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP6]], %[[TMP7]] // CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP8]], 1 // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]] // CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4 // CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]] // CHECK-NEXT: br label %[[COND_END:.+]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_FALSE]]: // CHECK-NEXT: br label %[[COND_END]] // CHECK-EMPTY: // CHECK-NEXT: [[COND_END]]: // CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ] // CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8 // CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LABEL: define {{.*}}@__captured_stmt.1( // CHECK-NEXT: [[ENTRY:.*]]: // CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8 // CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4 // CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8 // CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8 // CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0 // CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4 // CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4 // CHECK-NEXT: %[[MUL:.+]] = mul i32 1, 
%[[TMP3]] // CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]] // CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8 // CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4 // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4} // CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51} // CHECK: ![[META2:[0-9]+]] = // CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]]} // CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"}
cuda.h
#ifndef CXXBLAS_AUXILIARY_CUDA_H
#define CXXBLAS_AUXILIARY_CUDA_H 1

#if defined(HAVE_CUBLAS) || defined(HAVE_CUSOLVER)

#include <string>   // for CudaEnv::getInfo() return type
#include <vector>

#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>

// Iterator adaptors used by StridedRange (see below).
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/iterator/transform_iterator.h>

namespace cxxblas {

// Process-wide registry of CUDA streams and events.  All members are
// static: this class is a namespace-like singleton, never instantiated.
class CudaEnv
{
    public:
        static void
        release();

        // Returns (creating on demand, presumably — implementation not
        // visible here; TODO confirm) the stream registered under streamID.
        static cudaStream_t &
        getStream(int streamID);

        static void
        destroyStream(int streamID);

        // Blocks the host until all work queued on streamID has finished.
        static void
        syncStream(int streamID);

        static void
        eventRecord(int _eventID, int streamID);

        static void
        eventSynchronize(int _eventID);

        // Human-readable description of the CUDA environment.
        static std::string
        getInfo();

    //private:
        static std::vector<cudaStream_t>     streams_;
        static std::vector<cudaEvent_t>      events_;
};

// NOTE(review): these static data members are *defined* in a header.  If
// this header is included from more than one translation unit the program
// violates the one-definition rule (duplicate-symbol link errors).  The
// original author flagged this with "XXX?"; consider moving the definitions
// to a .cu/.cc file or using C++17 `inline` variables — TODO confirm how
// many TUs include this header.
std::vector<cudaStream_t>   CudaEnv::streams_ = {};
std::vector<cudaEvent_t>    CudaEnv::events_  = {};

void
checkStatus(cudaError_t error);

void
destroyStream(int streamID);

// Variadic convenience overload: destroys several streams in one call.
template <typename... Args>
void
destroyStream(int streamID, Args... args);

void
syncStream(int streamID);

// Variadic convenience overload: synchronizes several streams in one call.
template <typename... Args>
void
syncStream(int streamID, Args... args);

#ifdef HAVE_CUBLAS

// Per-thread cuBLAS state: handle, active stream id, and the synchronous
// copy flag.  The data members are declared `threadprivate`, so each OpenMP
// thread owns an independent copy.
class CublasEnv
{
    public:
        static void
        init();

        static void
        release();

        static cublasHandle_t &
        handle();

        // Stream currently attached to this thread's cuBLAS handle.
        static cudaStream_t
        stream();

        static int
        streamID();

        static void
        setStream(int streamID);

        static void
        enableSyncCopy();

        static void
        disableSyncCopy();

        static bool
        isSyncCopyEnabled();

        static void
        syncCopy();

    //private:
        static cublasHandle_t handle_;
#pragma omp threadprivate(handle_)

        static int streamID_;
#pragma omp threadprivate(streamID_)

        static bool syncCopyEnabled_;
#pragma omp threadprivate(syncCopyEnabled_)
};

// NOTE(review): same ODR concern as CudaEnv above — definitions of static
// members living in a header.
cublasHandle_t CublasEnv::handle_          = 0;
int            CublasEnv::streamID_        = -1;
bool           CublasEnv::syncCopyEnabled_ = true;

void
checkStatus(cublasStatus_t status);

#endif // HAVE_CUBLAS

/*! \brief RandomAccessIterator for strided access to array entries.
 *
 * \tparam RandomAccessIterator The iterator type used to encapsulate the
 *         underlying data.
 *
 * \par Overview
 * \p strided_iterator is an iterator which represents a pointer into
 * a strided range entries in a underlying array. This iterator is useful
 * for creating a strided sublist of entries from a larger iterator.
 */
template <typename RandomAccessIterator>
class StridedRange
{
    public:
        /*! \cond */
        typedef typename thrust::iterator_value<RandomAccessIterator>::type        value_type;
        typedef typename thrust::iterator_system<RandomAccessIterator>::type       memory_space;
        typedef typename thrust::iterator_pointer<RandomAccessIterator>::type      pointer;
        typedef typename thrust::iterator_reference<RandomAccessIterator>::type    reference;
        typedef typename thrust::iterator_difference<RandomAccessIterator>::type   difference_type;
        typedef typename thrust::iterator_difference<RandomAccessIterator>::type   size_type;

        // Functor mapping logical index i to physical offset stride*i.
        struct Strider : thrust::unary_function<difference_type,difference_type>
        {
            difference_type stride;

            Strider(difference_type stride)
                : stride(stride) {}

            __host__ __device__
            difference_type operator()(const difference_type& i) const
            {
                return stride * i;
            }
        };

        // counting -> transform(Strider) -> permutation: each dereference of
        // the resulting iterator reads base[stride * i].
        typedef typename thrust::counting_iterator<difference_type>                        CountingIterator;
        typedef typename thrust::transform_iterator<Strider, CountingIterator>             TransformIterator;
        typedef typename thrust::permutation_iterator<RandomAccessIterator,TransformIterator> PermutationIterator;

        // type of the StridedRange iterator
        typedef PermutationIterator iterator;
        /*! \endcond */

        /*! \brief Null constructor initializes this \p strided_iterator's
         *  stride to zero.
         *  NOTE(review): `first`/`last` stay uninitialized and end() divides
         *  by `stride`; calling begin()/end() on a default-constructed
         *  object is therefore not safe — construct via the 3-arg ctor.
         */
        StridedRange(void)
            : stride(0) {}

        /*! \brief This constructor builds a \p StridedRange from a range.
         *  \param first  The beginning of the range.
         *  \param last   The end of the range.
         *  \param stride The stride between consecutive entries in the
         *                iterator.
         */
        StridedRange(RandomAccessIterator first, RandomAccessIterator last,
                     difference_type stride)
            : first(first), last(last), stride(stride) {}

        /*! \brief This method returns an iterator pointing to the beginning
         *  of this strided sequence of entries.
         *  \return iterator to the first selected element
         */
        iterator
        begin(void) const
        {
            return PermutationIterator(first,
                                       TransformIterator(CountingIterator(0),
                                                         Strider(stride)));
        }

        /*! \brief This method returns an iterator pointing to one element
         *  past the last of this strided sequence of entries
         *  (ceil((last-first)/stride) elements after begin()).
         *  \return iterator one past the last selected element
         */
        iterator
        end(void) const
        {
            return begin() + ((last - first) + (stride - 1)) / stride;
        }

        /*! \brief Subscript access to the data contained in this iterator.
         *  \param n The index of the element for which data should be
         *           accessed.
         *  \return Read/write reference to data.
         *
         *  This operator allows for easy, array-style, data access.
         *  Note that data access with this operator is unchecked and
         *  out_of_range lookups are not defined.
         */
        reference
        operator[](size_type n) const
        {
            return *(begin() + n);
        }

    protected:
        RandomAccessIterator first;
        RandomAccessIterator last;
        difference_type      stride;
}; // end StridedRange

} // end namespace cxxblas

#endif // HAVE_CUBLAS || HAVE_CUSOLVER

#endif
rotate.h
/*! @file rotate.h * @brief Rotate the buffers. * @author Markovtsev Vadim <v.markovtsev@samsung.com> * @version 1.0 * * @section Notes * This code partially conforms to <a href="http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml">Google C++ Style Guide</a>. * * @section Copyright * Copyright © 2013 Samsung R&D Institute Russia * * @section License * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. 
*/ #ifndef SRC_TRANSFORMS_ROTATE_H_ #define SRC_TRANSFORMS_ROTATE_H_ #include "src/transforms/common.h" namespace sound_feature_extraction { namespace transforms { template <class T> class Rotate : public UniformFormatOmpAwareTransform<formats::ArrayFormat<T>> { public: TRANSFORM_INTRO("Rotate", "Rotate the buffers, so that all elements with the " "same index get to the same buffer.", Rotate) virtual bool BufferInvariant() const noexcept override { return false; } protected: virtual size_t OnFormatChanged(size_t buffersCount) override final { this->output_format_->SetSize(buffersCount); return this->input_format_->Size(); } virtual void Do(const BuffersBase<T*>& in, BuffersBase<T*>* out) const noexcept override { #ifdef HAVE_OPENMP #pragma omp parallel for num_threads(this->threads_number()) #endif for (size_t i = 0; i < this->input_format_->Size(); i++) { for (size_t j = 0; j < in.Count(); j++) { (*out)[i][j] = in[j][i]; } } } }; } // namespace transforms } // namespace sound_feature_extraction #endif // SRC_TRANSFORMS_ROTATE_H_
window_layer.c
/********************************************************************[libaroma]*
 * Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *______________________________________________________________________________
 *
 * Filename    : window_layer.c
 * Description : multi layer window
 *
 * + This is part of libaroma, an embedded ui toolkit.
 * + 07/04/15 - Author(s): Ahmad Amarullah
 *
 */
#ifndef __libaroma_window_layer_c__
#define __libaroma_window_layer_c__
#include <aroma_internal.h>
#include "ui_internal.h"

/* max touch x dp to trigger sidebar */
#define _LIBAROMA_LAYER_SIDEBAR_TOUCH_DP 16

/* root window client data — private state attached to the root window while
 * it is acting as a layered window.  Guarded by `mutex` unless noted. */
typedef struct{
  LIBAROMA_WINDOWP win;               /* root window that owns this state */
  LIBAROMA_WINDOWP sidebar;           /* attached sidebar window, may be NULL */
  byte sidebar_showed;                /* 0=hidden, 1=sliding/partial, 2=fully open */
  int sidebar_xpos;                   /* current sidebar x position in px */
  int sidebar_req_x;                  /* requested x position; -1 = no request */
  int sidebar_velocity;               /* fling velocity for sidebar animation */
  byte redraw;                        /* set when a sync/redraw is pending */
  LIBAROMA_MUTEX mutex;               /* protects this structure */
  LIBAROMA_CANVASP tdc;               /* offscreen canvas when not direct */
  byte on_direct_canvas;              /* 1 = draw straight into win->dc */
  byte (*ori_ui_thread)(LIBAROMA_WINDOWP); /* saved original ui thread cb */
  byte touched;                       /* touch state (1/2 drag, 10 = animating) */
  int touch_x;                        /* last touch x */
  int touch_y;                        /* last touch y */
  byte allow_scroll;                  /* sidebar drag/scroll arbitration flag */
  long client_touch_start;            /* tick when a child touch began */
  LIBAROMA_MSG pretouched_msg;        /* saved DOWN msg for deferred delivery */
  LIBAROMA_CONTROLP pretouched;       /* child control awaiting touch decision */
  LIBAROMA_WINDOW_SIDEBAR_SLIDE_CB slide_cb; /* slide progress callback */
  LIBAROMA_FLING fling;               /* fling tracker for sidebar gestures */
} _LIBAROMA_WINDOW_LAYER, *_LIBAROMA_WINDOW_LAYERP;

/* window handler — forward declarations */
byte _libaroma_window_layer_sync(LIBAROMA_WINDOWP win,
  int x,int y,int w,int h);
void _libaroma_window_layer_postfree(LIBAROMA_WINDOWP win);
LIBAROMA_CANVASP _libaroma_window_layer_control_draw_begin(
    LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl
);
byte _libaroma_window_layer_message_hooker(
    LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg, dwordp retval
);
byte _libaroma_window_layer_updatedc(LIBAROMA_WINDOWP win);

/* handler vtable installed on the root window by
 * libaroma_window_layer_init(); also used as the identity marker in
 * _libaroma_window_layer_check(). */
static LIBAROMA_WINDOW_HANDLER _libaroma_window_layer_handler={
  prefree:NULL,
  postfree:_libaroma_window_layer_postfree,
  updatebg:NULL,
  invalidate:NULL,
  sync:_libaroma_window_layer_sync,
  message_hooker:_libaroma_window_layer_message_hooker,
  control_draw_flush:NULL,
  control_erasebg:NULL,
  control_isvisible:NULL,
  control_draw_begin:_libaroma_window_layer_control_draw_begin
};

/*
 * Function    : _libaroma_window_layer_check
 * Return Value: _LIBAROMA_WINDOW_LAYERP
 * Descriptions: get root window client data
 *
 * Returns the layer state attached to `win`, or NULL when `win` is NULL,
 * is not a root window, or was not initialized as a layered window (its
 * handler is not _libaroma_window_layer_handler).
 */
static inline _LIBAROMA_WINDOW_LAYERP _libaroma_window_layer_check(
  LIBAROMA_WINDOWP win){
  if (!win){
    return NULL;
  }
  if (win->parent!=NULL){
    /* only root windows can be layered windows */
    return NULL;
  }
  if (win->handler!=&_libaroma_window_layer_handler){
    return NULL;
  }
  return (_LIBAROMA_WINDOW_LAYERP) win->client_data;
} /* End of _libaroma_window_layer_check */

/*
 * Function    : _libaroma_window_layer_postfree
 * Return Value: void
 * Descriptions: postfree client_data
 *
 * Tears down the layer state: deactivates and frees the sidebar (notifying
 * its children with WIN_INACTIVE first), frees the offscreen canvas,
 * restores the window's original ui_thread, and releases the state struct.
 * After this call win->handler and win->client_data are NULL.
 */
void _libaroma_window_layer_postfree(LIBAROMA_WINDOWP win){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return;
  }
  if (me->sidebar){
    if (me->sidebar->active){
      /* deactivate sidebar and tell its children before freeing it */
      me->sidebar->active=0;
      LIBAROMA_MSG msgia;
      msgia.msg=LIBAROMA_MSG_WIN_INACTIVE;
      int i;
      for (i=0;i<me->sidebar->childn;i++){
        if (me->sidebar->childs[i]->handler->message){
          me->sidebar->childs[i]->handler->message(
            me->sidebar->childs[i], &msgia
          );
        }
      }
    }
    libaroma_mutex_lock(me->mutex);
    libaroma_window_free(me->sidebar);
    libaroma_mutex_unlock(me->mutex);
  }
  libaroma_mutex_lock(me->mutex);
  if (me->tdc){
    libaroma_canvas_free(me->tdc);
  }
  /* restore the ui thread callback saved in libaroma_window_layer_init */
  win->ui_thread=me->ori_ui_thread;
  win->handler = NULL;
  win->client_data = NULL;
  libaroma_mutex_unlock(me->mutex);
  libaroma_mutex_free(me->mutex);
  free(me);
} /* End of _libaroma_window_layer_postfree */

/*
 * Function    : _libaroma_window_layer_updatedc
 * Return Value: byte
 *
 * Descriptions: update nondirect dc
 *
 * Composites the offscreen canvas (and, when the sidebar is visible, the
 * sidebar canvas plus a darkening overlay proportional to how far the
 * sidebar has slid out) into the root window's dc.  No-op in direct-canvas
 * mode or when the window is inactive.
 */
byte _libaroma_window_layer_updatedc(LIBAROMA_WINDOWP win){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return 0;
  }
  if (win->active!=1){
    return 0;
  }
  libaroma_mutex_lock(me->mutex);
  if (!me->on_direct_canvas){
    if (me->tdc){
      byte sidebar_draw=0;
      if (win->active==1){
        if (me->sidebar_showed){
          if (me->sidebar){
            if (me->sidebar_xpos>0){
              /* draw the revealed slice of the sidebar at the left edge */
              libaroma_draw_ex(
                win->dc, me->sidebar->dc,
                0, me->sidebar->y,
                me->sidebar->w-me->sidebar_xpos, 0,
                me->sidebar_xpos, me->sidebar->h,
                0,0xff
              );
              /* draw the client canvas darkened; opacity scales with how
               * far the sidebar is open (245 fully closed .. 95 fully open) */
              libaroma_draw_ex(win->dc,me->tdc,
                me->sidebar_xpos,0,
                me->sidebar_xpos,0,
                win->dc->w-me->sidebar_xpos,win->dc->h,
                LIBAROMA_DRAW_TO_BLACK,
                245-(150*me->sidebar_xpos/me->sidebar->w)
              );
              sidebar_draw=1;
            }
          }
        }
      }
      if (!sidebar_draw){
        /* sidebar hidden: plain copy of the offscreen canvas */
        libaroma_draw(win->dc,me->tdc,0,0,0);
      }
    }
  }
  libaroma_mutex_unlock(me->mutex);
  return 1;
} /* End of _libaroma_window_layer_updatedc */

/*
 * Function    : _libaroma_window_layer_sync
 * Return Value: byte
 * Descriptions: window sync
 *
 * Refreshes the window dc via updatedc and pushes the (x,y,w,h) region
 * (window-relative) to the window manager.  Skipped entirely while
 * win->lock_sync is set.
 */
byte _libaroma_window_layer_sync(LIBAROMA_WINDOWP win,
  int x,int y,int w,int h){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return 0;
  }
  if (!win->lock_sync){
    if (!libaroma_window_isactive(win)){
      ALOGW("libaroma_window_sync win is not active window");
      return 0;
    }
    if (win->dc==NULL){
      ALOGW("window_invalidate dc is null");
      return 0;
    }
    _libaroma_window_layer_updatedc(win);
    /* sync workspace */
    libaroma_wm_sync(win->x+x,win->y+y,w,h);
  }
  return 1;
} /* End of _libaroma_window_layer_sync */

/*
 * Function    : _libaroma_window_layer_control_draw_begin
 * Return Value: LIBAROMA_CANVASP
 * Descriptions: get canvas for child control
 *
 * Returns a sub-canvas covering the control's rectangle on the active
 * drawing target (win->dc in direct mode, the offscreen tdc otherwise).
 * Caller owns the returned canvas area; returns NULL when no target exists.
 */
LIBAROMA_CANVASP _libaroma_window_layer_control_draw_begin(
  LIBAROMA_WINDOWP win,LIBAROMA_CONTROLP cctl
){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return NULL;
  }
  LIBAROMA_CANVASP c=NULL;
  libaroma_mutex_lock(me->mutex);
  LIBAROMA_CANVASP dc = (me->on_direct_canvas)?win->dc:me->tdc;
  if (!dc){
    libaroma_mutex_unlock(me->mutex);
    return NULL;
  }
  c = libaroma_canvas_area(
    dc, cctl->x, cctl->y, cctl->w, cctl->h
  );
  libaroma_mutex_unlock(me->mutex);
  return c;
} /* End of _libaroma_window_layer_control_draw_begin */

/*
 * Function    : libaroma_window_layer_direct_canvas
 * Return Value: byte
 * Descriptions: set as direct canvas
 *
 * Switches between direct drawing (state=1: controls draw into win->dc, the
 * offscreen canvas is flushed and freed) and buffered drawing (state=0: an
 * offscreen canvas is created and seeded from win->dc).  Idempotent when
 * the mode is already the requested one.
 */
byte libaroma_window_layer_direct_canvas(LIBAROMA_WINDOWP win, byte state){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return 0;
  }
  libaroma_mutex_lock(me->mutex);
  if (state){
    if (!me->on_direct_canvas){
      /* flush pending offscreen content before dropping it */
      if ((win->dc)&&(me->tdc)) {
        libaroma_draw(win->dc,me->tdc,0,0,0);
      }
      if (me->tdc){
        libaroma_canvas_free(me->tdc);
        me->tdc=NULL;
      }
      me->on_direct_canvas=1;
    }
  }
  else{
    if (me->on_direct_canvas){
      if (win->dc){
        if (!me->tdc){
          me->tdc = libaroma_canvas(
            win->dc->w, win->dc->h
          );
        }
        if (me->tdc) {
          /* seed the offscreen canvas with the current screen content */
          libaroma_draw(me->tdc,win->dc,0,0,0);
        }
      }
      me->on_direct_canvas=0;
    }
  }
  libaroma_mutex_unlock(me->mutex);
  return 1;
} /* End of libaroma_window_layer_direct_canvas */

/*
 * Function    : _libaroma_window_layer_set_sidebar_pos
 * Return Value: byte
 * Descriptions: set sidebar position
 *
 * Moves the sidebar's revealed edge to x px (clamped to sidebar width).
 * x>0 shows the sidebar — activating it on first reveal and switching the
 * root window to buffered drawing; x==width marks it fully open (state 2).
 * x<=0 hides it and restores direct drawing.  Invokes slide_cb with the
 * new position.  NOTE: the mutex is temporarily released around calls that
 * re-enter this module (activation, direct_canvas) — the statement order
 * here is load-bearing.
 */
byte _libaroma_window_layer_set_sidebar_pos(LIBAROMA_WINDOWP win, int x){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return 0;
  }
  libaroma_mutex_lock(me->mutex);
  if (!me->sidebar){
    libaroma_mutex_unlock(me->mutex);
    return 0;
  }
  if (x>0){
    if (x>me->sidebar->w){
      x=me->sidebar->w;
    }
    if (!me->sidebar_showed){
      if (!me->sidebar->active){
        /* activate sidebar */
        LIBAROMA_MSG msgr;
        dword rv=0;
        msgr.msg=LIBAROMA_MSG_WIN_MEASURED;
        libaroma_mutex_unlock(me->mutex);
        me->sidebar->handler->message_hooker(me->sidebar,&msgr,&rv);
        msgr.msg=LIBAROMA_MSG_WIN_ACTIVE;
        me->sidebar->handler->message_hooker(me->sidebar,&msgr,&rv);
        libaroma_window_invalidate(me->sidebar,0);
        libaroma_mutex_lock(me->mutex);
      }
      /* buffered drawing is required while the sidebar overlaps the client */
      libaroma_mutex_unlock(me->mutex);
      libaroma_window_layer_direct_canvas(win,0);
      libaroma_mutex_lock(me->mutex);
      me->sidebar_showed=1;
    }
    if (me->sidebar_xpos!=x){
      me->redraw=1;
    }
    if (x==me->sidebar->w){
      if (me->sidebar_showed!=2){
        me->sidebar_showed=2;
      }
    }
    else if (me->sidebar_showed==2){
      me->sidebar_showed=1;
    }
    me->sidebar_xpos=x;
  }
  else{
    if (me->sidebar_showed){
      me->sidebar_showed=0;
      libaroma_mutex_unlock(me->mutex);
      libaroma_window_layer_direct_canvas(win,1);
      libaroma_mutex_lock(me->mutex);
    }
    if (me->sidebar_xpos!=0){
      me->redraw=1;
    }
    me->sidebar_xpos=0;
  }
  if (me->slide_cb){
    me->slide_cb(me->sidebar, me->sidebar_xpos, me->sidebar->w);
  }
  libaroma_mutex_unlock(me->mutex);
  return 1;
} /* End of _libaroma_window_layer_set_sidebar_pos */

/*
 * Function    : _libaroma_window_layer_message_hooker
 * Return Value: byte
 * Descriptions: root window message hooker
 *
 * Routes messages for the layered window.  When the sidebar is fully open
 * (state 2) messages are forwarded to the sidebar's hooker.  While the
 * close/open animation runs (touched==10) touch events are swallowed.
 * Otherwise edge-swipe touches (within _LIBAROMA_LAYER_SIDEBAR_TOUCH_DP of
 * the left edge) start a sidebar drag, tracked with the fling helper to
 * derive the release velocity.  Returns 1 when the message was consumed.
 */
byte _libaroma_window_layer_message_hooker(
    LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg, dwordp retval){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return 0;
  }
  if (win->active==1){
    if (me->sidebar_showed==2){
      if (me->sidebar){
        if (me->sidebar->handler->message_hooker(
            me->sidebar, msg, retval
          )){
          return 1;
        }
      }
    }
    else if (me->touched==10){
      /* cancel any touch event */
      if (msg->msg==LIBAROMA_MSG_TOUCH){
        return 1;
      }
    }
    else if (me->sidebar){
      switch (msg->msg){
        case LIBAROMA_MSG_TOUCH:
          {
            int x = msg->x;
            int y = msg->y;
            libaroma_window_calculate_pos(win,NULL,&x,&y);
            if (msg->state==LIBAROMA_HID_EV_STATE_DOWN){
              if (x<libaroma_dp(_LIBAROMA_LAYER_SIDEBAR_TOUCH_DP)){
                /* edge swipe: begin sidebar drag */
                libaroma_mutex_lock(me->mutex);
                me->sidebar_velocity=0;
                me->sidebar_req_x=libaroma_dp(15);
                me->touched=1;
                me->touch_x=x;
                me->touch_y=y;
                libaroma_fling_down(&me->fling, x);
                libaroma_mutex_unlock(me->mutex);
                return 1;
              }
            }
            else if (me->touched){
              if (msg->state==LIBAROMA_HID_EV_STATE_MOVE){
                libaroma_mutex_lock(me->mutex);
                int reqx=x;
                if (me->touched==2){
                  /* drag began from an open sidebar: position is relative */
                  reqx=(me->sidebar->w)+(x-me->touch_x);
                }
                me->sidebar_req_x=MAX(
                  libaroma_dp(_LIBAROMA_LAYER_SIDEBAR_TOUCH_DP),reqx);
                libaroma_fling_move(&me->fling, x);
                libaroma_mutex_unlock(me->mutex);
              }
              else if (msg->state==LIBAROMA_HID_EV_STATE_UP){
                libaroma_mutex_lock(me->mutex);
                /* release: sign of fling velocity decides open vs close */
                me->sidebar_velocity=
                  ((int) (libaroma_fling_up(&me->fling, x)*1.3)>>8);
                if (me->sidebar_velocity>=0){
                  me->sidebar_req_x=0;
                }
                else{
                  me->sidebar_req_x=me->sidebar->w;
                  me->sidebar_velocity=abs(me->sidebar_velocity);
                }
                if (me->sidebar_velocity){
                  /* fix velocity — clamp to 5%..45% of width per frame */
                  int diff = me->sidebar->w;
                  me->sidebar_velocity = MAX(MIN(
                    me->sidebar_velocity, 0.45*diff),0.05*diff);
                }
                if (me->sidebar_req_x!=me->sidebar_xpos){
                  /* start the animation; swallow touches until it settles */
                  me->touched=10;
                }
                libaroma_mutex_unlock(me->mutex);
              }
              return 1;
            }
          }
          break;
      }
    }
  }
  return 0;
} /* End of _libaroma_window_layer_message_hooker */

/*
 * Function    : _libaroma_window_layer_ui_thread
 * Return Value: byte
 * Descriptions: layered window ui thread
 *
 * Per-frame driver installed over the window's original ui thread.  Steps
 * the sidebar open/close animation toward sidebar_req_x (decaying velocity,
 * else geometric easing), runs the sidebar's and the original ui threads,
 * and recomposites via updatedc when anything needs syncing.  Returns
 * nonzero when the screen should be synced.
 */
byte _libaroma_window_layer_ui_thread(LIBAROMA_WINDOWP win) {
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return 0;
  }
  byte need_sync=0;
  if (win->active==1){
    libaroma_mutex_lock(me->mutex);
    if ((me->sidebar)&&(me->sidebar_req_x!=-1)){
      /* show - hide sidebar */
      if (!me->sidebar->active){
        /* first frame: activate by nudging the position to 1 */
        libaroma_mutex_unlock(me->mutex);
        _libaroma_window_layer_set_sidebar_pos(win,1);
        libaroma_mutex_lock(me->mutex);
      }
      else if (me->sidebar->w<=0){
        /* invalid sidebar */
        me->sidebar_req_x=-1;
      }
      else if (me->sidebar_req_x!=me->sidebar_xpos){
        int move_sz = (me->sidebar_req_x-me->sidebar_xpos);
        if (me->sidebar_velocity!=0){
          /* fling: decay velocity (~0.96/frame) with a floor of 5% width */
          me->sidebar_velocity=(me->sidebar_velocity*246)>>8;
          int minw=MAX(1,0.05*me->sidebar->w);
          if (me->sidebar_velocity<=minw){
            me->sidebar_velocity=minw;
          }
          if (move_sz<0){
            move_sz = 0-me->sidebar_velocity;
          }
          else{
            move_sz = me->sidebar_velocity;
          }
        }
        else{
          /* no fling: ease by 1/4 of remaining distance */
          move_sz = (move_sz<<6)>>8;
        }
        if (abs(move_sz)<2){
          /* guarantee forward progress */
          if (me->sidebar_req_x<me->sidebar_xpos){
            move_sz=-1;
          }
          else{
            move_sz=1;
          }
        }
        int target_sz = me->sidebar_xpos+move_sz;
        if (target_sz>=me->sidebar->w){
          if ((me->touched)&&(me->touched!=10)){
            /* finger still down: hold just short of fully open */
            target_sz=me->sidebar->w-1;
          }
          else{
            if (me->touched==10){
              me->touched=0;
            }
            target_sz=me->sidebar->w;
            me->sidebar_req_x=-1;
            me->sidebar_velocity=0;
          }
        }
        else if (target_sz<=0){
          if ((me->touched)&&(me->touched!=10)){
            /* finger still down: hold just short of fully closed */
            target_sz=1;
          }
          else{
            if (me->touched==10){
              me->touched=0;
            }
            target_sz=0;
            me->sidebar_req_x=-1;
            me->sidebar_velocity=0;
          }
        }
        libaroma_mutex_unlock(me->mutex);
        _libaroma_window_layer_set_sidebar_pos(win,target_sz);
        libaroma_mutex_lock(me->mutex);
      }
    }
    libaroma_mutex_unlock(me->mutex);
    if (me->sidebar_showed){
      if (me->sidebar){
        if (me->sidebar->ui_thread){
          if (me->sidebar->ui_thread(me->sidebar)){
            need_sync=1;
          }
        }
      }
    }
  }
  if (me->ori_ui_thread){
    /* chain to the window's original ui thread callback */
    if (me->ori_ui_thread(win)){
      need_sync = 1;
    }
  }
  libaroma_mutex_lock(me->mutex);
  if (me->redraw){
    need_sync=1;
    me->redraw=0;
  }
  libaroma_mutex_unlock(me->mutex);
  if (need_sync){
    if (win->active==1){
      _libaroma_window_layer_updatedc(win);
    }
  }
  return need_sync;
} /* End of _libaroma_window_layer_ui_thread */

/*
 * Function    : libaroma_window_layer_init
 * Return Value: byte
 * Descriptions: init window as layered window
 *
 * Installs the layer handler and ui thread on a root window, allocating the
 * private state.  Safe to call twice: returns 1 if the window is already a
 * layered window, 0 on any invalid argument or allocation failure.
 */
byte libaroma_window_layer_init(LIBAROMA_WINDOWP win){
  if (!win){
    ALOGW("window_root_register win is not valid pointer");
    return 0;
  }
  if (win->parent){
    ALOGW("window_root_register win is not root window");
    return 0;
  }
  if (win->client_data){
    if (win->handler!=&_libaroma_window_layer_handler){
      ALOGW("window_root_register window is not valid root window");
      return 0;
    }
    /* already initialized as layered window */
    return 1;
  }
  _LIBAROMA_WINDOW_LAYERP me = (_LIBAROMA_WINDOW_LAYERP)
    calloc(sizeof(_LIBAROMA_WINDOW_LAYER),1);
  if (!me){
    ALOGW("window_root_register cannot alloc internal data");
    return 0;
  }
  libaroma_mutex_init(me->mutex);
  libaroma_mutex_lock(me->mutex);
  me->win = win;
  me->on_direct_canvas=1;
  me->ori_ui_thread=win->ui_thread; /* restored in postfree */
  me->sidebar_req_x=-1;
  win->handler = &_libaroma_window_layer_handler;
  win->client_data = me;
  win->ui_thread=_libaroma_window_layer_ui_thread;
  libaroma_mutex_unlock(me->mutex);
  return 1;
} /* End of libaroma_window_layer_init */

/*
 * Function    : libaroma_window_layer_release
 * Return Value: byte
 * Descriptions: release layer window
 *
 * Undoes libaroma_window_layer_init (via postfree) and forces a full
 * redraw of the restored window.
 */
byte libaroma_window_layer_release(LIBAROMA_WINDOWP win){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return 0;
  }
  _libaroma_window_layer_postfree(win);
  libaroma_window_invalidate(win,1);
  return 1;
}
/* End of libaroma_window_layer_release */

/******************************* SIDEBAR **************************************/
/* sidebar window handler forward declarations */
byte _libaroma_window_sidebar_invalidate(LIBAROMA_WINDOWP win, byte sync);
byte _libaroma_window_sidebar_sync(LIBAROMA_WINDOWP win, int x,int y,int w,int h);
byte _libaroma_window_sidebar_message_hooker(
  LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg, dwordp retval
);

/* window-handler vtable for the sidebar window type; identity of this
   struct's address is also used as a type check (see invalidate/sync) */
static LIBAROMA_WINDOW_HANDLER _libaroma_window_sidebar_handler={
  prefree:NULL,
  postfree:NULL,
  updatebg:NULL,
  invalidate:_libaroma_window_sidebar_invalidate,
  sync:_libaroma_window_sidebar_sync,
  message_hooker:_libaroma_window_sidebar_message_hooker,
  control_draw_flush:NULL,
  control_erasebg:NULL,
  control_isvisible:NULL,
  control_draw_begin:NULL
};

/*
 * Function    : _libaroma_window_sidebar_invalidate
 * Return Value: byte
 * Descriptions: invalidate - repaint the sidebar canvas from its background
 *               and redraw every child control; optionally sync to screen.
 *               Returns 0 if win is not a sidebar window.
 */
byte _libaroma_window_sidebar_invalidate(LIBAROMA_WINDOWP win, byte sync){
  if (win->handler!=&_libaroma_window_sidebar_handler){
    /* not a sidebar window */
    return 0;
  }
  if ((win->dc)&&(win->bg)){
    libaroma_draw(win->dc,win->bg,0,0,0);
    /* draw childs */
    /* NOTE(review): children are drawn in parallel when OpenMP is enabled;
       assumes libaroma_control_draw is safe for concurrent calls on
       distinct controls - TODO confirm */
    int i;
#ifdef LIBAROMA_CONFIG_OPENMP
  #pragma omp parallel for
#endif
    for (i=0;i<win->childn;i++){
      libaroma_control_draw(win->childs[i], 0);
    }
  }
  if (sync){
    return _libaroma_window_sidebar_sync(win,0,0,win->w,win->h);
  }
  return 1;
} /* End of _libaroma_window_sidebar_invalidate */

/*
 * Function    : _libaroma_window_sidebar_sync
 * Return Value: byte
 * Descriptions: sync sidebar - if the sidebar is currently showing, flag the
 *               parent layer for redraw instead of blitting directly
 *               (the layer compositor performs the actual screen update).
 */
byte _libaroma_window_sidebar_sync(LIBAROMA_WINDOWP win, int x,int y,int w,int h){
  if (win->handler!=&_libaroma_window_sidebar_handler){
    return 0;
  }
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win->parent);
  if (!me){
    return 0;
  }
  if (me->sidebar_showed){
    me->redraw=1;
  }
  return 1;
} /* End of _libaroma_window_sidebar_sync */

/*
 * Function    : libaroma_window_sidebar_show
 * Return Value: byte
 * Descriptions: show/hide sidebar. Returns 1 when a show/hide animation was
 *               requested, 0 when nothing had to change (already in the
 *               requested state) or the parent has no layer data.
 */
byte libaroma_window_sidebar_show(LIBAROMA_WINDOWP win, byte show){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win->parent);
  if (!me){
    return 0;
  }
  libaroma_mutex_lock(me->mutex);
  if (show){
    if (!me->sidebar_showed){
      /* unlock around set_sidebar_pos - it takes the same mutex */
      libaroma_mutex_unlock(me->mutex);
      _libaroma_window_layer_set_sidebar_pos(win->parent,1);
      libaroma_mutex_lock(me->mutex);
      /* request slide to fully open (sidebar width) */
      me->sidebar_req_x=win->w;
      me->touched=10;
      libaroma_mutex_unlock(me->mutex);
      return 1;
    }
  }
  else{
    if (me->sidebar_showed){
      if (me->sidebar_showed==2){
        libaroma_mutex_unlock(me->mutex);
        _libaroma_window_layer_set_sidebar_pos(win->parent,win->w-1);
        libaroma_mutex_lock(me->mutex);
      }
      /* request slide to fully closed */
      me->sidebar_req_x=0;
      me->touched=10;
      libaroma_mutex_unlock(me->mutex);
      return 1;
    }
  }
  libaroma_mutex_unlock(me->mutex);
  return 0;
} /* End of libaroma_window_sidebar_show */

/*
 * Function    : _libaroma_window_sidebar_message_hooker
 * Return Value: byte
 * Descriptions: sidebar message hooker - routes key/touch/window messages to
 *               the sidebar and its children, and implements the
 *               swipe-to-close gesture state machine. Returns 1 when the
 *               message was consumed.
 */
byte _libaroma_window_sidebar_message_hooker(
  LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg, dwordp retval
){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win->parent);
  if (!me){
    return 0;
  }
  byte return_state=0;
  libaroma_mutex_lock(me->mutex);
  switch (msg->msg){
    case LIBAROMA_MSG_KEY_BACK:
      {
        /* back key dismisses the sidebar; drop the lock because
           libaroma_window_sidebar_show locks it again */
        libaroma_mutex_unlock(me->mutex);
        libaroma_window_sidebar_show(win, 0);
        libaroma_mutex_lock(me->mutex);
        return_state=1;
        goto end_message;
      }
      break;
    case LIBAROMA_MSG_TOUCH:
      {
        if (win->parent->active!=1){
          return_state=0;
          goto end_message;
        }
        /* touch handler */
        if (msg->state==LIBAROMA_HID_EV_STATE_DOWN){
          /* save the DOWN event so it can be replayed to a child later */
          memcpy(&me->pretouched_msg,msg,sizeof(LIBAROMA_MSG));
          win->touched = NULL;
          me->pretouched=NULL;
          int x = msg->x;
          int y = msg->y;
          libaroma_window_calculate_pos(win->parent,NULL,&x,&y);
          if (x<win->w){
            /* DOWN landed inside the sidebar - find the child under it */
            int i;
            for (i=0;i<win->childn;i++){
              if (_libaroma_window_is_inside(win->childs[i],x,y)){
                me->pretouched = win->childs[i];
                break;
              }
            }
            if (me->pretouched!=NULL){
              if (me->pretouched->handler->message){
                /* defer delivery; ui thread promotes after ~180ms */
                me->client_touch_start=libaroma_tick();
              }
              else{
                me->pretouched=NULL;
              }
            }
          }
          /*
          else{
            libaroma_mutex_unlock(me->mutex);
            libaroma_window_sidebar_show(win, 0);
            libaroma_mutex_lock(me->mutex);
          }*/
          me->allow_scroll=2;
          me->touch_x=x;
          me->touch_y=y;
          libaroma_fling_down(&me->fling, x);
        }
        else if (win->touched!=NULL){
          /* a child owns the gesture - forward MOVE/UP to it */
          if (msg->state==LIBAROMA_HID_EV_STATE_MOVE){
            if (win->touched->handler->message){
              *retval=win->touched->handler->message(win->touched, msg);
            }
          }
          else if (msg->state==LIBAROMA_HID_EV_STATE_UP){
            if (win->touched->handler->message){
              *retval=win->touched->handler->message(win->touched, msg);
            }
            win->touched=NULL;
          }
        }
        else if (msg->state==LIBAROMA_HID_EV_STATE_UP){
          int x = msg->x;
          int y = msg->y;
          libaroma_window_calculate_pos(win->parent,NULL,&x,&y);
          if (x>=win->w){
            /* tap outside the sidebar closes it */
            libaroma_mutex_unlock(me->mutex);
            libaroma_window_sidebar_show(win, 0);
            libaroma_mutex_lock(me->mutex);
          }
          else if (me->pretouched){
            /* quick tap: replay deferred DOWN then deliver UP */
            if (me->pretouched->handler->message){
              me->pretouched->handler->message(
                me->pretouched,&me->pretouched_msg);
              me->pretouched->handler->message(
                me->pretouched,msg);
            }
            me->pretouched=NULL;
            me->client_touch_start=0;
            me->allow_scroll=0;
            me->touch_x=x;
            me->touch_y=y;
            me->redraw=1;
          }
        }
        else if (msg->state==LIBAROMA_HID_EV_STATE_MOVE){
          int x = msg->x;
          int y = msg->y;
          libaroma_window_calculate_pos(win->parent,NULL,&x,&y);
          if (me->allow_scroll==2){
            /* gesture direction still undecided */
            int move_sz = me->touch_x - x;
            int move_sz_y = me->touch_y - y;
            int scrdp=libaroma_dp(24);
            if ((abs(move_sz_y)>=scrdp)&&(abs(move_sz_y)>=abs(move_sz))){
              /* halt the scroll and send to control */
              if (me->pretouched){
                if (me->pretouched->handler->message){
                  me->client_touch_start=0;
                  win->touched=me->pretouched;
                  me->pretouched=NULL;
                  win->touched->handler->message(
                    win->touched,&me->pretouched_msg);
                  win->touched->handler->message(
                    win->touched,msg);
                }
                else{
                  me->pretouched=NULL;
                }
                me->client_touch_start=0;
                me->allow_scroll=0;
                me->touch_x=x;
                me->touch_y=y;
                me->redraw=1;
              }
            }
            else if (abs(move_sz)>=scrdp){
              /* horizontal swipe: start dragging the sidebar itself */
              me->allow_scroll=1;
              me->client_touch_start=0;
              me->pretouched=NULL;
              win->touched=NULL;
              me->sidebar_showed=1;
              me->touched=2;
              me->touch_x=x;
              me->touch_y=y;
              libaroma_mutex_unlock(me->mutex);
              _libaroma_window_layer_set_sidebar_pos(win->parent,win->w-1);
              libaroma_mutex_lock(me->mutex);
            }
          }
        }
        return_state=1;
        goto end_message;
      }
      break;
    case LIBAROMA_MSG_WIN_ACTIVE:
      {
        /* activate once and propagate to children */
        if (!win->active){
          int i;
          win->active=1;
          for (i=0;i<win->childn;i++){
            if (win->childs[i]->handler->message){
              win->childs[i]->handler->message(win->childs[i], msg);
            }
          }
        }
      }
      break;
    case LIBAROMA_MSG_WIN_RESIZE:
      {
        /* broadcast resize to children */
        int i;
        for (i=0;i<win->childn;i++){
          if (win->childs[i]->handler->message){
            win->childs[i]->handler->message(win->childs[i], msg);
          }
        }
      }
      break;
    case LIBAROMA_MSG_WIN_MEASURED:
      {
        /* compute sidebar width from requested width, capped at
           parent width minus 56dp (material-style sidebar margin) */
        int target_w = libaroma_window_usedp(2)?win->rw:libaroma_dp(win->rw);
        target_w = libaroma_window_measure_calculate(
          target_w,win->rw,win->parent->w,1,0
        );
        int max_target_w = win->parent->w-libaroma_dp(56);
        if (target_w>max_target_w){
          target_w=max_target_w;
        }
        win->x=win->y=win->rx=win->ry=win->left=win->top=0;
        win->ax=win->x;
        win->ay=win->y;
        win->w = target_w;
        win->h = win->parent->h;
        if (libaroma_window_usedp(2)){
          win->rw=win->width=libaroma_px(win->w);
          win->rh=win->height=libaroma_px(win->h);
        }
        else{
          win->rw=win->width= win->w;
          win->rh=win->height= win->h;
        }
        /* recreate canvases if the size changed */
        if (win->dc){
          if ((win->dc->w!=win->w)||(win->dc->h!=win->h)){
            libaroma_canvas_free(win->dc);
            if (win->bg){
              libaroma_canvas_free(win->bg);
            }
            win->dc=NULL;
            win->bg=NULL;
          }
        }
        if (!win->dc){
          win->dc = libaroma_canvas(win->w,win->h);
          win->bg = libaroma_canvas(win->w,win->h);
          libaroma_canvas_setcolor(win->dc,0xffff,0);
          libaroma_canvas_setcolor(win->bg,0xffff,0);
        }
        /* remeasured all childs */
        int i;
        for (i=0;i<win->childn;i++){
          libaroma_window_measure(win,win->childs[i]);
        }
      }
      break;
  }
end_message:
  libaroma_mutex_unlock(me->mutex);
  return return_state;
} /* End of _libaroma_window_sidebar_message_hooker */

/*
 * Function    : _libaroma_window_sidebar_ui_thread
 * Return Value: byte
 * Descriptions: window sidebar ui thread - promotes a deferred touch to its
 *               target child after 180ms, then runs each child's thread
 *               handler; returns nonzero when a redraw/sync is needed.
 */
byte _libaroma_window_sidebar_ui_thread(LIBAROMA_WINDOWP win) {
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win->parent);
  if (!me){
    return 0;
  }
  int i;
  byte need_sync = 0;
  if (win->active==1){
    /* pretouched */
    libaroma_mutex_lock(me->mutex);
    if ((me->client_touch_start!=0)&&
      (libaroma_tick()-me->client_touch_start>180)){
      /* touch held long enough: commit it to the child control */
      me->client_touch_start=0;
      if (me->pretouched!=NULL){
        win->touched=me->pretouched;
        me->pretouched=NULL;
        if (win->touched->handler->message){
          win->touched->handler->message(
            win->touched,&me->pretouched_msg);
        }
      }
    }
    libaroma_mutex_unlock(me->mutex);
#ifdef LIBAROMA_CONFIG_OPENMP
  #pragma omp parallel for
#endif
    for (i=0;i<win->childn;i++){
      LIBAROMA_CONTROLP c=win->childs[i];
      if (c->handler->thread!=NULL){
        if (c->handler->thread(c)){
          if(libaroma_control_draw(c,0)){
            need_sync=1;
          }
        }
      }
    }
  }
  return need_sync;
} /* End of _libaroma_window_sidebar_ui_thread */

/*
 * Function    : libaroma_window_sidebar
 * Return Value: LIBAROMA_WINDOWP
 * Descriptions: new or get sidebar window - lazily creates the sidebar for
 *               a layered window (one per window); `width` is the requested
 *               width in the window's units. Returns NULL on failure.
 *               Ownership stays with the layer (freed with it).
 */
LIBAROMA_WINDOWP libaroma_window_sidebar(LIBAROMA_WINDOWP win, int width){
  if (!libaroma_window_layer_init(win)){
    return NULL;
  }
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win);
  if (!me){
    return NULL;
  }
  if (me->sidebar){
    /* already created - return existing instance */
    return me->sidebar;
  }
  libaroma_mutex_lock(me->mutex);
  LIBAROMA_WINDOWP sidebar = (LIBAROMA_WINDOWP)
    calloc(sizeof(LIBAROMA_WINDOW),1);
  if (!sidebar){
    libaroma_mutex_unlock(me->mutex);
    ALOGW("window_sidebar alloc sidebar data failed");
    return NULL;
  }
  sidebar->rw = width;
  sidebar->handler=&_libaroma_window_sidebar_handler;
  sidebar->parent=win;
  sidebar->ui_thread=_libaroma_window_sidebar_ui_thread;
  me->sidebar=sidebar;
  me->redraw=1;
  libaroma_mutex_unlock(me->mutex);
  return sidebar;
} /* End of libaroma_window_sidebar */

/*
 * Function    : libaroma_window_sidebar_onslide
 * Return Value: byte
 * Descriptions: set sidebar slide position callback - invoked as the
 *               sidebar position animates (presumably with the current
 *               slide offset; verify against the layer compositor).
 */
byte libaroma_window_sidebar_onslide(
  LIBAROMA_WINDOWP win,
  LIBAROMA_WINDOW_SIDEBAR_SLIDE_CB cb){
  _LIBAROMA_WINDOW_LAYERP me = _libaroma_window_layer_check(win->parent);
  if (!me){
    return 0;
  }
  libaroma_mutex_lock(me->mutex);
  me->slide_cb = cb;
  ALOGI("Init sidebar slide callback");
  libaroma_mutex_unlock(me->mutex);
  return 1;
} /* End of libaroma_window_sidebar_onslide */

#endif /* __libaroma_window_layer_c__ */
/* ========================== DFTW_omp.c ========================== */
// the code calculates a DFT of a random complex number input and
// then an IDFT. The IDFT result should be the input vector
// to compile with gcc
// gcc -Wall -O2 -fopenmp -o DFTW DFTW.c -lm
// written by stef
// exercise

#include "stdio.h" // printf
#include "stdlib.h" // malloc and rand for instance. Rand not thread safe!
#include "time.h" // time(0) to get random seed
#include "math.h" // sine and cosine
#include "omp.h" // openmp library like timing

// two pi
#define PI2 6.28318530718
// this for the rounding error, increasing N rounding error increases
// 0.01 precision good for N > 8000
#define R_ERROR 0.01

// main routine to calculate DFT
// idft: 1 = forward DFT, -1 = inverse DFT (results accumulated into Xr_o/Xi_o)
int DFT(int idft, double* xr, double* xi, double* Xr_o, double* Xi_o, int N);
// Compute mean and (population) standard deviation of n_runs timings
void compute_statistics(double *times, size_t n_runs, double *avg, double *std_dev);
// set the input array with random number
int fillInput(double* xr, double* xi, int N);
// set to zero the input vector
int setOutputZero(double* Xr_o, double* Xi_o, int N);
// check if x = IDFT(DFT(x))
int checkResults(double* xr, double* xi, double* xr_check, double* xi_check,
                 double* Xr_o, double* Xi_r, int N);
// print the results of the DFT
int printResults(double* xr, double* xi, int N);

int main(int argc, char* argv[]){
  (void) argc;
  (void) argv;
  // size of input array
  int N = 10000; // 8,000 is a good number for testing
  // number of timed repetitions per thread count; enum keeps times[] a
  // plain fixed-size array (no VLA) and avoids signed/unsigned comparisons
  enum { NRUNS = 10 };
  int DEBUG = 0;
  printf("DFTW calculation with N = %d \n",N);

  // Allocate array for input vector
  double* xr = (double*) malloc (N *sizeof(double));
  double* xi = (double*) malloc (N *sizeof(double));
  // for checking purposes
  double* xr_check = (double*) malloc (N *sizeof(double));
  double* xi_check = (double*) malloc (N *sizeof(double));
  // Allocate array for output vector
  double* Xr_o = (double*) malloc (N *sizeof(double));
  double* Xi_o = (double*) malloc (N *sizeof(double));
  // fail fast on allocation failure instead of dereferencing NULL
  if (!xr || !xi || !xr_check || !xi_check || !Xr_o || !Xi_o) {
    fprintf(stderr, "DFTW: memory allocation failed\n");
    free(xr); free(xi); free(Xi_o); free(Xr_o); free(xr_check); free(xi_check);
    return 1;
  }

  double times[NRUNS];
  // Vary the number of threads: 1, 2, 4, 8, 16, 20, 24, 28, 32
  for (int n_threads = 1; n_threads <= 32;
       n_threads = (n_threads <= 8) ? 2 * n_threads : n_threads + 4) {
    omp_set_num_threads(n_threads);
    for (int i = 0; i < NRUNS; ++i) {
      fillInput(xr,xi,N);
      setOutputZero(xr_check,xi_check,N);
      setOutputZero(Xr_o,Xi_o,N);

      // start timer
      double start_time = omp_get_wtime();

      // DFT
      int idft = 1;
      DFT(idft,xr,xi,Xr_o,Xi_o,N);
      // IDFT
      idft = -1;
      DFT(idft,Xr_o,Xi_o,xr_check,xi_check,N);

      // stop timer
      times[i] = omp_get_wtime() - start_time;

      // check the results: easy to make correctness errors with openMP
      checkResults(xr,xi,xr_check,xi_check,Xr_o, Xi_o, N);
      // print the results of the DFT
      if (DEBUG) {
        printResults(Xr_o,Xi_o,N);
      }
    }
    double avg, std_dev;
    compute_statistics(times, NRUNS, &avg, &std_dev);
    printf("DFTW (%2d threads) - Average: %fs - Std. deviation: %fs\n",
           n_threads, avg, std_dev);
  }

  // take out the garbage
  free(xr);
  free(xi);
  free(Xi_o);
  free(Xr_o);
  free(xr_check);
  free(xi_check);
  return 0;
}

// DFT/IDFT routine
// idft: 1 direct DFT, -1 inverse IDFT (Inverse DFT)
// Accumulates into Xr_o/Xi_o (callers zero them first); returns 1.
int DFT(int idft, double* xr, double* xi, double* Xr_o, double* Xi_o, int N){
  #pragma omp parallel
  {
    #pragma omp for
    for (int k = 0; k < N; k++) {
      double sum_re = Xr_o[k];
      double sum_im = Xi_o[k];
      // per-k angular step, hoisted out of the inner loop
      const double wk = PI2 * (double) k / (double) N;
      for (int n = 0; n < N; n++) {
        // compute cos/sin of the angle once instead of twice per term
        const double arg = wk * (double) n;
        const double c = cos(arg);
        const double s = sin(arg);
        // Real part of X[k]
        sum_re += xr[n] * c + idft * xi[n] * s;
        // Imaginary part of X[k]
        sum_im += -idft * xr[n] * s + xi[n] * c;
      }
      // normalize if you are doing IDFT
      if (idft == -1) {
        sum_re /= N;
        sum_im /= N;
      }
      Xr_o[k] = sum_re;
      Xi_o[k] = sum_im;
    }
  }
  return 1;
}

// Compute mean and population standard deviation of times[0..n_runs-1].
void compute_statistics(double *times, size_t n_runs, double *avg, double *std_dev) {
  double mean = 0.0;
  for (size_t i = 0; i < n_runs; ++i) {
    mean += times[i];
  }
  mean /= (double) n_runs;

  double var = 0.0;
  for (size_t i = 0; i < n_runs; ++i) {
    double d = times[i] - mean;
    var += d * d; // cheaper and clearer than pow(d, 2)
  }
  var /= (double) n_runs; // population variance (divide by n, not n-1)

  *avg = mean;
  *std_dev = sqrt(var);
}

// set the initial signal
// be careful with this
// rand() is NOT thread safe in case
int fillInput(double* xr, double* xi, int N){
  srand(time(0));
  for(int n=0; n < 100000;n++) // get some random number first
    rand();
  for(int n=0; n < N;n++){
    // Generate random discrete-time signal x in range (-1,+1)
    //xr[n] = ((double)(2.0 * rand()) / RAND_MAX) - 1.0;
    //xi[n] = ((double)(2.0 * rand()) / RAND_MAX) - 1.0;
    // constant real signal
    xr[n] = 1.0;
    xi[n] = 0.0;
  }
  return 1;
}

// set to zero the output vector
int setOutputZero(double* Xr_o, double* Xi_o, int N){
  for(int n=0; n < N;n++){
    Xr_o[n] = 0.0;
    Xi_o[n] = 0.0;
  }
  return 1;
}

// check if x = IDFT(DFT(x)); prints a message for every element whose
// round-trip error exceeds R_ERROR. Always returns 1.
int checkResults(double* xr, double* xi, double* xr_check, double* xi_check,
                 double* Xr_o, double* Xi_r, int N){
  // Xr_o/Xi_r are unused; kept so the signature stays compatible
  (void) Xr_o;
  (void) Xi_r;
  // x[0] and x[1] have typical rounding error problem
  // interesting there might be a theorem on this
  for(int n=0; n < N;n++){
    if (fabs(xr[n] - xr_check[n]) > R_ERROR)
      printf("ERROR - x[%d] = %f, inv(X)[%d]=%f \n",n,xr[n], n,xr_check[n]);
    if (fabs(xi[n] - xi_check[n]) > R_ERROR)
      printf("ERROR - x[%d] = %f, inv(X)[%d]=%f \n",n,xi[n], n,xi_check[n]);
  }
  //printf("Xre[0] = %f \n",Xr_o[0]);
  return 1;
}

// print the results of the DFT
int printResults(double* xr, double* xi, int N){
  for(int n=0; n < N;n++)
    printf("Xre[%d] = %f, Xim[%d] = %f \n", n, xr[n], n, xi[n]);
  return 1;
}
/* ========================== cache.c ========================== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distribute-cache-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const Quantum *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static const void *GetVirtualMetacontentFromCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *, ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,Quantum *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), OpenPixelCacheOnDisk(CacheInfo *,const MapMode), ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), ReadPixelCacheMetacontent(CacheInfo *magick_restrict, NexusInfo *magick_restrict,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *); static Quantum *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode, const ssize_t,const ssize_t,const size_t,const size_t, const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *) magick_hot_spot; #if defined(MAGICKCORE_OPENCL_SUPPORT) static void CopyOpenCLBuffer(CacheInfo *magick_restrict); #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. 
*/
/* module-wide cache state, guarded by cache_semaphore */
static SemaphoreInfo
  *cache_semaphore = (SemaphoreInfo *) NULL;

static ssize_t
  cache_anonymous_memory = (-1);  /* -1 = policy not yet queried */

static time_t
  cache_epoch = 0;

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e P i x e l C a c h e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquirePixelCache() acquires a pixel cache.
%
%  The format of the AcquirePixelCache() method is:
%
%      Cache AcquirePixelCache(const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->file=(-1);  /* no backing file yet */
  cache_info->id=GetMagickThreadId();
  /* raise thread count to the OpenMP maximum and then to the ThreadResource
     limit when either is larger than the caller's request; never zero */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /* environment variable first, then security policy may override it */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  cache_info->width_limit=MagickMin(GetMagickResourceLimit(WidthResource),
    (MagickSizeType) MAGICK_SSIZE_MAX);
  cache_info->height_limit=MagickMin(GetMagickResourceLimit(HeightResource),
    (MagickSizeType) MAGICK_SSIZE_MAX);
  cache_info->semaphore=AcquireSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AcquireSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e P i x e l C a c h e N e x u s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
%  The format of the AcquirePixelCacheNexus method is:
%
%      NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o number_threads: the number of nexus threads.
% */
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  ssize_t
    i;

  /* one contiguous array of 2*number_threads NexusInfo structs: the first
     half are the per-thread nexuses, the second half their virtual nexuses */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *nexus_info=(NexusInfo *) AcquireQuantumMemory(number_threads,
    2*sizeof(**nexus_info));
  if (*nexus_info == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    /* pointer table entry i aliases element i of the flat allocation */
    nexus_info[i]=(*nexus_info+i);
    if (i < (ssize_t) number_threads)
      nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e P i x e l C a c h e P i x e l s                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquirePixelCachePixels() returns the pixels associated with the specified
%  image.
%
%  The format of the AcquirePixelCachePixels() method is:
%
%      void *AcquirePixelCachePixels(const Image *image,size_t *length,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o length: the pixel cache length.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  (void) exception;  /* reserved for future use */
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=0;
  /* pixels are only directly addressable for in-core or memory-mapped caches */
  if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
    return((void *) NULL);
  *length=(size_t) cache_info->length;
  return(cache_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C a c h e C o m p o n e n t G e n e s i s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CacheComponentGenesis() instantiates the cache component.
%
%  The format of the CacheComponentGenesis method is:
%
%      MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  /* create the module-wide cache semaphore on first use */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C a c h e C o m p o n e n t T e r m i n u s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CacheComponentTerminus() destroys the cache component.
%
%  The format of the CacheComponentTerminus() method is:
%
%      CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
  /* ensure the semaphore exists before relinquishing it */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* no op-- nothing to destroy */
  RelinquishSemaphoreInfo(&cache_semaphore);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l i p P i x e l C a c h e N e x u s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
%  mask.  The method returns MagickTrue if the pixel region is clipped,
%  otherwise MagickFalse.
%
%  The format of the ClipPixelCacheNexus() method is:
%
%      MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to clip.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* nothing to clip without a write mask or with an empty region */
  if ((image->channels & WriteMaskChannel) == 0)
    return(MagickTrue);
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* p: committed pixels read through the virtual nexus; q: pending pixels */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      ssize_t
        i;

      mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
      if (fabs(mask_alpha) >= MagickEpsilon)
        {
          /* composite masked source over destination for updatable channels */
          for (i=0; i < (ssize_t) image->number_channels; i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
              GetPixelAlpha(image,p),(double) q[i],(double)
              GetPixelAlpha(image,q)));
          }
          SetPixelAlpha(image,GetPixelAlpha(image,p),q);
        }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o n e P i x e l C a c h e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelCache() clones a pixel cache.
%
%  The format of the ClonePixelCache() method is:
%
%      Cache ClonePixelCache(const Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
% */
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  CacheInfo
    *magick_restrict clone_info;

  const CacheInfo
    *magick_restrict cache_info;

  assert(cache != NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /* fresh cache with the same thread count; only the virtual pixel method is
     copied here - pixel data is cloned separately (ClonePixelCacheRepository) */
  clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads);
  clone_info->virtual_pixel_method=cache_info->virtual_pixel_method;
  return((Cache ) clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o n e P i x e l C a c h e M e t h o d s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelCacheMethods() clones the pixel cache methods from one cache to
%  another.
%
%  The format of the ClonePixelCacheMethods() method is:
%
%      void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
%  A description of each parameter follows:
%
%    o clone: Specifies a pointer to a Cache structure.
%
%    o cache: the pixel cache.
% */ MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache) { CacheInfo *magick_restrict cache_info, *magick_restrict source_info; assert(clone != (Cache) NULL); source_info=(CacheInfo *) clone; assert(source_info->signature == MagickCoreSignature); if (source_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", source_info->filename); assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); source_info->methods=cache_info->methods; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e R e p o s i t o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheRepository() clones the source pixel cache to the destination % cache. % % The format of the ClonePixelCacheRepository() method is: % % MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info, % CacheInfo *source_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o source_info: the source pixel cache. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ClonePixelCacheOnDisk( CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info) { MagickSizeType extent; size_t quantum; ssize_t count; struct stat file_stats; unsigned char *buffer; /* Clone pixel cache on disk with identical morphology. 
*/ if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) || (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse)) return(MagickFalse); if ((lseek(cache_info->file,0,SEEK_SET) < 0) || (lseek(clone_info->file,0,SEEK_SET) < 0)) return(MagickFalse); quantum=(size_t) MagickMaxBufferExtent; if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0)) { #if defined(MAGICKCORE_HAVE_LINUX_SENDFILE) if (cache_info->length < 0x7ffff000) { count=sendfile(clone_info->file,cache_info->file,(off_t *) NULL, (size_t) cache_info->length); if (count == (ssize_t) cache_info->length) return(MagickTrue); if ((lseek(cache_info->file,0,SEEK_SET) < 0) || (lseek(clone_info->file,0,SEEK_SET) < 0)) return(MagickFalse); } #endif quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent); } buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer)); if (buffer == (unsigned char *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); extent=0; while ((count=read(cache_info->file,buffer,quantum)) > 0) { ssize_t number_bytes; number_bytes=write(clone_info->file,buffer,(size_t) count); if (number_bytes != count) break; extent+=number_bytes; } buffer=(unsigned char *) RelinquishMagickMemory(buffer); if (extent != cache_info->length) return(MagickFalse); return(MagickTrue); } static MagickBooleanType ClonePixelCacheRepository( CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info, ExceptionInfo *exception) { #define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource)) #define cache_number_threads(source,destination,chunk,multithreaded) \ num_threads((multithreaded) == 0 ? 1 : \ (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \ (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? 
\ MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \ MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1)) MagickBooleanType optimize, status; NexusInfo **magick_restrict cache_nexus, **magick_restrict clone_nexus; size_t length; ssize_t y; assert(cache_info != (CacheInfo *) NULL); assert(clone_info != (CacheInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); if (cache_info->type == PingCache) return(MagickTrue); length=cache_info->number_channels*sizeof(*cache_info->channel_map); if ((cache_info->storage_class == clone_info->storage_class) && (cache_info->colorspace == clone_info->colorspace) && (cache_info->alpha_trait == clone_info->alpha_trait) && (cache_info->channels == clone_info->channels) && (cache_info->columns == clone_info->columns) && (cache_info->rows == clone_info->rows) && (cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) && (cache_info->metacontent_extent == clone_info->metacontent_extent)) { /* Identical pixel cache morphology. */ if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && ((clone_info->type == MemoryCache) || (clone_info->type == MapCache))) { (void) memcpy(clone_info->pixels,cache_info->pixels, cache_info->number_channels*cache_info->columns*cache_info->rows* sizeof(*cache_info->pixels)); if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) (void) memcpy(clone_info->metacontent,cache_info->metacontent, cache_info->columns*cache_info->rows* clone_info->metacontent_extent*sizeof(unsigned char)); return(MagickTrue); } if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache)) return(ClonePixelCacheOnDisk(cache_info,clone_info)); } /* Mismatched pixel cache morphology. 
*/ cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads); clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads); length=cache_info->number_channels*sizeof(*cache_info->channel_map); optimize=(cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ? MagickTrue : MagickFalse; length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns, clone_info->number_channels*clone_info->columns); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ cache_number_threads(cache_info,clone_info,cache_info->rows,1) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; ssize_t x; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y, cache_info->columns,1,MagickFalse,cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y, clone_info->columns,1,MagickFalse,clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length); if (optimize != MagickFalse) (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length* sizeof(Quantum)); else { const Quantum *magick_restrict p; Quantum *magick_restrict q; /* Mismatched pixel channel map. 
*/ p=cache_nexus[id]->pixels; q=clone_nexus[id]->pixels; for (x=0; x < (ssize_t) cache_info->columns; x++) { ssize_t i; if (x == (ssize_t) clone_info->columns) break; for (i=0; i < (ssize_t) clone_info->number_channels; i++) { PixelChannel channel; PixelTrait traits; channel=clone_info->channel_map[i].channel; traits=cache_info->channel_map[channel].traits; if (traits != UndefinedPixelTrait) *q=*(p+cache_info->channel_map[channel].offset); q++; } p+=cache_info->number_channels; } } status=WritePixelCachePixels(clone_info,clone_nexus[id],exception); } if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) { /* Clone metacontent. */ length=(size_t) MagickMin(cache_info->metacontent_extent, clone_info->metacontent_extent); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ cache_number_threads(cache_info,clone_info,cache_info->rows,1) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y, cache_info->columns,1,MagickFalse,cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y, clone_info->columns,1,MagickFalse,clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; if ((clone_nexus[id]->metacontent != (void *) NULL) && (cache_nexus[id]->metacontent != (void *) NULL)) (void) memcpy(clone_nexus[id]->metacontent, cache_nexus[id]->metacontent,length*sizeof(unsigned char)); status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception); } } clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads); cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads); if 
(cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      /* Log the morphology of the clone: source cache type => clone type. */
      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e s t r o y I m a g e P i x e l C a c h e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImagePixelCache() deallocates memory associated with the pixel
%  cache.
%
%  The format of the DestroyImagePixelCache() method is:
%
%      void DestroyImagePixelCache(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* NULL-safe: images without an attached cache are simply left alone. */
  if (image->cache != (void *) NULL)
    image->cache=DestroyPixelCache(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e s t r o y I m a g e P i x e l s                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImagePixels() deallocates memory associated with the pixel cache.
%
%  The format of the DestroyImagePixels() method is:
%
%      void DestroyImagePixels(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* A registered destroy handler (e.g. a cache-method override) takes
     precedence over the default teardown path. */
  if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL)
    {
      cache_info->methods.destroy_pixel_handler(image);
      return;
    }
  image->cache=DestroyPixelCache(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e s t r o y P i x e l C a c h e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelCache() deallocates memory associated with the pixel cache.
%
%  The format of the DestroyPixelCache() method is:
%
%      Cache DestroyPixelCache(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/

/*
  Close the pixel cache backing file, if open, and release its file-resource
  slot.  Returns MagickFalse when close(2) fails or no file was open.
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    status;

  status=(-1);
  if (cache_info->file != -1)
    {
      status=close(cache_info->file);
      cache_info->file=(-1);
      RelinquishMagickResource(FileResource,1);
    }
  return(status == -1 ? MagickFalse : MagickTrue);
}

/*
  Release the pixel store (memory, mapping, disk file, or distributed server)
  owned by the cache and reset the cache to the undefined state.
*/
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* fallthrough: a map cache is disk-backed, so also close the file
       descriptor and release the disk resource below. */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}

MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /* Reference-counted: only the last reference actually tears down. */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  /* Invert the signature so a stale pointer is caught by later asserts. */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e s t r o y P i x e l C a c h e N e x u s                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
%  The format of the DestroyPixelCacheNexus() method is:
%
%      NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
%        const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o nexus_info: the nexus to destroy.
%
%    o number_threads: the number of nexus threads.
%
*/

/*
  Free the pixel staging buffer owned by a single nexus (heap or mapped blob)
  and reset its bookkeeping fields.
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  if (nexus_info->mapped == MagickFalse)
    (void) RelinquishAlignedMemory(nexus_info->cache);
  else
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}

MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  ssize_t
    i;

  assert(nexus_info != (NexusInfo **) NULL);
  /* 2*number_threads entries are released: this table pairs each thread's
     nexus with a second nexus (see the matching acquire). */
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    if (nexus_info[i]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[i]);
    nexus_info[i]->signature=(~MagickCoreSignature);
  }
  /* The NexusInfo structs were carved from one arena anchored at slot 0. */
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t A u t h e n t i c M e t a c o n t e n t                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticMetacontent() returns the authentic metacontent corresponding
%  with the last call to QueueAuthenticPixels() or GetVirtualPixels().  NULL is
%  returned if the associated pixels are not available.
%
%  The format of the GetAuthenticMetacontent() method is:
%
%      void *GetAuthenticMetacontent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Delegate to a registered handler when one is installed. */
  if (cache_info->methods.get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      void
        *metacontent;

      metacontent=cache_info->methods.
        get_authentic_metacontent_from_handler(image);
      return(metacontent);
    }
  /* Otherwise answer from this thread's own nexus. */
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticMetacontentFromCache() returns the meta-content corresponding
%  with the last call to QueueAuthenticPixelsCache() or
%  GetAuthenticPixelsCache().
%
%  The format of the GetAuthenticMetacontentFromCache() method is:
%
%      void *GetAuthenticMetacontentFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ static void *GetAuthenticMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL % operations. % % The format of the GetAuthenticOpenCLBuffer() method is: % % cl_mem GetAuthenticOpenCLBuffer(const Image *image, % MagickCLDevice device,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o device: the device to use. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  /* A shared or uninitialized cache must first be synced/cloned so this
     image owns a private cache before an OpenCL buffer is bound to it. */
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *) image->cache;
    }
  /* Only heap-backed memory caches can be wrapped in a cl_mem. */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  /* An existing buffer bound to a different device context must be copied. */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    RetainOpenCLMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t A u t h e n t i c P i x e l C a c h e N e x u s                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
%  disk pixel cache as defined by the geometry parameters.  A pointer to the
%  pixels is returned if the pixels are transferred, otherwise a NULL is
%  returned.
%
%  The format of the GetAuthenticPixelCacheNexus() method is:
%
%      Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to return.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  /*
    Transfer pixels from the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* When the nexus maps the cache pixels directly, no read is needed. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if (cache_info->metacontent_extent != 0)
    if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse)
      return((Quantum *) NULL);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t A u t h e n t i c P i x e l s F r o m C a c h e                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelsFromCache() returns the pixels associated with the last
%  call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
%  The format of the GetAuthenticPixelsFromCache() method is:
%
%      Quantum *GetAuthenticPixelsFromCache(const Image image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
  const int
    thread_id = GetOpenMPThreadId();

  CacheInfo
    *magick_restrict info;

  /* Validate the image and cache, then return the pixels staged in the
     calling thread's nexus. */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(info->nexus_info[thread_id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t A u t h e n t i c P i x e l Q u e u e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelQueue() returns the authentic pixels associated
%  corresponding with the last call to QueueAuthenticPixels() or
%  GetAuthenticPixels().
%
%  The format of the GetAuthenticPixelQueue() method is:
%
%      Quantum *GetAuthenticPixelQueue(const Image image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Delegate to a registered handler when one is installed. */
  if (cache_info->methods.get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    return(cache_info->methods.get_authentic_pixels_from_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t A u t h e n t i c P i x e l s                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixels() obtains a pixel region for read/write access.  If the
%  region is successfully accessed, a pointer to a Quantum array
%  representing the region is returned, otherwise NULL is returned.
%
%  The returned pointer may point to a temporary working copy of the pixels
%  or it may point to the original pixels in memory.  Performance is maximized
%  if the selected region is part of one row, or one or more full rows, since
%  then there is opportunity to access the pixels in-place (without a copy)
%  if the image is in memory, or in a memory-mapped file.  The returned pointer
%  must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image has corresponding metacontent, call
%  GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
%  meta-content corresponding to the region.  Once the Quantum array has
%  been updated, the changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the GetAuthenticPixels() method is:
%
%      Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Delegate to a registered handler when one is installed. */
  if (cache_info->methods.get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    {
      pixels=cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
        rows,exception);
      return(pixels);
    }
  assert(id < (int) cache_info->number_threads);
  pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t A u t h e n t i c P i x e l s C a c h e                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
%  as defined by the geometry parameters.  A pointer to the pixels is returned
%  if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetAuthenticPixelsCache() method is:
%
%      Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  /* Defensive runtime check in addition to the assert above. */
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t I m a g e E x t e n t                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageExtent() returns the extent of the pixels associated corresponding
%  with the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
%  The format of the GetImageExtent() method is:
%
%      MagickSizeType GetImageExtent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t I m a g e P i x e l C a c h e                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCache() ensures that there is only a single reference to the
%  pixel cache to be modified, updating the provided cache pointer to point to
%  a clone of the original pixel cache if necessary.
%
%  The format of the GetImagePixelCache method is:
%
%      Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone: any value other than MagickFalse clones the cache pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Return MagickTrue when the image's geometry, storage class, colorspace,
  channel map, and metacontent extent all match its pixel cache.
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  const PixelChannelMap
    *magick_restrict p,
    *magick_restrict q;

  /*
    Does the image match the pixel cache morphology?
  */
  cache_info=(CacheInfo *) image->cache;
  p=image->channel_map;
  q=cache_info->channel_map;
  if ((image->storage_class != cache_info->storage_class) ||
      (image->colorspace != cache_info->colorspace) ||
      (image->alpha_trait != cache_info->alpha_trait) ||
      (image->channels != cache_info->channels) ||
      (image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows) ||
      (image->number_channels != cache_info->number_channels) ||
      (memcmp(p,q,image->number_channels*sizeof(*p)) != 0) ||
      (image->metacontent_extent != cache_info->metacontent_extent) ||
      (cache_info->nexus_info == (NexusInfo **) NULL))
    return(MagickFalse);
  return(MagickTrue);
}

static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  /* Optional CPU throttle: sleep once every 32 calls when configured. */
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=GetMagickTime();
    }
  /* Enforce the time resource limit: close any open cache file and abort. */
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /* Copy-on-write: a shared or read-only cache must be replaced by a
     private clone before the caller may modify it.  The condition is
     re-checked after taking the cache semaphore (double-checked). */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* Only copy the pixel data itself when requested. */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /* Drop our reference to the old cache only after its lock is released. */
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      if (image->type != UndefinedType)
        image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t I m a g e P i x e l C a c h e T y p e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
%  DiskCache, MemoryCache, MapCache, or PingCache.
%
%  The format of the GetImagePixelCacheType() method is:
%
%      CacheType GetImagePixelCacheType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t O n e A u t h e n t i c P i x e l                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(const Image image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y: These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Copy one pixel from `source' into the channel-indexed `destination' array.
  When `source' is NULL the image background color is written instead and
  MagickFalse is returned.
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  ssize_t
    i;

  if (source == (const Quantum *) NULL)
    {
      destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
      destination[GreenPixelChannel]=ClampToQuantum(
        image->background_color.green);
      destination[BluePixelChannel]=ClampToQuantum(
        image->background_color.blue);
      destination[BlackPixelChannel]=ClampToQuantum(
        image->background_color.black);
      destination[AlphaPixelChannel]=ClampToQuantum(
        image->background_color.alpha);
      return(MagickFalse);
    }
  /* Scatter the packed source channels to their canonical channel slots. */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    destination[channel]=source[i];
  }
  return(MagickTrue);
}

MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict q;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  /* Delegate to a registered handler when one is installed. */
  if (cache_info->methods.get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
      pixel,exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  return(CopyPixel(image,q,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t O n e A u t h e n t i c P i x e l F r o m C a c h e                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location.
The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(const Image image,
%        const ssize_t x,const ssize_t y,Quantum *pixel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y: These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict q;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* Zero the output first so failed reads yield deterministic contents. */
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id],
    exception);
  return(CopyPixel(image,q,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t O n e V i r t u a l P i x e l                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixel() returns a single virtual pixel at the specified
%  (x,y) location.  The image background color is returned if an error occurs.
%  If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y: These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  /* delegate to the installed cache-method hook when one is present */
  if (cache_info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(cache_info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   G e t O n e V i r t u a l P i x e l F r o m C a c h e                     %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelFromCache() returns a single virtual pixel at the
%  specified (x,y) location.  The image background color is returned if an
%  error occurs.
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y: These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  /* read through this thread's private nexus */
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,p,pixel));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   G e t O n e V i r t u a l P i x e l I n f o                               %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.  If
%  you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y: these values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* initialize `pixel' so the caller sees defaults even on failure */
  GetPixelInfo(image,pixel);
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (p == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,p,pixel);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   G e t P i x e l C a c h e C o l o r s p a c e                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
%    o cache: the pixel cache.
% */ MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheFilename() returns the filename associated with the pixel % cache. % % The format of the GetPixelCacheFilename() method is: % % const char *GetPixelCacheFilename(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const char *GetPixelCacheFilename(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->cache_filename); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheMethods() initializes the CacheMethods structure. % % The format of the GetPixelCacheMethods() method is: % % void GetPixelCacheMethods(CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache_methods: Specifies a pointer to a CacheMethods structure. 
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  assert(cache_methods != (CacheMethods *) NULL);
  /* start from a zeroed structure, then install the default cache handlers */
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   G e t P i x e l C a c h e N e x u s E x t e n t                           %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheNexusExtent() returns the extent of the pixels associated
%  with the last call to SetPixelCacheNexusPixels() or
%  GetPixelCacheNexusPixels().
%
%    o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    extent;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent == 0)
    {
      /* empty nexus region: fall back to the full cache extent */
      return((MagickSizeType) cache_info->columns*cache_info->rows);
    }
  return(extent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   G e t P i x e l C a c h e P i x e l s                                     %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCachePixels() returns the pixels associated with the specified
%  image.
%
%    o image: the image.
%
%    o length: the pixel cache length.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); assert(length != (MagickSizeType *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=cache_info->length; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); return((void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheStorageClass() returns the class type of the pixel cache. % % The format of the GetPixelCacheStorageClass() method is: % % ClassType GetPixelCacheStorageClass(Cache cache) % % A description of each parameter follows: % % o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass. % % o cache: the pixel cache. % */ MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e T i l e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheTileSize() returns the pixel cache tile size. 
%
%    o image: the image.
%
%    o width: the optimized cache tile width in pixels.
%
%    o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* size the tile so one row of tiles stays within a small working set;
     disk-backed caches use a larger tile to amortize I/O */
  *width=2048UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum));
  if (GetImagePixelCacheType(image) == DiskCache)
    *width=8192UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum));
  *height=(*width);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   G e t P i x e l C a c h e V i r t u a l M e t h o d                       %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
%  pixel cache.  A virtual pixel is any pixel access that is outside the
%  boundaries of the image cache.
%
%    o image: the image.
% */ MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->virtual_pixel_method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromCache() returns the meta-content corresponding with % the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualMetacontentFromCache() method is: % % void *GetVirtualMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const void *GetVirtualMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const void *magick_restrict metacontent; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); metacontent=GetVirtualMetacontentFromNexus(cache_info, cache_info->nexus_info[id]); return(metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMetacontentFromNexus() returns the meta-content for the specified % cache nexus. 
%
%    o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* an undefined storage class means the cache holds no meta-content */
  if (cache_info->storage_class == UndefinedClass)
    return((void *) NULL);
  return(nexus_info->metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   G e t V i r t u a l M e t a c o n t e n t                                 %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualMetacontent() returns the virtual metacontent corresponding with
%  the last call to QueueAuthenticPixels() or GetVirtualPixels().  NULL is
%  returned if the meta-content are not available.
%
%    o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict metacontent;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* NOTE(review): the handler is invoked without a NULL check, unlike the
     sibling accessors above — presumably it is always installed by
     GetPixelCacheMethods(); confirm against cache construction paths */
  metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
  if (metacontent != (void *) NULL)
    return(metacontent);
  assert(id < (int) cache_info->number_threads);
  metacontent=GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]);
  return(metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   G e t V i r t u a l P i x e l C a c h e N e x u s                         %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
%  pixel cache as defined by the geometry parameters.  A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to acquire.
%
%    o exception: return any errors or warnings in this structure.
% */ static ssize_t DitherMatrix[64] = { 0, 48, 12, 60, 3, 51, 15, 63, 32, 16, 44, 28, 35, 19, 47, 31, 8, 56, 4, 52, 11, 59, 7, 55, 40, 24, 36, 20, 43, 27, 39, 23, 2, 50, 14, 62, 1, 49, 13, 61, 34, 18, 46, 30, 33, 17, 45, 29, 10, 58, 6, 54, 9, 57, 5, 53, 42, 26, 38, 22, 41, 25, 37, 21 }; static inline ssize_t DitherX(const ssize_t x,const size_t columns) { ssize_t index; index=x+DitherMatrix[x & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) columns) return((ssize_t) columns-1L); return(index); } static inline ssize_t DitherY(const ssize_t y,const size_t rows) { ssize_t index; index=y+DitherMatrix[y & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) rows) return((ssize_t) rows-1L); return(index); } static inline ssize_t EdgeX(const ssize_t x,const size_t columns) { if (x < 0L) return(0L); if (x >= (ssize_t) columns) return((ssize_t) (columns-1)); return(x); } static inline ssize_t EdgeY(const ssize_t y,const size_t rows) { if (y < 0L) return(0L); if (y >= (ssize_t) rows) return((ssize_t) (rows-1)); return(y); } static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns) { return((ssize_t) (columns*GetPseudoRandomValue(random_info))); } static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows) { return((ssize_t) (rows*GetPseudoRandomValue(random_info))); } static inline MagickModulo VirtualPixelModulo(const ssize_t offset, const size_t extent) { MagickModulo modulo; modulo.quotient=offset/((ssize_t) extent); modulo.remainder=offset % ((ssize_t) extent); if ((modulo.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0)) { modulo.quotient-=1; modulo.remainder+=((ssize_t) extent); } return(modulo); } MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickOffsetType offset; 
MagickSizeType length, number_pixels; NexusInfo *magick_restrict virtual_nexus; Quantum *magick_restrict pixels, virtual_pixel[MaxPixelChannels]; const Quantum *magick_restrict p; const void *magick_restrict r; Quantum *magick_restrict q; ssize_t i, u; unsigned char *magick_restrict s; ssize_t v; void *magick_restrict virtual_metacontent; /* Acquire pixels. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return((const Quantum *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows, ((image->channels & WriteMaskChannel) != 0) || ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse, nexus_info,exception); if (pixels == (Quantum *) NULL) return((const Quantum *) NULL); q=pixels; offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+ nexus_info->region.width-1L; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels)) if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) && (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows)) { MagickBooleanType status; /* Pixel request is inside cache extents. */ if (nexus_info->authentic_pixel_cache != MagickFalse) return(q); status=ReadPixelCachePixels(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); if (cache_info->metacontent_extent != 0) { status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); } return(q); } /* Pixel request is outside cache extents. 
*/ virtual_nexus=nexus_info->virtual_nexus; s=(unsigned char *) nexus_info->metacontent; (void) memset(virtual_pixel,0,cache_info->number_channels* sizeof(*virtual_pixel)); virtual_metacontent=(void *) NULL; switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: case EdgeVirtualPixelMethod: case CheckerTileVirtualPixelMethod: case HorizontalTileVirtualPixelMethod: case VerticalTileVirtualPixelMethod: { if (cache_info->metacontent_extent != 0) { /* Acquire a metacontent buffer. */ virtual_metacontent=(void *) AcquireQuantumMemory(1, cache_info->metacontent_extent); if (virtual_metacontent == (void *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), CacheError,"UnableToGetCacheNexus","`%s'",image->filename); return((const Quantum *) NULL); } (void) memset(virtual_metacontent,0,cache_info->metacontent_extent); } switch (virtual_pixel_method) { case BlackVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case GrayVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange/2, virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case TransparentVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,TransparentAlpha,virtual_pixel); break; } case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } default: { 
SetPixelRed(image,ClampToQuantum(image->background_color.red), virtual_pixel); SetPixelGreen(image,ClampToQuantum(image->background_color.green), virtual_pixel); SetPixelBlue(image,ClampToQuantum(image->background_color.blue), virtual_pixel); SetPixelBlack(image,ClampToQuantum(image->background_color.black), virtual_pixel); SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha), virtual_pixel); break; } } break; } default: break; } for (v=0; v < (ssize_t) rows; v++) { ssize_t y_offset; y_offset=y+v; if ((virtual_pixel_method == EdgeVirtualPixelMethod) || (virtual_pixel_method == UndefinedVirtualPixelMethod)) y_offset=EdgeY(y_offset,cache_info->rows); for (u=0; u < (ssize_t) columns; u+=length) { ssize_t x_offset; x_offset=x+u; length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u); if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) || ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) || (length == 0)) { MagickModulo x_modulo, y_modulo; /* Transfer a single pixel. 
*/ length=(MagickSizeType) 1; switch (virtual_pixel_method) { case EdgeVirtualPixelMethod: default: { p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns), EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info, nexus_info->virtual_nexus); break; } case RandomVirtualPixelMethod: { if (cache_info->random_info == (RandomInfo *) NULL) cache_info->random_info=AcquireRandomInfo(); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, RandomX(cache_info->random_info,cache_info->columns), RandomY(cache_info->random_info,cache_info->rows),1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case DitherVirtualPixelMethod: { p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, DitherX(x_offset,cache_info->columns), DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case TileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case MirrorVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); if ((x_modulo.quotient & 0x01) == 1L) x_modulo.remainder=(ssize_t) cache_info->columns- x_modulo.remainder-1L; y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if ((y_modulo.quotient & 0x01) == 1L) y_modulo.remainder=(ssize_t) cache_info->rows- y_modulo.remainder-1L; p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case HorizontalTileEdgeVirtualPixelMethod: { 
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case VerticalTileEdgeVirtualPixelMethod: { y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { p=virtual_pixel; r=virtual_metacontent; break; } case CheckerTileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L) { p=virtual_pixel; r=virtual_metacontent; break; } p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case HorizontalTileVirtualPixelMethod: { if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case VerticalTileVirtualPixelMethod: { if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); 
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } } if (p == (const Quantum *) NULL) break; (void) memcpy(q,p,(size_t) (cache_info->number_channels*length* sizeof(*p))); q+=cache_info->number_channels; if ((s != (void *) NULL) && (r != (const void *) NULL)) { (void) memcpy(s,r,(size_t) cache_info->metacontent_extent); s+=cache_info->metacontent_extent; } continue; } /* Transfer a run of pixels. */ p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset, (size_t) length,1UL,virtual_nexus,exception); if (p == (const Quantum *) NULL) break; r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); (void) memcpy(q,p,(size_t) (cache_info->number_channels*length* sizeof(*p))); q+=cache_info->number_channels*length; if ((r != (void *) NULL) && (s != (const void *) NULL)) { (void) memcpy(s,r,(size_t) length); s+=length*cache_info->metacontent_extent; } } if (u < (ssize_t) columns) break; } /* Free resources. */ if (virtual_metacontent != (void *) NULL) virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent); if (v < (ssize_t) rows) return((const Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel % cache as defined by the geometry parameters. A pointer to the pixels % is returned if the pixels are transferred, otherwise a NULL is returned. 
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* read through this thread's private nexus */
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[id],exception);
  return(p);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%   G e t V i r t u a l P i x e l Q u e u e                                   %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated corresponding
%  with the last call to QueueAuthenticPixels() or GetVirtualPixels().
%
%    o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  GetVirtualPixelsHandler
    handler;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Honor a registered cache-method override when one is present.
  */
  handler=cache_info->methods.get_virtual_pixels_handler;
  if (handler != (GetVirtualPixelsHandler) NULL)
    return(handler(image));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   V i r t u a l   P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixels() returns an immutable pixel region.  If the
%  region is successfully accessed, a pointer to it is returned, otherwise
%  NULL is returned.  The returned pointer may point to a temporary working
%  copy of the pixels or it may point to the original pixels in memory.
%  Performance is maximized if the selected region is part of one row, or one
%  or more full rows, since there is opportunity to access the pixels in-place
%  (without a copy) if the image is in memory, or in a memory-mapped file.  The
%  returned pointer must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetVirtualMetacontent() after invoking GetVirtualPixels() to
%  access the meta-content (of type void) corresponding to the
%  region.
%
%  If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
%  Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
%  safe.  In a threaded environment, use GetCacheViewVirtualPixels() or
%  GetCacheViewAuthenticPixels() instead.
%
%  The format of the GetVirtualPixels() method is:
%
%      const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  GetVirtualPixelHandler
    handler;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Honor a registered cache-method override when one is present.
  */
  handler=cache_info->methods.get_virtual_pixel_handler;
  if (handler != (GetVirtualPixelHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,columns,rows,
      exception));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   V i r t u a l   P i x e l s   F r o m   C a c h e                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsCache() returns the pixels associated with the
%  last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
%  The format of the GetVirtualPixelsCache() method is:
%
%      Quantum *GetVirtualPixelsCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ static const Quantum *GetVirtualPixelsCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsNexus() returns the pixels associated with the specified % cache nexus. % % The format of the GetVirtualPixelsNexus() method is: % % const Quantum *GetVirtualPixelsNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the colormap pixels. % */ MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((Quantum *) NULL); return((const Quantum *) nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a s k P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MaskPixelCacheNexus() masks the cache nexus as defined by the composite mask. % The method returns MagickTrue if the pixel region is masked, otherwise % MagickFalse. 
%
%  The format of the MaskPixelCacheNexus() method is:
%
%      MagickBooleanType MaskPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to clip.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Blend authentic pixel p over nexus pixel q, weighted by the composite-mask
  value (alpha) and the destination alpha (beta).  A mask value within
  MagickEpsilon of OpaqueAlpha is a fast path: p is returned unchanged.
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  double
    mask_alpha;

  Quantum
    pixel;

  if (fabs(alpha-OpaqueAlpha) < MagickEpsilon)
    return(p);
  /*
    PerceptibleReciprocal() guards against a zero divisor when the combined
    alpha term is 1.0.
  */
  mask_alpha=1.0-QuantumScale*QuantumScale*alpha*beta;
  mask_alpha=PerceptibleReciprocal(mask_alpha);
  pixel=ClampToQuantum(mask_alpha*MagickOver_((double) p,alpha,(double) q,
    beta));
  return(pixel);
}

static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply composite mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);  /* no composite mask: nothing to do */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);  /* empty region: nothing to do */
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /*
    p walks the authentic pixels for the nexus region; q walks the nexus
    buffer being masked.  Both advance by the full channel stride per pixel.
  */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      ssize_t
        i;

      mask_alpha=(double) GetPixelCompositeMask(image,p);
      for (i=0; i < (ssize_t) image->number_channels; i++)
      {
        PixelChannel
          channel = GetPixelChannelChannel(image,i);

        PixelTrait
          traits = GetPixelChannelTraits(image,channel);

        /*
          Only channels flagged for update participate in the blend.
        */
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],(MagickRealType)
          GetPixelAlpha(image,q));
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   O p e n   P i x e l   C a c h e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OpenPixelCache() allocates the pixel cache.  This includes defining the cache
%  dimensions, allocating space for the image pixels and optionally the
%  metacontent, and memory mapping the cache if it is disk based.  The cache
%  nexus array is initialized as well.
%
%  The format of the OpenPixelCache() method is:
%
%      MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /*
          Try exclusive create first; fall back to opening an existing file.
        */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  /*
    Replace any previously open descriptor before recording the new one.
  */
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}

/*
  Write `length` bytes from buffer to the cache file at `offset`.  Retries on
  EINTR; returns the number of bytes written, which is less than `length` on
  any other error.
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}

/*
  Extend the on-disk pixel cache file to at least `length` bytes by writing a
  single NUL byte at length-1 (sparse extension), optionally backed by
  posix_fallocate() when synchronize is requested.  Rewinds the file on
  success.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /*
    Reject lengths that do not round-trip through a signed offset.
  */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file is already large enough */
  else
    {
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}

/*
  Allocate the pixel cache, preferring (in order): ping (no pixels), heap or
  anonymous-mapped memory, a distributed remote cache, a memory-mapped disk
  file, and finally a plain disk file.
*/
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *hosts,
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  if (GetMagickResourceLimit(ListLengthResource) != MagickResourceInfinity)
    {
      length=GetImageListLength(image);
      if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
        ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
          image->filename);
    }
  /*
    Snapshot the current cache so its pixels can be cloned into the new
    storage (and released) if we are re-opening for write.
  */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->channels=image->channels;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=cache_info->number_channels*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /*
    Recompute columns from the product as an arithmetic-overflow check.
  */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      cache_info->type=PingCache;  /* ping: geometry only, no pixel storage */
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;  /* persistent caches always go to disk */
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /*
                Allocation failed: keep the previous pixels and fall through
                to the disk strategies below.
              */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  cache_info->type=DiskCache;
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if (length == (MagickSizeType) ((size_t) length))
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status != MagickFalse)
        {
          cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
            cache_info->offset,(size_t) cache_info->length);
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /*
                Mapping failed: restore prior pixels and stay with DiskCache.
              */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
              RelinquishMagickResource(MapResource,cache_info->length);
            }
          else
            {
              /*
                Create file-backed memory-mapped pixel cache.
              */
              (void) ClosePixelCacheOnDisk(cache_info);
              cache_info->type=MapCache;
              cache_info->mapped=MagickTrue;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    cache_info->file,type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r s i s t   P i x e l   C a c h e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PersistPixelCache() attaches to or initializes a persistent pixel cache.  A
%  persistent pixel cache is one that resides on disk and is not destroyed
%  when the program exits.
%
%  The format of the PersistPixelCache() method is:
%
%      MagickBooleanType PersistPixelCache(Image *image,const char *filename,
%        const MagickBooleanType attach,MagickOffsetType *offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero initializes the persistent pixel cache.
%
%    o initialize: A value other than zero initializes the persistent pixel
%      cache.
%
%    o offset: the offset in the persistent cache to store pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Advance the caller's offset to the next page boundary past this
        image's pixels.
      */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e   A u t h e n t i c   P i x e l   C a c h e   N e x u s         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates an
region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelCacheNexus() method is:
%
%      Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        const MagickBooleanType clone,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to set.
%
%    o clone: clone the pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  /*
    Reject regions whose first or last pixel falls outside the cache; a
    negative offset indicates signed overflow of the index arithmetic.
  */
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e   A u t h e n t i c   P i x e l s   C a c h e                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates an region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelsCache() method is:
%
%      Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a write-only region through this thread's private cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u e u e   A u t h e n t i c   P i x e l s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixels() queues a mutable pixel region.  If the region is
%  successfully initialized a pointer to a Quantum array representing the
%  region is returned, otherwise NULL is returned.  The returned pointer may
%  point to a temporary working buffer for the pixels or it may point to the
%  final location of the pixels in memory.
%
%  Write-only access means that any existing pixel values corresponding to
%  the region are ignored.  This is useful if the initial image is being
%  created from scratch, or if the existing pixel values are to be
%  completely replaced without need to refer to their pre-existing values.
%  The application is free to read and write the pixel buffer returned by
%  QueueAuthenticPixels() any way it pleases.  QueueAuthenticPixels() does not
%  initialize the pixel array values.  Initializing pixel array values is the
%  application's responsibility.
%
%  Performance is maximized if the selected region is part of one row, or
%  one or more full rows, since then there is opportunity to access the
%  pixels in-place (without a copy) if the image is in memory, or in a
%  memory-mapped file.  The returned pointer must *never* be deallocated
%  by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
%  obtain the meta-content (of type void) corresponding to the region.
%  Once the Quantum (and/or metacontent) array has been updated, the
%  changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the QueueAuthenticPixels() method is:
%
%      Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Delegate to a subclassed queue handler when one is registered.
  */
  if (cache_info->methods.queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,
      columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  /*
    No handler installed: queue on this thread's private cache nexus.
  */
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d P i x e l C a c h e M e t a c o n t e n t                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCacheMetacontent() reads metacontent from the specified region of
%  the pixel cache.
%
%  The format of the ReadPixelCacheMetacontent() method is:
%
%      MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the metacontent.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  ReadPixelCacheRegion() reads up to `length' bytes at byte offset `offset'
  from the pixel-cache file into `buffer', retrying partial reads and EINTR.
  Returns the number of bytes actually read (callers compare it against
  `length') or -1 if the initial seek fails.
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  /* no pread(): seek once, then use plain read() below */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      MAGICK_SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* EOF or hard error; return short byte count */
      }
  }
  return(i);
}

/*
  Read the metacontent of the nexus region from the pixel cache backing
  store (memory, disk, or distributed server) into nexus_info->metacontent.
  Returns MagickFalse if the cache has no metacontent or on I/O failure.
*/
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  ssize_t
    y;

  unsigned char
    *magick_restrict q;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* nexus aliases the cache directly -- nothing to copy */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* full-width region: copy all rows as one contiguous run */
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* metacontent is stored on disk after all the pixel channels */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* fetch one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y short of rows means one of the per-row reads above failed */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d P i x e l C a c h e P i x e l s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCachePixels() reads pixels from the specified region of the pixel
%  cache.
%
%  The format of the ReadPixelCachePixels() method is:
%
%      MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Read the pixels of the nexus region from the pixel cache backing store
  (memory, disk, or distributed server) into nexus_info->pixels.  All the
  offset/length arithmetic below is overflow-checked before use.
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  Quantum
    *magick_restrict q;

  ssize_t
    y;

  size_t
    number_channels,
    rows;

  /* nexus aliases the cache directly -- nothing to copy */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  /* overflow check: divide back and compare */
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      Quantum
        *magick_restrict p;

      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* full-width region: copy all rows as one contiguous run */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* fetch one row per request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y short of rows means one of the per-row reads above failed */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double)
      nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e f e r e n c e P i x e l C a c h e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReferencePixelCache() increments the reference count associated with the
%  pixel cache returning a pointer to the cache.
%
%  The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache_info)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* the reference count is guarded by the cache semaphore */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t P i x e l C a c h e C h a n n e l s                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetPixelCacheChannels() resets the pixel cache channels.
%
%  The format of the ResetPixelCacheChannels method is:
%
%      void ResetPixelCacheChannels(Image *)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* re-derive the channel count from the image's current pixel traits */
  cache_info->number_channels=GetPixelChannels(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t C a c h e A n o n y m o u s M e m o r y                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetCacheAnonymousMemory() resets the anonymous_memory value.
%
%  The format of the ResetCacheAnonymousMemory method is:
%
%      void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
  /* module-level policy flag; see AcquireCacheNexusPixels() for its use */
  cache_anonymous_memory=0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e s e t P i x e l C a c h e E p o c h                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetPixelCacheEpoch() resets the pixel cache epoch.
%
%  The format of the ResetPixelCacheEpoch method is:
%
%      void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /* module-level counter; resetting forces cache revalidation */
  cache_epoch=0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t P i x e l C a c h e M e t h o d s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
%  The format of the SetPixelCacheMethods() method is:
%
%      SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
/*
  Copy each non-NULL handler from cache_methods into the cache's method
  table.  A NULL entry in cache_methods leaves the currently installed
  handler untouched, so callers may override methods selectively.
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    FIX: previously this handler was loaded from cache_info->methods (the
    table being updated) instead of cache_methods (the source), unlike every
    sibling handler above.  That tested the wrong struct and could clobber an
    installed handler with NULL when cache_methods left this entry unset.
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S e t P i x e l C a c h e N e x u s P i x e l s                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheNexusPixels() defines the region of the cache for the
%  specified cache nexus.
%
%  The format of the SetPixelCacheNexusPixels() method is:
%
%      Quantum SetPixelCacheNexusPixels(
%        const CacheInfo *magick_restrict cache_info,const MapMode mode,
%        const ssize_t x,const ssize_t y,const size_t width,const size_t height,
%        const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o x,y,width,height: define the region of this particular cache nexus.
%
%    o buffered: if true, nexus pixels are buffered.
%
%    o nexus_info: the cache nexus to set.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  AcquireCacheNexusPixels() allocates `length' bytes of staging storage for a
  nexus, either as zeroed aligned heap memory or as an anonymous memory map
  (when cache_anonymous_memory > 0).  On success nexus_info->cache/length/
  mapped are set; on failure an exception is raised and MagickFalse returned.
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /* reject lengths that do not fit in size_t on this platform */
  if (length != (MagickSizeType) ((size_t) length))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory <= 0)
    {
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  else
    {
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}

/*
  Hint the CPU to prefetch the start of the nexus pixel buffer; a read/write
  hint is chosen from `mode'.  No-op for buffers below one cache line.
*/
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  if (mode == ReadMode)
    {
      MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,
        0,1);
      return;
    }
  MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,1,1);
}

/*
  ValidatePixelOffset() returns MagickFalse when x +/- a would overflow the
  ssize_t range, guarding the region offset arithmetic below.
*/
static inline MagickBooleanType ValidatePixelOffset(const ssize_t x,
  const size_t a)
{
  if ((x >= 0) && (x >= ((ssize_t) MAGICK_SSIZE_MAX-(ssize_t) a)))
    return(MagickFalse);
  if (x <= ((ssize_t) MAGICK_SSIZE_MIN+(ssize_t) a))
    return(MagickFalse);
  return(MagickTrue);
}

/*
  Bind a nexus to the requested region: either alias the in-memory cache
  directly (full rows / single-row regions wholly inside the image) or point
  the nexus at a private staging buffer that is (re)allocated as needed.
  Returns the nexus pixel pointer or NULL with an exception on failure.
*/
static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit) ||
      (ValidatePixelOffset(x,width) == MagickFalse) ||
      (ValidatePixelOffset(y,height) == MagickFalse))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Aliasing is only possible when the region is contiguous in the cache:
        full-width rows, or a single row fully inside the image bounds.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
           (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  number_pixels=(MagickSizeType) width*height;
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  status=MagickTrue;
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        /* existing staging buffer too small: grow it */
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  /* metacontent is stored immediately after the pixel data in the buffer */
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t P i x e l C a c h e V i r t u a l M e t h o d                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
%  pixel cache and returns the previous setting.  A virtual pixel is any pixel
%  access that is outside the boundaries of the image cache.
%
%  The format of the SetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
%        const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  SetCacheAlphaChannel() sets the alpha component of every pixel in the image
  to `alpha' and marks the image as blended (alpha_trait=BlendPixelTrait).
  Used below when switching virtual pixel methods that require alpha.
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* a previous row failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
  Install a new virtual-pixel method and return the previous one.  Some
  methods require the image to carry an alpha channel or an RGB colorspace;
  those side effects are applied here.
*/
MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* background color may need alpha and/or a non-gray colorspace */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}

#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c A u t h e n t i c   O p e n C L   B u f f e r                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
%  been completed and updates the host memory.
%
%  The format of the SyncAuthenticOpenCLBuffer() method is:
%
%      void SyncAuthenticOpenCLBuffer(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /* only memory caches carry an OpenCL buffer to reconcile */
  if ((cache_info->type != MemoryCache) ||
      (cache_info->opencl == (MagickCLCacheInfo) NULL))
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}

MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  cache_info=(CacheInfo *) image->cache;
  CopyOpenCLBuffer(cache_info);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c A u t h e n t i c P i x e l C a c h e N e x u s                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
%  in-memory or disk cache.  The method returns MagickTrue if the pixel region
%  is synced, otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelCacheNexus() method is:
%
%      MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to sync.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  if (image->mask_trait != UpdatePixelTrait)
    {
      /* apply write/composite masks before committing the region */
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /* nexus aliases the cache; nothing to write back, just mark tainted */
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c A u t h e n t i c P i x e l C a c h e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
%  or disk cache.  The method returns MagickTrue if the pixel region is synced,
%  otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelsCache() method is:
%
%      MagickBooleanType SyncAuthenticPixelsCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /*
    Commit this thread's private cache nexus back to the pixel cache.
  */
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c A u t h e n t i c P i x e l s                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncAuthenticPixels() method is:
%
%      MagickBooleanType SyncAuthenticPixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    If a cache-method override is installed, delegate to it; otherwise sync
    this thread's nexus directly.
  */
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    {
      status=cache_info->methods.sync_authentic_pixels_handler(image,
        exception);
      return(status);
    }
  assert(id < (int) cache_info->number_threads);
  status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c I m a g e P i x e l C a c h e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncImagePixelCache() method is:
%
%      MagickBooleanType SyncImagePixelCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    GetImagePixelCache() performs the actual flush; success is signalled by a
    non-NULL cache pointer.
  */
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e P i x e l C a c h e M e t a c o n t e n t                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCacheMetacontent() writes the meta-content to the specified region
%  of the pixel cache.
%
%  The format of the WritePixelCacheMetacontent() method is:
%
%      MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the meta-content.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const unsigned char
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /*
    Nothing to do if the nexus aliases the authentic cache directly.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /*
    offset: first metacontent cell of the region; length: bytes per row;
    extent: bytes for the whole region.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.  When the region spans the full
        cache width it is contiguous, so collapse the copy into one row.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        extent is repurposed here: columns*rows scaled below by the pixel
        size skips past the pixel plane that precedes metacontent on disk.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache, one row per request unless
        the region is contiguous and small enough for a single transfer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    y < rows means one of the writes above broke out early: report failure.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   W r i t e P i x e l C a c h e P i x e l s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCachePixels() writes image pixels to the specified region of the
%  pixel cache.
%
%  The format of the WritePixelCachePixels() method is:
%
%      MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const Quantum
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  /*
    Nothing to do if the nexus aliases the authentic cache directly.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /*
    offset: first pixel of the region; length: bytes per row; extent: bytes
    for the whole region.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.  When the region spans the full cache width
        it is contiguous, so collapse the copy into one row.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache, one row per request unless the
        region is contiguous and small enough for a single transfer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    y < rows means one of the writes above broke out early: report failure.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
myFunc_cilk.h
//#define __declspec(x) // Rob Farber #include <stdlib.h> #include <string.h> #include <stdint.h> #include <malloc.h> #include <math.h> //#include <omp.h> #include <time.h> #include <sys/time.h> #include <cilk/cilk.h> #include <cilk/reducer_opadd.h> #define MICRO_IN_SEC 1000000.00 #define MIC_DEV 0 #define ALLOC alloc_if(1) free_if(0) #define FREE alloc_if(0) free_if(1) #define REUSE alloc_if(0) free_if(0) // Use a struct to pass and get data from the objective function typedef struct userData { // Data information int nExamples; // __declspec(align(64)) float * restrict example; float * example; // __declspec(align(64)) float * restrict param; float * param; // Timing information int isWarmup; double timeObjFunc; int countObjFunc; double timeDataLoad; double minTime, maxTime; } userData_t; double microtime(){ int tv_sec,tv_usec; double time; struct timeval tv; struct timezone tz; gettimeofday(&tv,&tz); return tv.tv_sec+tv.tv_usec/MICRO_IN_SEC; } // function to measure wall clock time inline double getTime() { /* return(omp_get_wtime()); */ return microtime();} #pragma offload_attribute (push, target (mic)) // helper macros to index into the example array #define IN(i,nExamples,j) (i*nExamples+j) #define OUT(i,nExamples,j) ((i+N_INPUT)*nExamples+j) // Define the Sigmoid #ifdef USE_LINEAR char *desc="generated_PCA_func LINEAR()"; inline float G(float x) { return( x ) ;} #define G_ESTIMATE 0 #elif USE_TANH char *desc="generated_func tanh()"; inline float G(float x) { return( tanhf(x) ) ;} #define G_ESTIMATE 7 // estimate 7 flops for G #elif LOGISTIC char *desc="generated func logistic()"; inline float G(float x) { return( 1.f/(1.f+expf(-x)) ) ;} #define G_ESTIMATE 7 // estimate flops for G #else // Use Elliott function char *desc="generated func Eliott activation: x/(1+fabsf(x))"; inline float G(float x) { return( x/(1.f+fabsf(x)) ) ;} #define G_ESTIMATE 3 // estimate flops for G #endif // This file defines the function to be evaluated #include "fcn.h" double 
_bigloop( float * example, float * param, int nExamples) { // double err=0.; // initialize error here in case offload selected //#pragma omp parallel for reduction(+ : err) cilk::reducer_opadd<double> err(0); cilk_for(int i=0; i < nExamples; i++) { float d=myFunc(i, param, example, nExamples, NULL); err += d*d; } return err.get_value(); } // The offload objective function double _objFunc(unsigned int n, const double * x, double * grad, void * my_func_data) { // double err; userData_t *uData = (userData_t *) my_func_data; // convert from double to float for speed for(int i=0; i < N_PARAM; i++) uData->param[i]=x[i]; cilk::reducer_opadd<double> err(0); int nExamples = uData->nExamples; float * example = uData->example; // compiler workaround //__declspec(align(64)) float * restrict example = uData->example; // compiler workaround float * param = uData->param; // compiler workaround //__declspec(align(64)) float * restrict param = uData->param; // compiler workaround /*#pragma offload target(mic:MIC_DEV) in(param:length(N_PARAM) REUSE) out(err) in(example:length(0) REUSE)*/ #ifdef USE_OLD_COMPILER { // err=0.; // initialize error here in case offload selected //#pragma omp parallel for reduction(+ : err) cilk_for(int i=0; i < nExamples; i++) { float d=myFunc(i, param, example, nExamples, NULL); err += d*d; } } #else err.set_value( _bigloop(example, param, nExamples) ); #endif return sqrt(err.get_value()); } //#pragma offload_attribute (pop) // The optizimation library callable objective function that gathers timing information double objFunc(unsigned int n, const double * x, double * grad, void * my_func_data) { if(grad) { fprintf(stderr,"Gradient not implemented!\n"); exit(1); } userData_t *uData = (userData_t *) my_func_data; double runTime=getTime(); double err = _objFunc(n,x,grad,my_func_data); runTime = getTime() - runTime; if(!uData->isWarmup) { // Note a maxTime of zero means this is the first call if(uData->maxTime == 0.) 
{ uData->maxTime = uData->minTime = runTime; } uData->maxTime = (uData->maxTime > runTime)?uData->maxTime:runTime; uData->minTime = (uData->minTime < runTime)?uData->minTime:runTime; uData->timeObjFunc += runTime; uData->countObjFunc++; } return( err ); } // Called to free memory and report timing information void fini(userData_t *uData) { /* int nThreads=0; // Intel recommended way to get the number of threads in offload mode. #pragma offload target(mic:MIC_DEV) out(nThreads) { #pragma omp parallel { #pragma omp single { nThreads = omp_get_num_threads(); } } } // Ouput some information if(!uData->isWarmup) { printf("number OMP threads %d\n", nThreads); printf("DataLoadTime %g\n", uData->timeDataLoad); printf("AveObjTime %g, countObjFunc %d, totalObjTime %g\n", uData->timeObjFunc/uData->countObjFunc, uData->countObjFunc, uData->timeObjFunc); #ifdef FLOP_ESTIMATE printf("Estimated flops in myFunc %d, estimated average GFlop/s %g\n", FLOP_ESTIMATE, (((double)uData->nExamples*FLOP_ESTIMATE)/(uData->timeObjFunc/uData->countObjFunc)/1.e9) ); printf("Estimated maximum GFlop/s %g, minimum GFLop/s %g\n", (((double)uData->nExamples*FLOP_ESTIMATE)/(uData->minTime)/1.e9), (((double)uData->nExamples*FLOP_ESTIMATE)/(uData->maxTime)/1.e9) ); } #endif // free if using offload mode __declspec(align(64)) float * restrict example = uData->example;// compiler workaround __declspec(align(64)) float * restrict param = uData->param;// compiler workaround #pragma offload target(mic:MIC_DEV) in(example: length(0) FREE) in(param : length(0) FREE) {} // free on the host if(uData->example) free(uData->example); uData->example=NULL; if(uData->param) free(uData->param); uData->param=NULL; */ } void offloadData(userData_t *uData) { #ifdef __INTEL_OFFLOAD int nDevices =_Offload_number_of_devices(); if(nDevices == 0) { fprintf(stderr,"No devices found!\n"); exit -1; } // If necessary, perform offload transfer and allocation double startOffload=getTime(); float * example = uData->example; // 
compiler workaround //__declspec(align(64)) float * restrict example = uData->example; // compiler workaround float * param = uData->param; // compiler workaround //__declspec(align(64)) float * restrict param = uData->param; // compiler workaround int Xsiz = uData->nExamples*EXAMPLE_SIZE; // compiler workaround // Note: the in for param just allocates memory on the device /* #pragma offload target(mic:MIC_DEV) in(example: length(Xsiz) ALLOC) in(param : length(N_PARAM) ALLOC) {} */ // set data load time if using offload mode uData->timeDataLoad = getTime() - startOffload; #endif } // loads the binary file of the form: // nInput, nOutput, nExamples // Input [0] [0:nExamples] // Input [1] [0:nExamples] // ... // Output [0] [0:nExamples] // Output [1] [0:nExamples] // ... void init(char*filename, userData_t *uData) { FILE *fn=stdin; // check if reading from stdin if(strcmp("-", filename) != 0) fn=fopen(filename,"r"); if(!fn) { fprintf(stderr,"Cannot open %s\n",filename); exit(1); } // read the header information double startTime=getTime(); int32_t nInput, nOutput; int32_t nExamples; fread(&nInput,sizeof(int32_t), 1, fn); if(nInput != N_INPUT) { fprintf(stderr,"Number of inputs incorrect!\n"); exit(1); } fread(&nOutput,sizeof(int32_t), 1, fn); if(nOutput != N_OUTPUT) { fprintf(stderr,"Number of outputs incorrect!\n"); exit(1); } fread(&nExamples,sizeof(int32_t), 1, fn); if(nExamples <= 0) { fprintf(stderr,"Number of examples incorrect!\n"); exit(1); } uData->nExamples = nExamples; // aligned allocation of the data uData->example=(float*) memalign(64,nExamples*EXAMPLE_SIZE*sizeof(float)); if(!uData->example) { fprintf(stderr,"Not enough memory for examples!\n"); exit(1); } // aligned allocation of the on-device parameters uData->param=(float*) memalign(64,N_PARAM*sizeof(float)); if(!uData->param) { fprintf(stderr,"Not enough memory for the parameters!\n"); exit(1); } // read the data for(int exIndex=0; exIndex < uData->nExamples; exIndex++) { for(int i=0; i < nInput; 
i++) fread(&uData->example[IN(i,uData->nExamples, exIndex)],1, sizeof(float), fn); for(int i=0; i < nOutput; i++) fread(&uData->example[OUT(i,uData->nExamples, exIndex)],1, sizeof(float), fn); } // offload the data double startOffload=getTime(); float * example = uData->example; // compiler workaround //__declspec(align(64)) float * restrict example = uData->example; // compiler workaround float * param = uData->param; // compiler workaround //__declspec(align(64)) float * restrict param = uData->param; // compiler workaround int Xsiz = uData->nExamples*EXAMPLE_SIZE; // compiler workaround // Note: the in just allocates memory on the device /* #pragma offload target(mic:MIC_DEV) in(example: length(Xsiz) ALLOC) in(param : length(N_PARAM) ALLOC) {} */ uData->timeDataLoad = getTime() - startTime; if(fn!=stdin) fclose(fn); }
GB_binop__bxor_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxor_int16) // A.*B function (eWiseMult): GB (_AemultB_08__bxor_int16) // A.*B function (eWiseMult): GB (_AemultB_02__bxor_int16) // A.*B function (eWiseMult): GB (_AemultB_04__bxor_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_int16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bxor_int16) // C+=b function (dense accum): GB (_Cdense_accumb__bxor_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_int16) // C=scalar+B GB (_bind1st__bxor_int16) // C=scalar+B' GB (_bind1st_tran__bxor_int16) // C=A+scalar GB (_bind2nd__bxor_int16) // C=A'+scalar GB (_bind2nd_tran__bxor_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 
0 // BinaryOp: cij = (aij) ^ (bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) ^ (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXOR || GxB_NO_INT16 || GxB_NO_BXOR_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bxor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bxor_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bxor_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict 
Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bxor_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int16_t alpha_scalar ; int16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int16_t *) alpha_scalar_in)) ; beta_scalar = (*((int16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bxor_int16) ( GrB_Matrix C, const int C_sparsity, const 
int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bxor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bxor_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bxor_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bxor_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x) ^ (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bxor_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) ^ (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) ^ (aij) ; \ } GrB_Info GB (_bind1st_tran__bxor_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) ^ (y) ; \ } GrB_Info GB (_bind2nd_tran__bxor_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
image_random-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file image_random-inl.h * \brief * \author */ #ifndef MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_ #define MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_ #include <algorithm> #include <cmath> #include <limits> #include <tuple> #include <utility> #include <vector> #include "mxnet/base.h" #include "../mxnet_op.h" #include "../operator_common.h" #if MXNET_USE_OPENCV #include <opencv2/opencv.hpp> #endif // MXNET_USE_OPENCV namespace mxnet { namespace op { namespace image { using namespace mshadow; #if MXNET_USE_CUDA // NOTE: Kernel launch/map was extremely costly. // Hence, we use separate CUDA kernels for these operators. 
template<typename DType, typename T1, typename T2> void ToTensorImplCUDA(mshadow::Stream<gpu> *s, const T1 input, const T2 output, const int req, const float normalize_factor); template<typename DType> void NormalizeImplCUDA(mshadow::Stream<gpu> *s, const DType *input, DType *output, const int req, const int N, const int C, const int H, const int W, const float mean_d0, const float mean_d1, const float mean_d2, const float std_d0, const float std_d1, const float std_d2); template<typename DType> void NormalizeBackwardImplCUDA(mshadow::Stream<gpu> *s, const DType *out_grad, DType *in_grad, const int req, const int N, const int C, const int H, const int W, const float std_d0, const float std_d1, const float std_d2); #endif // MXNET_USE_CUDA // Shape and Type inference for image to tensor operator inline bool ToTensorShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); mxnet::TShape &shp = (*in_attrs)[0]; if (!shape_is_known(shp)) return false; CHECK((shp.ndim() == 3) || (shp.ndim() == 4)) << "Input image must have shape (height, width, channels), or " << "(N, height, width, channels) but got " << shp; if (shp.ndim() == 3) { SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({shp[2], shp[0], shp[1]})); } else if (shp.ndim() == 4) { SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({shp[0], shp[3], shp[1], shp[2]})); } return true; } inline bool ToTensorType(const nnvm::NodeAttrs& attrs, std::vector<int> *in_attrs, std::vector<int> *out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat32); return (*in_attrs)[0] != -1; } // Operator Implementation template<typename DType, int req> inline void ToTensor(float* out_data, const DType* in_data, const int length, const int channels, const float normalize_factor, const int step) { // Microsoft Visual C++ compiler does not support omp collapse #ifdef 
_MSC_VER #pragma omp parallel for #else #pragma omp parallel for collapse(2) #endif // _MSC_VER for (int c = 0; c < channels; ++c) { for (int i = 0; i < length; ++i) { KERNEL_ASSIGN(out_data[step + c*length + i], req, (in_data[step + i*channels + c]) / normalize_factor); } } } inline void ToTensorImpl(const std::vector<TBlob> &inputs, const std::vector<TBlob> &outputs, const std::vector<OpReqType> &req, const int length, const int channel, const float normalize_factor, const int step) { MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, { float* output = outputs[0].dptr<float>(); DType* input = inputs[0].dptr<DType>(); ToTensor<DType, req_type>(output, input, length, channel, normalize_factor, step); }); }); } template<typename xpu> void ToTensorOpForward(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); // We do not use temp buffer when performance the operation. // Hence, this check is necessary. 
CHECK_EQ(req[0], kWriteTo) << "`to_tensor` does not support inplace updates"; const float normalize_factor = 255.0f; if (std::is_same<xpu, gpu>::value) { #if MXNET_USE_CUDA mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, { if (inputs[0].ndim() == 3) { Tensor<gpu, 3, DType> input = inputs[0].get<gpu, 3, DType>(s); Tensor<gpu, 3, float> output = outputs[0].get<gpu, 3, float>(s); ToTensorImplCUDA<DType, Tensor<gpu, 3, DType>, Tensor<gpu, 3, float>> (s, input, output, req_type, normalize_factor); } else { Tensor<gpu, 4, DType> input = inputs[0].get<gpu, 4, DType>(s); Tensor<gpu, 4, float> output = outputs[0].get<gpu, 4, float>(s); ToTensorImplCUDA<DType, Tensor<gpu, 4, DType>, Tensor<gpu, 4, float>> (s, input, output, req_type, normalize_factor); } }); }); #else LOG(FATAL) << "Compile with USE_CUDA=1 to use ToTensor operator on GPU."; #endif // MXNET_USE_CUDA } else if (inputs[0].ndim() == 3) { // 3D Input - (h, w, c) const int length = inputs[0].shape_[0] * inputs[0].shape_[1]; const int channel = static_cast<int>(inputs[0].shape_[2]); const int step = 0; ToTensorImpl(inputs, outputs, req, length, channel, normalize_factor, step); } else if (inputs[0].ndim() == 4) { // 4D input (n, h, w, c) const int batch_size = inputs[0].shape_[0]; const int length = inputs[0].shape_[1] * inputs[0].shape_[2]; const int channel = static_cast<int>(inputs[0].shape_[3]); const int step = channel * length; #pragma omp parallel for for (auto n = 0; n < batch_size; ++n) { ToTensorImpl(inputs, outputs, req, length, channel, normalize_factor, n*step); } } } struct NormalizeParam : public dmlc::Parameter<NormalizeParam> { mxnet::Tuple<float> mean; mxnet::Tuple<float> std; DMLC_DECLARE_PARAMETER(NormalizeParam) { DMLC_DECLARE_FIELD(mean) .set_default(mxnet::Tuple<float> {0.0f, 0.0f, 0.0f, 0.0f}) .describe("Sequence of means for each channel. 
" "Default value is 0."); DMLC_DECLARE_FIELD(std) .set_default(mxnet::Tuple<float> {1.0f, 1.0f, 1.0f, 1.0f}) .describe("Sequence of standard deviations for each channel. " "Default value is 1."); } }; // Shape and Type inference for image Normalize operator // Shape inference inline bool NormalizeOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed); const auto& dshape = (*in_attrs)[0]; if (!dshape.ndim()) return false; CHECK((dshape.ndim() == 3) || (dshape.ndim() == 4)) << "Input tensor must have shape (channels, height, width), or " << "(N, channels, height, width), but got " << dshape; int nchannels = 0; if (dshape.ndim() == 3) { nchannels = dshape[0]; CHECK(nchannels == 3 || nchannels == 1) << "The first dimension of input tensor must be the channel dimension with " << "either 1 or 3 elements, but got input with shape " << dshape; } else if (dshape.ndim() == 4) { nchannels = dshape[1]; CHECK(nchannels == 3 || nchannels == 1) << "The second dimension of input tensor must be the channel dimension with " << "either 1 or 3 elements, but got input with shape " << dshape; } CHECK((param.mean.ndim() == 1) || (param.mean.ndim() == nchannels)) << "Invalid mean for input with shape " << dshape << ". mean must have either 1 or " << nchannels << " elements, but got " << param.mean; CHECK(param.std.ndim() == 1 || param.std.ndim() == nchannels) << "Invalid std for input with shape " << dshape << ". 
std must have either 1 or " << nchannels << " elements, but got " << param.std; SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape); return true; } // Type Inference inline bool NormalizeOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(in_attrs->size(), 1U); CHECK_EQ(out_attrs->size(), 1U); TYPE_ASSIGN_CHECK(*out_attrs, 0, in_attrs->at(0)); TYPE_ASSIGN_CHECK(*in_attrs, 0, out_attrs->at(0)); return out_attrs->at(0) != -1; } template<typename DType, int req> inline void Normalize(DType* out_data, const DType* in_data, const int length, const int channels, const int step, const std::vector<float> mean, const std::vector<float> std) { // Microsoft Visual C++ compiler does not support omp collapse #ifdef _MSC_VER #pragma omp parallel for #else #pragma omp parallel for collapse(2) #endif // _MSC_VER for (int c = 0; c < channels; ++c) { for (int i = 0; i < length; ++i) { KERNEL_ASSIGN(out_data[step + c*length + i], req, (in_data[step + c*length + i] - mean[c]) / std[c]); } } } inline void NormalizeImpl(const std::vector<TBlob> &inputs, const std::vector<TBlob> &outputs, const std::vector<OpReqType> &req, const int length, const int channels, const int step, const std::vector<float> mean, const std::vector<float> std) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, { DType* input = inputs[0].dptr<DType>(); DType* output = outputs[0].dptr<DType>(); Normalize<DType, req_type>(output, input, length, channels, step, mean, std); }); }); } template<typename xpu> void NormalizeOpForward(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed); // Mean and Std can be 1 or 3D only. 
std::vector<float> mean(3); std::vector<float> std(3); if (param.mean.ndim() == 1) { mean[0] = mean[1] = mean[2] = param.mean[0]; } else { mean[0] = param.mean[0]; mean[1] = param.mean[1]; mean[2] = param.mean[2]; } if (param.std.ndim() == 1) { std[0] = std[1] = std[2] = param.std[0]; } else { std[0] = param.std[0]; std[1] = param.std[1]; std[2] = param.std[2]; } if (std::is_same<xpu, gpu>::value) { #if MXNET_USE_CUDA mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, { int N, C, H, W; DType *input = nullptr; DType *output = nullptr; if (inputs[0].ndim() == 3) { N = 1; C = static_cast<int>(inputs[0].shape_[0]); H = static_cast<int>(inputs[0].shape_[1]); W = static_cast<int>(inputs[0].shape_[2]); input = (inputs[0].get<gpu, 3, DType>(s)).dptr_; output = (outputs[0].get<gpu, 3, DType>(s)).dptr_; } else { N = static_cast<int>(inputs[0].shape_[0]); C = static_cast<int>(inputs[0].shape_[1]); H = static_cast<int>(inputs[0].shape_[2]); W = static_cast<int>(inputs[0].shape_[3]); input = (inputs[0].get<gpu, 4, DType>(s)).dptr_; output = (outputs[0].get<gpu, 4, DType>(s)).dptr_; } NormalizeImplCUDA<DType>(s, input, output, req_type, N, C, H, W, mean[0], mean[1], mean[2], std[0], std[1], std[2]); }); }); #else LOG(FATAL) << "Compile with USE_CUDA=1 to use Normalize operator on GPU."; #endif // MXNET_USE_CUDA } else if (inputs[0].ndim() == 3) { // 3D input (c, h, w) const int length = inputs[0].shape_[1] * inputs[0].shape_[2]; const int channel = static_cast<int>(inputs[0].shape_[0]); const int step = 0; NormalizeImpl(inputs, outputs, req, length, channel, step, mean, std); } else if (inputs[0].ndim() == 4) { // 4D input (n, c, h, w) const int batch_size = inputs[0].shape_[0]; const int length = inputs[0].shape_[2] * inputs[0].shape_[3]; const int channel = static_cast<int>(inputs[0].shape_[1]); const int step = channel * length; #pragma omp parallel for for (auto n = 0; n < batch_size; 
++n) { NormalizeImpl(inputs, outputs, req, length, channel, n*step, mean, std); } } } // Backward function template<typename DType, int req> inline void NormalizeBackward(const DType* out_grad, DType* in_grad, const int length, const int channels, const int step, const std::vector<float> std) { // Microsoft Visual C++ compiler does not support omp collapse #ifdef _MSC_VER #pragma omp parallel for #else #pragma omp parallel for collapse(2) #endif // _MSC_VER for (int c = 0; c < channels; ++c) { for (int i = 0; i < length; ++i) { KERNEL_ASSIGN(in_grad[step + c*length + i], req, out_grad[step + c*length + i] * (1.0 / std[c])); } } } inline void NormalizeBackwardImpl(const std::vector<TBlob> &inputs, const std::vector<TBlob> &outputs, const std::vector<OpReqType> &req, const int length, const int channels, const int step, const std::vector<float> std ) { MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, { DType* out_grad = inputs[0].dptr<DType>(); DType* in_grad = outputs[0].dptr<DType>(); NormalizeBackward<DType, req_type>(out_grad, in_grad, length, channels, step, std); }); }); } template<typename xpu> void NormalizeOpBackward(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { CHECK_EQ(inputs.size(), 2U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); const NormalizeParam &param = nnvm::get<NormalizeParam>(attrs.parsed); // Std can be 1 or 3D only. 
std::vector<float> std(3); if (param.std.ndim() == 1) { std[0] = std[1] = std[2] = param.std[0]; } else { std[0] = param.std[0]; std[1] = param.std[1]; std[2] = param.std[2]; } // Note: inputs[0] is out_grad const TBlob& in_data = inputs[1]; if (std::is_same<xpu, gpu>::value) { #if MXNET_USE_CUDA mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], req_type, { int N, C, H, W; DType *in_grad = nullptr; DType *out_grad = nullptr; if (in_data.ndim() == 3) { N = 1; C = static_cast<int>(in_data.shape_[0]); H = static_cast<int>(in_data.shape_[1]); W = static_cast<int>(in_data.shape_[2]); out_grad = (inputs[0].get<gpu, 3, DType>(s)).dptr_; in_grad = (outputs[0].get<gpu, 3, DType>(s)).dptr_; } else { N = static_cast<int>(in_data.shape_[0]); C = static_cast<int>(in_data.shape_[1]); H = static_cast<int>(in_data.shape_[2]); W = static_cast<int>(in_data.shape_[3]); out_grad = (inputs[0].get<gpu, 4, DType>(s)).dptr_; in_grad = (outputs[0].get<gpu, 4, DType>(s)).dptr_; } NormalizeBackwardImplCUDA<DType>(s, out_grad, in_grad, req_type, N, C, H, W, std[0], std[1], std[2]); }); }); #else LOG(FATAL) << "Compile with USE_CUDA=1 to use Normalize backward operator on GPU."; #endif // MXNET_USE_CUDA } else if (in_data.ndim() == 3) { // 3D input (c, h, w) const int length = in_data.shape_[1] * in_data.shape_[2]; const int channel = static_cast<int>(in_data.shape_[0]); const int step = 0; NormalizeBackwardImpl(inputs, outputs, req, length, channel, step, std); } else if (in_data.ndim() == 4) { // 4D input (n, c, h, w) const int batch_size = in_data.shape_[0]; const int length = in_data.shape_[2] * in_data.shape_[3]; const int channel = static_cast<int>(in_data.shape_[1]); const int step = channel * length; #pragma omp parallel for for (auto n = 0; n < batch_size; ++n) { NormalizeBackwardImpl(inputs, outputs, req, length, channel, n*step, std); } } } template<typename DType> inline DType saturate_cast(const 
float& src) { return static_cast<DType>(src); } template<> inline uint8_t saturate_cast(const float& src) { return std::min(std::max(src, 0.f), 255.f); } inline bool ImageShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { mxnet::TShape& dshape = (*in_attrs)[0]; CHECK_EQ(dshape.ndim(), 3) << "Input image must have shape (height, width, channels), but got " << dshape; auto nchannels = dshape[dshape.ndim()-1]; CHECK(nchannels == 3 || nchannels == 1) << "The last dimension of input image must be the channel dimension with " << "either 1 or 3 elements, but got input with shape " << dshape; SHAPE_ASSIGN_CHECK(*out_attrs, 0, dshape); return true; } template<typename DType, int axis> void FlipImpl(const mxnet::TShape &shape, DType *src, DType *dst) { int head = 1, mid = shape[axis], tail = 1; for (int i = 0; i < axis; ++i) head *= shape[i]; for (int i = axis+1; i < shape.ndim(); ++i) tail *= shape[i]; for (int i = 0; i < head; ++i) { // if inplace flip, skip the mid point in axis, otherwise copy is required int mid2 = (src == dst) ? 
mid >> 1 : (mid + 1) >> 1; for (int j = 0; j < mid2; ++j) { int idx1 = (i*mid + j) * tail; int idx2 = idx1 + (mid-(j << 1)-1) * tail; for (int k = 0; k < tail; ++k, ++idx1, ++idx2) { DType tmp = src[idx1]; dst[idx1] = src[idx2]; dst[idx2] = tmp; } } } } inline void FlipLeftRight(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(), outputs[0].dptr<DType>()); }); } inline void FlipTopBottom(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(), outputs[0].dptr<DType>()); }); } struct RandomFlipParam : public dmlc::Parameter<RandomFlipParam> { float p; DMLC_DECLARE_PARAMETER(RandomFlipParam) { DMLC_DECLARE_FIELD(p) .set_default(0.5f) .describe("The probablity of flipping the image."); } }; inline void RandomFlipLeftRight( const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; const RandomFlipParam &param = nnvm::get<RandomFlipParam>(attrs.parsed); Stream<cpu> *s = ctx.get_stream<cpu>(); Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s); std::normal_distribution<float> dist(0, 1); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { if (dist(prnd->GetRndEngine()) > param.p) { if (outputs[0].dptr_ != inputs[0].dptr_) { std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType)); } } else { FlipImpl<DType, 1>(inputs[0].shape_, inputs[0].dptr<DType>(), outputs[0].dptr<DType>()); } }); } inline void RandomFlipTopBottom( const 
nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; const RandomFlipParam &param = nnvm::get<RandomFlipParam>(attrs.parsed); Stream<cpu> *s = ctx.get_stream<cpu>(); Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s); std::normal_distribution<float> dist(0, 1); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { if (dist(prnd->GetRndEngine()) > param.p) { if (outputs[0].dptr_ != inputs[0].dptr_) { std::memcpy(outputs[0].dptr_, inputs[0].dptr_, inputs[0].Size() * sizeof(DType)); } } else { FlipImpl<DType, 0>(inputs[0].shape_, inputs[0].dptr<DType>(), outputs[0].dptr<DType>()); } }); } struct RandomEnhanceParam : public dmlc::Parameter<RandomEnhanceParam> { float min_factor; float max_factor; DMLC_DECLARE_PARAMETER(RandomEnhanceParam) { DMLC_DECLARE_FIELD(min_factor) .set_lower_bound(0.0) .describe("Minimum factor."); DMLC_DECLARE_FIELD(max_factor) .set_lower_bound(0.0) .describe("Maximum factor."); } }; inline void AdjustBrightnessImpl(const float& alpha_b, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; int length = inputs[0].Size(); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { DType* output = outputs[0].dptr<DType>(); DType* input = inputs[0].dptr<DType>(); for (int l = 0; l < length; ++l) { float val = static_cast<float>(input[l]) * alpha_b; output[l] = saturate_cast<DType>(val); } }); } inline void RandomBrightness(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed); Stream<cpu> *s = ctx.get_stream<cpu>(); Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s); float alpha_b = 
std::uniform_real_distribution<float>( param.min_factor, param.max_factor)(prnd->GetRndEngine()); AdjustBrightnessImpl(alpha_b, ctx, inputs, req, outputs); } inline void AdjustContrastImpl(const float& alpha_c, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; static const float coef[] = { 0.299f, 0.587f, 0.114f }; int length = inputs[0].shape_[0] * inputs[0].shape_[1]; int nchannels = inputs[0].shape_[2]; MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { DType* output = outputs[0].dptr<DType>(); DType* input = inputs[0].dptr<DType>(); float sum = 0.f; if (nchannels > 1) { for (int l = 0; l < length; ++l) { for (int c = 0; c < 3; ++c) sum += input[l*3 + c] * coef[c]; } } else { for (int l = 0; l < length; ++l) sum += input[l]; } float gray_mean = sum / static_cast<float>(length); float beta = (1 - alpha_c) * gray_mean; for (int l = 0; l < length * nchannels; ++l) { float val = input[l] * alpha_c + beta; output[l] = saturate_cast<DType>(val); } }); } inline void RandomContrast(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed); Stream<cpu> *s = ctx.get_stream<cpu>(); Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s); float alpha_c = std::uniform_real_distribution<float>( param.min_factor, param.max_factor)(prnd->GetRndEngine()); AdjustContrastImpl(alpha_c, ctx, inputs, req, outputs); } inline void AdjustSaturationImpl(const float& alpha_s, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { static const float coef[] = { 0.299f, 0.587f, 0.114f }; int length = inputs[0].shape_[0] * inputs[0].shape_[1]; int nchannels = inputs[0].shape_[2]; float alpha_o = 1.f - alpha_s; 
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { DType* output = outputs[0].dptr<DType>(); DType* input = inputs[0].dptr<DType>(); if (nchannels == 1) { for (int l = 0; l < length; ++l) output[l] = input[l]; return; } for (int l = 0; l < length; ++l) { float gray = 0.f; for (int c = 0; c < 3; ++c) { gray = input[l*3 + c] * coef[c]; } gray *= alpha_o; for (int c = 0; c < 3; ++c) { float val = gray + input[l*3 + c] * alpha_s; output[l*3 + c] = saturate_cast<DType>(val); } } }); } inline void RandomSaturation(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed); Stream<cpu> *s = ctx.get_stream<cpu>(); Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s); float alpha_s = std::uniform_real_distribution<float>( param.min_factor, param.max_factor)(prnd->GetRndEngine()); AdjustSaturationImpl(alpha_s, ctx, inputs, req, outputs); } inline void RGB2HLSConvert(const float& src_r, const float& src_g, const float& src_b, float *dst_h, float *dst_l, float *dst_s) { float b = src_b / 255.f, g = src_g / 255.f, r = src_r / 255.f; float h = 0.f, s = 0.f, l; float vmin; float vmax; float diff; vmax = vmin = r; vmax = std::fmax(vmax, g); vmax = std::fmax(vmax, b); vmin = std::fmin(vmin, g); vmin = std::fmin(vmin, b); diff = vmax - vmin; l = (vmax + vmin) * 0.5f; if (diff > std::numeric_limits<float>::epsilon()) { s = (l < 0.5f) * diff / (vmax + vmin); s += (l >= 0.5f) * diff / (2.0f - vmax - vmin); diff = 60.f / diff; h = (vmax == r) * (g - b) * diff; h += (vmax != r && vmax == g) * ((b - r) * diff + 120.f); h += (vmax != r && vmax != g) * ((r - g) * diff + 240.f); h += (h < 0.f) * 360.f; } *dst_h = h; *dst_l = l; *dst_s = s; } inline void HLS2RGBConvert(const float& src_h, const float& src_l, const float& src_s, float *dst_r, float *dst_g, float *dst_b) { static 
const int c_HlsSectorData[6][3] = { { 1, 3, 0 }, { 1, 0, 2 }, { 3, 0, 1 }, { 0, 2, 1 }, { 0, 1, 3 }, { 2, 1, 0 } }; float h = src_h, l = src_l, s = src_s; float b = l, g = l, r = l; if (s != 0) { float p2 = (l <= 0.5f) * l * (1 + s); p2 += (l > 0.5f) * (l + s - l * s); float p1 = 2 * l - p2; h *= 1.f / 60.f; if (h < 0) { do { h += 6; } while (h < 0); } else if (h >= 6) { do { h -= 6; } while (h >= 6); } int sector = static_cast<int>(h); h -= sector; float tab[4]; tab[0] = p2; tab[1] = p1; tab[2] = p1 + (p2 - p1) * (1 - h); tab[3] = p1 + (p2 - p1) * h; b = tab[c_HlsSectorData[sector][0]]; g = tab[c_HlsSectorData[sector][1]]; r = tab[c_HlsSectorData[sector][2]]; } *dst_b = b * 255.f; *dst_g = g * 255.f; *dst_r = r * 255.f; } inline void AdjustHueImpl(float alpha, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { int length = inputs[0].shape_[0] * inputs[0].shape_[1]; if (inputs[0].shape_[2] == 1) return; MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { DType* input = inputs[0].dptr<DType>(); DType* output = outputs[0].dptr<DType>(); for (int i = 0; i < length; ++i) { float h, l, s; float r = static_cast<float>(*(input++)); float g = static_cast<float>(*(input++)); float b = static_cast<float>(*(input++)); RGB2HLSConvert(r, g, b, &h, &l, &s); h += alpha * 360.f; HLS2RGBConvert(h, l, s, &r, &g, &b); *(output++) = saturate_cast<DType>(r); *(output++) = saturate_cast<DType>(g); *(output++) = saturate_cast<DType>(b); } }); } inline void RandomHue(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; const RandomEnhanceParam &param = nnvm::get<RandomEnhanceParam>(attrs.parsed); Stream<cpu> *s = ctx.get_stream<cpu>(); Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s); float alpha = std::uniform_real_distribution<float>( param.min_factor, 
param.max_factor)(prnd->GetRndEngine()); AdjustHueImpl(alpha, ctx, inputs, req, outputs); } struct RandomColorJitterParam : public dmlc::Parameter<RandomColorJitterParam> { float brightness; float contrast; float saturation; float hue; DMLC_DECLARE_PARAMETER(RandomColorJitterParam) { DMLC_DECLARE_FIELD(brightness) .describe("How much to jitter brightness."); DMLC_DECLARE_FIELD(contrast) .describe("How much to jitter contrast."); DMLC_DECLARE_FIELD(saturation) .describe("How much to jitter saturation."); DMLC_DECLARE_FIELD(hue) .describe("How much to jitter hue."); } }; inline void RandomColorJitter(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; const RandomColorJitterParam &param = nnvm::get<RandomColorJitterParam>(attrs.parsed); Stream<cpu> *s = ctx.get_stream<cpu>(); Random<cpu> *prnd = ctx.requested[0].get_random<cpu, real_t>(s); int order[4] = {0, 1, 2, 3}; std::shuffle(order, order + 4, prnd->GetRndEngine()); bool flag = false; for (int i = 0; i < 4; ++i) { switch (order[i]) { case 0: if (param.brightness > 0) { float alpha_b = 1.0 + std::uniform_real_distribution<float>( -param.brightness, param.brightness)(prnd->GetRndEngine()); AdjustBrightnessImpl(alpha_b, ctx, flag ? outputs : inputs, req, outputs); flag = true; } break; case 1: if (param.contrast > 0) { float alpha_c = 1.0 + std::uniform_real_distribution<float>( -param.contrast, param.contrast)(prnd->GetRndEngine()); AdjustContrastImpl(alpha_c, ctx, flag ? outputs : inputs, req, outputs); flag = true; } break; case 2: if (param.saturation > 0) { float alpha_s = 1.f + std::uniform_real_distribution<float>( -param.saturation, param.saturation)(prnd->GetRndEngine()); AdjustSaturationImpl(alpha_s, ctx, flag ? 
outputs : inputs, req, outputs); flag = true; } break; case 3: if (param.hue > 0) { float alpha_h = std::uniform_real_distribution<float>( -param.hue, param.hue)(prnd->GetRndEngine()); AdjustHueImpl(alpha_h, ctx, flag ? outputs : inputs, req, outputs); flag = true; } break; } } } struct AdjustLightingParam : public dmlc::Parameter<AdjustLightingParam> { mxnet::Tuple<float> alpha; DMLC_DECLARE_PARAMETER(AdjustLightingParam) { DMLC_DECLARE_FIELD(alpha) .describe("The lighting alphas for the R, G, B channels."); } }; struct RandomLightingParam : public dmlc::Parameter<RandomLightingParam> { float alpha_std; DMLC_DECLARE_PARAMETER(RandomLightingParam) { DMLC_DECLARE_FIELD(alpha_std) .set_default(0.05) .describe("Level of the lighting noise."); } }; inline void AdjustLightingImpl(const mxnet::Tuple<float>& alpha, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { static const float eig[3][3] = { { 55.46 * -0.5675, 4.794 * 0.7192, 1.148 * 0.4009 }, { 55.46 * -0.5808, 4.794 * -0.0045, 1.148 * -0.8140 }, { 55.46 * -0.5836, 4.794 * -0.6948, 1.148 * 0.4203 } }; int length = inputs[0].shape_[0] * inputs[0].shape_[1]; int channels = inputs[0].shape_[2]; if (channels == 1) return; float pca_r = eig[0][0] * alpha[0] + eig[0][1] * alpha[1] + eig[0][2] * alpha[2]; float pca_g = eig[1][0] * alpha[0] + eig[1][1] * alpha[1] + eig[1][2] * alpha[2]; float pca_b = eig[2][0] * alpha[0] + eig[2][1] * alpha[1] + eig[2][2] * alpha[2]; MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { DType* output = outputs[0].dptr<DType>(); DType* input = inputs[0].dptr<DType>(); for (int i = 0; i < length; i++) { int base_ind = 3 * i; float in_r = static_cast<float>(input[base_ind]); float in_g = static_cast<float>(input[base_ind + 1]); float in_b = static_cast<float>(input[base_ind + 2]); output[base_ind] = saturate_cast<DType>(in_r + pca_r); output[base_ind + 1] = saturate_cast<DType>(in_g + pca_g); output[base_ind + 2] = 
saturate_cast<DType>(in_b + pca_b); } }); } inline void AdjustLighting(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; const AdjustLightingParam &param = nnvm::get<AdjustLightingParam>(attrs.parsed); AdjustLightingImpl(param.alpha, ctx, inputs, req, outputs); } inline void RandomLighting(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { using namespace mshadow; const RandomLightingParam &param = nnvm::get<RandomLightingParam>(attrs.parsed); Stream<cpu> *s = ctx.get_stream<cpu>(); Random<cpu> *prnd = ctx.requested[0].get_random<cpu, float>(s); std::normal_distribution<float> dist(0, param.alpha_std); float alpha_r = dist(prnd->GetRndEngine()); float alpha_g = dist(prnd->GetRndEngine()); float alpha_b = dist(prnd->GetRndEngine()); AdjustLightingImpl({alpha_r, alpha_g, alpha_b}, ctx, inputs, req, outputs); } #define MXNET_REGISTER_IMAGE_AUG_OP(name) \ NNVM_REGISTER_OP(name) \ .set_num_inputs(1) \ .set_num_outputs(1) \ .set_attr<nnvm::FInplaceOption>("FInplaceOption", \ [](const NodeAttrs& attrs){ \ return std::vector<std::pair<int, int> >{{0, 0}}; \ }) \ .set_attr<mxnet::FInferShape>("FInferShape", ImageShape) \ .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>) \ .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{ "_copy" }) \ .add_argument("data", "NDArray-or-Symbol", "The input.") #define MXNET_REGISTER_IMAGE_RND_AUG_OP(name) \ MXNET_REGISTER_IMAGE_AUG_OP(name) \ .set_attr<FResourceRequest>("FResourceRequest", \ [](const NodeAttrs& attrs) { \ return std::vector<ResourceRequest>{ResourceRequest::kRandom}; \ }) } // namespace image } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_IMAGE_IMAGE_RANDOM_INL_H_
DRB024-simdtruedep-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
/* This one has data races due to true dependence. But data races happen at instruction level, not thread level. Data race pair: a[i+1]@66:5 vs. a[i]@66:12 */

#include <stdio.h>
#include <omp.h>

/*
 * DataRaceBench kernel: exercises a loop-carried TRUE dependence on a[].
 * The race documented above is intentional -- this is a "yes race" benchmark,
 * so the dependence must NOT be "fixed".
 *
 * NOTE(review): the filename (simdtruedep) and the header comment describe a
 * SIMD instruction-level race, but no `#pragma omp simd` precedes the
 * dependence loop in this chunk -- it may have been lost in a transformation
 * step; confirm against the upstream DataRaceBench source before relying on
 * this file to exhibit the documented race.
 */
int main(int argc,char *argv[])
{
  int i;
  int len = 100;
  int a[100];
  int b[100];

  /* parallel initialization: each iteration writes only its own a[i]/b[i],
     so this loop is race-free */
#pragma omp parallel for private (i)
  for (i = 0; i <= len - 1; i += 1) {
    a[i] = i;
    b[i] = i + 1;
  }

  /* loop-carried true dependence: iteration i writes a[i+1], which
     iteration i+1 reads as a[i] -- must run in order */
  for (i = 0; i <= len - 1 - 1; i += 1) {
    a[i + 1] = a[i] + b[i];
  }

  /* print results so the compiler cannot elide the computation */
  for (i = 0; i <= len - 1; i += 1) {
    printf("i=%d a[%d]=%d\n",i,i,a[i]);
  }
  return 0;
}
GB_unaryop__minv_uint64_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__minv_uint64_uint32
// op(A') function: GB_tran__minv_uint64_uint32

// C type: uint64_t
// A type: uint32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 64)

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 64) ;

// casting
#define GB_CASTING(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_uint64_uint32
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    uint32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // one flat, statically scheduled pass over the anz entries of A;
    // each iteration writes only Cx [p], so the loop is race-free
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_uint64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is expanded from this shared template,
    // specialized by the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
sieve.c
/*
 * Tempo Sequencial
 *
 *real 0m4.047s
 *user 0m3.960s
 *sys  0m0.080s
 *
 *real 0m4.053s
 *user 0m3.967s
 *sys  0m0.080s
 *
 *real 0m4.042s
 *user 0m3.962s
 *sys  0m0.072s
 *
 *real 0m4.044s
 *user 0m3.953s
 *sys  0m0.084s
 *
 *real 0m4.045s
 *user 0m3.967s
 *sys  0m0.072s
 *
 * Tempo paralelo (Atividade 03)
 *
 *real 0m3.700s
 *user 0m6.857s
 *sys  0m0.104s
 *
 *real 0m3.661s
 *user 0m6.813s
 *sys  0m0.072s
 *
 *real 0m3.797s
 *user 0m6.955s
 *sys  0m0.096s
 *
 *real 0m3.647s
 *user 0m6.817s
 *sys  0m0.076s
 *
 *real 0m3.890s
 *user 0m7.107s
 *sys  0m0.088s
 *
 * Tempo paralelo(Atividade 07)
 *
 *real 0m2.682s
 *user 0m10.331s
 *sys  0m0.080s
 *
 *real 0m2.684s
 *user 0m10.331s
 *sys  0m0.080s
 *
 *real 0m2.689s
 *user 0m10.309s
 *sys  0m0.083s
 *
 *real 0m2.659s
 *user 0m9.956s
 *sys  0m0.103s
 *
 *real 0m2.698s
 *user 0m10.333s
 *sys  0m0.084s
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <math.h>

/*
 * Counts the primes in [2, n] with a parallel sieve of Eratosthenes.
 *
 * Fixes over the previous version:
 *  - malloc result is now checked (was dereferenced unconditionally);
 *  - the sieve array is freed (was leaked on every call);
 *  - the inner marking loop no longer carries its own
 *    "#pragma omp parallel for": that opened a second parallel region
 *    inside an already-parallel one, which is pure overhead unless nested
 *    parallelism is explicitly enabled. The marking now runs inside the
 *    outer worksharing construct.
 *
 * The concurrent writes to prime[] from different outer iterations only
 * ever store false over true (composites stay composite), so the final
 * count is unaffected.
 *
 * Returns the number of primes <= n (0 for n < 2).
 */
int sieveOfEratosthenes(int n)
{
    if (n < 2)                        /* no primes below 2 */
        return 0;

    bool *prime = malloc((n + 1) * sizeof(bool));
    if (prime == NULL) {              /* previously unchecked */
        fprintf(stderr, "sieveOfEratosthenes: out of memory\n");
        exit(EXIT_FAILURE);
    }
    memset(prime, true, (n + 1) * sizeof(bool));

    int sqrt_n = sqrt(n);
    int primes = 0;

    /* Dynamic schedule: small p values have far more multiples to mark. */
    #pragma omp parallel for schedule(dynamic, 100)
    for (int p = 2; p <= sqrt_n; p++) {
        if (prime[p] == true) {
            /* Mark every multiple of p as composite (true -> false only). */
            for (int i = p * 2; i <= n; i += p)
                prime[i] = false;
        }
    }

    /* Count the survivors. */
    #pragma omp parallel for reduction(+:primes)
    for (int p = 2; p <= n; p++)
        if (prime[p])
            primes++;

    free(prime);                      /* was leaked before */
    return primes;
}

int main()
{
    int n = 100000000;
    printf("%d\n", sieveOfEratosthenes(n));
    return 0;
}
prime.c
#include<stdio.h>
#include<stdlib.h>
#include<omp.h>

/*
 * Sieve of Eratosthenes driver: counts primes <= n (n from argv[1]) and
 * reports the elapsed wall time. Thread count is pinned to 1 on purpose,
 * as the sequential baseline for the OpenMP marking loop.
 *
 * Fixes over the previous version:
 *  - n < 1 is rejected before use: the old code wrote marked[1] even when
 *    the buffer held a single byte (n == 0), and passed a negative size to
 *    malloc for negative n;
 *  - marked is freed before exit (was leaked).
 */
int main(int argc,char *argv[]){
  int index;
  int i;
  int count;
  int first;
  int n;
  int N;
  int prime;
  char *marked;
  double start, delta;

  omp_set_num_threads(1);

  if(argc!=2){
    printf("Command line: %s <m>\n",argv[0]);
    exit(1);
  }
  n = atoi(argv[1]);
  if (n < 1) {
    /* Guard: marked[0] and marked[1] are written unconditionally below,
     * so the buffer must hold at least two bytes (n >= 1 gives N >= 2). */
    printf("\nThere are 0 primes less than or equal to %d\n\n",n);
    return 0;
  }
  N = n+1;
  marked = malloc(N);
  if (marked==NULL){
    printf("Cannot allocate enough memory\n");
    exit(1);
  }

  /* Start with every entry marked prime, then strike 0 and 1. */
  for(i=0;i<N;i++){
    marked[i]=1;
  }
  marked[0]=0;
  marked[1]=0;

  index=2;
  prime=2;
  start = omp_get_wtime();
  while(prime*prime<=n){
    first = 2*prime;
    /* Strike out every multiple of the current prime. */
    #pragma omp parallel for
    for (i =first;i<N;i+=prime)
      marked[i]=0;
    /* Advance to the next unmarked value; one always exists below
     * sqrt(n)+1 (Bertrand's postulate), so this stays in bounds. */
    while(!marked[++index]);
    prime=index;
  }

  count=0;
  for(i=0;i<N;i++){
    count+=marked[i];
  }
  delta = omp_get_wtime() - start;

  printf("\nThere are %d primes less than or equal to %d\n\n",count,n);
  printf("%.6g seconds\n",delta);

  free(marked);    /* was leaked before */
  return 0;
}
simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized void xxx(int argc) { int x; // expected-note {{initialize the variable 'x' to silence this warning}} #pragma omp simd for (int i = 0; i < 10; ++i) argc = x; // expected-warning {{variable 'x' is uninitialized when used here}} } // expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}} #pragma omp simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}} #pragma omp simd foo // expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}} #pragma omp simd safelen(4) void test_no_clause() { int i; #pragma omp simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp simd' must be a for loop}} #pragma omp simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}} #pragma omp simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}} #pragma omp simd; for (i = 0; i < 16; ++i) ; // expected-error@+2 {{unexpected OpenMP clause 'firstprivate' in directive '#pragma omp simd'}} // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}} #pragma omp simd firstprivate(x); for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}} #pragma omp simd private(x); for (i = 0; i < 16; ++i) ; // 
expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}} #pragma omp simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_safelen() { int i; // expected-error@+1 {{expected '('}} #pragma omp simd safelen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd safelen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd safelen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd safelen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd safelen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp simd safelen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd safelen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd safelen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd safelen(4, ) for (i = 0; i < 16; ++i) ; // xxpected-error@+1 {{expected expression}} #pragma omp simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd safelen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} 
#pragma omp simd safelen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp simd safelen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp simd safelen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp simd safelen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp simd safelen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen() { int i; // expected-error@+1 {{expected '('}} #pragma omp simd simdlen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd simdlen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd simdlen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp simd simdlen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected 
')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp simd simdlen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp simd simdlen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp simd simdlen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp simd simdlen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen() { int i; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp simd simdlen(6) safelen(5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp simd safelen(5) simdlen(6) for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; // expected-error@+1 {{expected '('}} #pragma omp simd collapse for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to 
match this '('}} #pragma omp simd collapse( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd collapse() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd collapse(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd collapse(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp simd collapse 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} // xxpected-error@+1 {{expected expression}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 
for loops after '#pragma omp simd', but found only 1}} // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} #pragma omp simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}} // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp simd collapse(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp simd collapse(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp simd collapse(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp simd collapse(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as reduction}} #pragma omp parallel #pragma omp simd collapse(2) reduction(+ : i) for (i = 0; i < 16; ++i) // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}} for (int j = 0; j < 16; ++j) // expected-error@+2 2 {{reduction variable must be shared}} // expected-error@+1 {{OpenMP constructs may not be nested inside a simd 
region}} #pragma omp for reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; #pragma omp parallel #pragma omp for for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) #pragma omp simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd linear( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd linear(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp simd linear(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd linear() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd linear(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp simd linear(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp simd linear(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp simd linear(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp simd linear(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // expected-error@+1 {{expected expression}} #pragma omp simd linear(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd linear(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp simd linear(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp simd 
linear(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd linear(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd linear(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be linear}} #pragma omp simd linear(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as private}} // expected-error@+1 {{private variable cannot be linear}} #pragma omp simd private(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be private}} #pragma omp simd linear(x) private(x) for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}} #pragma omp simd linear(x, y : 0) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be lastprivate}} #pragma omp simd linear(x) lastprivate(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as lastprivate}} // expected-error@+1 {{lastprivate variable cannot be linear}} #pragma omp simd lastprivate(x) linear(x) for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd aligned( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd aligned(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp simd aligned(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd aligned() for (i = 0; i < 
16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd aligned(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp simd aligned(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp simd aligned(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp simd aligned(z) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd aligned(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one 
aligned clause}} #pragma omp simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private() { int i; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp simd private( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp simd private(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp simd private(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd private() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd private(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_firstprivate() { int i; // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{unexpected OpenMP clause 'firstprivate' in directive '#pragma omp simd'}} // expected-error@+1 {{expected expression}} #pragma omp simd firstprivate( for (i = 0; i < 16; ++i) ; } void test_lastprivate() { int i; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp simd lastprivate( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 
{{expected expression}} #pragma omp simd lastprivate(, for (i = 0; i < 16; ++i) ; // expected-error@+1 2 {{expected expression}} #pragma omp simd lastprivate(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd lastprivate() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd lastprivate(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_reduction() { int i, x, y; // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp simd reduction( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp simd reduction() for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp simd reduction(x) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected identifier}} #pragma omp simd reduction( : x) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp simd reduction(, for (i = 0; i < 16; ++i) ; // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected expression}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp simd reduction(+ for (i = 0; i < 16; ++i) ; // expected-error@+3 
{{expected ')'}} expected-note@+3 {{to match this '('}} // // expected-error@+1 {{expected expression}} #pragma omp simd reduction(+: for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd reduction(+ :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd reduction(+ :, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp simd reduction(+ : x, + : y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected identifier}} #pragma omp simd reduction(% : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(+ : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(* : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(- : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(& : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(| : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(^ : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(&& : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(|| : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(max : x) for (i = 0; i < 16; ++i) ; #pragma omp simd reduction(min : x) for (i = 0; i < 16; ++i) ; struct X { int x; }; struct X X; // expected-error@+1 {{expected variable name}} #pragma omp simd reduction(+ : X.x) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp simd reduction(+ : x + x) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void linear_modifiers(int argc) { int f; #pragma omp simd linear(f) for (int k = 0; k < argc; ++k) ++k; #pragma omp simd linear(val(f)) for (int 
k = 0; k < argc; ++k) ++k; #pragma omp simd linear(uval(f)) // expected-error {{expected 'val' modifier}} for (int k = 0; k < argc; ++k) ++k; #pragma omp simd linear(ref(f)) // expected-error {{expected 'val' modifier}} for (int k = 0; k < argc; ++k) ++k; #pragma omp simd linear(foo(f)) // expected-error {{expected 'val' modifier}} for (int k = 0; k < argc; ++k) ++k; }
crossbar.h
#ifndef _CROSSBAR_H #define _CROSSBAR_H #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <omp.h> #include <random> #include "config.h" #define NSUM 25 using namespace std; typedef struct Crossbar { float *std_d; int CB_l; int CB_w; int CB_n; float *CB_cell; float *input; float *output; float *CB_std; std::default_random_engine eng; Crossbar(){} Crossbar(int n, int l, int w){ CB_n = n; CB_l = l; CB_w = w; CB_cell = new float[CB_l*CB_w]; CB_std = new float[CB_l*CB_w]; input = new float[CB_l*(AD_WIDTH/DA_WIDTH)]; output = new float[CB_w*(AD_WIDTH/DA_WIDTH)]; } ~Crossbar(){ delete []CB_cell; delete []CB_std; delete []input; delete []output; } void init(){ float *tmp_cells = new float[CB_l*CB_w]; // transform cb_cell for (int i = 0; i < CB_w; i++){ for (int j = 0; j < CB_l; j++){ tmp_cells[i*CB_l + j] = CB_cell[j*CB_w + i]; } } memcpy(CB_cell, tmp_cells, sizeof(float)*CB_l*CB_w); delete []tmp_cells; get_std(); } void init(float *CB_cells, int n, int l, int w) { CB_l = l; CB_w = w; CB_n = n; CB_cell = new float[CB_l*CB_w]; input = new float[CB_l]; output = new float[CB_w]; // memcpy(CB_cell, CB_cells, CB_l*CB_w * sizeof(float)); // transform cb_cell for (int i = 0; i < CB_w; i++){ for (int j = 0; j < CB_l; j++){ CB_cell[i*CB_l + j] = CB_cells[j*CB_w + i]; // +get_noise(CB_cells[j*CB_w + i]); } } get_std(); } void get_std(){ float max_conductance = 40; // 25k ohm CB_std = new float[CB_l*CB_w]; for (int i = 0; i < CB_w; ++i) { for (int j = 0; j < CB_l; ++j) { float tmp = fabsf(CB_cell[i*CB_l+j]); // CB_std[i*CB_l+j] = -0.0006034 * (tmp * 1000) * (tmp * 1000) + 0.06184 * tmp + 0.948661*0.000001; CB_std[i*CB_l+j] = (-0.0006034 * (tmp * max_conductance + 4) * (tmp * max_conductance + 4) + 0.06184 * (tmp * max_conductance + 4) + 0.7240) / max_conductance; } } } double gaussrand() { static double V1, V2, S; static int phase = 0; double X; if (phase == 0) { do { double U1 = (double)rand() / RAND_MAX; double U2 = (double)rand() / RAND_MAX; V1 
= 2 * U1 - 1; V2 = 2 * U2 - 1; S = V1 * V1 + V2 * V2; } while (S >= 1 || S == 0); X = V1 * sqrt(-2 * log(S) / S); } else X = V2 * sqrt(-2 * log(S) / S); phase = 1 - phase; return X; } double mygaussrand() { double x = 0; int i; for(i = 0; i < NSUM; i++) { x += (double)rand() / RAND_MAX; } x -= NSUM / 2.0; x /= sqrt(NSUM / 12.0); return x; } double mygaussrand2(){ std::default_random_engine eng; std::normal_distribution<double> n(0, 1); return n(eng); } float get_noise(float x) { // float noise; x = fabsf(x); float noise = -0.0006034 * (x * 1000) * (x * 1000) + 0.06184 * x + 0.948661*0.000001; noise = noise * gaussrand(); return noise; } void MatrixMul(float *input, float *CB_cells, float *output, int w, int l) { int i = 0; std::normal_distribution<float> norm(0, 1); #pragma omp parallel for private(i) //shared(w, l) for (i = 0; i < w; i++){ float tmp = 0; int tmp_k = i*l; int j = 0; #pragma omp parallel for private(j) reduction(+:tmp) shared(tmp_k)//, input, CB_cells) for (j = 0; j < l; j++){ // float tmpres = input[j] * (CB_cells[tmp_k+j] + (CB_std[tmp_k+j] * mygaussrand2())); float tmpres = input[j] * (CB_cells[tmp_k+j] + (CB_std[tmp_k+j] * norm(eng))); tmp = tmp + tmpres; } output[i] = tmp; } } void run(){ // crossbar computation for big crossbar int i = 0; int s = 0; std::normal_distribution<float> norm(0, 1); for (s = 0; s < AD_WIDTH/DA_WIDTH; s++) { // considering DA and AD #pragma omp parallel for private(i) for (i = 0; i < CB_w; i++) { float tmp = 0; int tmp_k = i * CB_l; int tmp_m = s * CB_l; int j = 0; #pragma omp parallel for private(j) reduction(+:tmp) shared(tmp_k, tmp_m) for (j = 0; j < CB_l; j++) { float tmpres = input[tmp_m + j] * (CB_cell[tmp_k + j] + (CB_std[tmp_k+j] * norm(eng))); tmp = tmp + tmpres; } output[s * CB_w + i] = tmp; } } } void run(float *input, float *output, bool noise=true) { float *output_d = new float[CB_w]; float *input_d = new float[CB_l]; memcpy(input_d, input, CB_l * sizeof(float)); MatrixMul(input_d, CB_cell, output_d, 
CB_w, CB_l); memcpy(output, output_d, CB_w * sizeof(float)); delete[] output_d; delete[] input_d; } void free_space() { delete []CB_cell; delete []CB_std; delete []input; delete []output; } }CROSSBAR; CROSSBAR entire_cb(1, ENTIRE_L, ENTIRE_W); #endif // !_CROSSBAR_H
channel.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC H H AAA N N N N EEEEE L % % C H H A A NN N NN N E L % % C HHHHH AAAAA N N N N N N EEE L % % C H H A A N NN N NN E L % % CCCC H H A A N N N N EEEEE LLLLL % % % % % % MagickCore Image Channel Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/cache-private.h" #include "magick/channel.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/exception-private.h" #include "magick/enhance.h" #include "magick/image.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/resource_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m b i n e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CombineImages() combines one or more images into a single image. The % grayscale value of the pixels of each image in the sequence is assigned in % order to the specified channels of the combined image. The typical % ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc. % % The format of the CombineImages method is: % % Image *CombineImages(const Image *image,const ChannelType channel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CombineImages(const Image *image,const ChannelType channel, ExceptionInfo *exception) { #define CombineImageTag "Combine/Image" CacheView *combine_view; const Image *next; Image *combine_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Ensure the image are the same size. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { if ((next->columns != image->columns) || (next->rows != image->rows)) ThrowImageException(OptionError,"ImagesAreNotTheSameSize"); } combine_image=CloneImage(image,0,0,MagickTrue,exception); if (combine_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(combine_image,DirectClass) == MagickFalse) { InheritException(exception,&combine_image->exception); combine_image=DestroyImage(combine_image); return((Image *) NULL); } if (IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) { if (fabs(image->gamma-1.0) <= MagickEpsilon) (void) SetImageColorspace(combine_image,RGBColorspace); else (void) SetImageColorspace(combine_image,sRGBColorspace); } if ((channel & OpacityChannel) != 0) combine_image->matte=MagickTrue; (void) SetImageBackgroundColor(combine_image); /* Combine images. 
*/ status=MagickTrue; progress=0; combine_view=AcquireAuthenticCacheView(combine_image,exception); for (y=0; y < (ssize_t) combine_image->rows; y++) { CacheView *image_view; const Image *next; PixelPacket *pixels; register const PixelPacket *magick_restrict p; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns, 1,exception); if (pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } next=image; if (((channel & RedChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelRed(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & GreenChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelGreen(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & BlueChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelBlue(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL)) { image_view=AcquireVirtualCacheView(next,exception); 
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,p))); p++; q++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace) && (next != (Image *) NULL)) { IndexPacket *indexes; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewAuthenticIndexQueue(combine_view); for (x=0; x < (ssize_t) combine_image->columns; x++) { SetPixelIndex(indexes+x,ClampToQuantum(GetPixelIntensity(image,p))); p++; } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,CombineImageTag,progress++, combine_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } combine_view=DestroyCacheView(combine_view); if (IsGrayColorspace(combine_image->colorspace) != MagickFalse) (void) TransformImageColorspace(combine_image,sRGBColorspace); if (status == MagickFalse) combine_image=DestroyImage(combine_image); return(combine_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e A l p h a C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageAlphaChannel() returns MagickFalse if the image alpha channel is % not activated. That is, the image is RGB rather than RGBA or CMYK rather % than CMYKA. 
% % The format of the GetImageAlphaChannel method is: % % MagickBooleanType GetImageAlphaChannel(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image) { assert(image != (const Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); return(image->matte); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImageChannel() separates a channel from the image and returns it as % a grayscale image. A channel is a particular color component of each pixel % in the image. % % The format of the SeparateImageChannel method is: % % MagickBooleanType SeparateImageChannel(Image *image, % const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: Identify which channel to extract: RedChannel, GreenChannel, % BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or BlackChannel. % */ MagickExport Image *SeparateImage(const Image *image,const ChannelType channel, ExceptionInfo *exception) { Image *separate_image; MagickBooleanType status; /* Initialize separate image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); separate_image=CloneImage(image,0,0,MagickTrue,exception); if (separate_image == (Image *) NULL) return((Image *) NULL); status=SeparateImageChannel(separate_image,channel); if (status == MagickFalse) separate_image=DestroyImage(separate_image); return(separate_image); } MagickExport MagickBooleanType SeparateImageChannel(Image *image, const ChannelType channel) { #define SeparateImageTag "Separate/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (channel == GrayChannels) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); /* Separate image channels. 
*/ status=MagickTrue; progress=0; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); switch (channel) { case RedChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelGreen(q,GetPixelRed(q)); SetPixelBlue(q,GetPixelRed(q)); q++; } break; } case GreenChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelGreen(q)); SetPixelBlue(q,GetPixelGreen(q)); q++; } break; } case BlueChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelBlue(q)); SetPixelGreen(q,GetPixelBlue(q)); q++; } break; } case OpacityChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelOpacity(q)); SetPixelGreen(q,GetPixelOpacity(q)); SetPixelBlue(q,GetPixelOpacity(q)); q++; } break; } case BlackChannel: { if ((image->storage_class != PseudoClass) && (image->colorspace != CMYKColorspace)) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelIndex(indexes+x)); SetPixelGreen(q,GetPixelIndex(indexes+x)); SetPixelBlue(q,GetPixelIndex(indexes+x)); q++; } break; } case TrueAlphaChannel: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(q,GetPixelAlpha(q)); SetPixelGreen(q,GetPixelAlpha(q)); SetPixelBlue(q,GetPixelAlpha(q)); q++; } break; } case GrayChannels: { for (x=0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(q,ClampToQuantum(GetPixelIntensity(image,q))); q++; } break; } default: break; } if 
(SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SeparateImageChannel) #endif proceed=SetImageProgress(image,SeparateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); if (channel != GrayChannels) { image->matte=MagickFalse; image->intensity=Rec709LuminancePixelIntensityMethod; (void) SetImageColorspace(image,LinearGRAYColorspace); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e p a r a t e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SeparateImages() returns a separate grayscale image for each channel % specified. % % The format of the SeparateImages method is: % % MagickBooleanType SeparateImages(const Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: Identify which channels to extract: RedChannel, GreenChannel, % BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, % YellowChannel, or BlackChannel. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *SeparateImages(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
  Image
    *images,
    *separate_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  images=NewImageList();
  /*
    For each requested channel, clone the image and reduce the clone to a
    grayscale rendition of that channel.  CloneImage() can fail (e.g. under
    resource limits); the original code passed the resulting NULL straight
    into SeparateImageChannel(), which asserts on a NULL image, so each
    clone is now checked before use.  Failed clones are simply skipped;
    the failure reason is already recorded in `exception'.
  */
  if ((channel & RedChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,RedChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & GreenChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,GreenChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & BlueChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,BlueChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if (((channel & BlackChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,BlackChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & AlphaChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          /* TrueAlphaChannel extracts the alpha data itself, not the
             matte-composited result. */
          (void) SeparateImageChannel(separate_image,TrueAlphaChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  return(images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t   I m a g e   A l p h a   C h a n n e l                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
%  channel.
%
%  The format of the SetImageAlphaChannel method is:
%
%      MagickBooleanType SetImageAlphaChannel(Image *image,
%        const AlphaChannelType alpha_type)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o alpha_type: The alpha channel type: ActivateAlphaChannel, % AssociateAlphaChannel, CopyAlphaChannel, Disassociate, % DeactivateAlphaChannel, ExtractAlphaChannel, OpaqueAlphaChannel, % ResetAlphaChannel, SetAlphaChannel, ShapeAlphaChannel, and % TransparentAlphaChannel. % */ MagickExport MagickBooleanType SetImageAlphaChannel(Image *image, const AlphaChannelType alpha_type) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); exception=(&image->exception); status=MagickTrue; switch (alpha_type) { case ActivateAlphaChannel: { image->matte=MagickTrue; break; } case AssociateAlphaChannel: { /* Associate alpha. */ status=SetImageStorageClass(image,DirectClass); if (status == MagickFalse) break; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; gamma=QuantumScale*GetPixelAlpha(q); SetPixelRed(q,ClampToQuantum(gamma*GetPixelRed(q))); SetPixelGreen(q,ClampToQuantum(gamma*GetPixelGreen(q))); SetPixelBlue(q,ClampToQuantum(gamma*GetPixelBlue(q))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->matte=MagickFalse; break; } case BackgroundAlphaChannel: { IndexPacket index; MagickBooleanType status; MagickPixelPacket background; PixelPacket pixel; /* Set transparent pixels to 
background color. */ if (image->matte == MagickFalse) break; status=SetImageStorageClass(image,DirectClass); if (status == MagickFalse) break; GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *) NULL,&background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); index=0; SetPixelPacket(image,&background,&pixel,&index); status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (q->opacity == TransparentOpacity) { SetPixelRed(q,pixel.red); SetPixelGreen(q,pixel.green); SetPixelBlue(q,pixel.blue); } q++; } if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,index); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } case CopyAlphaChannel: case ShapeAlphaChannel: { /* Special usage case for SeparateImageChannel(): copy grayscale color to the alpha channel. */ status=SeparateImageChannel(image,GrayChannels); image->matte=MagickTrue; /* make sure transparency is now on! */ if (alpha_type == ShapeAlphaChannel) { MagickPixelPacket background; /* Reset all color channels to background color. 
*/ GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&(image->background_color),(IndexPacket *) NULL,&background); (void) LevelColorsImage(image,&background,&background,MagickTrue); } break; } case DeactivateAlphaChannel: { image->matte=MagickFalse; break; } case DisassociateAlphaChannel: { status=SetImageStorageClass(image,DirectClass); if (status == MagickFalse) break; image->matte=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double alpha, gamma; alpha=QuantumScale*GetPixelAlpha(q); gamma=PerceptibleReciprocal(alpha); SetPixelRed(q,ClampToQuantum(gamma*GetPixelRed(q))); SetPixelGreen(q,ClampToQuantum(gamma*GetPixelGreen(q))); SetPixelBlue(q,ClampToQuantum(gamma*GetPixelBlue(q))); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->matte=MagickFalse; break; } case ExtractAlphaChannel: { status=SeparateImageChannel(image,TrueAlphaChannel); image->matte=MagickFalse; break; } case RemoveAlphaChannel: case FlattenAlphaChannel: { IndexPacket index; MagickPixelPacket background; PixelPacket pixel; /* Flatten image pixels over the background pixels. 
*/ if (image->matte == MagickFalse) break; if (SetImageStorageClass(image,DirectClass) == MagickFalse) break; GetMagickPixelPacket(image,&background); SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *) NULL,&background); if (image->colorspace == CMYKColorspace) ConvertRGBToCMYK(&background); (void) memset(&pixel,0,sizeof(pixel)); index=0; SetPixelPacket(image,&background,&pixel,&index); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma, opacity; gamma=1.0-QuantumScale*QuantumScale*q->opacity*pixel.opacity; opacity=(double) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); q->red=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->red, (MagickRealType) q->opacity,(MagickRealType) pixel.red, (MagickRealType) pixel.opacity)); q->green=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->green, (MagickRealType) q->opacity,(MagickRealType) pixel.green, (MagickRealType) pixel.opacity)); q->blue=ClampToQuantum(gamma*MagickOver_((MagickRealType) q->blue, (MagickRealType) q->opacity,(MagickRealType) pixel.blue, (MagickRealType) pixel.opacity)); q->opacity=ClampToQuantum(opacity); q++; } if (image->colorspace == CMYKColorspace) { indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,index); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); 
return(status); } case ResetAlphaChannel: /* deprecated */ case OpaqueAlphaChannel: { status=SetImageOpacity(image,OpaqueOpacity); break; } case SetAlphaChannel: { if (image->matte == MagickFalse) status=SetImageOpacity(image,OpaqueOpacity); break; } case TransparentAlphaChannel: { status=SetImageOpacity(image,TransparentOpacity); break; } case UndefinedAlphaChannel: break; } if (status == MagickFalse) return(status); return(SyncImagePixelCache(image,&image->exception)); }
Parallel.h
#pragma once #include <ATen/ATen.h> #include <cstddef> #ifdef _OPENMP #include <omp.h> #endif namespace at { namespace internal { // This parameter is heuristically chosen to determine the minimum number of // work that warrants paralellism. For example, when summing an array, it is // deemed inefficient to parallelise over arrays shorter than 32768. Further, // no parallel algorithm (such as parallel_reduce) should split work into // smaller than GRAIN_SIZE chunks. constexpr int64_t GRAIN_SIZE = 32768; } // namespace internal inline int64_t divup(int64_t x, int64_t y) { return (x + y - 1) / y; } template <class F> inline void parallel_for( const int64_t begin, const int64_t end, const int64_t grain_size, const F& f) { #ifdef _OPENMP #pragma omp parallel if ((end - begin) >= grain_size) { int64_t num_threads = omp_get_num_threads(); int64_t tid = omp_get_thread_num(); int64_t chunk_size = divup((end - begin), num_threads); int64_t begin_tid = begin + tid * chunk_size; if (begin_tid < end) f(begin_tid, std::min(end, chunk_size + begin_tid)); } #else if (begin < end) { f(begin, end); } #endif } template <class scalar_t, class F, class SF> inline scalar_t parallel_reduce( const int64_t begin, const int64_t end, const int64_t grain_size, const scalar_t ident, const F f, const SF sf) { if (get_num_threads() == 1) { return f(begin, end, ident); } else { const int64_t num_results = divup((end - begin), grain_size); std::vector<scalar_t> results(num_results); scalar_t* results_data = results.data(); #pragma omp parallel for if ((end - begin) >= grain_size) for (int64_t id = 0; id < num_results; id++) { int64_t i = begin + id * grain_size; results_data[id] = f(i, i + std::min(end - i, grain_size), ident); } return std::accumulate( results_data, results_data + results.size(), ident, sf); } } } // namespace at
GB_unaryop__lnot_uint64_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint64_int16
// op(A') function:  GB_tran__lnot_uint64_int16

// C type:   uint64_t
// A type:   int16_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = !(aij != 0)

// type of the entries of the input matrix A
#define GB_ATYPE \
    int16_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// the pth entry of the output array Cx
#define GB_CX(p) Cx [p]

// unary operator: logical NOT of the (casted) value
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting from the A type to the C type
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT64 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = !(aij != 0) to each of the anz entries of Ax, writing the
// result into Cx.  Each iteration is independent, so the loop is parallelized
// with a static schedule over nthreads OpenMP threads.
GrB_Info GB_unop__lnot_uint64_int16
(
    uint64_t *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Same operator as above, fused with a transpose: the shared transpose
// template is instantiated here with this file's GB_* operator macros.
GrB_Info GB_tran__lnot_uint64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
csr_kernels.h
#ifndef CSR_KERNELS_H
#define CSR_KERNELS_H

extern "C" {

/*!
 * CSR matrix sketching with CountSketch and Gaussian transforms. Parallelized with OpenMP.
 * A is a n*d CSR matrix. S is a r*n CountSketch. C is a row major matrix.
 * If m == 0: C has size r * d and we only apply C <- S * A (G is not applied).
 * If m > 0:  C has size m * d. In this case C <- G * S * A * (1/sqrt(m))
 * G is a m*r matrix with elements from the standard normal distribution.
 *
 * @param d the number of columns of A and C.
 * @param m the number of rows of G. If m>0, it is also the number of rows of C.
 * @param n the number of rows of A and columns of S.
 * @param r the number of rows of S and columns of G. If m=0, r is also the number of rows of C.
 * @param nnz the number of non-zero elements of A.  NOTE(review): not used by this
 *            kernel; kept for interface uniformity with the other kernels.
 * @param A_indptr pointer to the indptr array of the CSR matrix A.
 * @param A_indices pointer to the indices array of the CSR matrix A.
 * @param A_data pointer to the data array of the CSR matrix A.
 * @param C pointer to the array storing matrix C in row-major format.
 */
void csrcgs( const int d, const int m, const int n, const int r, const int nnz,
             int *const A_indptr, int *const A_indices, double *const A_data,
             double *const C )
{
    // CountSketch, set_value, set_randn, gemm and scale are project helpers
    // declared elsewhere in this library.
    CountSketch S( r, n, std::thread::hardware_concurrency() );
    S.populate();
    if ( m == 0 ) {
        // C <- S * A directly; rows 0..r of the sketch go straight into C.
        S.apply_csr( d, 1, A_indptr, A_indices, A_data, 0, C, 0, r );
    } else {
        set_value( m, d, C, 0 );
        // G*S*A is accumulated block-by-block over the r rows of S*A so that
        // only a block_size*d slice (_T) of S*A is materialized at a time.
        const int block_size = d;
        const int n_blocks = static_cast<int>( std::ceil( static_cast<double>( r ) /
                             static_cast<double>( block_size ) ) );
        double *_G = new double[m * block_size];   // current block of G (m x block_size)
        double *_T = new double[block_size * d];   // current block of S*A (block_size x d)
        set_value( block_size, d, _T, 0 );
        for ( int i = 0; i < n_blocks - 1; ++i ) {
            set_randn( m, block_size, _G );
            S.apply_csr( d, 1, A_indptr, A_indices, A_data, 0, _T,
                         i * block_size, ( i + 1 ) *block_size );
            // C += _G * _T
            gemm( m, d, block_size, 1, _G, _T, 1, C );
        }
        // Last block may be ragged: only r - (n_blocks-1)*block_size rows remain.
        set_randn( m, block_size, _G );
        S.apply_csr( d, 1, A_indptr, A_indices, A_data, 0, _T,
                     ( n_blocks - 1 ) * block_size, r );
        gemm( m, d, ( r - ( n_blocks - 1 ) *block_size ),
              1, _G, _T, 1, C );
        double scale_factor = static_cast<double>( 1 ) / sqrt( static_cast<double>( m ) );
        scale( m, d, C, scale_factor );
        delete[] _T;
        delete[] _G;
    }
}

/*!
 * Computes the diagonal of the matrix A * B * A' and stores it in the vector x. A is a CSR
 * matrix and B is dense in row-major format. Parallelized with OpenMP.
 *
 * @param m the number of rows of A.
 * @param n the number of columns of A and rows of B.
 * @param nnz the number of non-zero elements of A.  NOTE(review): unused here.
 * @param alpha scalar to multiply ( A * B ).
 * @param A_indptr pointer to the indptr array of the CSR matrix A.
 * @param A_indices pointer to the indices array of the CSR matrix A.
 * @param A_data pointer to the data array of the CSR matrix A.
 * @param beta scalar to multiply the vector x.
 * @param B pointer to the array storing matrix B in row-major format.
 * @param x pointer to the array storing vector x (length m).
 */
void csrsqn( const int m, const int n, const int nnz, const double alpha,
             int *const A_indptr, int *const A_indices, double *const A_data,
             const double beta, double *const x_unused_placeholder_do_not_use,
             double *const x )
{
    int i = 0;
    if ( beta != 1 ) {
        scale( m, 1, x, beta );
    }
    if ( alpha != 0 ) {
        // Dynamic work-sharing: threads repeatedly claim the next unprocessed
        // row index via an atomic fetch-and-increment of the shared counter i.
        // Each row index is claimed by exactly one thread, so the update of
        // x[_i] below is race-free.
        #pragma omp parallel shared(i)
        {
            int up, lo, ind1, ind2, _i;
            double A_ij, x_i;
            double *_B;
            #pragma omp atomic capture
            _i = i++;
            for (; _i < m; ) {
                lo = A_indptr[_i];
                up = A_indptr[_i + 1];
                x_i = 0;
                if ( lo < up ) {
                    // x_i = sum_{j,k in row _i} A[_i,j] * B[j,k] * A[_i,k]
                    for ( ind1 = lo; ind1 < up; ++ind1 ) {
                        A_ij = A_data[ind1];
                        _B = & ( B[A_indices[ind1] * n] );
                        for ( ind2 = lo; ind2 < up; ++ind2 ) {
                            x_i += A_ij * A_data[ind2] * _B[A_indices[ind2]];
                        }
                    }
                }
                x[_i] += alpha * x_i;
                #pragma omp atomic capture
                _i = i++;
            }
        }
    }
}

/*!
 * Computes: C <- alpha * A' * A + beta * C. Parallelized with OpenMP. A' is the transpose of
 * the CSR matrix A and C is dense and stored in row-major format.
 *
 * @param m the number of rows of A.
 * @param n the number of columns of A, rows of C and columns of C.
 * @param nnz the number of non-zero elements of A.  NOTE(review): unused here.
 * @param alpha scalar to multiply ( A' * A ).
 * @param A_indptr pointer to the indptr array of the CSR matrix A.
 * @param A_indices pointer to the indices array of the CSR matrix A.
 * @param A_data pointer to the data array of the CSR matrix A.
 * @param beta scalar to multiply the matrix C.
 * @param C pointer to the array storing matrix C in row-major format.
 */
void csrrk( const int m, const int n, const int nnz, const double alpha,
            int *const A_indptr, int *const A_indices, double *const A_data,
            const double beta, double *const C )
{
    int i, k;
    double *_C;
    if ( beta != 1 ) {
        scale( n, n, C, beta );
    }
    if ( alpha == 0 ) {
        return;
    }
    // Every thread scans all of A, but only writes rows of C inside its own
    // contiguous band [limits.first, limits.second) — no write races on C.
    #pragma omp parallel private(i, k, _C)
    {
        const int thread_id = omp_get_thread_num();
        std::pair<int, int> limits;
        int up, lo, k_ind, j_ind;
        double A_ki;
        int block_size = static_cast<int>( std::ceil( static_cast<double>( n ) /
                         static_cast<double> ( omp_get_num_threads() ) ) );
        limits.first = block_size * thread_id;
        limits.second = block_size * ( thread_id + 1 );
        limits.second = std::min( limits.second, n );
        for ( k = 0; k < m; ++k ) {
            lo = A_indptr[k];
            up = A_indptr[k + 1];
            for ( k_ind = lo; k_ind < up; ++k_ind ) {
                i = A_indices[k_ind];
                // Row i of C <- C + alpha * A[k,i] * (row k of A), if owned.
                if ( ( i >= limits.first ) && ( i < limits.second ) ) {
                    A_ki = alpha * A_data[k_ind];
                    _C = & ( C[i * n] );
                    for ( j_ind = lo; j_ind < up; ++j_ind ) {
                        _C[A_indices[j_ind]] += A_ki * A_data[j_ind];
                    }
                }
            }
        }
    }
}

/*!
 * Computes: C <- A' * G * (1/sqrt(m)). Parallelized with OpenMP.
 *  - A' has size d*n, and is the transpose of the n*d CSR matrix A
 *  - G has elements from the standard normal distribution.
 *  - C has size d*m and is stored in row-major format
 *
 * @param d the number of columns of A
 * @param m the number of columns of C
 * @param n the number of rows of A
 * @param nnz the number of non-zero elements of A.  NOTE(review): unused here.
 * @param A_indptr pointer to the indptr array of the CSR matrix A
 * @param A_indices pointer to the indices array of the CSR matrix A
 * @param A_data pointer to the data array of the CSR matrix A
 * @param C pointer to the array storing matrix C in row-major format
 */
void csrjlt( int d, int m, int n, int nnz, int *A_indptr, int *A_indices,
             double *A_data, double *C )
{
    set_value( m, d, C, 0 );
    double *_C;
    int i, j, k, up, lo;
    // Each thread owns a contiguous band of C's columns; entries of G for
    // that band are drawn from the thread's private RNG, one slice per
    // nonempty row k of A, so G is never materialized in full.
    #pragma omp parallel private(_C, i, j, k, up, lo)
    {
        double A_ki;
        const int thread_id = omp_get_thread_num();
        const int block_size = static_cast<int>( std::ceil( static_cast<double>( m ) /
                               static_cast<double> ( omp_get_num_threads() ) ) );
        std::pair<int, int> row_limits;
        row_limits.first = block_size * thread_id;
        row_limits.second = block_size * ( thread_id + 1 );
        row_limits.second = std::min( row_limits.second, m );
        // NOTE(review): if m < omp_get_num_threads(), trailing threads get
        // row_limits.first > m and n_rows goes negative, making the
        // new double[n_rows] below undefined — confirm callers guarantee
        // m >= thread count, or clamp row_limits.first as well.
        const int n_rows = row_limits.second - row_limits.first;
        double *_G = new double[n_rows];
        std::random_device rd{};
        std::mt19937_64 gen{rd()};
        std::normal_distribution<double> dist;
        for ( k = 0; k < n; ++k ) {
            lo = A_indptr[k];
            up = A_indptr[k + 1];
            if ( lo < up ) {
                // Fresh slice of G's row k for this thread's column band.
                # pragma omp simd
                for ( j = 0; j < n_rows; ++j ) {
                    _G[j] = dist( gen );
                }
                for ( i = lo; i < up; ++i ) {
                    A_ki = A_data[i];
                    _C = & ( C[A_indices[i] * m + row_limits.first] );
                    #pragma omp simd
                    for ( j = 0; j < n_rows; ++j ) {
                        _C[j] += A_ki * _G[j];
                    }
                }
            }
        }
        delete[] _G;
    }
    // scale() is applied to m*d entries; C is d x m, but the element counts
    // coincide (m*d == d*m), so every entry of C is scaled exactly once —
    // presumably intended; confirm scale()'s row/column semantics.
    double scale_factor = static_cast<double>( 1 ) / sqrt( static_cast<double>( m ) );
    scale( m, d, C, scale_factor );
}

} // extern "C"

#endif
prospector.c
#define _DEFAULT_SOURCE // MAP_ANONYMOUS #include <math.h> #include <errno.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <fcntl.h> #include <dlfcn.h> #include <unistd.h> #include <sys/mman.h> #include <sys/time.h> #define ABI __attribute__((sysv_abi)) #define countof(a) ((int)(sizeof(a) / sizeof(0[a]))) static uint64_t xoroshiro128plus(uint64_t s[2]) { uint64_t s0 = s[0]; uint64_t s1 = s[1]; uint64_t result = s0 + s1; s1 ^= s0; s[0] = ((s0 << 24) | (s0 >> 40)) ^ s1 ^ (s1 << 16); s[1] = (s1 << 37) | (s1 >> 27); return result; } enum hf_type { /* 32 bits */ HF32_XOR, // x ^= const32 HF32_MUL, // x *= const32 (odd) HF32_ADD, // x += const32 HF32_ROT, // x = (x << const5) | (x >> (32 - const5)) HF32_NOT, // x = ~x HF32_BSWAP,// x = bswap32(x) HF32_XORL, // x ^= x << const5 HF32_XORR, // x ^= x >> const5 HF32_ADDL, // x += x << const5 HF32_SUBL, // x -= x << const5 /* 64 bits */ HF64_XOR, HF64_MUL, HF64_ADD, HF64_ROT, HF64_NOT, HF64_BSWAP, HF64_XORL, HF64_XORR, HF64_ADDL, HF64_SUBL, }; static const char hf_names[][8] = { [HF32_XOR] = "32xor", [HF32_MUL] = "32mul", [HF32_ADD] = "32add", [HF32_ROT] = "32rot", [HF32_NOT] = "32not", [HF32_BSWAP]= "32bswap", [HF32_XORL] = "32xorl", [HF32_XORR] = "32xorr", [HF32_ADDL] = "32addl", [HF32_SUBL] = "32subl", [HF64_XOR] = "64xor", [HF64_MUL] = "64mul", [HF64_ADD] = "64add", [HF64_ROT] = "64rot", [HF64_NOT] = "64not", [HF64_BSWAP]= "64bswap", [HF64_XORL] = "64xorl", [HF64_XORR] = "64xorr", [HF64_ADDL] = "64addl", [HF64_SUBL] = "64subl", }; #define FOP_LOCKED (1 << 0) struct hf_op { enum hf_type type; uint64_t constant; int flags; }; /* Randomize the constants of the given hash operation. 
*/ static void hf_randomize(struct hf_op *op, uint64_t s[2]) { uint64_t r = xoroshiro128plus(s); switch (op->type) { case HF32_NOT: case HF64_NOT: case HF32_BSWAP: case HF64_BSWAP: op->constant = 0; break; case HF32_XOR: case HF32_ADD: op->constant = (uint32_t)r; break; case HF32_MUL: op->constant = (uint32_t)r | 1; break; case HF32_ROT: case HF32_XORL: case HF32_XORR: case HF32_ADDL: case HF32_SUBL: op->constant = 1 + r % 31; break; case HF64_XOR: case HF64_ADD: op->constant = r; break; case HF64_MUL: op->constant = r | 1; break; case HF64_ROT: case HF64_XORL: case HF64_XORR: case HF64_ADDL: case HF64_SUBL: op->constant = 1 + r % 63; break; } } #define F_U64 (1 << 0) #define F_TINY (1 << 1) // don't use big constants static void hf_gen(struct hf_op *op, uint64_t s[2], int flags) { uint64_t r = xoroshiro128plus(s); int min = flags & F_TINY ? 3 : 0; op->type = (r % (9 - min)) + min + (flags & F_U64 ? 9 : 0); hf_randomize(op, s); } /* Return 1 if these operations may be adjacent */ static int hf_type_valid(enum hf_type a, enum hf_type b) { switch (a) { case HF32_NOT: case HF32_BSWAP: case HF32_XOR: case HF32_MUL: case HF32_ADD: case HF32_ROT: case HF64_NOT: case HF64_BSWAP: case HF64_XOR: case HF64_MUL: case HF64_ADD: case HF64_ROT: return a != b; case HF32_XORL: case HF32_XORR: case HF32_ADDL: case HF32_SUBL: case HF64_XORL: case HF64_XORR: case HF64_ADDL: case HF64_SUBL: return 1; } abort(); } static void hf_genfunc(struct hf_op *ops, int n, int flags, uint64_t s[2]) { hf_gen(ops, s, flags); for (int i = 1; i < n; i++) { do { hf_gen(ops + i, s, flags); } while (!hf_type_valid(ops[i - 1].type, ops[i].type)); } } /* Randomize the parameters of the given functoin. 
*/ static void hf_randfunc(struct hf_op *ops, int n, uint64_t s[2]) { for (int i = 0; i < n; i++) if (!(ops[i].flags & FOP_LOCKED)) hf_randomize(ops + i, s); } static void hf_print(const struct hf_op *op, char *buf) { unsigned long long c = op->constant; switch (op->type) { case HF32_NOT: case HF64_NOT: sprintf(buf, "x = ~x;"); break; case HF32_BSWAP: sprintf(buf, "x = __builtin_bswap32(x);"); break; case HF64_BSWAP: sprintf(buf, "x = __builtin_bswap64(x);"); break; case HF32_XOR: sprintf(buf, "x ^= 0x%08llx;", c); break; case HF32_MUL: sprintf(buf, "x *= 0x%08llx;", c); break; case HF32_ADD: sprintf(buf, "x += 0x%08llx;", c); break; case HF32_ROT: sprintf(buf, "x = (x << %llu) | (x >> %lld);", c, 32 - c); break; case HF32_XORL: sprintf(buf, "x ^= x << %llu;", c); break; case HF32_XORR: sprintf(buf, "x ^= x >> %llu;", c); break; case HF32_ADDL: sprintf(buf, "x += x << %llu;", c); break; case HF32_SUBL: sprintf(buf, "x -= x << %llu;", c); break; case HF64_XOR: sprintf(buf, "x ^= 0x%016llx;", c); break; case HF64_MUL: sprintf(buf, "x *= 0x%016llx;", c); break; case HF64_ADD: sprintf(buf, "x += 0x%016llx;", c); break; case HF64_ROT: sprintf(buf, "x = (x << %llu) | (x >> %lld);", c, 64 - c); break; case HF64_XORL: sprintf(buf, "x ^= x << %llu;", c); break; case HF64_XORR: sprintf(buf, "x ^= x >> %llu;", c); break; case HF64_ADDL: sprintf(buf, "x += x << %llu;", c); break; case HF64_SUBL: sprintf(buf, "x -= x << %llu;", c); break; } } static void hf_printfunc(const struct hf_op *ops, int n, FILE *f) { if (ops[0].type <= HF32_SUBL) fprintf(f, "uint32_t\nhash(uint32_t x)\n{\n"); else fprintf(f, "uint64_t\nhash(uint64_t x)\n{\n"); for (int i = 0; i < n; i++) { char buf[64]; hf_print(ops + i, buf); fprintf(f, " %s\n", buf); } fprintf(f, " return x;\n}\n"); } static unsigned char * hf_compile(const struct hf_op *ops, int n, unsigned char *buf) { if (ops[0].type <= HF32_SUBL) { /* mov eax, edi*/ *buf++ = 0x89; *buf++ = 0xf8; } else { /* mov rax, rdi*/ *buf++ = 0x48; *buf++ = 
0x89; *buf++ = 0xf8; } for (int i = 0; i < n; i++) { switch (ops[i].type) { case HF32_NOT: /* not eax */ *buf++ = 0xf7; *buf++ = 0xd0; break; case HF32_BSWAP: /* bswap eax */ *buf++ = 0x0f; *buf++ = 0xc8; break; case HF32_XOR: /* xor eax, imm32 */ *buf++ = 0x35; *buf++ = ops[i].constant >> 0; *buf++ = ops[i].constant >> 8; *buf++ = ops[i].constant >> 16; *buf++ = ops[i].constant >> 24; break; case HF32_MUL: /* imul eax, eax, imm32 */ *buf++ = 0x69; *buf++ = 0xc0; *buf++ = ops[i].constant >> 0; *buf++ = ops[i].constant >> 8; *buf++ = ops[i].constant >> 16; *buf++ = ops[i].constant >> 24; break; case HF32_ADD: /* add eax, imm32 */ *buf++ = 0x05; *buf++ = ops[i].constant >> 0; *buf++ = ops[i].constant >> 8; *buf++ = ops[i].constant >> 16; *buf++ = ops[i].constant >> 24; break; case HF32_ROT: /* rol eax, imm8 */ *buf++ = 0xc1; *buf++ = 0xc0; *buf++ = ops[i].constant; break; case HF32_XORL: /* mov edi, eax */ *buf++ = 0x89; *buf++ = 0xc7; /* shl edi, imm8 */ *buf++ = 0xc1; *buf++ = 0xe7; *buf++ = ops[i].constant; /* xor eax, edi */ *buf++ = 0x31; *buf++ = 0xf8; break; case HF32_XORR: /* mov edi, eax */ *buf++ = 0x89; *buf++ = 0xc7; /* shr edi, imm8 */ *buf++ = 0xc1; *buf++ = 0xef; *buf++ = ops[i].constant; /* xor eax, edi */ *buf++ = 0x31; *buf++ = 0xf8; break; case HF32_ADDL: /* mov edi, eax */ *buf++ = 0x89; *buf++ = 0xc7; /* shl edi, imm8 */ *buf++ = 0xc1; *buf++ = 0xe7; *buf++ = ops[i].constant; /* add eax, edi */ *buf++ = 0x01; *buf++ = 0xf8; break; case HF32_SUBL: /* mov edi, eax */ *buf++ = 0x89; *buf++ = 0xc7; /* shl edi, imm8 */ *buf++ = 0xc1; *buf++ = 0xe7; *buf++ = ops[i].constant; /* sub eax, edi */ *buf++ = 0x29; *buf++ = 0xf8; break; case HF64_NOT: /* not rax */ *buf++ = 0x48; *buf++ = 0xf7; *buf++ = 0xd0; break; case HF64_BSWAP: /* bswap rax */ *buf++ = 0x48; *buf++ = 0x0f; *buf++ = 0xc8; break; case HF64_XOR: /* mov rdi, imm64 */ *buf++ = 0x48; *buf++ = 0xbf; *buf++ = ops[i].constant >> 0; *buf++ = ops[i].constant >> 8; *buf++ = ops[i].constant >> 16; 
*buf++ = ops[i].constant >> 24; *buf++ = ops[i].constant >> 32; *buf++ = ops[i].constant >> 40; *buf++ = ops[i].constant >> 48; *buf++ = ops[i].constant >> 56; /* xor rax, rdi */ *buf++ = 0x48; *buf++ = 0x31; *buf++ = 0xf8; break; case HF64_MUL: /* mov rdi, imm64 */ *buf++ = 0x48; *buf++ = 0xbf; *buf++ = ops[i].constant >> 0; *buf++ = ops[i].constant >> 8; *buf++ = ops[i].constant >> 16; *buf++ = ops[i].constant >> 24; *buf++ = ops[i].constant >> 32; *buf++ = ops[i].constant >> 40; *buf++ = ops[i].constant >> 48; *buf++ = ops[i].constant >> 56; /* imul rax, rdi */ *buf++ = 0x48; *buf++ = 0x0f; *buf++ = 0xaf; *buf++ = 0xc7; break; case HF64_ADD: /* mov rdi, imm64 */ *buf++ = 0x48; *buf++ = 0xbf; *buf++ = ops[i].constant >> 0; *buf++ = ops[i].constant >> 8; *buf++ = ops[i].constant >> 16; *buf++ = ops[i].constant >> 24; *buf++ = ops[i].constant >> 32; *buf++ = ops[i].constant >> 40; *buf++ = ops[i].constant >> 48; *buf++ = ops[i].constant >> 56; /* add rax, rdi */ *buf++ = 0x48; *buf++ = 0x01; *buf++ = 0xf8; break; case HF64_ROT: /* rol rax, imm8 */ *buf++ = 0x48; *buf++ = 0xc1; *buf++ = 0xc0; *buf++ = ops[i].constant; break; case HF64_XORL: /* mov edi, eax */ *buf++ = 0x48; *buf++ = 0x89; *buf++ = 0xc7; /* shl rdi, imm8 */ *buf++ = 0x48; *buf++ = 0xc1; *buf++ = 0xe7; *buf++ = ops[i].constant; /* xor rax, rdi */ *buf++ = 0x48; *buf++ = 0x31; *buf++ = 0xf8; break; case HF64_XORR: /* mov rdi, rax */ *buf++ = 0x48; *buf++ = 0x89; *buf++ = 0xc7; /* shr rdi, imm8 */ *buf++ = 0x48; *buf++ = 0xc1; *buf++ = 0xef; *buf++ = ops[i].constant; /* xor rax, rdi */ *buf++ = 0x48; *buf++ = 0x31; *buf++ = 0xf8; break; case HF64_ADDL: /* mov rdi, rax */ *buf++ = 0x48; *buf++ = 0x89; *buf++ = 0xc7; /* shl rdi, imm8 */ *buf++ = 0x48; *buf++ = 0xc1; *buf++ = 0xe7; *buf++ = ops[i].constant; /* add rax, rdi */ *buf++ = 0x48; *buf++ = 0x01; *buf++ = 0xf8; break; case HF64_SUBL: /* mov rdi, rax */ *buf++ = 0x48; *buf++ = 0x89; *buf++ = 0xc7; /* shl rdi, imm8 */ *buf++ = 0x48; *buf++ = 0xc1; 
*buf++ = 0xe7; *buf++ = ops[i].constant; /* sub rax, rdi */ *buf++ = 0x48; *buf++ = 0x29; *buf++ = 0xf8; break; } } /* ret */ *buf++ = 0xc3; return buf; } static void * execbuf_alloc(void) { int prot = PROT_READ | PROT_WRITE; int flags = MAP_PRIVATE | MAP_ANONYMOUS; void *p = mmap(NULL, 4096, prot, flags, -1, 0); if (p == MAP_FAILED) { fprintf(stderr, "prospector: %s\n", strerror(errno)); exit(EXIT_FAILURE); } return p; } static enum { WXR_UNKNOWN, WXR_ENABLED, WXR_DISABLED } wxr_enabled = WXR_UNKNOWN; static void execbuf_lock(void *buf) { switch (wxr_enabled) { case WXR_UNKNOWN: if (!mprotect(buf, 4096, PROT_READ | PROT_WRITE | PROT_EXEC)) { wxr_enabled = WXR_DISABLED; return; } wxr_enabled = WXR_ENABLED; /* FALLTHROUGH */ case WXR_ENABLED: if (mprotect(buf, 4096, PROT_READ | PROT_EXEC)) { fprintf(stderr, "prospector: mprotect(PROT_EXEC) failed: %s\n", strerror(errno)); exit(EXIT_FAILURE); } break; case WXR_DISABLED: break; } } static void execbuf_unlock(void *buf) { switch (wxr_enabled) { case WXR_UNKNOWN: abort(); case WXR_ENABLED: mprotect(buf, 4096, PROT_READ | PROT_WRITE); break; case WXR_DISABLED: break; } } /* Higher quality is slower but has more consistent results. */ static int score_quality = 18; /* Measures how each input bit affects each output bit. This measures * both bias and avalanche. 
*/ static double estimate_bias32(uint32_t ABI (*f)(uint32_t), uint64_t rng[2]) { long n = 1L << score_quality; long bins[32][32] = {{0}}; for (long i = 0; i < n; i++) { uint32_t x = xoroshiro128plus(rng); uint32_t h0 = f(x); for (int j = 0; j < 32; j++) { uint32_t bit = UINT32_C(1) << j; uint32_t h1 = f(x ^ bit); uint32_t set = h0 ^ h1; for (int k = 0; k < 32; k++) bins[j][k] += (set >> k) & 1; } } double mean = 0; for (int j = 0; j < 32; j++) { for (int k = 0; k < 32; k++) { /* FIXME: normalize this somehow */ double diff = (bins[j][k] - n / 2) / (n / 2.0); mean += (diff * diff) / (32 * 32); } } return sqrt(mean) * 1000.0; } static double estimate_bias64(uint64_t ABI (*f)(uint64_t), uint64_t rng[2]) { long n = 1L << score_quality; long bins[64][64] = {{0}}; for (long i = 0; i < n; i++) { uint64_t x = xoroshiro128plus(rng); uint64_t h0 = f(x); for (int j = 0; j < 64; j++) { uint64_t bit = UINT64_C(1) << j; uint64_t h1 = f(x ^ bit); uint64_t set = h0 ^ h1; for (int k = 0; k < 64; k++) bins[j][k] += (set >> k) & 1; } } double mean = 0; for (int j = 0; j < 64; j++) { for (int k = 0; k < 64; k++) { /* FIXME: normalize this somehow */ double diff = (bins[j][k] - n / 2) / (n / 2.0); mean += (diff * diff) / (64 * 64); } } return sqrt(mean) * 1000.0; } #define EXACT_SPLIT 32 // must be power of two static double exact_bias32(uint32_t ABI (*f)(uint32_t)) { long long bins[32][32] = {{0}}; static const uint64_t range = (UINT64_C(1) << 32) / EXACT_SPLIT; #pragma omp parallel for for (int i = 0; i < EXACT_SPLIT; i++) { long long b[32][32] = {{0}}; for (uint64_t x = i * range; x < (i + 1) * range; x++) { uint32_t h0 = f(x); for (int j = 0; j < 32; j++) { uint32_t bit = UINT32_C(1) << j; uint32_t h1 = f(x ^ bit); uint32_t set = h0 ^ h1; for (int k = 0; k < 32; k++) b[j][k] += (set >> k) & 1; } } #pragma omp critical for (int j = 0; j < 32; j++) for (int k = 0; k < 32; k++) bins[j][k] += b[j][k]; } double mean = 0.0; for (int j = 0; j < 32; j++) { for (int k = 0; k < 32; k++) { 
double diff = (bins[j][k] - 2147483648L) / 2147483648.0; mean += (diff * diff) / (32 * 32); } } return sqrt(mean) * 1000.0; } static void usage(FILE *f) { fprintf(f, "usage: prospector " "[-E|L|S] [-4|-8] [-ehs] [-l lib] [-p pattern] [-r n:m] [-t x]\n"); fprintf(f, " -4 Generate 32-bit hash functions (default)\n"); fprintf(f, " -8 Generate 64-bit hash functions\n"); fprintf(f, " -e Measure bias exactly (requires -E)\n"); fprintf(f, " -h Print this help message\n"); fprintf(f, " -l ./lib.so Load hash() from a shared object\n"); fprintf(f, " -p pattern Search only a given pattern\n"); fprintf(f, " -q n Score quality knob (12-30, default: 18)\n"); fprintf(f, " -r n:m Use between n and m operations [3:6]\n"); fprintf(f, " -s Don't use large constants\n"); fprintf(f, " -t x Initial score threshold [10.0]\n"); fprintf(f, " -E Single evaluation mode (requires -p or -l)\n"); fprintf(f, " -S Hash function search mode (default)\n"); fprintf(f, " -L Enumerate output mode (requires -p or -l)\n"); } static int parse_operand(struct hf_op *op, char *buf) { op->flags |= FOP_LOCKED; switch (op->type) { case HF32_NOT: case HF64_NOT: case HF32_BSWAP: case HF64_BSWAP: return 0; case HF32_XOR: case HF32_MUL: case HF32_ADD: case HF64_XOR: case HF64_MUL: case HF64_ADD: op->constant = strtoull(buf, 0, 16); return 1; case HF32_ROT: case HF32_XORL: case HF32_XORR: case HF32_ADDL: case HF32_SUBL: case HF64_ROT: case HF64_XORL: case HF64_XORR: case HF64_ADDL: case HF64_SUBL: op->constant = atoi(buf); return 1; } return 0; } static int parse_template(struct hf_op *ops, int n, char *template, int flags) { int c = 0; int offset = flags & F_U64 ? 
HF64_XOR : 0; for (char *tok = strtok(template, ","); tok; tok = strtok(0, ",")) { if (c == n) return 0; int found = 0; size_t operand = strcspn(tok, ":"); int sep = tok[operand]; tok[operand] = 0; ops[c].flags = 0; for (int i = 0; i < countof(hf_names); i++) { if (!strcmp(hf_names[i] + 2, tok)) { found = 1; ops[c].type = i + offset; break; } } if (!found) return 0; if (sep == ':' && !parse_operand(ops + c, tok + operand + 1)) return 0; c++; } return c; } static void * load_function(const char *so) { void *handle = dlopen(so, RTLD_NOW); if (!handle) { fprintf(stderr, "prospector: could not load %s\n", so); exit(EXIT_FAILURE); } void *f = dlsym(handle, "hash"); if (!f) { fprintf(stderr, "prospector: could not find 'hash' in %s\n", so); exit(EXIT_FAILURE); } return f; } static uint64_t uepoch(void) { struct timeval tv; gettimeofday(&tv, NULL); return 1000000LL * tv.tv_sec + tv.tv_usec; } int main(int argc, char **argv) { int nops = 0; int min = 3; int max = 6; int flags = 0; int use_exact = 0; double best = 100.0; char *dynamic = 0; char *template = 0; struct hf_op ops[32]; void *buf = execbuf_alloc(); uint64_t rng[2] = {0x2a2bc037b59ff989, 0x6d7db86fa2f632ca}; enum {MODE_SEARCH, MODE_EVAL, MODE_LIST} mode = MODE_SEARCH; int option; while ((option = getopt(argc, argv, "48EehLl:q:r:st:p:")) != -1) { switch (option) { case '4': flags &= ~F_U64; break; case '8': flags |= F_U64; break; case 'E': mode = MODE_EVAL; break; case 'e': use_exact = 1; break; case 'h': usage(stdout); exit(EXIT_SUCCESS); break; case 'L': mode = MODE_LIST; break; case 'l': dynamic = optarg; break; case 'p': template = optarg; break; case 'r': if (sscanf(optarg, "%d:%d", &min, &max) != 2 || min < 1 || max > countof(ops) || min > max) { fprintf(stderr, "prospector: invalid range (-r): %s\n", optarg); exit(EXIT_FAILURE); } break; case 'q': score_quality = atoi(optarg); if (score_quality < 12 || score_quality > 30) { fprintf(stderr, "prospector: invalid quality: %s\n", optarg); exit(EXIT_FAILURE); } 
break; case 'S': mode = MODE_SEARCH; break; case 's': flags |= F_TINY; break; case 't': best = strtod(optarg, 0); break; default: usage(stderr); exit(EXIT_FAILURE); } } /* Get a unique seed */ FILE *urandom = fopen("/dev/urandom", "rb"); if (urandom) { if (!fread(rng, sizeof(rng), 1, urandom)) { fputs("prospector: failed to read /dev/urandom\n", stderr); exit(EXIT_FAILURE); } fclose(urandom); } if (template) { nops = parse_template(ops, countof(ops), template, flags); if (!nops) { fprintf(stderr, "prospector: invalid template\n"); exit(EXIT_FAILURE); } } if (mode == MODE_EVAL) { double bias; void *hashptr = 0; if (template) { hf_randfunc(ops, nops, rng); hf_compile(ops, nops, buf); execbuf_lock(buf); hashptr = buf; } else if (dynamic) { hashptr = load_function(dynamic); } else { fprintf(stderr, "prospector: must supply -p or -l\n"); exit(EXIT_FAILURE); } uint64_t nhash; uint64_t beg = uepoch(); if (flags & F_U64) { uint64_t ABI (*hash)(uint64_t) = hashptr; if (use_exact) fputs("warning: no exact bias for 64-bit\n", stderr); bias = estimate_bias64(hash, rng); nhash = (1L << score_quality) * 33; } else { uint32_t ABI (*hash)(uint32_t) = hashptr; if (use_exact) { bias = exact_bias32(hash); nhash = (1LL << 32) * 33; } else { bias = estimate_bias32(hash, rng); nhash = (1L << score_quality) * 65; } } uint64_t end = uepoch(); printf("bias = %.17g\n", bias); printf("speed = %.3f nsec / hash\n", (end - beg) * 1000.0 / nhash); return 0; } if (mode == MODE_LIST) { void *hashptr = 0; if (template) { hf_randfunc(ops, nops, rng); hf_compile(ops, nops, buf); execbuf_lock(buf); hashptr = buf; } else if (dynamic) { hashptr = load_function(dynamic); } else { fprintf(stderr, "prospector: must supply -p or -l\n"); exit(EXIT_FAILURE); } if (flags & F_U64) { uint64_t ABI (*hash)(uint64_t) = hashptr; uint64_t i = 0; do printf("%016llx %016llx\n", (unsigned long long)i, (unsigned long long)hash(i)); while (++i); } else { uint32_t ABI (*hash)(uint32_t) = hashptr; uint32_t i = 0; do 
printf("%08lx %08lx\n", (unsigned long)i, (unsigned long)hash(i)); while (++i); } return 0; } for (;;) { /* Generate */ if (template) { hf_randfunc(ops, nops, rng); } else { nops = min + xoroshiro128plus(rng) % (max - min + 1); hf_genfunc(ops, nops, flags, rng); } /* Evaluate */ double score; hf_compile(ops, nops, buf); execbuf_lock(buf); if (flags & F_U64) { uint64_t ABI (*hash)(uint64_t) = (void *)buf; score = estimate_bias64(hash, rng); } else { uint32_t ABI (*hash)(uint32_t) = (void *)buf; score = estimate_bias32(hash, rng); } execbuf_unlock(buf); /* Compare */ if (score < best) { printf("// score = %.17g\n", score); hf_printfunc(ops, nops, stdout); fflush(stdout); best = score; } } }
Example_tasking.2.c
/*
* @@name:       tasking.2c
* @@type:       C
* @@compilable: yes
* @@linkable:   no
* @@expect:     success
* @@version:    omp_3.0
*/

/* Binary tree node; absent children are NULL. */
struct node {
  struct node *left;
  struct node *right;
};

/* Per-node visitor, defined elsewhere. */
extern void process(struct node *);

/* Post-order traversal: each subtree is traversed in its own task, and the
 * taskwait guarantees both child traversals (and, recursively, their
 * descendants' processing) have completed before this node is processed. */
void postorder_traverse( struct node *p ) {
   if (p->left)
     #pragma omp task   // p is firstprivate by default
       postorder_traverse(p->left);
   if (p->right)
     #pragma omp task   // p is firstprivate by default
       postorder_traverse(p->right);
   #pragma omp taskwait
   process(p);
}
GB_binop__times_uint64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): every kernel body below is produced by a "GB_*_template.c"
// file textually #include'd at the marked spot, driven by the GB_* macros
// defined here.  This translation unit is not meaningful in isolation from
// the GraphBLAS source tree; change Generator/GB_binop.c, not this file.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__times_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_08__times_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_02__times_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_04__times_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__times_uint64)
// A*D function (colscale):         GB (_AxD__times_uint64)
// D*A function (rowscale):         GB (_DxB__times_uint64)
// C+=B function (dense accum):     GB (_Cdense_accumB__times_uint64)
// C+=b function (dense accum):     GB (_Cdense_accumb__times_uint64)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__times_uint64)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__times_uint64)
// C=scalar+B                       GB (_bind1st__times_uint64)
// C=scalar+B'                      GB (_bind1st_tran__times_uint64)
// C=A+scalar                       GB (_bind2nd__times_uint64)
// C=A'+scalar                      GB (_bind2nd_tran__times_uint64)

// C type:   uint64_t
// A type:   uint64_t
// A pattern? 0
// B type:   uint64_t
// B pattern? 0

// BinaryOp: cij = (aij * bij)

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x * y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TIMES || GxB_NO_UINT64 || GxB_NO_TIMES_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__times_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // kernel loop comes from the included template
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__times_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__times_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_* controls; caller falls back to the
    // generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__times_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns); kept as
    // emitted by the generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__times_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__times_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__times_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint64_t alpha_scalar ;
    uint64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion substitutes these scalars for entries missing from A/B
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__times_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__times_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__times_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__times_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__times_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t  x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap (Bb == NULL means all present)
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x * bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__times_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t  y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij * y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x * aij) ;               \
}

GrB_Info GB (_bind1st_tran__times_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this kernel
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij * y) ;               \
}

GrB_Info GB (_bind2nd_tran__times_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
omp_for_firstprivate_nothreadprivate.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <math.h> #include "omp_testsuite.h" int test_omp_for_firstprivate() { int sum; int sum0; int known_sum; int threadsnum; sum = 0; sum0 = 12345; #pragma omp parallel { int sum1 = 0; #pragma omp single { threadsnum=omp_get_num_threads(); } /* sum0 = 0; */ int i; #pragma omp for firstprivate(sum0) for (i = 1; i <= LOOPCOUNT; i++) { sum0 = sum0 + i; sum1 = sum0; } /* end of for */ #pragma omp critical { sum = sum + sum1; } /* end of critical */ } /* end of parallel */ known_sum = 12345* threadsnum+ (LOOPCOUNT * (LOOPCOUNT + 1)) / 2; return (known_sum == sum); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_for_firstprivate()) { num_failed++; } } return num_failed; }
main.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

/*
 * Busy-work kernel: repeatedly writes sin(i*i) into a 1000-slot ring buffer
 * across a large (optionally parallel) loop.  Exists purely to burn CPU time
 * for the timing loop in main(); the buffer contents are never read.
 *
 * Returns 0 on completion.  (The original declared int but fell off the end
 * without a return statement -- undefined behavior if the caller ever used
 * the value.)
 */
int slow_function_with_mp()
{
  double stuff[1000];
  long i;

  /* NOTE(review): threads may race on the same stuff[i % 1000] slot; benign
   * here because the results are discarded, but do not reuse this pattern
   * where the output matters. */
  #pragma omp parallel for private(i) shared(stuff)
  for (i = 0; i < 9999999; i++) {
    stuff[i % 1000] = sin(i*i);
  }
  return 0;
}

/*
 * Times slow_function_with_mp() under thread counts cycling through 1..9 and
 * prints one line per run.  Standard-conforming entry point (the original
 * used the non-standard `void main`).
 */
int main(int argc, char **argv)
{
  double start, end;
  int i;

  (void)argc;   /* unused */
  (void)argv;   /* unused */

  for (i = 0; i < 100; i++) {
    int nthreads = i % 10;
    /* omp_set_num_threads() requires a positive argument; the original
     * passed 0 on every tenth iteration, which is non-conforming.  Clamp
     * to 1 while keeping the printed label identical to the original. */
    omp_set_num_threads(nthreads > 0 ? nthreads : 1);
    start = omp_get_wtime();
    slow_function_with_mp();
    end = omp_get_wtime();
    /* NOTE(review): the 0.01 scale factor is preserved from the original;
     * it looks like an averaging attempt but only one run is timed --
     * confirm intent before relying on the printed numbers. */
    printf("Threads: %03i Duration: %lf\n", nthreads, (end - start) * 0.01);
  }
  exit(EXIT_SUCCESS);
}
pr34513.c
/* PR c++/34513 */
/* { dg-do run } */

#include <omp.h>

extern void abort ();

static int errors = 0;
static int thrs = 4;

int
main ()
{
  /* Disable dynamic thread adjustment so the region gets exactly `thrs`
   * threads; the check below depends on that count. */
  omp_set_dynamic (0);
#pragma omp parallel num_threads (thrs)
  {
    /* Static storage duration: a single copy shared by all threads, even
     * though it is declared inside the parallel region -- this is the
     * behavior the PR exercises. */
    static int shrd = 0;
#pragma omp atomic
    shrd += 1;
#pragma omp barrier
    /* After the barrier every thread must observe all `thrs` increments. */
    if (shrd != thrs)
#pragma omp atomic
      errors += 1;
  }
  if (errors)
    abort ();
  return 0;
}
GB_binop__ne_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): kernel bodies come from "GB_*_template.c" files textually
// #include'd below.  Because C here is bool while A and B are int8_t, the
// dense-accum variants are compiled out with #if 0 (the generator emits
// "(none)" for them); change Generator/GB_binop.c, not this file.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__ne_int8)
// A.*B function (eWiseMult):       GB (_AemultB_08__ne_int8)
// A.*B function (eWiseMult):       GB (_AemultB_02__ne_int8)
// A.*B function (eWiseMult):       GB (_AemultB_04__ne_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__ne_int8)
// A*D function (colscale):         GB (_AxD__ne_int8)
// D*A function (rowscale):         GB (_DxB__ne_int8)
// C+=B function (dense accum):     GB (_Cdense_accumB__ne_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__ne_int8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__ne_int8)
// C=scalar+B                       GB (_bind1st__ne_int8)
// C=scalar+B'                      GB (_bind1st_tran__ne_int8)
// C=A+scalar                       GB (_bind2nd__ne_int8)
// C=A'+scalar                      GB (_bind2nd_tran__ne_int8)

// C type:   bool
// A type:   int8_t
// A pattern? 0
// B type:   int8_t
// B pattern? 0

// BinaryOp: cij = (aij != bij)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x != y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_NE || GxB_NO_INT8 || GxB_NO_NE_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NE does not qualify, so this kernel is compiled out.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // compiled out: C (bool) and B (int8_t) differ, so dense accum with
    // this op is not supported here
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__ne_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__ne_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion substitutes these scalars for entries missing from A/B
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__ne_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__ne_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__ne_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int8_t   x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries absent from the bitmap (Bb == NULL means all present)
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__ne_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t   y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int8_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = (x != aij) ;              \
}

GrB_Info GB (_bind1st_tran__ne_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this kernel
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int8_t aij = GBX (Ax, pA, false) ;  \
    Cx [pC] = (aij != y) ;              \
}

GrB_Info GB (_bind2nd_tran__ne_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
main.c
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <omp.h>

/* Lattice dimension and particle count (4 atoms per fcc unit cell).
 * NOTE(review): npart expands unparenthesized (4*mm*mm*mm); harmless in the
 * expressions used below, but fragile if ever used as e.g. `x / npart`. */
#define mm 15
#define npart 4*mm*mm*mm

/*
 * Function declarations
 * (all defined in other translation units of this program)
 */

void dfill(int,double,double[],int);
void domove(int,double[],double[],double[],double);
void dscal(int,double,double[],int);
void fcc(double[],int,int,double);
void forces(int,double[],double[],double,double);
double mkekin(int,double[],double[],double,double);
void mxwell(double[],int,double,double);
void prnout(int,double,double,double,double,double,double,int,double);
double velavg(int,double[],double,double);
double secnds(void);

/*
 * Variable declarations
 */

double epot;    /* potential energy -- presumably accumulated by forces(); confirm */
double vir;     /* virial -- presumably accumulated by forces(); confirm */
double count;   /* presumably updated by velavg(); confirm */

/*
 * Main program : Molecular Dynamics simulation.
 */
int main()
{
  int move;
  /* NOTE(review): ~972 KB of automatic storage (3 arrays of npart*3 = 40500
   * doubles); close to typical stack limits -- consider static/heap storage. */
  double x[npart*3], vh[npart*3], f[npart*3];
  double ekin;    /* kinetic energy, from mkekin() */
  double vel;     /* average velocity, from velavg() */
  double sc;      /* velocity rescale factor */
  double start, time;

  /*
   * Parameter definitions
   */

  double den    = 0.83134;                          /* density */
  double side   = pow((double)npart/den,0.3333333); /* box side length */
  double tref   = 0.722;                            /* reference temperature */
  double rcoff  = (double)mm/4.0;                   /* interaction cut-off */
  double h      = 0.064;                            /* timestep */
  int    irep   = 10;                               /* temperature-scale interval */
  int    istop  = 20;                               /* last move with scaling */
  int    iprint = 5;                                /* print interval */
  int    movemx = 20;                               /* total number of moves */

  double a      = side/(double)mm;                  /* fcc cell size */
  double hsq    = h*h;
  double hsq2   = hsq*0.5;
  double tscale = 16.0/((double)npart-1.0);
  double vaver  = 1.13*sqrt(tref/24.0);

  /*
   * Initial output
   */

  printf(" Molecular Dynamics Simulation example program\n");
  printf(" ---------------------------------------------\n");
  printf(" number of particles is ............ %6d\n",npart);
  printf(" side length of the box is ......... %13.6f\n",side);
  printf(" cut off is ........................ %13.6f\n",rcoff);
  printf(" reduced temperature is ............ %13.6f\n",tref);
  printf(" basic timestep is ................. %13.6f\n",h);
  printf(" temperature scale interval ........ %6d\n",irep);
  printf(" stop scaling at move .............. %6d\n",istop);
  printf(" print interval .................... %6d\n",iprint);
  printf(" total no. of steps ................ %6d\n",movemx);

  /*
   * Generate fcc lattice for atoms inside box
   */
  fcc(x, npart, mm, a);

  /*
   * Initialise velocities and forces (which are zero in fcc positions)
   */
  mxwell(vh, 3*npart, h, tref);
  dfill(3*npart, 0.0, f, 1);

  /*
   * Start of md
   */
  printf("\n i ke pe e temp " " pres vel rp\n ----- ---------- ----------" " ---------- -------- -------- -------- ----\n");

  start = secnds();

  for (move=1; move<=movemx; move++) {

    /*
     * Move the particles and partially update velocities
     */
    domove(3*npart, x, vh, f, side);

    /*
     * Compute forces in the new positions and accumulate the virial
     * and potential energy.
     * NOTE(review): forces() is called from inside a parallel region with no
     * worksharing construct here -- presumably it contains an orphaned
     * `omp for` (otherwise every thread would redo and race on the full
     * computation); confirm in forces.c.
     */
    #pragma omp parallel default(none) shared(x, f, side, rcoff) num_threads(4)
    {
      forces(npart, x, f, side, rcoff);
    }

    /*
     * Scale forces, complete update of velocities and compute k.e.
     */
    ekin=mkekin(npart, f, vh, hsq2, hsq);

    /*
     * Average the velocity and temperature scale if desired
     */
    vel=velavg(npart, vh, vaver, h);
    if (move<istop && fmod(move, irep)==0) {
      sc=sqrt(tref/(tscale*ekin));
      dscal(3*npart, sc, vh, 1);
      ekin=tref/tscale;
    }

    /*
     * Sum to get full potential energy and virial
     */
    if (fmod(move, iprint)==0) {
      prnout(move, ekin, epot, tscale, vir, vel, count, npart, den);
    }
  }

  time = secnds() - start;  /* wall-clock seconds for the whole run */
  printf("Time = %f\n",(float) time);
}

/* NOTE(review): unused in this file; presumably left over from a
 * non-OpenMP timer implementation. */
time_t starttime = 0;

/* Wall-clock seconds; wraps omp_get_wtime() so the rest of the program
 * does not depend on OpenMP directly. */
double secnds()
{
  return omp_get_wtime();
}
pkzip_fmt_plug.c
/* PKZIP patch for john to handle 'old' pkzip passwords (old 'native' format) * * Written by Jim Fougeron <jfoug at cox.net> in 2011. No copyright * is claimed, and the software is hereby placed in the public domain. * In case this attempt to disclaim copyright and place the software in the * public domain is deemed null and void, then the software is * Copyright (c) 2011 Jim Fougeron and it is hereby released to the * general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * There's ABSOLUTELY NO WARRANTY, express or implied. * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_pkzip; #elif FMT_REGISTERS_H john_register_one(&fmt_pkzip); #else #include <string.h> #include "common.h" #include "arch.h" #include "misc.h" #include "formats.h" #define USE_PKZIP_MAGIC 1 #include "pkzip.h" #include "zlib.h" #include "pkzip_inffixed.h" // This file is a data file, taken from zlib #include "loader.h" #ifdef _OPENMP #include <omp.h> #endif #include "memdbg.h" #define FORMAT_LABEL "PKZIP" #define FORMAT_NAME "" #define ALGORITHM_NAME "32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1000 #define PLAINTEXT_LENGTH 31 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define SALT_SIZE (sizeof(PKZ_SALT*)) #define SALT_ALIGN 4 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 64 #define OMP_SCALE 64 //#define ZIP_DEBUG 1 //#define ZIP_DEBUG 2 /* * It is likely that this should be put into the arch.h files for the different systems, * IF we find a system which operates faster doing the non-table work. * However, in current testing, it is always faster to use the multiply table. It only * takes 16kb, and almost always stays in the cache for any system newer than a 386. 
*/ #define PKZIP_USE_MULT_TABLE #if ARCH_LITTLE_ENDIAN #define KB1 0 #define KB2 3 #else #define KB1 3 #define KB2 0 #endif /* * filename:$pkzip$C*B*[DT*MT{CL*UL*CR*OF*OX}*CT*DL*CS*DA]*$/pkzip$ (deprecated) * filename:$pkzip2$C*B*[DT*MT{CL*UL*CR*OF*OX}*CT*DL*CS*TC*DA]*$/pkzip2$ (new format, with 2 checksums) * * All numeric and 'binary data' fields are stored in hex. * * C is the count of hashes present (the array of items, inside the [] C can be 1 to 3.). * B is number of valid bytes in the checksum (1 or 2). Unix zip is 2 bytes, all others are 1 * ARRAY of data starts here (there will be C array elements) * DT is a "Data Type enum". This will be 1 2 or 3. 1 is 'partial'. 2 and 3 are full file data (2 is inline, 3 is load from file). * MT Magic Type enum. 0 is no 'type'. 255 is 'text'. Other types (like MS Doc, GIF, etc), see source. * NOTE, CL, DL, CRC, OFF are only present if DT != 1 * CL Compressed length of file blob data (includes 12 byte IV). * UL Uncompressed length of the file. * CR CRC32 of the 'final' file. * OF Offset to the PK\x3\x4 record for this file data. If DT==2, then this will be a 0, as it is not needed, all of the data is already included in the line. * OX Additional offset (past OF), to get to the zip data within the file. * END OF 'optional' fields. * CT Compression type (0 or 8) 0 is stored, 8 is imploded. * DL Length of the DA data. * CS Checksum from crc32. * TC Checksum from timestamp * DA This is the 'data'. It will be hex data if DT==1 or 2. If DT==3, then it is a filename (name of the .zip file). * END of array items. * The format string will end with $/pkzip$ * * NOTE, after some code testing, it has come to show, that the 'magic' may not be needed, or very useful. The problem with it, is IF the file * ends up NOT starting with any of the magic values, then we will have a false negative, and NEVER be able to crack the zip's password. For now * we have a #define (right before the #include "pkzip.h"). 
If that define is uncommented, then pkzip format will be built with magic logic.
 * However, right now it is not being built that way.
 *
 */

/* Self-test vectors: "$pkzip$..."/"$pkzip2$..." hash lines paired with their
   known plaintext passwords, consumed by the JtR self-test harness. */
static struct fmt_tests tests[] = {
	/* compression of a perl file. We have the same password, same file used twice in a row (pkzip, 1 byte checksum). NOTE, pkzip uses random IV, so both encrypted blobs are different */
	{"\
$pkzip$1*1*2*0*e4*1c5*eda7a8de*0*4c*8*e4*eda7*194883130e4c7419bd735c53dec36f0c4b6de6daefea0f507d67ff7256a49b5ea93ccfd9b12f2ee99053ee0b1c9e1c2b88aeaeb6bd4e60094a1ea118785d4ded6dae94\
cade41199330f4f11b37cba7cda5d69529bdfa43e2700ba517bd2f7ff4a0d4b3d7f2559690ec044deb818c44844d6dd50adbebf02cec663ae8ebb0dde05d2abc31eaf6de36a2fc19fda65dd6a7e449f669d1f8c75e9daa0a3f7b\
e8feaa43bf84762d6dbcc9424285a93cedfa3a75dadc11e969065f94fe3991bc23c9b09eaa5318aa29fa02e83b6bee26cafec0a5e189242ac9e562c7a5ed673f599cefcd398617*$/pkzip$", "password" },
	{"\
$pkzip$1*1*2*0*e4*1c5*eda7a8de*0*4c*8*e4*eda7*581f798527109cbadfca0b3318435a000be84366caf9723f841a2b13e27c2ed8cdb5628705a98c3fbbfb34552ed498c51a172641bf231f9948bca304a6be2138ab718f\
6a5b1c513a2fb80c49030ff1a404f7bd04dd47c684317adea4107e5d70ce13edc356c60bebd532418e0855428f9dd582265956e39a0b446a10fd8b7ffb2b4af559351bbd549407381c0d2acc270f3bcaffb275cbe2f628cb09e2\
978e87cd023d4ccb50caaa92b6c952ba779980d65f59f664dde2451cc456d435188be59301a5df1b1b4fed6b7509196334556c44208a9d7e2d9e237f591d6c9fc467b408bf0aaa*$/pkzip$", "password" },
	/* Now the same file, compressed twice, using unix zip (info-zip), with 2 byte checksums */
	{"\
$pkzip$1*2*2*0*e4*1c5*eda7a8de*0*47*8*e4*4bb6*436c9ffa4328870f6272349b591095e1b1126420c3041744650282bc4f575d0d4a5fc5fb34724e6a1cde742192387b9ed749ab5c72cd6bb0206f102e9216538f095fb7\
73661cfde82c2e2a619332998124648bf4cd0da56279f0c297567d9b5d684125ee92920dd513fd18c27afba2a9633614f75d8f8b9a14095e3fafe8165330871287222e6681dd9c0f830cf5d464457b257d0900eed29107fad8af\
3ac4f87cf5af5183ff0516ccd9aeac1186006c8d11b18742dfb526aadbf2906772fbfe8fb18798967fd397a724d59f6fcd4c32736550986d227a6b447ef70585c049a1a4d7bf25*$/pkzip$", "password" },
	{"\
$pkzip$1*2*2*0*e4*1c5*eda7a8de*0*47*8*e4*4bb6*436c9ffa4328870f6272349b591095e1b1126420c3041744650282bc4f575d0d4a5fc5fb34724e6a1cde742192387b9ed749ab5c72cd6bb0206f102e9216538f095fb7\
73661cfde82c2e2a619332998124648bf4cd0da56279f0c297567d9b5d684125ee92920dd513fd18c27afba2a9633614f75d8f8b9a14095e3fafe8165330871287222e6681dd9c0f830cf5d464457b257d0900eed29107fad8af\
3ac4f87cf5af5183ff0516ccd9aeac1186006c8d11b18742dfb526aadbf2906772fbfe8fb18798967fd397a724d59f6fcd4c32736550986d227a6b447ef70585c049a1a4d7bf25*$/pkzip$", "password"},
	/* now a pkzip archive, with 3 files, 1 byte checksum */
	{"\
$pkzip$3*1*1*0*8*24*4001*8986ec4d693e86c1a42c1bd2e6a994cb0b98507a6ec937fe0a41681c02fe52c61e3cc046*1*0*8*24*4003*a087adcda58de2e14e73db0043a4ff0ed3acc6a9aee3985d7cb81d5ddb32b840ea20\
57d9*2*0*e4*1c5*eda7a8de*0*4c*8*e4*eda7*89a792af804bf38e31fdccc8919a75ab6eb75d1fd6e7ecefa3c5b9c78c3d50d656f42e582af95882a38168a8493b2de5031bb8b39797463cb4769a955a2ba72abe48ee75b103\
f93ef9984ae740559b9bd84cf848d693d86acabd84749853675fb1a79edd747867ef52f4ee82435af332d43f0d0bb056c49384d740523fa75b86a6d29a138da90a8de31dbfa89f2f6b0550c2b47c43d907395904453ddf42a665\
b5f7662de170986f89d46d944b519e1db9d13d4254a6b0a5ac02b3cfdd468d7a4965e4af05699a920e6f3ddcedb57d956a6b2754835b14e174070ba6aec4882d581c9f30*$/pkzip$", "3!files"},
	/* following are from CMIYC 2012 */
	{"$pkzip$1*1*2*0*163*2b5*cd154083*0*26*8*163*cd15*d6b094794b40116a8b387c10159225d776f815b178186e51faf16fa981fddbffdfa22f6c6f32d2f81dab35e141f2899841991f3cb8d53f8ee1f1d85657f7c7a82ebb2d63182803c6beee00e0bf6c72edeeb1b00dc9f07f917bb8544cc0e96ca01503cd0fb6632c296cebe3fb9b64543925daae6b7ea95cfd27c42f6f3465e0ab2c812b9aeeb15209ce3b691f27ea43a7a77b89c2387e31c4775866a044b6da783af8ddb72784ccaff4d9a246db96484e865ea208ade290b0131b4d2dd21f172693e6b5c90f2eb9b67572b55874b6d3a78763212b248629e744c07871a6054e24ef74b6d779e44970e1619df223b4e5a72a189bef40682b62be6fb7f65e087ca6ee19d1ebfc259fa7e3d98f3cb99347689f8360294352accffb146edafa9e91afba1f119f95145738ac366b332743d4ff40d49fac42b8758c43b0af5b60b8a1c63338359ffbff432774f2c92de3f8c49bd4611e134db98e6a3f2cfb148d2b20f75abab6*$/pkzip$", "passwort"},
	{"$pkzip$1*1*2*0*163*2b6*46abc149*0*28*8*163*46ab*0f539b23b761a347a329f362f7f1f0249515f000404c77ec0b0ffe06f29140e8fa3e8e5a6354e57f3252fae3d744212d4d425dc44389dd4450aa9a4f2f3c072bee39d6ac6662620812978f7ab166c66e1acb703602707ab2da96bb28033485ec192389f213e48eda8fc7d9dad1965b097fafebfda6703117db90e0295db9a653058cb28215c3245e6e0f6ad321065bf7b8cc5f66f6f2636e0d02ea35a6ba64bbf0191c308098fd836e278abbce7f10c3360a0a682663f59f92d9c2dcfc87cde2aae27ea18a14d2e4a0752b6b51e7a5c4c8c2bab88f4fb0aba27fb20e448655021bb3ac63752fdb01e6b7c99f9223f9e15d71eb1bd8e323f522fc3da467ff0aae1aa17824085d5d6f1cdfc9c7c689cd7cb057005d94ba691f388484cfb842c8775baac220a5490ed945c8b0414dbfc4589254b856aade49f1aa386db86e9fc87e6475b452bd72c5e2122df239f8c2fd462ca54c1a5bddac36918c5f5cf0cc94aa6ee820*$/pkzip$", "Credit11"},
	{"$pkzip$1*1*2*0*163*2b6*46abc149*0*26*8*163*46ab*7ea9a6b07ddc9419439311702b4800e7e1f620b0ab8535c5aa3b14287063557b176cf87a800b8ee496643c0b54a77684929cc160869db4443edc44338294458f1b6c8f056abb0fa27a5e5099e19a07735ff73dc91c6b20b05c023b3ef019529f6f67584343ac6d86fa3d12113f3d374b047efe90e2a325c0901598f31f7fb2a31a615c51ea8435a97d07e0bd4d4afbd228231dbc5e60bf1116ce49d6ce2547b63a1b057f286401acb7c21afbb673f3e26bc1b2114ab0b581f039c2739c7dd0af92c986fc4831b6c294783f1abb0765cf754eada132df751cf94cad7f29bb2fec0c7c47a7177dea82644fc17b455ba2b4ded6d9a24e268fcc4545cae73b14ceca1b429d74d1ebb6947274d9b0dcfb2e1ac6f6b7cd2be8f6141c3295c0dbe25b65ff89feb62cb24bd5be33853b88b8ac839fdd295f71e17a7ae1f054e27ba5e60ca03c6601b85c3055601ce41a33127938440600aaa16cfdd31afaa909fd80afc8690aaf*$/pkzip$", "7J0rdan!!"},
	/* CMIYC 2013 "pro" hard hash */
	{"$pkzip$1*2*2*0*6b*73*8e687a5b*0*46*8*6b*0d9d*636fedc7a78a7f80cda8542441e71092d87d13da94c93848c230ea43fab5978759e506110b77bd4bc10c95bc909598a10adfd4febc0d42f3cd31e4fec848d6f49ab24bb915cf939fb1ce09326378bb8ecafde7d3fe06b6013628a779e017be0f0ad278a5b04e41807ae9fc*$/pkzip$", "c00rslit3!"},
	/* http://corkami.googlecode.com/files/ChristmasGIFts.zip (fixed with 2 byte checksums from timestamp, using new $pkzip2$ type) */
	{"$pkzip2$3*2*1*2*8*c0*7224*72f6*6195f9f3401076b22f006105c4323f7ac8bb8ebf8d570dc9c7f13ddacd8f071783f6bef08e09ce4f749af00178e56bc948ada1953a0263c706fd39e96bb46731f827a764c9d55945a89b952f0503747703d40ed4748a8e5c31cb7024366d0ef2b0eb4232e250d343416c12c7cbc15d41e01e986857d320fb6a2d23f4c44201c808be107912dbfe4586e3bf2c966d926073078b92a2a91568081daae85cbcddec75692485d0e89994634c71090271ac7b4a874ede424dafe1de795075d2916eae*1*6*8*c0*26ee*461b*944bebb405b5eab4322a9ce6f7030ace3d8ec776b0a989752cf29569acbdd1fb3f5bd5fe7e4775d71f9ba728bf6c17aad1516f3aebf096c26f0c40e19a042809074caa5ae22f06c7dcd1d8e3334243bca723d20875bd80c54944712562c4ff5fdb25be5f4eed04f75f79584bfd28f8b786dd82fd0ffc760893dac4025f301c2802b79b3cb6bbdf565ceb3190849afdf1f17688b8a65df7bc53bc83b01a15c375e34970ae080307638b763fb10783b18b5dec78d8dfac58f49e3c3be62d6d54f9*2*0*2a*1e*4a204eab*ce8*2c*0*2a*4a20*7235*6b6e1a8de47449a77e6f0d126b217d6b2b72227c0885f7dc10a2fb3e7cb0e611c5c219a78f98a9069f30*$/pkzip2$", "123456"},
	{NULL}
};

/* these static fields are used in the crypt_all loop, and the cmp_all/cmp_one we */
/* perform the pkzip 'checksum' checking. If we do get a 'hit', then that pass & */
/* salt pair is checked fully within the cmp_exact, where it gets inflated and */
/* checked (possibly also a 'sample TEXT record is done first, as a quick check */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];   /* candidate passwords, one per index */
static u32 *K12;                                  /* 3 x 32-bit PKZIP cipher key words per candidate (key0/key1/key2) */
static PKZ_SALT *salt;                            /* currently active salt, installed by set_salt() */
static u8 *chk;                                   /* per-candidate checksum-match flags, read by cmp_all/cmp_one */
static int dirty=1;                               /* set by set_key() when keys changed since last crypt */
#if USE_PKZIP_MAGIC
static ZIP_SIGS SIGS[256];                        /* file-type magic signatures, indexed by magic-type enum; filled in init() */
#endif

#ifdef PKZIP_USE_MULT_TABLE
/* 16kb lookup table replacing the key3 multiply step (see comment in init()) */
static u8 mult_tab[16384];
#define PKZ_MULT(b,w) b^mult_tab[(u16)(w.u)>>2]
#else
/* computed form of the same keystream-byte derivation, used when the table is disabled */
inline u8 PKZ_MULT(u8 b, MY_WORD w) {u16 t = w.u|2; return b ^ (u8)(((u16)(t*(t^1))>>8)); }
#endif

extern struct fmt_main fmt_pkzip;
static const char *ValidateZipContents(FILE *in, long offset, u32 offex, int len, u32 crc);

/* Since the pkzip format textual representation is pretty complex, with multiple */
/* 'optional' sections, we have a VERY complete valid.
Valid will make SURE that */
/* the format is completely valid. Thus, there is little or no error checking later */
/* in the rest of the code. It 'should' not be needed, and is done here. There is */
/* a little error checking later in the file, for some of the file opening stuff, */
/* since the file can change from the time of this 'valid' call, until when the data */
/* is actually read from the file. */
/* */
/* NOTE, we may want to later make a 'prepare()' function, and do all file loading */
/* there, so that we have a 'complete' format line, with the zip data contained. */

/* Validate one ciphertext line ("$pkzip$..." or "$pkzip2$...").  Returns 1 when
 * every field parses and is in range (and, for type-3 entries, when the named
 * .zip file exists and contains the referenced blob); 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	u8 *p, *cp, *cpkeep;
	int cnt, data_len, ret=0;
	u32 crc;
	FILE *in;
	const char *sFailStr;
	long offset;
	u32 offex;
	int type;
	int complen = 0;
	int type2 = 0;

	/* accept either the old "$pkzip$" tag or the newer "$pkzip2$" (2-checksum) tag */
	if (strncmp(ciphertext, "$pkzip$", 7)) {
		if (!strncmp(ciphertext, "$pkzip2$", 8))
			type2 = 1;
		else
			return ret;
	}

	/* work on a private copy; pkz_GetFld tokenizes in place */
	cpkeep = (u8*)strdup(ciphertext);
	cp = cpkeep;

	p = &cp[7];
	if (type2)
		++p;
	p = pkz_GetFld(p, &cp);
	if (!pkz_is_hex_str(cp)) {
		sFailStr = "Out of data, reading count of hashes field"; goto Bail; }
	sscanf((c8*)cp, "%x", &cnt);
	if (cnt < 1 || cnt > MAX_PKZ_FILES) {
		sFailStr = "Count of hashes field out of range"; goto Bail; }
	p = pkz_GetFld(p, &cp);
	if (cp[0] < '0' || cp[0] > '2' || cp[1]) {
		sFailStr = "Number of valid hash bytes empty or out of range"; goto Bail; }

	/* one pass per file entry in the hash line */
	while (cnt--) {
		p = pkz_GetFld(p, &cp);
		if (cp[0]<'1' || cp[0]>'3' || cp[1]) {
			sFailStr = "Invalid data enumeration type"; goto Bail; }
		type = cp[0] - '0';
		p = pkz_GetFld(p, &cp);
		if (!pkz_is_hex_str(cp)) {
			sFailStr = "Invalid type enumeration"; goto Bail; }
		/* types 2 and 3 carry the full-file fields: CL, UL, CRC, OF, OX */
		if (type > 1) {
			p = pkz_GetFld(p, &cp);
			if (!pkz_is_hex_str(cp)) {
				sFailStr = "Invalid compressed length"; goto Bail; }
			sscanf((c8*)cp, "%x", &complen);
			p = pkz_GetFld(p, &cp);
			if (!pkz_is_hex_str(cp)) {
				sFailStr = "Invalid data length value"; goto Bail; }
			p = pkz_GetFld(p, &cp);
			if (!pkz_is_hex_str(cp)) {
				sFailStr = "Invalid CRC value"; goto Bail; }
			sscanf((c8*)cp, "%x", &crc);
			p = pkz_GetFld(p, &cp);
			if (!pkz_is_hex_str(cp)) {
				sFailStr = "Invalid offset length"; goto Bail; }
			sscanf((c8*)cp, "%lx", &offset);
			p = pkz_GetFld(p, &cp);
			if (!pkz_is_hex_str(cp)) {
				sFailStr = "Invalid offset length"; goto Bail; }
			sscanf((c8*)cp, "%x", &offex);
		}
		p = pkz_GetFld(p, &cp);
		if ((cp[0] != '0' && cp[0] != '8') || cp[1]) {
			sFailStr = "Compression type enumeration"; goto Bail; }
		p = pkz_GetFld(p, &cp);
		if (!pkz_is_hex_str(cp)) {
			sFailStr = "Invalid data length value"; goto Bail; }
		sscanf((c8*)cp, "%x", &data_len);
		p = pkz_GetFld(p, &cp);
		if (!pkz_is_hex_str(cp) || strlen((c8*)cp) != 4) {
			sFailStr = "invalid checksum value"; goto Bail; }
		if (type2) {
			/* pkzip2 lines carry a second (timestamp) checksum */
			p = pkz_GetFld(p, &cp);
			if (!pkz_is_hex_str(cp) || strlen((c8*)cp) != 4) {
				sFailStr = "invalid checksum2 value"; goto Bail;}
		}
		p = pkz_GetFld(p, &cp);
		if (type > 1) {
			if (type == 3) {
				/* type 3: DA field is a filename; verify the zip file on disk */
				if ( !p || strlen((c8*)cp) != data_len) {
					sFailStr = "invalid checksum value"; goto Bail; }
				in = fopen((c8*)cp, "rb"); /* have to open in bin mode for OS's where this matters, DOS/Win32 */
				if (!in) {
					/* this error is listed, even if not in pkzip debugging mode. */
					/* But not if we're just reading old pot lines */
					if (!ldr_in_pot)
						fprintf(stderr, "Error loading a pkzip hash line. The ZIP file '%s' could NOT be found\n", cp);
					return 0;
				}
				sFailStr = ValidateZipContents(in, offset, offex, complen, crc);
				if (*sFailStr) {
					/* this error is listed, even if not in pkzip debugging mode. */
					fprintf(stderr, "pkzip validation failed [%s] Hash is %s\n", sFailStr, ciphertext);
					fclose(in);
					return 0;
				}
				fseek(in, offset+offex, SEEK_SET);
				if (complen < 16*1024) {
					/* simply load the whole blob */
					/* NOTE(review): tbuf is read to prove the blob is readable, then discarded */
					unsigned char *tbuf = mem_alloc_tiny(complen, MEM_ALIGN_WORD);
					if (fread(tbuf, 1, complen, in) != complen) {
						fclose(in);
						return 0;
					}
					data_len = complen;
				}
				fclose(in);
			} else {
				/* 'inline' data. */
				if (complen != data_len) {
					sFailStr = "length of full data does not match the salt len"; goto Bail; }
				if (!pkz_is_hex_str(cp) || strlen((c8*)cp) != data_len<<1) {
					sFailStr = "invalid inline data"; goto Bail; }
			}
		} else {
			/* type 1: only a partial-data hex blob is present */
			if (!pkz_is_hex_str(cp) || strlen((c8*)cp) != data_len<<1) {
				sFailStr = "invalid partial data"; goto Bail; }
		}
	}
	/* the line must end with the matching trailer tag and no extra data */
	p = pkz_GetFld(p, &cp);
	if (type2)
		ret = !strcmp((c8*)cp, "$/pkzip2$") && !*p;
	else
		ret = !strcmp((c8*)cp, "$/pkzip$") && !*p;

Bail:;
#ifdef ZIP_DEBUG
	if (!ret) fprintf (stderr, "pkzip validation failed [%s] Hash is %s\n", sFailStr, ciphertext);
#endif
	MEM_FREE(cpkeep);
	return ret;
}

/* Seek to 'offset' in the zip file and verify that a PK\x03\x04 local file
 * header is there whose CRC/length/flags match the hash line's fields.
 * Returns "" on success, or a static error-description string on failure. */
static const char *ValidateZipContents(FILE *fp, long offset, u32 offex, int _len, u32 _crc)
{
	u32 id;
	u16 version, flags, method, modtm, moddt, namelen, exlen;
	u32 crc, complen, uncomplen;

	if (fseek(fp, offset, SEEK_SET) != 0)
		return "Not able to seek to specified offset in the .zip file, to read the zip blob data.";

	id = fget32LE(fp);
	if (id != 0x04034b50U)
		return "Compressed zip file offset does not point to start of zip blob";

	/* Ok, see if this IS the correct file blob. */
	version = fget16LE(fp);
	flags = fget16LE(fp);
	method = fget16LE(fp);
	modtm = fget16LE(fp);
	moddt = fget16LE(fp);
	crc = fget32LE(fp);
	complen = fget32LE(fp);
	uncomplen = fget32LE(fp);
	namelen = fget16LE(fp);
	exlen = fget16LE(fp);

	/* unused vars. */
	(void)uncomplen;
	(void)modtm;
	(void)moddt;

	/* Even if we 'miss', we keep walking back. We 'can' miss if the CRC of file, or some other */
	/* binary data happens to have the 0x04034b50 signature, thus giving us a false local header hit. */
	if (_crc == crc && _len == complen && (0x14 == version || 0xA == version) && (flags & 1) && (method == 8 || method == 0) && offex==30+namelen+exlen)
		return "";
	return "We could NOT find the internal zip data in this ZIP file";
}

/* Copy 'len' bytes of possibly-embedded-NUL data into tiny-alloc storage
 * (str_alloc_copy cannot be used because these signatures contain NUL bytes). */
static u8 *buf_copy (char *p, int len)
{
	u8 *op = mem_alloc_tiny(len, MEM_ALIGN_NONE);
	memcpy(op, p, len);
	return op;
}

/* One-time format setup: scale key counts for OpenMP, allocate per-candidate
 * buffers, precompute the multiply table and the magic-signature table. */
static void init(struct fmt_main *self)
{
#ifdef PKZIP_USE_MULT_TABLE
	unsigned short n=0;
#endif
#ifdef _OPENMP
	int omp_t;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	K12 = mem_calloc_tiny(sizeof(*K12) * 3 * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	chk = mem_calloc_tiny(sizeof(*chk) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);

	/*
	 * Precompute the multiply mangling, within several parts of the hash. There is a pattern,
	 * 64k entries long. However the exact same value is produced 4 times in a row, every
	 * time. Thus, we can build a 16k wide array, and then access the array using this
	 * ((val&0xFFFF) >> 2) This is faster on all current HW, since the 16kb array access
	 * (and the and/shift) is faster than performing the whole mult, 2 shifts, 2 adds and
	 * an and (if the compiler can optimize it to that)
	 *
	 * There is a # define at the top of this file that turns this OFF. if that define is
	 * not set, then these mult's will be done in the crypt_all and decrypt functions
	 */
#ifdef PKZIP_USE_MULT_TABLE
	for (n = 0; n < 16384; n++)
		mult_tab[n] = ((n*4+3) * (n*4+2) >> 8) & 0xff;
#endif
#if USE_PKZIP_MAGIC
	//static char *MagicTypes[]= { "", "DOC", "XLS", "DOT", "XLT", "EXE", "DLL", "ZIP", "BMP", "DIB", "GIF", "PDF", "GZ", "TGZ", "BZ2", "TZ2", "FLV", "SWF", "MP3", NULL };
	//static int MagicToEnum[] = {0, 1, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7, 7, 8, 8, 9, 10, 11, 0};
	// decent sources of these:
	// http://www.garykessler.net/library/file_sigs.html
	// http://en.wikipedia.org/wiki/List_of_file_signatures
	// http://toorcon.techpathways.com/uploads/headersig.txt (not available, 2012-12-28)
	// archive.org still has a version:
	// http://web.archive.org/web/20110725085828/http://toorcon.techpathways.com/uploads/headersig.txt
	// there are many more.

	//case 1: // DOC/XLS
	SIGS[1].magic_signature[0] = (u8*)str_alloc_copy("\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1");
	SIGS[1].magic_sig_len[0] = 8;
	SIGS[1].magic_signature[1] = buf_copy("\x50\x4B\x03\x04\x14\x00\x06\x00\x08", 10); // a .zip file 'sort of'
	SIGS[1].magic_sig_len[1] = 9;
	SIGS[1].magic_signature[2] = buf_copy("\x09\x04\x06\x00\x00\x00\x10\x00\xF6\x05\x5C\x00", 13); // older XLS format (office 95)
	SIGS[1].magic_sig_len[2] = 12;
	SIGS[1].magic_signature[3] = buf_copy("\x09\x02\x06\x00\x00\x00\x10\x00\xB9\x04\x5C\x00", 13); // older XLS v2
	SIGS[1].magic_sig_len[3] = 12;
	SIGS[1].magic_signature[4] = buf_copy("\x50\x4B\x03\x04\x14\x00\x00\x00\x00\x00", 11); //DOC Star Writer 6.0
	SIGS[1].magic_sig_len[4] = 10;
	SIGS[1].magic_signature[5] = buf_copy("\x31\xBE\x00\x00\x00\xAB\x00\x00", 9); //DOC MS Word for DOS v6 File
	SIGS[1].magic_sig_len[5] = 8;
	SIGS[1].magic_signature[6] = (u8*)str_alloc_copy("\x12\x34\x56\x78\x90\xFF"); //DOC MS Word 6.0 File
	SIGS[1].magic_sig_len[6] = 6;
	SIGS[1].magic_signature[7] = (u8*)str_alloc_copy("\x7F\xFE\x34\x0A"); //MS Word File
	SIGS[1].magic_sig_len[7] = 4;
	SIGS[1].magic_count = 8;
	SIGS[1].max_len = 12;
	//case 2: // Win32/DOS exe file MZ
	SIGS[2].magic_signature[0] = (u8*)str_alloc_copy("MZ");
	SIGS[2].magic_sig_len[0] = 2;
	SIGS[2].magic_count = 1;
	SIGS[2].max_len = 2;
	//case 3: // PKZIP
	SIGS[3].magic_signature[0] = (u8*)str_alloc_copy("\x50\x4B\x03\x04");
	SIGS[3].magic_sig_len[0] = 4;
	SIGS[3].magic_count = 1;
	SIGS[3].max_len = 4;
	//case 4: // BMP
	SIGS[4].magic_signature[0] = (u8*)str_alloc_copy("BM");
	SIGS[4].magic_sig_len[0] = 2;
	SIGS[4].magic_count = 1;
	SIGS[4].max_len = 2;
	//case 5: // GIF
	SIGS[5].magic_signature[0] = (u8*)str_alloc_copy("GIF87a");
	SIGS[5].magic_sig_len[0] = 6;
	SIGS[5].magic_signature[1] = (u8*)str_alloc_copy("GIF89a");
	SIGS[5].magic_sig_len[1] = 6;
	SIGS[5].magic_count = 2;
	SIGS[5].max_len = 6;
	//case 6: // PDF
	SIGS[6].magic_signature[0] = (u8*)str_alloc_copy("%PDF");
	SIGS[6].magic_sig_len[0] = 4;
	SIGS[6].magic_count = 1;
	SIGS[6].max_len = 4;
	//case 7: // GZ
	SIGS[7].magic_signature[0] = (u8*)str_alloc_copy("\x1F\x8B\x08");
	SIGS[7].magic_sig_len[0] = 3;
	SIGS[7].magic_count = 1;
	SIGS[7].max_len = 3;
	//case 8: // BZ2 (there is a 'magic' pi, but byte 4 is 1 to 9, so skip the 'pi')
	SIGS[8].magic_signature[0] = (u8*)str_alloc_copy("BZh");
	SIGS[8].magic_sig_len[0] = 3;
	SIGS[8].magic_signature[1] = (u8*)str_alloc_copy("BZ0");
	SIGS[8].magic_sig_len[1] = 3;
	SIGS[8].magic_count = 2;
	SIGS[8].max_len = 3;
	//case 9: // FLV
	SIGS[9].magic_signature[0] = (u8*)str_alloc_copy("FLV\x01");
	SIGS[9].magic_sig_len[0] = 4;
	SIGS[9].magic_count = 1;
	SIGS[9].max_len = 4;
	//case 10: // SWF
	// NOTE(review): "FWS" is only 3 bytes but sig_len/max_len are 5 — confirm against upstream
	SIGS[10].magic_signature[0] = (u8*)str_alloc_copy("FWS");
	SIGS[10].magic_sig_len[0] = 5;
	SIGS[10].magic_count = 1;
	SIGS[10].max_len = 5;
	//case 11: // MP3
	SIGS[11].magic_signature[0] = (u8*)str_alloc_copy("ID3");
	SIGS[11].magic_sig_len[0] = 3;
	SIGS[11].magic_count = 1;
	SIGS[11].max_len = 3;

	SIGS[255].max_len = 64;
#endif
}

/* Install the active salt.  The 'salt' handed over by the core is a pointer
 * to a PKZ_SALT pointer (SALT_SIZE is sizeof(PKZ_SALT*)), so dereference once. */
static void set_salt(void *_salt)
{
	salt = *((PKZ_SALT**)_salt);
}

static void *get_salt(char *ciphertext)
{
	/* NOTE, almost NO error checking at
all in this function. Proper error checking done in valid() */
	/* The salt we return is a POINTER to the real PKZ_SALT (SALT_SIZE is
	 * sizeof(PKZ_SALT*)); the union only provides the required alignment. */
	static union alignment {
		unsigned char c[8];
		ARCH_WORD_32 a[1];
	} a;
	unsigned char *salt_p = a.c;
	PKZ_SALT *salt;
	long offset=0;
	u32 offex;
	int i, j;
	u8 *p, *cp, *cpalloc = (unsigned char*)mem_alloc(strlen(ciphertext)+1);
	int type2 = 0;

	/* Needs word align on REQ_ALIGN systems. May crash otherwise (in the sscanf) */
	salt = mem_alloc_tiny(sizeof(PKZ_SALT), MEM_ALIGN_WORD);
	memcpy(salt_p, &salt, sizeof(salt));
	memset(salt, 0, sizeof(PKZ_SALT));

	/* tokenize a private copy of the line; field order matches valid() */
	cp = cpalloc;
	strcpy((c8*)cp, ciphertext);
	if (!strncmp((c8*)cp, "$pkzip$", 7))
		p = &cp[7];
	else {
		p = &cp[8];
		type2 = 1;
	}
	p = pkz_GetFld(p, &cp);
	sscanf((c8*)cp, "%x", &(salt->cnt));
	p = pkz_GetFld(p, &cp);
	sscanf((c8*)cp, "%x", &(salt->chk_bytes));
	for(i = 0; i < salt->cnt; ++i) {
		int data_enum;
		p = pkz_GetFld(p, &cp);
		data_enum = *cp - '0';
		p = pkz_GetFld(p, &cp);
#if USE_PKZIP_MAGIC
		{
			// mingw can't handle %hhx. Use 'normal' %x and assign back to uint_8 var
			unsigned jnk;
			sscanf((c8*)cp, "%x", &jnk);
			salt->H[i].magic = (unsigned char)jnk;
		}
		salt->H[i].pSig = &SIGS[salt->H[i].magic];
#endif
		if (data_enum > 1) {
			/* full-file entries carry CL, UL, CRC, OF, OX fields */
			p = pkz_GetFld(p, &cp);
			sscanf((c8*)cp, "%x", &(salt->compLen));
			p = pkz_GetFld(p, &cp);
			sscanf((c8*)cp, "%x", &(salt->deCompLen));
			p = pkz_GetFld(p, &cp);
			sscanf((c8*)cp, "%x", &(salt->crc32));
			p = pkz_GetFld(p, &cp);
			sscanf((c8*)cp, "%lx", &offset);
			p = pkz_GetFld(p, &cp);
			sscanf((c8*)cp, "%x", &offex);
		}
		p = pkz_GetFld(p, &cp);
		sscanf((c8*)cp, "%x", &(salt->H[i].compType));
		p = pkz_GetFld(p, &cp);
		sscanf((c8*)cp, "%x", &(salt->H[i].datlen));
		p = pkz_GetFld(p, &cp);

		/* 4 hex digits -> 16-bit checksum */
		for (j = 0; j < 4; ++j) {
			salt->H[i].c <<= 4;
			salt->H[i].c |= atoi16[ARCH_INDEX(cp[j])];
		}
		if (type2) {
			p = pkz_GetFld(p, &cp);
			for (j = 0; j < 4; ++j) {
				salt->H[i].c2 <<= 4;
				salt->H[i].c2 |= atoi16[ARCH_INDEX(cp[j])];
			}
		} else
			salt->H[i].c2 = salt->H[i].c; // fake out 2nd hash, by copying first hash
		p = pkz_GetFld(p, &cp);
		if (data_enum > 1) {
			/* if 2 or 3, we have the FULL zip blob for decrypting. */
			if (data_enum == 3) {
				/* read from file. */
				FILE *fp;
				fp = fopen((c8*)cp, "rb");
				if (!fp) {
					fprintf (stderr, "Error opening file for pkzip data: %s\n", cp);
					MEM_FREE(cpalloc);
					return 0;
				}
				fseek(fp, offset+offex, SEEK_SET);
				if (salt->compLen < 16*1024) {
					/* simply load the whole blob */
					salt->H[i].h = mem_alloc_tiny(salt->compLen, MEM_ALIGN_WORD);
					if (fread(salt->H[i].h, 1, salt->compLen, fp) != salt->compLen) {
						fprintf (stderr, "Error reading zip file for pkzip data: %s\n", cp);
						fclose(fp);
						MEM_FREE(cpalloc);
						return 0;
					}
					fclose(fp);
					salt->H[i].datlen = salt->compLen;
				}
				else {
					/* Only load a small part (to be used in crypt_all), and set the filename in */
					/* the salt->fname string, so that cmp_all can open the file, and buffered */
					/* read the zip data only when it 'needs' it. */
					salt->fname = str_alloc_copy((c8*)cp);
					salt->offset = offset+offex;
					salt->H[i].h = mem_alloc_tiny(384, MEM_ALIGN_WORD);
					if (fread(salt->H[i].h, 1, 384, fp) != 384) {
						fprintf (stderr, "Error reading zip file for pkzip data: %s\n", cp);
						fclose(fp);
						MEM_FREE(cpalloc);
						return 0;
					}
					fclose(fp);
					salt->H[i].datlen = 384;
				}
			} else {
				/* inline data: decode the hex blob into bytes */
				salt->H[i].h = mem_alloc_tiny(salt->compLen, MEM_ALIGN_WORD);
				for (j = 0; j < salt->H[i].datlen; ++j)
					salt->H[i].h[j] = (atoi16[ARCH_INDEX(cp[j*2])]<<4) + atoi16[ARCH_INDEX(cp[j*2+1])];
			}
			/* we also load this into the 'building' salt */
			salt->compType = salt->H[i].compType;
			/* Now, set the 'is full zip' flag, so we later process as a zip file. */
			salt->H[i].full_zip = 1;
			salt->full_zip_idx = i;
		} else {
			/* partial data: decode the hex blob into bytes */
			salt->H[i].h = mem_alloc_tiny(salt->H[i].datlen, MEM_ALIGN_WORD);
			for (j = 0; j < salt->H[i].datlen; ++j)
				salt->H[i].h[j] = (atoi16[ARCH_INDEX(cp[j*2])]<<4) + atoi16[ARCH_INDEX(cp[j*2+1])];
		}
	}
	MEM_FREE(cpalloc);

	// Ok, we want to add some 'logic' to remove the magic testing, except for specific cases.
	// If the only file blobs we have are stored, and long blobs, then we want magic (3 file, 2 byte checksum does not need magic).
	// A single 1 byte file, even if deflated, we want to keep magic. (possibly).
	j = 0;
	for (i = 0; i < salt->cnt; ++i) {
		if (salt->H[i].compType == 8) {
			if (salt->cnt == 1 && salt->chk_bytes == 1)
				j += 10;
			else
				break;
		}
		j += 1;
	}
	// ok, if j == 1, then we 'might' want to use magic. Otherwise, we want to 'clear' all magic values.
	if (j >= 20)
		j = 0;
	if (j && salt->chk_bytes == 2 && salt->cnt > 1)
		j = 0; // we do not need to use magic, on 2 or 3 stored 2 byte checksum files. We already have 2^32 or 2^48 in the checksum checking
	if (j && salt->chk_bytes == 1 && salt->cnt == 3)
		j = 0; // we do not need to use magic, on 3 stored 2 byte checksum files. We already have 2^32 or 2^48 in the checksum checking
	if (!j) {
		for (i = 0; i < salt->cnt; ++i)
			salt->H[i].magic = 0; // remove any 'magic' logic from this hash.
	}

	return salt_p;
}

static void set_key(char *key, int index)
{
	/* Keep the PW, so we can return it in get_key if asked to do so */
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
	dirty = 1;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* chk[idx] is set (presumably by crypt_all, outside this chunk — confirm)
 * when the candidate passed the quick checksum test */
static int cmp_one(void *binary, int idx)
{
	return chk[idx] == 1;
}

static int cmp_all(void *binary, int count)
{
	int i,j;

	for (i=j=0; i<count; ++i)
		j+=chk[i]; /* hopefully addition like this is faster than 'count' conditional if statments */
	return j;
}

/* this function is used by cmp_exact_loadfile. It will load the next
 * part of the file then decrypt the data, and return just how many
 * bytes were loaded.
 *
 * This function is 'similar' to an fread(). However, it also decrypts data */
static int get_next_decrypted_block(u8 *in, int sizeof_n, FILE *fp, u32 *inp_used, MY_WORD *pkey0, MY_WORD *pkey1, MY_WORD *pkey2)
{
	u32 new_bytes = sizeof_n, k;
	u8 C;

	/* we have read all the bytes, we're done */
	if (*inp_used >= salt->compLen)
		return 0;
	if (*inp_used + new_bytes > salt->compLen)
		/* this is the last block. Only load the bytes that are left */
		new_bytes = salt->compLen - *inp_used;
	/* return the correct 'offset', so we can track when the file buffer has been fully read */
	*inp_used += new_bytes;
	/* read the data */
	if (fread(in, 1, new_bytes, fp) != new_bytes)
		return 0;

	/* decrypt the data bytes (in place, in same buffer). Easy to do, only requires 1 temp character variable. */
	for (k = 0; k < new_bytes; ++k) {
		/* standard PKWARE stream-cipher update: decrypt byte, then advance key0/key1/key2 */
		C = PKZ_MULT(in[k],(*pkey2));
		pkey0->u = pkzip_crc32 (pkey0->u, C);
		pkey1->u = (pkey1->u + pkey0->c[KB1]) * 134775813 + 1;
		pkey2->u = pkzip_crc32 (pkey2->u, pkey1->c[KB2]);
		in[k] = C;
	}
	/* return the number of bytes we read from the file on this read */
	return new_bytes;
}

/* Ok, this is the more complex example. Here we have to load the file (which may be HUGE)
 * decrypt the bytes from this file, and then inflate that data, and crc the bytes which we
 * have inflated from that stream. Then in the end, when we use all input bytes, if we have
 * inflated the right amount of data, ended up with a Z_STREAM_END, and the proper sized
 * decompression buffer, and the CRC matches, then we know we have the correct password
 *
 * This function is called from cmp_exact(), when cmp_exact finds out we have to decrypt from
 * the stored .zip file.
 *
 * this code is modifications made to the zpipe.c 'example' code from the zlib web site.
 */
/* streaming buffer size for file reads and inflate output */
#define CHUNK (64*1024)

/* Full validation of candidate 'index' against a zip blob that lives on disk
 * (salt->fname set): stream-decrypt, inflate (unless stored), and compare
 * lengths and CRC32.  Returns nonzero when the password is confirmed. */
static int cmp_exact_loadfile(int index)
{
	int ret;
	u32 have, k;
	z_stream strm;
	unsigned char in[CHUNK];
	unsigned char out[CHUNK];
	FILE *fp;
	MY_WORD key0, key1, key2;
	u8 *b, C;
	u32 inp_used, decomp_len=0;
	u32 crc = 0xFFFFFFFF;

	/* Open the zip file, and 'seek' to the proper offset of the binary zip blob */
	fp = fopen(salt->fname, "rb");
	if (!fp) {
		fprintf (stderr, "\nERROR, the zip file: %s has been removed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
		return 1;
	}
	if (fseek(fp, salt->offset, SEEK_SET)) {
		fprintf (stderr, "\nERROR, the zip file: %s fseek() failed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
		fclose(fp);
		return 1;
	}

	/* 'seed' the decryption with the IV. We do NOT use these bytes, they simply seed us. */
	key0.u = K12[index*3], key1.u = K12[index*3+1], key2.u = K12[index*3+2];
	k=12;
	if (fread(in, 1, 12, fp) != 12) {
		fprintf (stderr, "\nERROR, the zip file: %s fread() failed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
		fclose(fp);
		return 1;
	}
	/* run the 12 IV bytes (taken from the salt's cached copy) through the key schedule */
	b = salt->H[salt->full_zip_idx].h;
	do {
		C = PKZ_MULT(*b++,key2);
		key0.u = pkzip_crc32 (key0.u, C);
		key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
		key2.u = pkzip_crc32 (key2.u, key1.c[KB2]);
	} while(--k);

	/* this is 'sort of' our file pointer. It is the 'index' into the file's encrypted, compressed data buffer. */
	/* we have read the 12 bytes of IV data, and updated our keys. Now we start processing the rest of the bytes */
	/* to get the data to inflate, and crc check */
	inp_used = 12;

	if (salt->H[salt->full_zip_idx].compType == 0) {
		// handle a stored blob (we do not have to decrypt it.
		int avail_in;
		crc = 0xFFFFFFFF;
		avail_in = get_next_decrypted_block(in, CHUNK, fp, &inp_used, &key0, &key1, &key2);
		while (avail_in) {
			for (k = 0; k < avail_in; ++k)
				crc = pkzip_crc32(crc,in[k]);
			avail_in = get_next_decrypted_block(in, CHUNK, fp, &inp_used, &key0, &key1, &key2);
		}
		fclose(fp);
		return ~crc == salt->crc32;
	}

	/* allocate inflate state */
	strm.zalloc = Z_NULL;
	strm.zfree = Z_NULL;
	strm.opaque = Z_NULL;
	strm.avail_in = 0;
	strm.next_in = Z_NULL;
	/* -15: raw deflate, no zlib/gzip header — zip stores raw deflate streams */
	ret = inflateInit2(&strm, -15);
	if (ret != Z_OK)
		/* if zlib is hosed, then likely there is no reason at all to continue. Better to exit, and let the user 'fix' the system */
		/* NOTE(review): this only prints the message; execution continues with an uninitialized inflate state — confirm intent */
		perror("Error, initializing the libz inflateInit2() system\n");

	/* decompress until deflate stream ends or end of file */
	do {
		strm.avail_in = get_next_decrypted_block(in, CHUNK, fp, &inp_used, &key0, &key1, &key2);
		if (ferror(fp)) {
			inflateEnd(&strm);
			fclose(fp);
			fprintf (stderr, "\nERROR, the zip file: %s fread() failed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
			return 1;
		}
		if (strm.avail_in == 0)
			break;
		strm.next_in = in;

		/* run inflate() on input until output buffer not full */
		do {
			strm.avail_out = CHUNK;
			strm.next_out = out;
			ret = inflate(&strm, Z_NO_FLUSH);

			switch (ret) {
			case Z_NEED_DICT:
			case Z_DATA_ERROR:
			case Z_MEM_ERROR:
				/* corrupt stream => wrong password */
				inflateEnd(&strm);
				fclose(fp);
				return 0;
			}
			have = CHUNK - strm.avail_out;
			/* now update our crc value */
			for (k = 0; k < have; ++k)
				crc = pkzip_crc32(crc,out[k]);
			decomp_len += have;
		} while (strm.avail_out == 0);

		/* done when inflate() says it's done */
	} while (ret != Z_STREAM_END);

	/* clean up and return */
	inflateEnd(&strm);
	fclose(fp);
	/* success requires: clean stream end, all input consumed, expected plain length, matching CRC */
	return ret == Z_STREAM_END && inp_used == salt->compLen && decomp_len == salt->deCompLen && salt->crc32 == ~crc;
}

static int cmp_exact(char *source, int index)
{
	const u8 *b;
	u8 C, *decompBuf, *decrBuf, *B;
	u32 k, crc;
	MY_WORD key0, key1, key2;
	z_stream strm;
	int ret;

	if (salt->H[salt->full_zip_idx].full_zip == 0)
		/* we
do not have a zip file, this is 'checksum' only * POSSIBLY, we should log and output to screen that * we are not 100% 'sure' we have the right password!! */ return 1; #ifdef ZIP_DEBUG fprintf(stderr, "FULL zip test being done. (pass=%s)\n", saved_key[index]); #endif if (salt->fname == NULL) { /* we have the whole zip blob in memory, simply allocate a decrypt buffer, decrypt * in one step, crc and be done with it. This is the 'trivial' type. */ decrBuf = mem_alloc(salt->compLen-12); key0.u = K12[index*3], key1.u = K12[index*3+1], key2.u = K12[index*3+2]; b = salt->H[salt->full_zip_idx].h; k=12; do { C = PKZ_MULT(*b++,key2); key0.u = pkzip_crc32 (key0.u, C); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = pkzip_crc32 (key2.u, key1.c[KB2]); } while(--k); B = decrBuf; k = salt->compLen-12; do { C = PKZ_MULT(*b++,key2); key0.u = pkzip_crc32 (key0.u, C); *B++ = C; key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = pkzip_crc32 (key2.u, key1.c[KB2]); } while (--k); if (salt->H[salt->full_zip_idx].compType == 0) { // handle a stored blob (we do not have to decrypt it. crc = 0xFFFFFFFF; for (k = 0; k < salt->compLen-12; ++k) crc = pkzip_crc32(crc,decrBuf[k]); MEM_FREE(decrBuf); return ~crc == salt->crc32; } strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = Z_NULL; strm.avail_in = 0; ret = inflateInit2(&strm, -15); /* 'raw', since we do not have gzip header, or gzip crc. .ZIP files are 'raw' implode data. 
*/ if (ret != Z_OK) perror("Error, initializing the libz inflateInit2() system\n"); decompBuf = mem_alloc(salt->deCompLen); strm.next_in = decrBuf; strm.avail_in = salt->compLen-12; strm.avail_out = salt->deCompLen; strm.next_out = decompBuf; ret = inflate(&strm, Z_SYNC_FLUSH); inflateEnd(&strm); if (ret != Z_STREAM_END || strm.total_out != salt->deCompLen) { MEM_FREE(decompBuf); MEM_FREE(decrBuf); return 0; } crc = 0xFFFFFFFF; for (k = 0; k < strm.total_out; ++k) crc = pkzip_crc32(crc,decompBuf[k]); MEM_FREE(decompBuf); MEM_FREE(decrBuf); return ~crc == salt->crc32; } /* we have a stand alone function to handle this more complex method of * loading from file, decrypting, decompressing, and crc'ing the data * It is complex enough of a task, to have its own function. */ return cmp_exact_loadfile(index); } #if USE_PKZIP_MAGIC const char exBytesUTF8[64] = { 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 }; static int isLegalUTF8_char(const u8 *source, int length) { u8 a; int len; const u8 *srcptr; if (*source < 0xC0) return 1; len = exBytesUTF8[*source&0x3f]; srcptr = source+len; if (len+1 > length) return -1; switch (len) { default: return -1; /* Everything else falls through when "true"... 
*/ case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return -1; case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return -1; case 2: if ((a = (*--srcptr)) > 0xBF) return -1; switch (*source) { /* no fall-through in this inner switch */ case 0xE0: if (a < 0xA0) return -1; break; case 0xED: if (a > 0x9F) return -1; break; case 0xF0: if (a < 0x90) return -1; break; case 0xF4: if (a > 0x8F) return -1; break; default: if (a < 0x80) return -1; } case 1: if (*source >= 0x80 && *source < 0xC2) return -1; } if (*source > 0xF4) return -1; return len+1; } static int validate_ascii(const u8 *out, int inplen) { int i; int unicode=0; for (i = 0; i < inplen-1; ++i) { if (out[i] > 0x7E) { // first check to 'see' if this is a valid utf8 character. If so, let it 'pass'. if (unicode) return 0; // in unicode mode, we ONLY handle 'ascii' bytes in the low byte. if (out[i] > 0xC0) { int len; if(i > inplen-4) return 1; len = isLegalUTF8_char(&out[i], 5); if (len < 0) return 0; i += (len-1); } else { if (i) { // check for utf8 BOM \xEF \xBB \xBF if (out[0] == 0xEF && out[1] == 0xBB && out[2] == 0xBF) { i = 2; continue; } /* check for Unicode BOM (FF FE for utf16le, FE FF for utf16be, FF FE 00 00 for utf32le, not sure if 00 00 FE FF is utf32be, but likely is) */ if (out[0] == 0xFF && out[1] == 0xFE) { unicode = 1; i++; continue; } /* unicode BE bom */ if (out[0] == 0xFE && out[1] == 0xFF) { unicode = 1; i += 2; continue; } /* utf32 LE */ if (out[0] == 0xFF && out[1] == 0xFE && out[2] == 0 && out[3] == 0) { unicode = 3; i += 3; continue; } /* utf32 BE bom */ if (out[0] == 0 && out[1] == 0 && out[2] == 0xFE && out[3] == 0xFF) { unicode = 3; i += 6; continue; } // allow a 'single' byte > 0x7E as long as bytes following are ascii. 
if (out[1] <= 0x7E && out[1] >= 0x20) { ++i; continue; } return 0; } } } else if (out[i] < 0x20) { /* we do not need to deal with DOS EOF char 0x1a, since we will never have the 'end' of the file */ /* we do allow the ESC character for ANSI files, however, they are frequently also binary, so will fail in other places */ if (out[i]!='\n' && out[i]!='\r' && out[i]!='\t' && out[i]!=0x1B) return 0; } i += unicode; // skip the null bytes } return 1; } static int CheckSigs(const u8 *p, int len, ZIP_SIGS *pSig) { int i, j; for (i = 0; i < pSig->magic_count; ++i) { int fnd = 1; u8 *pS = pSig->magic_signature[i]; for (j = 0; j < pSig->magic_sig_len[i]; ++j) { if (p[j] != pS[j]) { fnd = 0; break; } } if (fnd) return 1; } return 0; } #endif /* note, Buf is the 'full' decrypted zip buffer (len bytes long). It DOES contain the first 3 bits, which have already * been decoded, and have told us we had a code 2 (var table block) * all done without BITS(), PULLBYTE(), BITSNEEDED() macros. We 'know' the data we need, and we know that we have * 'enough', so we do not worry about all of the overhead, and validation logic. * * In testing, this function catches ALL bad decryptions, except about 1/300 to 1/350. So, it is not too bad. */ MAYBE_INLINE static int check_inflate_CODE2(u8 *next) { u32 bits, hold, thisget, have, i; int left; u32 ncode; u32 ncount[2]; // ends up being an array of 8 u8 count values. But we can clear it, and later 'check' it with 2 u32 instructions. u8 *count; // this will point to ncount array. NOTE, this is alignment required 'safe' for Sparc systems or others requiring alignment. #if (ARCH_LITTLE_ENDIAN==1) && (ARCH_ALLOWS_UNALIGNED==1) // 'speedup' for x86 type systems. pkzip/inflate was designed here, so why not use it. hold = *((u32*)next); #else hold = *next + (((u32)next[1])<<8) + (((u32)next[2])<<16) + (((u32)next[3])<<24); #endif next += 3; // we pre-increment when pulling it in the loop, thus we need to be 1 byte back. 
hold >>= 3; // we already processed 3 bits count = (u8*)ncount; if (257+(hold&0x1F) > 286) return 0; // nlen, but we do not use it. hold >>= 5; if(1+(hold&0x1F) > 30) return 0; // ndist, but we do not use it. hold >>= 5; ncode = 4+(hold&0xF); hold >>= 4; // we have 15 bits left. hold += ((u32)(*++next)) << 15; hold += ((u32)(*++next)) << 23; // we now have 31 bits. We need to know this for the loop below. bits = 31; // We have 31 bits now, in accum. If we are processing 19 codes, we do 7, then have 10 bits. // Add 16 more and have 26, then use 21, have 5. Then load 16 more, then eat 15 of them. have = 0; ncount[0] = ncount[1] = 0; for (;;) { if (have+7>ncode) thisget = ncode-have; else thisget = 7; have += thisget; bits -= thisget*3; while (thisget--) { ++count[hold&7]; hold>>=3; } if (have == ncode) break; hold += ((u32)(*++next)) << bits; bits += 8; hold += ((u32)(*++next)) << bits; bits += 8; } count[0] = 0; if (!ncount[0] && !ncount[1]) return 0; /* if no codes at all, then simply bail, that is invalid */ /* check for an over-subscribed or incomplete set of lengths */ /* this will catch about 319 out of 320 'bad' passwords that */ /* have made it into this function. Note, only 1/4 of the */ /* passwords which pass the checksum, can make it here. Of */ /* those, we drop 319/320 or about that many (a good check!) */ left = 1; for (i = 1; i <= 7; ++i) { left <<= 1; left -= count[i]; if (left < 0) return 0; /* over-subscribed */ } if (left > 0) return 0; /* incomplete set */ return 1; /* Passed this check! */ } //static code const * const lcode = lenfix; //static code const * const dcode = distfix; /* This function handles inflate CODE type 1. This is a 'fixed' table code. We set the fixed table, */ /* and then inflate some data (without writing anything. If we find any BAD lookback data, we can */ /* return a failure. We have 24 bytes of inflate data, and this almost always is more than enough */ /* to turn up an error. 
If we find we need more, we will do more than 24 */ MAYBE_INLINE static int check_inflate_CODE1(u8 *next, int left) { u32 whave = 0, op, bits, hold,len; code here; #if (ARCH_LITTLE_ENDIAN==1) && (ARCH_ALLOWS_UNALIGNED==1) // 'speedup' for x86 type systems. pkzip/inflate was designed here, so why not use it. hold = *((u32*)next); #else hold = *next + (((u32)next[1])<<8) + (((u32)next[2])<<16) + (((u32)next[3])<<24); #endif next += 3; // we pre-increment when pulling it in the loop, thus we need to be 1 byte back. left -= 4; hold >>= 3; // we already processed 3 bits bits = 32-3; for (;;) { if (bits < 15) { if (left < 2) return 1; // we are out of bytes. Return we had no error. left -= 2; hold += (u32)(*++next) << bits; bits += 8; hold += (u32)(*++next) << bits; bits += 8; } here=lenfix[hold & 0x1FF]; op = (unsigned)(here.bits); hold >>= op; bits -= op; op = (unsigned)(here.op); if (op == 0) /* literal */ ++whave; else if (op & 16) { /* length base */ len = (unsigned)(here.val); op &= 15; /* number of extra bits */ if (op) { if (bits < op) { if (!left) return 1; /*we are out of bytes. Return we had no error.*/ --left; hold += (u32)(*++next) << bits; bits += 8; } len += (unsigned)hold & ((1U << op) - 1); hold >>= op; bits -= op; } if (bits < 15) { if (left < 2) return 1; /*we are out of bytes. Return we had no error.*/ left -= 2; hold += (u32)(*++next) << bits; bits += 8; hold += (u32)(*++next) << bits; bits += 8; } here = distfix[hold & 0x1F]; // dodist: op = (unsigned)(here.bits); hold >>= op; bits -= op; op = (unsigned)(here.op); if (op & 16) { /* distance base */ u32 dist = (unsigned)(here.val); op &= 15; /* number of extra bits */ if (bits < op) { if (!left) return 1; /*we are out of bytes. Return we had no error.*/ --left; hold += (u32)(*++next) << bits; bits += 8; if (bits < op) { if (!left) return 1; /*we are out of bytes. 
Return we had no error.*/ --left; hold += (u32)(*++next) << bits; bits += 8; } } dist += (unsigned)hold & ((1U << op) - 1); if (dist > whave) return 0; /*invalid distance too far back*/ hold >>= op; bits -= op; //***** start of patched code from Pavel Semjanov (see original code below) whave += len; } else return 0; /*invalid distance code*/ } else if (op & 32) { // end of block [may present in short sequences, but only at the end.] NOTE, we need to find out if we EVER hit the end of a block, at only 24 bytes??? if (left == 0) return 1; return 0; } else { return 0; // invalid literal/length code. } //***** End of patched code from Pavel } } // original code block (for above), prior to patch from Pavel Semjanov [pavel@semjanov.com] // this code would be a direct drop in between the comments starting and stopping with //***** above // also the dodist label was commented out (no longer used). #if 0 whave += dist; } else if ((op & 64) == 0) { /* 2nd level distance code */ here = distfix[here.val + (hold & ((1U << op) - 1))]; goto dodist; } else return 0; /*invalid distance code*/ } else if (op & 64) { // 2nd level length code. //here = lcode[here.val + (hold & ((1U << op) - 1))]; //goto dolen; // this causes an infinite loop. Also, I VERY seriously doubt, this will EVER happen in the first // 24 bytes of code. NOTE, there may be problems, in the fact this causes a inf loop!, but for now, // simply return 0, then debug later. return 0; } else if (op & 32) { // end of block NOTE, we need to find out if we EVER hit the end of a block, at only 24 bytes??? // It is VERY likely we do SHOULD NOT EVER hit this. If that is the case, return that this block is bogus. // check next OP (if we have enough bits left), if CODE=3, fail. If code==0, check return 0; } else { return 0; // invalid literal/length code. } #endif /* * Crypt_all simply performs the checksum .zip validatation of the data. It performs * this for ALL hashes provided. 
If any of them fail to match, then crypt all puts the * complement of the 'proper' checksum of the first hash into the output. These 2 bytes * are checked against the binary for this salt/password combination. Thus, if any * checksum fails, it will never match binary. However, if ALL of the checksums match * we then put the checksum bytes from the first hash, into our output data. Then, when * the binary check (cmp_all, cmp_one) is performed, it WILL match. NOTE, this does * not mean we have found the password. Just that all hashes quick check checksums * for this password 'work'. */ static int crypt_all(int *pcount, struct db_salt *_salt) { int _count = *pcount; int idx; #if (ZIP_DEBUG==2) static int CNT, FAILED, FAILED2; ++CNT; #endif // pkzip kinda sucks a little for multi-threading, since there is different amount of work to be // done, depenging upon the password. Thus, we pack in OMP_MOD passwords into each thread, and // hopefully some of the differnces will even themselves out in the end. If we have 2 threads // then thread 1 gets 0 to 127 password, and thread 2 gets 128-256. Once they 'get' their data, // there should be no mutexing of the runtime data, thus the threads should run fast. // Also, since we have 'multiple' files in a .zip file (and multiple checksums), we bail as at the // first time we fail to match checksum. So, there may be some threads which check more checksums. // Again, hopefully globbing many tests into a threads working set will flatten out these differences. #ifdef _OPENMP #pragma omp parallel for private(idx) #endif for (idx = 0; idx < _count; ++idx) { int cur_hash_count = salt->cnt; int cur_hash_idx = -1; MY_WORD key0, key1, key2; u8 C; const u8 *b; u8 curDecryBuf[256]; #if USE_PKZIP_MAGIC u8 curInfBuf[128]; #endif int k, SigChecked; u16 e, e2, v1, v2; z_stream strm; int ret; /* use the pwkey for each hash. We mangle on the 12 bytes of IV to what was computed in the pwkey load. 
*/ if (dirty) { u8 *p = (u8*)saved_key[idx]; /* load the 'pwkey' one time, put it into the K12 array */ key0.u = 0x12345678UL; key1.u = 0x23456789UL; key2.u = 0x34567890UL; do { key0.u = pkzip_crc32 (key0.u, *p++); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = pkzip_crc32 (key2.u, key1.c[KB2]); } while (*p); K12[idx*3] = key0.u, K12[idx*3+1] = key1.u, K12[idx*3+2] = key2.u; goto SkipKeyLoadInit; } do { // 2nd, and later times through the loop, AND if keys are not dirty (i.e. multiple salts // for the same key load), we do NOT perform the key compute, but instead load the pre-computed // key data from the array. key0.u = K12[idx*3], key1.u = K12[idx*3+1], key2.u = K12[idx*3+2]; SkipKeyLoadInit:; b = salt->H[++cur_hash_idx].h; k=11; e = salt->H[cur_hash_idx].c; e2 = salt->H[cur_hash_idx].c2; do { C = PKZ_MULT(*b++,key2); key0.u = pkzip_crc32 (key0.u, C); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = pkzip_crc32 (key2.u, key1.c[KB2]); } while(--k); /* if the hash is a 2 byte checksum type, then check that value first */ /* There is no reason to continue if this byte does not check out. */ if (salt->chk_bytes == 2 && C != (e&0xFF) && C != (e2&0xFF)) goto Failed_Bailout; C = PKZ_MULT(*b++,key2); #if 1 // https://github.com/magnumripper/JohnTheRipper/issues/467 // Fixed, JimF. Added checksum test for crc32 and timestamp. if (C != (e>>8) && C != (e2>>8)) goto Failed_Bailout; #endif // Now, update the key data (with that last byte. key0.u = pkzip_crc32 (key0.u, C); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = pkzip_crc32 (key2.u, key1.c[KB2]); // Ok, we now have validated this checksum. We need to 'do some' extra pkzip validation work. // What we do here, is to decrypt a little data (possibly only 1 byte), and perform a single // 'inflate' check (if type is 8). If type is 0 (stored), and we have a signature check, then // we do that here. 
Also, if the inflate code is a 0 (stored block), and we do sig check, then // we can do that WITHOUT having to call inflate. however, if there IS a sig check, we will have // to call inflate on 'some' data, to get a few bytes (or error code). Also, if this is a type // 2 or 3, then we do the FULL inflate, CRC check here. e = 0; // First, we want to get the inflate CODE byte (the first one). C = PKZ_MULT(*b++,key2); SigChecked = 0; if ( salt->H[cur_hash_idx].compType == 0) { // handle a stored file. // We can ONLY deal with these IF we are handling 'magic' testing. #if USE_PKZIP_MAGIC // Ok, if we have a signature, check it here, WITHOUT having to call zLib's inflate. if (salt->H[cur_hash_idx].pSig->max_len) { int len = salt->H[cur_hash_idx].pSig->max_len; if (len > salt->H[cur_hash_idx].datlen-12) len = salt->H[cur_hash_idx].datlen-12; SigChecked = 1; curDecryBuf[0] = C; for (; e < len;) { key0.u = pkzip_crc32 (key0.u, curDecryBuf[e]); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = pkzip_crc32 (key2.u, key1.c[KB2]); curDecryBuf[++e] = PKZ_MULT(*b++,key2); } if (salt->H[cur_hash_idx].magic == 255) { if (!validate_ascii(&curDecryBuf[5], len-5)) goto Failed_Bailout; } else { if (!CheckSigs(curDecryBuf, len, salt->H[cur_hash_idx].pSig)) goto Failed_Bailout; } } #endif continue; } #if 1 // https://github.com/magnumripper/JohnTheRipper/issues/467 // Ok, if this is a code 3, we are done. // Code moved to after the check for stored type. (FIXED) This check was INVALID for a stored type file. if ( (C & 6) == 6) goto Failed_Bailout; #endif if ( (C & 6) == 0) { // Check that checksum2 is 0 or 1. If not, I 'think' we can be done if (C > 1) goto Failed_Bailout; // now get 4 bytes. This is the length. It is made up of 2 16 bit values. // these 2 values are checksumed, so it is easy to tell if the data is WRONG. 
// correct data is u16_1 == (u16_2^0xFFFF) curDecryBuf[0] = C; for (e = 0; e <= 4; ) { key0.u = pkzip_crc32 (key0.u, curDecryBuf[e]); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = pkzip_crc32 (key2.u, key1.c[KB2]); curDecryBuf[++e] = PKZ_MULT(*b++,key2); } v1 = curDecryBuf[1] | (((u16)curDecryBuf[2])<<8); v2 = curDecryBuf[3] | (((u16)curDecryBuf[4])<<8); if (v1 != (v2^0xFFFF)) goto Failed_Bailout; #if USE_PKZIP_MAGIC // Ok, if we have a signature, check it here, WITHOUT having to call zLib's inflate. if (salt->H[cur_hash_idx].pSig->max_len) { int len = salt->H[cur_hash_idx].pSig->max_len + 5; if (len > salt->H[cur_hash_idx].datlen-12) len = salt->H[cur_hash_idx].datlen-12; SigChecked = 1; for (; e < len;) { key0.u = pkzip_crc32 (key0.u, curDecryBuf[e]); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = pkzip_crc32 (key2.u, key1.c[KB2]); curDecryBuf[++e] = PKZ_MULT(*b++,key2); } if (salt->H[cur_hash_idx].magic == 255) { if (!validate_ascii(&curDecryBuf[5], len-5)) goto Failed_Bailout; } else { if (!CheckSigs(&curDecryBuf[5], len-5, salt->H[cur_hash_idx].pSig)) goto Failed_Bailout; } } #endif } else { // Ok, now we have handled inflate code type 3 and inflate code 0 (50% of 'random' data) // We now have the 2 'hard' ones left (fixed table, and variable table) curDecryBuf[0] = C; if ((C&6) == 4) { // inflate 'code' 2 (variable table) #if (ZIP_DEBUG==2) static unsigned count, found; ++count; #endif // we need 4 bytes, + 2, + 4 at most. 
for (; e < 10;) { key0.u = pkzip_crc32 (key0.u, curDecryBuf[e]); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = pkzip_crc32 (key2.u, key1.c[KB2]); curDecryBuf[++e] = PKZ_MULT(*b++,key2); } if (!check_inflate_CODE2(curDecryBuf)) goto Failed_Bailout; #if (ZIP_DEBUG==2) fprintf (stderr, "CODE2 Pass=%s count = %u, found = %u\n", saved_key[idx], count, ++found); #endif } else { int til; #if (ZIP_DEBUG==2) static unsigned count, found; ++count; #endif til = 36; if (salt->H[cur_hash_idx].datlen-12 < til) til = salt->H[cur_hash_idx].datlen-12; for (; e < til;) { key0.u = pkzip_crc32 (key0.u, curDecryBuf[e]); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = pkzip_crc32 (key2.u, key1.c[KB2]); curDecryBuf[++e] = PKZ_MULT(*b++,key2); } if (!check_inflate_CODE1(curDecryBuf, til)) goto Failed_Bailout; #if (ZIP_DEBUG==2) fprintf (stderr, "CODE1 Pass=%s count = %u, found = %u\n", saved_key[idx], count, ++found); #endif } } #if USE_PKZIP_MAGIC // Ok, now see if we need to check sigs, or do a FULL inflate/crc check. if (!SigChecked && salt->H[cur_hash_idx].pSig->max_len) { int til = 180; if (salt->H[cur_hash_idx].datlen-12 < til) til = salt->H[cur_hash_idx].datlen-12; for (; e < til;) { key0.u = pkzip_crc32 (key0.u, curDecryBuf[e]); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = pkzip_crc32 (key2.u, key1.c[KB2]); curDecryBuf[++e] = PKZ_MULT(*b++,key2); } strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = Z_NULL; strm.avail_in = til; ret = inflateInit2(&strm, -15); /* 'raw', since we do not have gzip header, or gzip crc. .ZIP files are 'raw' implode data. */ if (ret != Z_OK) perror("Error, initializing the libz inflateInit2() system\n"); strm.next_in = curDecryBuf; strm.avail_out = sizeof(curInfBuf); strm.next_out = curInfBuf; ret = inflate(&strm, Z_SYNC_FLUSH); inflateEnd(&strm); if (ret != Z_OK) { // we need to handle zips smaller than sizeof curInfBuf. 
If we find a zip of this // size, the return is Z_STREAM_END, BUT things are fine. if (ret == Z_STREAM_END && salt->deCompLen == strm.total_out) ; // things are ok. else goto Failed_Bailout; } if (!strm.total_out) goto Failed_Bailout; ret = salt->H[cur_hash_idx].pSig->max_len; if (salt->H[cur_hash_idx].magic == 255) { if (!validate_ascii(curInfBuf, strm.total_out)) goto Failed_Bailout; } else { if (strm.total_out < ret) goto Failed_Bailout; if (!CheckSigs(curInfBuf, strm.total_out, salt->H[cur_hash_idx].pSig)) goto Failed_Bailout; } } #endif if (salt->H[cur_hash_idx].full_zip) { u8 inflateBufTmp[1024]; if (salt->compLen > 240 && salt->H[cur_hash_idx].datlen >= 200) { for (;e < 200;) { key0.u = pkzip_crc32 (key0.u, curDecryBuf[e]); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = pkzip_crc32 (key2.u, key1.c[KB2]); curDecryBuf[++e] = PKZ_MULT(*b++,key2); } strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = Z_NULL; strm.avail_in = e; ret = inflateInit2(&strm, -15); /* 'raw', since we do not have gzip header, or gzip crc. .ZIP files are 'raw' implode data. */ if (ret != Z_OK) perror("Error, initializing the libz inflateInit2() system\n"); strm.next_in = curDecryBuf; strm.avail_out = sizeof(inflateBufTmp); strm.next_out = inflateBufTmp; ret = inflate(&strm, Z_SYNC_FLUSH); inflateEnd(&strm); if (ret != Z_OK) { #if (ZIP_DEBUG==2) fprintf(stderr, "fail=%d fail2=%d tot=%lld\n", ++FAILED, FAILED2, ((long long)CNT)*_count); #endif goto Failed_Bailout; } } goto KnownSuccess; } } while(--cur_hash_count); /* We got a checksum HIT!!!! All hash checksums matched. */ /* We load the proper checksum value for the gethash */ KnownSuccess: ; chk[idx] = 1; continue; Failed_Bailout: ; /* We load the wrong checksum value for the gethash */ chk[idx] = 0; } /* clear the 'dirty' flag. Then on multiple different salt calls, we will not have to */ /* encrypt the passwords again. They will have already been loaded in the K12[] array. 
*/ dirty = 0; return _count; } struct fmt_main fmt_pkzip = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, #if FMT_MAIN_VERSION > 11 { NULL }, #endif tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
/* ===== annoylib_omp.h — start of concatenated Annoy (OpenMP) header ===== */
// To use OpenMP replace the content of annoylib.h to this file context #ifndef ANNOYLIB_H #define ANNOYLIB_H #include <stdio.h> #include <sys/stat.h> #ifndef _MSC_VER #include <unistd.h> #endif #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <fcntl.h> #include <stddef.h> #include <omp.h> #if defined(_MSC_VER) && _MSC_VER == 1500 typedef unsigned char uint8_t; typedef signed __int32 int32_t; typedef unsigned __int64 uint64_t; #else #include <stdint.h> #endif #if defined(_MSC_VER) || defined(__MINGW32__) #ifndef NOMINMAX #define NOMINMAX #endif #include "mman.h" #include <windows.h> #else #include <sys/mman.h> #endif #include <cerrno> #include <string.h> #include <math.h> #include <vector> #include <algorithm> #include <queue> #include <limits> #ifdef _MSC_VER // Needed for Visual Studio to disable runtime checks for mempcy #pragma runtime_checks("s", off) #endif // This allows others to supply their own logger / error printer without // requiring Annoy to import their headers. See RcppAnnoy for a use case. #ifndef __ERROR_PRINTER_OVERRIDE__ #define showUpdate(...) { fprintf(stderr, __VA_ARGS__ ); } #else #define showUpdate(...) 
{ __ERROR_PRINTER_OVERRIDE__( __VA_ARGS__ ); } #endif #ifndef _MSC_VER #define popcount __builtin_popcountll #else // See #293, #358 #define isnan(x) _isnan(x) #define popcount cole_popcount #endif #if !defined(NO_MANUAL_VECTORIZATION) && defined(__GNUC__) && (__GNUC__ >6) && defined(__AVX512F__) // See #402 #pragma message "Using 512-bit AVX instructions" #define USE_AVX512 #elif !defined(NO_MANUAL_VECTORIZATION) && defined(__AVX__) && defined (__SSE__) && defined(__SSE2__) && defined(__SSE3__) #pragma message "Using 128-bit AVX instructions" #define USE_AVX #else #pragma message "Using no AVX instructions" #endif #if defined(USE_AVX) || defined(USE_AVX512) #if defined(_MSC_VER) #include <intrin.h> #elif defined(__GNUC__) #include <x86intrin.h> #endif #endif #ifndef ANNOY_NODE_ATTRIBUTE #ifndef _MSC_VER #define ANNOY_NODE_ATTRIBUTE __attribute__((__packed__)) // TODO: this is turned on by default, but may not work for all architectures! Need to investigate. #else #define ANNOY_NODE_ATTRIBUTE #endif #endif using std::vector; using std::pair; using std::numeric_limits; using std::make_pair; inline void* remap_memory(void* _ptr, int _fd, size_t old_size, size_t new_size) { #ifdef __linux__ _ptr = mremap(_ptr, old_size, new_size, MREMAP_MAYMOVE); #else munmap(_ptr, old_size); #ifdef MAP_POPULATE _ptr = mmap(_ptr, new_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, _fd, 0); #else _ptr = mmap(_ptr, new_size, PROT_READ | PROT_WRITE, MAP_SHARED, _fd, 0); #endif #endif return _ptr; } namespace { template<typename S, typename Node> inline Node* get_node_ptr(const void* _nodes, const size_t _s, const S i) { return (Node*)((uint8_t *)_nodes + (_s * i)); } template<typename T> inline T dot(const T* x, const T* y, int f) { T s = 0; for (int z = 0; z < f; z++) { s += (*x) * (*y); x++; y++; } return s; } template<typename T> inline T manhattan_distance(const T* x, const T* y, int f) { T d = 0.0; for (int i = 0; i < f; i++) d += fabs(x[i] - y[i]); return d; } 
// Squared L2 distance between x and y; the sqrt happens later in each
// metric's normalized_distance(), so tree traversal can compare squares.
template<typename T>
inline T euclidean_distance(const T* x, const T* y, int f) {
  // Don't use dot-product: avoid catastrophic cancellation in #314.
  T d = 0.0;
  for (int i = 0; i < f; ++i) {
    const T tmp=*x - *y;
    d += tmp * tmp;
    ++x;
    ++y;
  }
  return d;
}

#ifdef USE_AVX
// Horizontal single sum of 256bit vector.
inline float hsum256_ps_avx(__m256 v) {
  const __m128 x128 = _mm_add_ps(_mm256_extractf128_ps(v, 1), _mm256_castps256_ps128(v));
  const __m128 x64 = _mm_add_ps(x128, _mm_movehl_ps(x128, x128));
  const __m128 x32 = _mm_add_ss(x64, _mm_shuffle_ps(x64, x64, 0x55));
  return _mm_cvtss_f32(x32);
}

// AVX specialization of dot: 8 floats per iteration, scalar tail for the
// remaining f % 8 components.
template<>
inline float dot<float>(const float* x, const float *y, int f) {
  float result = 0;
  if (f > 7) {
    __m256 d = _mm256_setzero_ps();
    for (; f > 7; f -= 8) {
      d = _mm256_add_ps(d, _mm256_mul_ps(_mm256_loadu_ps(x), _mm256_loadu_ps(y)));
      x += 8;
      y += 8;
    }
    // Sum all floats in dot register.
    result += hsum256_ps_avx(d);
  }
  // Don't forget the remaining values.
  for (; f > 0; f--) {
    result += *x * *y;
    x++;
    y++;
  }
  return result;
}

// AVX specialization of the L1 distance.
template<>
inline float manhattan_distance<float>(const float* x, const float* y, int f) {
  float result = 0;
  int i = f;
  if (f > 7) {
    __m256 manhattan = _mm256_setzero_ps();
    __m256 minus_zero = _mm256_set1_ps(-0.0f);
    for (; i > 7; i -= 8) {
      const __m256 x_minus_y = _mm256_sub_ps(_mm256_loadu_ps(x), _mm256_loadu_ps(y));
      const __m256 distance = _mm256_andnot_ps(minus_zero, x_minus_y); // Absolute value of x_minus_y (forces sign bit to zero)
      manhattan = _mm256_add_ps(manhattan, distance);
      x += 8;
      y += 8;
    }
    // Sum all floats in manhattan register.
    result = hsum256_ps_avx(manhattan);
  }
  // Don't forget the remaining values.
  for (; i > 0; i--) {
    result += fabsf(*x - *y);
    x++;
    y++;
  }
  return result;
}

// AVX specialization of the squared L2 distance.
template<>
inline float euclidean_distance<float>(const float* x, const float* y, int f) {
  float result=0;
  if (f > 7) {
    __m256 d = _mm256_setzero_ps();
    for (; f > 7; f -= 8) {
      const __m256 diff = _mm256_sub_ps(_mm256_loadu_ps(x), _mm256_loadu_ps(y));
      d = _mm256_add_ps(d, _mm256_mul_ps(diff, diff)); // no support for fmadd in AVX...
      x += 8;
      y += 8;
    }
    // Sum all floats in dot register.
    result = hsum256_ps_avx(d);
  }
  // Don't forget the remaining values.
  for (; f > 0; f--) {
    float tmp = *x - *y;
    result += tmp * tmp;
    x++;
    y++;
  }
  return result;
}
#endif

#ifdef USE_AVX512
// AVX-512 specialization of dot: 16 floats per iteration with fused
// multiply-add, scalar tail for the remainder.
template<>
inline float dot<float>(const float* x, const float *y, int f) {
  float result = 0;
  if (f > 15) {
    __m512 d = _mm512_setzero_ps();
    for (; f > 15; f -= 16) {
      //AVX512F includes FMA
      d = _mm512_fmadd_ps(_mm512_loadu_ps(x), _mm512_loadu_ps(y), d);
      x += 16;
      y += 16;
    }
    // Sum all floats in dot register.
    result += _mm512_reduce_add_ps(d);
  }
  // Don't forget the remaining values.
  for (; f > 0; f--) {
    result += *x * *y;
    x++;
    y++;
  }
  return result;
}

// AVX-512 specialization of the L1 distance.
template<>
inline float manhattan_distance<float>(const float* x, const float* y, int f) {
  float result = 0;
  int i = f;
  if (f > 15) {
    __m512 manhattan = _mm512_setzero_ps();
    for (; i > 15; i -= 16) {
      const __m512 x_minus_y = _mm512_sub_ps(_mm512_loadu_ps(x), _mm512_loadu_ps(y));
      manhattan = _mm512_add_ps(manhattan, _mm512_abs_ps(x_minus_y));
      x += 16;
      y += 16;
    }
    // Sum all floats in manhattan register.
    result = _mm512_reduce_add_ps(manhattan);
  }
  // Don't forget the remaining values.
  for (; i > 0; i--) {
    result += fabsf(*x - *y);
    x++;
    y++;
  }
  return result;
}

// AVX-512 specialization of the squared L2 distance.
template<>
inline float euclidean_distance<float>(const float* x, const float* y, int f) {
  float result=0;
  if (f > 15) {
    __m512 d = _mm512_setzero_ps();
    for (; f > 15; f -= 16) {
      const __m512 diff = _mm512_sub_ps(_mm512_loadu_ps(x), _mm512_loadu_ps(y));
      d = _mm512_fmadd_ps(diff, diff, d);
      x += 16;
      y += 16;
    }
    // Sum all floats in dot register.
    result = _mm512_reduce_add_ps(d);
  }
  // Don't forget the remaining values.
  for (; f > 0; f--) {
    float tmp = *x - *y;
    result += tmp * tmp;
    x++;
    y++;
  }
  return result;
}
#endif

// Euclidean norm |v| = sqrt(v . v).
template<typename T>
inline T get_norm(T* v, int f) {
  return sqrt(dot(v, v, f));
}

// Pick two pseudo-centroids p and q for splitting a node's point set.
// Used by every metric's create_split(); the hyperplane is later taken
// through p - q.  When cosine is true, points are normalized first.
template<typename T, typename Random, typename Distance, typename Node>
inline void two_means(const vector<Node*>& nodes, int f, Random& random, bool cosine, Node* p, Node* q) {
  /*
    This algorithm is a huge heuristic. Empirically it works really well, but I
    can't motivate it well. The basic idea is to keep two centroids and assign
    points to either one of them. We weight each centroid by the number of points
    assigned to it, so to balance it.
  */
  static int iteration_steps = 200;  // NOTE(review): effectively a constant; could be 'static const'
  size_t count = nodes.size();
  // Two distinct random seed points.
  size_t i = random.index(count);
  size_t j = random.index(count-1);
  j += (j >= i); // ensure that i != j
  Distance::template copy_node<T, Node>(p, nodes[i], f);
  Distance::template copy_node<T, Node>(q, nodes[j], f);
  if (cosine) { Distance::template normalize<T, Node>(p, f); Distance::template normalize<T, Node>(q, f); }
  Distance::init_node(p, f);
  Distance::init_node(q, f);
  // ic/jc count the points assigned to each centroid (centroid weights).
  int ic = 1, jc = 1;
  for (int l = 0; l < iteration_steps; l++) {
    size_t k = random.index(count);
    T di = ic * Distance::distance(p, nodes[k], f), dj = jc * Distance::distance(q, nodes[k], f);
    T norm = cosine ?
get_norm(nodes[k]->v, f) : 1.0;
    // Skip degenerate (zero-norm) points when normalizing.
    if (!(norm > T(0))) {
      continue;
    }
    // Assign the sampled point to the closer centroid and fold it into
    // that centroid's running (weighted) mean.
    if (di < dj) {
      for (int z = 0; z < f; z++)
        p->v[z] = (p->v[z] * ic + nodes[k]->v[z] / norm) / (ic + 1);
      Distance::init_node(p, f);
      ic++;
    } else if (dj < di) {
      for (int z = 0; z < f; z++)
        q->v[z] = (q->v[z] * jc + nodes[k]->v[z] / norm) / (jc + 1);
      Distance::init_node(q, f);
      jc++;
    }
  }
}
} // namespace

// Common no-op / shared behavior for all metric policy structs below.
// Individual metrics override what they need.
struct Base {
  template<typename T, typename S, typename Node>
  static inline void preprocess(void* nodes, size_t _s, const S node_count, const int f) {
    // Override this in specific metric structs below if you need to do any pre-processing
    // on the entire set of nodes passed into this index.
  }

  template<typename Node>
  static inline void zero_value(Node* dest) {
    // Initialize any fields that require sane defaults within this node.
  }

  // Copy the vector payload of one node into another.
  template<typename T, typename Node>
  static inline void copy_node(Node* dest, const Node* source, const int f) {
    memcpy(dest->v, source->v, f * sizeof(T));
  }

  // Scale the node's vector to unit length (no-op for zero vectors).
  template<typename T, typename Node>
  static inline void normalize(Node* node, int f) {
    T norm = get_norm(node->v, f);
    if (norm > 0) {
      for (int z = 0; z < f; z++)
        node->v[z] /= norm;
    }
  }
};

// Cosine/angular metric policy.
struct Angular : Base {
  template<typename S, typename T>
  struct ANNOY_NODE_ATTRIBUTE Node {
    /*
     * We store a binary tree where each node has two things
     * - A vector associated with it
     * - Two children
     * All nodes occupy the same amount of memory
     * All nodes with n_descendants == 1 are leaf nodes.
     * A memory optimization is that for nodes with 2 <= n_descendants <= K,
     * we skip the vector. Instead we store a list of all descendants. K is
     * determined by the number of items that fits in the space of the vector.
     * For nodes with n_descendants == 1 the vector is a data point.
     * For nodes with n_descendants > K the vector is the normal of the split plane.
     * Note that we can't really do sizeof(node<T>) because we cheat and allocate
     * more memory to be able to fit the vector outside
     */
    S n_descendants;
    union {
      S children[2]; // Will possibly store more than 2
      T norm;
    };
    T v[1]; // We let this one overflow intentionally. Need to allocate at least 1 to make GCC happy
  };

  // Angular distance as 2 - 2*cos(x, y); falls back to 2.0 when either
  // vector has zero norm.
  template<typename S, typename T>
  static inline T distance(const Node<S, T>* x, const Node<S, T>* y, int f) {
    // want to calculate (a/|a| - b/|b|)^2
    // = a^2 / a^2 + b^2 / b^2 - 2ab/|a||b|
    // = 2 - 2cos
    T pp = x->norm ? x->norm : dot(x->v, x->v, f); // For backwards compatibility reasons, we need to fall back and compute the norm here
    T qq = y->norm ? y->norm : dot(y->v, y->v, f);
    T pq = dot(x->v, y->v, f);
    T ppqq = pp * qq;
    if (ppqq > 0) return 2.0 - 2.0 * pq / sqrt(ppqq);
    else return 2.0; // cos is 0
  }

  // Signed distance of y from the split hyperplane stored in n.
  template<typename S, typename T>
  static inline T margin(const Node<S, T>* n, const T* y, int f) {
    return dot(n->v, y, f);
  }

  // Which side of the hyperplane y falls on; ties are broken randomly.
  template<typename S, typename T, typename Random>
  static inline bool side(const Node<S, T>* n, const T* y, int f, Random& random) {
    T dot = margin(n, y, f);
    if (dot != 0)
      return (dot > 0);
    else
      return random.flip();
  }

  // Build a split plane: normal is the (normalized) difference of the two
  // centroids produced by two_means with cosine normalization.
  template<typename S, typename T, typename Random>
  static inline void create_split(const vector<Node<S, T>*>& nodes, int f, size_t s, Random& random, Node<S, T>* n) {
    Node<S, T>* p = (Node<S, T>*)malloc(s); // TODO: avoid
    Node<S, T>* q = (Node<S, T>*)malloc(s); // TODO: avoid
    two_means<T, Random, Angular, Node<S, T> >(nodes, f, random, true, p, q);
    for (int z = 0; z < f; z++)
      n->v[z] = p->v[z] - q->v[z];
    Base::normalize<T, Node<S, T> >(n, f);
    free(p);
    free(q);
  }

  template<typename T>
  static inline T normalized_distance(T distance) {
    // Used when requesting distances from Python layer
    // Turns out sometimes the squared distance is -0.0
    // so we have to make sure it's a positive number.
           typename Node>
  // Normalize treating dot_factor as an extra (f+1)-th component.
  static inline void normalize(Node* node, int f) {
    T norm = sqrt(dot(node->v, node->v, f) + pow(node->dot_factor, 2));
    if (norm > 0) {
      for (int z = 0; z < f; z++)
        node->v[z] /= norm;
      node->dot_factor /= norm;
    }
  }

  // Hyperplane margin including the extra-dimension contribution.
  template<typename S, typename T>
  static inline T margin(const Node<S, T>* n, const T* y, int f) {
    return dot(n->v, y, f) + (n->dot_factor * n->dot_factor);
  }

  template<typename S, typename T, typename Random>
  static inline bool side(const Node<S, T>* n, const T* y, int f, Random& random) {
    T dot = margin(n, y, f);
    if (dot != 0)
      return (dot > 0);
    else
      return random.flip();
  }

  // distance() stores the negated inner product, so undo the sign here.
  template<typename T>
  static inline T normalized_distance(T distance) {
    return -distance;
  }

  template<typename T, typename S, typename Node>
  static inline void preprocess(void* nodes, size_t _s, const S node_count, const int f) {
    // This uses a method from Microsoft Research for transforming inner product spaces to cosine/angular-compatible spaces.
    // (Bachrach et al., 2014, see https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/XboxInnerProduct.pdf)

    // Step one: compute the norm of each vector and store that in its extra dimension (f-1)
    for (S i = 0; i < node_count; i++) {
      Node* node = get_node_ptr<S, Node>(nodes, _s, i);
      T norm = sqrt(dot(node->v, node->v, f));
      if (isnan(norm)) norm = 0;
      node->dot_factor = norm;
    }

    // Step two: find the maximum norm
    T max_norm = 0;
    for (S i = 0; i < node_count; i++) {
      Node* node = get_node_ptr<S, Node>(nodes, _s, i);
      if (node->dot_factor > max_norm) {
        max_norm = node->dot_factor;
      }
    }

    // Step three: set each vector's extra dimension to sqrt(max_norm^2 - norm^2)
    for (S i = 0; i < node_count; i++) {
      Node* node = get_node_ptr<S, Node>(nodes, _s, i);
      T node_norm = node->dot_factor;
      T dot_factor = sqrt(pow(max_norm, static_cast<T>(2.0)) - pow(node_norm, static_cast<T>(2.0)));
      if (isnan(dot_factor)) dot_factor = 0;
      node->dot_factor = dot_factor;
    }
  }
};

// Bitwise Hamming metric: T is an unsigned integer chunk type and v holds
// f chunks of packed bits.
struct Hamming : Base {
  template<typename S, typename T>
  struct ANNOY_NODE_ATTRIBUTE Node {
    S n_descendants;
    S children[2];
    T v[1];
  };

  static const size_t max_iterations = 20;

  template<typename T>
  static inline T pq_distance(T distance, T margin, int child_nr) {
    return distance - (margin != (unsigned int) child_nr);
  }

  template<typename T>
  static inline T pq_initial_value() {
    return numeric_limits<T>::max();
  }

  template<typename T>
  static inline int cole_popcount(T v) {
    // Note: Only used with MSVC 9, which lacks intrinsics and fails to
    // calculate std::bitset::count for v > 32bit. Uses the generalized
    // approach by Eric Cole.
    // See https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSet64
    v = v - ((v >> 1) & (T)~(T)0/3);
    v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3);
    v = (v + (v >> 4)) & (T)~(T)0/255*15;
    return (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * 8;
  }

  // Hamming distance: popcount of the XOR across all chunks.
  template<typename S, typename T>
  static inline T distance(const Node<S, T>* x, const Node<S, T>* y, int f) {
    size_t dist = 0;
    for (int i = 0; i < f; i++) {
      dist += popcount(x->v[i] ^ y->v[i]);
    }
    return dist;
  }

  // The "plane" here is a single bit position, stored in n->v[0];
  // margin tests that bit in y.
  template<typename S, typename T>
  static inline bool margin(const Node<S, T>* n, const T* y, int f) {
    static const size_t n_bits = sizeof(T) * 8;
    T chunk = n->v[0] / n_bits;
    return (y[chunk] & (static_cast<T>(1) << (n_bits - 1 - (n->v[0] % n_bits)))) != 0;
  }

  template<typename S, typename T, typename Random>
  static inline bool side(const Node<S, T>* n, const T* y, int f, Random& random) {
    return margin(n, y, f);
  }

  // Pick a bit position that actually separates the point set: first try
  // random positions, then fall back to a linear scan (below).
  template<typename S, typename T, typename Random>
  static inline void create_split(const vector<Node<S, T>*>& nodes, int f, size_t s, Random& random, Node<S, T>* n) {
    size_t cur_size = 0;
    size_t i = 0;
    int dim = f * 8 * sizeof(T);
    for (; i < max_iterations; i++) {
      // choose random position to split at
      n->v[0] = random.index(dim);
      cur_size = 0;
      for (typename vector<Node<S, T>*>::const_iterator it = nodes.begin(); it != nodes.end(); ++it) {
        if (margin(n, (*it)->v, f)) {
          cur_size++;
        }
      }
      if (cur_size > 0 && cur_size < nodes.size()) {
        break;
      }
    }
    // brute-force search for splitting coordinate
    if (i == max_iterations) {
      int j = 0;
      for (; j < dim; j++) {
        n->v[0] = j;
        cur_size = 0;
        for (typename vector<Node<S, T>*>::const_iterator it = nodes.begin(); it != nodes.end(); ++it) {
          if (margin(n, (*it)->v, f)) {
            cur_size++;
          }
        }
        if (cur_size > 0 && cur_size < nodes.size()) {
          break;
        }
      }
    }
  }

  template<typename T>
  static inline T normalized_distance(T distance) {
    return distance;
  }

  template<typename S, typename T>
  static inline void init_node(Node<S, T>* n, int f) {
  }

  static const char* name() {
    return "hamming";
  }
};

// Shared pieces for Lp-style metrics: the split plane carries an explicit
// offset term 'a' in addition to its normal vector.
struct Minkowski : Base {
  template<typename S, typename T>
  struct ANNOY_NODE_ATTRIBUTE Node {
    S n_descendants;
    T a; // need an extra constant term to determine the offset of the plane
    S children[2];
    T v[1];
  };

  template<typename S, typename T>
  static inline T margin(const Node<S, T>* n, const T* y, int f) {
    return n->a + dot(n->v, y, f);
  }

  template<typename S, typename T, typename Random>
  static inline bool side(const Node<S, T>* n, const T* y, int f, Random& random) {
    T dot = margin(n, y, f);
    if (dot != 0)
      return (dot > 0);
    else
      return random.flip();
  }

  template<typename T>
  static inline T pq_distance(T distance, T margin, int child_nr) {
    if (child_nr == 0)
      margin = -margin;
    return std::min(distance, margin);
  }

  template<typename T>
  static inline T pq_initial_value() {
    return numeric_limits<T>::infinity();
  }
};

// L2 metric (distance() returns the squared distance; sqrt in normalized_distance).
struct Euclidean : Minkowski {
  template<typename S, typename T>
  static inline T distance(const Node<S, T>* x, const Node<S, T>* y, int f) {
    return euclidean_distance(x->v, y->v, f);
  }

  // Split plane through the midpoint of the two centroids, perpendicular
  // to their difference.
  template<typename S, typename T, typename Random>
  static inline void create_split(const vector<Node<S, T>*>& nodes, int f, size_t s, Random& random, Node<S, T>* n) {
    Node<S, T>* p = (Node<S, T>*)malloc(s); // TODO: avoid
    Node<S, T>* q = (Node<S, T>*)malloc(s); // TODO: avoid
    two_means<T, Random, Euclidean, Node<S, T> >(nodes, f, random, false, p, q);
    for (int z = 0; z < f; z++)
      n->v[z] = p->v[z] - q->v[z];
    Base::normalize<T, Node<S, T> >(n, f);
    n->a = 0.0;
    for (int z = 0; z < f; z++)
      n->a += -n->v[z] * (p->v[z] + q->v[z]) / 2;
    free(p);
    free(q);
  }

  template<typename T>
  static inline T normalized_distance(T distance) {
    return sqrt(std::max(distance, T(0)));
  }

  template<typename S, typename T>
  static inline void init_node(Node<S, T>* n, int f) {
  }

  static const char* name() {
    return "euclidean";
  }
};

// L1 metric; split construction mirrors Euclidean's midpoint plane.
struct Manhattan : Minkowski {
  template<typename S, typename T>
  static inline T distance(const Node<S, T>* x, const Node<S, T>* y, int f) {
    return manhattan_distance(x->v, y->v, f);
  }

  template<typename S, typename T, typename Random>
  static inline void create_split(const vector<Node<S, T>*>& nodes, int f, size_t s, Random& random, Node<S, T>* n) {
    Node<S, T>* p = (Node<S, T>*)malloc(s); // TODO: avoid
    Node<S, T>* q = (Node<S, T>*)malloc(s); // TODO: avoid
    two_means<T, Random, Manhattan, Node<S, T> >(nodes, f, random, false, p, q);
    for (int z = 0; z < f; z++)
      n->v[z] = p->v[z] - q->v[z];
    Base::normalize<T, Node<S, T> >(n, f);
    n->a = 0.0;
    for (int z = 0; z < f; z++)
      n->a += -n->v[z] * (p->v[z] + q->v[z]) / 2;
    free(p);
    free(q);
  }

  template<typename T>
  static inline T normalized_distance(T distance) {
    return std::max(distance, T(0));
  }

  template<typename S, typename T>
  static inline void init_node(Node<S, T>* n, int f) {
  }

  static const char* name() {
    return "manhattan";
  }
};

// Abstract interface implemented by AnnoyIndex (below); lets callers hold
// an index without knowing the metric/random template parameters.
template<typename S, typename T>
class AnnoyIndexInterface {
 public:
  virtual ~AnnoyIndexInterface() {};
  virtual bool add_item(S item, const T* w, char** error=NULL) = 0;
  virtual bool build(int q, char** error=NULL) = 0;
  virtual bool unbuild(char** error=NULL) = 0;
  virtual bool save(const char* filename, bool prefault=false, char** error=NULL) = 0;
  virtual void unload() = 0;
  virtual bool load(const char* filename, bool prefault=false, char** error=NULL) = 0;
  virtual T get_distance(S i, S j) const = 0;
  virtual void get_nns_by_item(S item, size_t n, size_t search_k, vector<S>* result, vector<T>* distances) const =
0; virtual void get_nns_by_vector(const T* w, size_t n, size_t search_k, vector<S>* result, vector<T>* distances) const = 0; virtual S get_n_items() const = 0; virtual S get_n_trees() const = 0; virtual void verbose(bool v) = 0; virtual void get_item(S item, T* v) const = 0; virtual void set_seed(int q) = 0; virtual bool on_disk_build(const char* filename, char** error=NULL) = 0; }; template<typename S, typename T, typename Distance, typename Random> class AnnoyIndex : public AnnoyIndexInterface<S, T> { /* * We use random projection to build a forest of binary trees of all items. * Basically just split the hyperspace into two sides by a hyperplane, * then recursively split each of those subtrees etc. * We create a tree like this q times. The default q is determined automatically * in such a way that we at most use 2x as much memory as the vectors take. */ public: typedef Distance D; typedef typename D::template Node<S, T> Node; protected: const int _f; size_t _s; S _n_items; Random _random; void* _nodes; // Could either be mmapped, or point to a memory buffer that we reallocate S _n_nodes; S _nodes_size; vector<S> _roots; S _K; bool _loaded; bool _verbose; int _fd; bool _on_disk; bool _built; public: AnnoyIndex(int f) : _f(f), _random() { _s = offsetof(Node, v) + _f * sizeof(T); // Size of each node _verbose = false; _built = false; _K = (S) (((size_t) (_s - offsetof(Node, children))) / sizeof(S)); // Max number of descendants to fit into node reinitialize(); // Reset everything } ~AnnoyIndex() { unload(); } int get_f() const { return _f; } bool add_item(S item, const T* w, char** error=NULL) { return add_item_impl(item, w, error); } template<typename W> bool add_item_impl(S item, const W& w, char** error=NULL) { if (_loaded) { showUpdate("You can't add an item to a loaded index\n"); if (error) *error = (char *)"You can't add an item to a loaded index"; return false; } _allocate_size(item + 1); Node* n = _get(item); D::zero_value(n); n->children[0] = 0; 
n->children[1] = 0; n->n_descendants = 1; for (int z = 0; z < _f; z++) n->v[z] = w[z]; D::init_node(n, _f); if (item >= _n_items) _n_items = item + 1; return true; } bool on_disk_build(const char* file, char** error=NULL) { _on_disk = true; _fd = open(file, O_RDWR | O_CREAT | O_TRUNC, (int) 0600); if (_fd == -1) { showUpdate("Error: file descriptor is -1\n"); if (error) *error = strerror(errno); _fd = 0; return false; } _nodes_size = 1; if (ftruncate(_fd, _s * _nodes_size) == -1) { showUpdate("Error truncating file: %s\n", strerror(errno)); if (error) *error = strerror(errno); return false; } #ifdef MAP_POPULATE _nodes = (Node*) mmap(0, _s * _nodes_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, _fd, 0); #else _nodes = (Node*) mmap(0, _s * _nodes_size, PROT_READ | PROT_WRITE, MAP_SHARED, _fd, 0); #endif return true; } struct ThreadNodes { ThreadNodes() : _n_nodes(0), _nodes_size(0), _nodes(NULL) {} // actual num nodes added so far S _n_nodes; // buffer of num nodes created S _nodes_size; // nodes buffer void* _nodes; // roots in this thread vector<S> _roots; }; bool build(int q, char** error=NULL) { if (_loaded) { showUpdate("You can't build a loaded index\n"); if (error) *error = (char *)"You can't build a loaded index"; return false; } if (_built) { showUpdate("You can't build a built index\n"); if (error) *error = (char *)"You can't build a built index"; return false; } D::template preprocess<T, S, Node>(_nodes, _s, _n_items, _f); _n_nodes = _n_items; if (q != -1) { int num_threads = 16; vector<ThreadNodes> threads_buffer(num_threads); #pragma omp parallel for num_threads(num_threads) for (int tree = 0; tree < q; tree++) { int thread_id = omp_get_thread_num(); ThreadNodes& thread_nodes = threads_buffer[thread_id]; if (_verbose) showUpdate("pass %zd...\n", tree); //showUpdate("pass %zd with parallel for...\n", tree); vector<S> indices; for (S i = 0; i < _n_items; i++) { if (_get(i)->n_descendants >= 1) // Issue #223 indices.push_back(i); } S root = 
_make_tree(indices, true, thread_nodes); thread_nodes._roots.push_back(root); } // combine the results and recalculate the node index { int n_all_nodes = _n_nodes; for (int i = 0; i < threads_buffer.size(); i++) { ThreadNodes& thread_nodes = threads_buffer[i]; for (int j = 0; j < thread_nodes._n_nodes; j++) { Node* node = (Node*)((char*)thread_nodes._nodes + _s * j); // leaf node if (node->n_descendants <= _K) { for (int k = 0; k < node->n_descendants; k++) { // add current n_all_nodes to indices as we will be combining buffers into one node->children[k] += n_all_nodes; } } else { // splitting node // add current n_all_nodes to indices as we will be combining buffers into one node->children[0] += n_all_nodes; node->children[1] += n_all_nodes; } } for (int j = 0; j < thread_nodes._roots.size(); j++) { // add current n_all_nodes to indices as we will be combining buffers into one thread_nodes._roots[j] += n_all_nodes; _roots.push_back(thread_nodes._roots[j]); } n_all_nodes += thread_nodes._n_nodes; } // allocate the one combined buffer _allocate_size(n_all_nodes); // copy from thread buffer into combined buffer int bytes_offset = _s * _n_nodes; for (int i = 0; i < threads_buffer.size(); i++) { ThreadNodes& thread_nodes = threads_buffer[i]; int thread_bytes = thread_nodes._n_nodes * _s; memcpy((char*)_nodes + bytes_offset, thread_nodes._nodes, thread_bytes); bytes_offset += thread_bytes; } _n_nodes = n_all_nodes; } } else { while (_n_nodes < _n_items * 2) { if (_verbose) showUpdate("pass %zd...\n", _roots.size()); //showUpdate("pass %zd...\n", _roots.size()); vector<S> indices; for (S i = 0; i < _n_items; i++) { if (_get(i)->n_descendants >= 1) // Issue #223 indices.push_back(i); } _roots.push_back(_make_tree(indices, true)); } } // Also, copy the roots into the last segment of the array // This way we can load them faster without reading the whole file _allocate_size(_n_nodes + (S)_roots.size()); for (size_t i = 0; i < _roots.size(); i++) memcpy(_get(_n_nodes + 
(S)i), _get(_roots[i]), _s); _n_nodes += _roots.size(); if (_verbose) showUpdate("has %d nodes\n", _n_nodes); if (_on_disk) { _nodes = remap_memory(_nodes, _fd, _s * _nodes_size, _s * _n_nodes); if (ftruncate(_fd, _s * _n_nodes)) { // TODO: this probably creates an index in a corrupt state... not sure what to do showUpdate("Error truncating file: %s\n", strerror(errno)); if (error) *error = strerror(errno); return false; } _nodes_size = _n_nodes; } _built = true; return true; } bool unbuild(char** error=NULL) { if (_loaded) { showUpdate("You can't unbuild a loaded index\n"); if (error) *error = (char *)"You can't unbuild a loaded index"; return false; } _roots.clear(); _n_nodes = _n_items; _built = false; return true; } bool save(const char* filename, bool prefault=false, char** error=NULL) { if (!_built) { showUpdate("You can't save an index that hasn't been built\n"); if (error) *error = (char *)"You can't save an index that hasn't been built"; return false; } if (_on_disk) { return true; } else { // Delete file if it already exists (See issue #335) unlink(filename); printf("path: %s\n", filename); FILE *f = fopen(filename, "wb"); if (f == NULL) { showUpdate("Unable to open: %s\n", strerror(errno)); if (error) *error = strerror(errno); return false; } if (fwrite(_nodes, _s, _n_nodes, f) != (size_t) _n_nodes) { showUpdate("Unable to write: %s\n", strerror(errno)); if (error) *error = strerror(errno); return false; } if (fclose(f) == EOF) { showUpdate("Unable to close: %s\n", strerror(errno)); if (error) *error = strerror(errno); return false; } unload(); return load(filename, prefault, error); } } void reinitialize() { _fd = 0; _nodes = NULL; _loaded = false; _n_items = 0; _n_nodes = 0; _nodes_size = 0; _on_disk = false; _roots.clear(); } void unload() { if (_on_disk && _fd) { close(_fd); munmap(_nodes, _s * _nodes_size); } else { if (_fd) { // we have mmapped data close(_fd); munmap(_nodes, _n_nodes * _s); } else if (_nodes) { // We have heap allocated data 
free(_nodes); } } reinitialize(); if (_verbose) showUpdate("unloaded\n"); } bool load(const char* filename, bool prefault=false, char** error=NULL) { _fd = open(filename, O_RDONLY, (int)0400); if (_fd == -1) { showUpdate("Error: file descriptor is -1\n"); if (error) *error = strerror(errno); _fd = 0; return false; } off_t size = lseek(_fd, 0, SEEK_END); if (size == -1) { showUpdate("lseek returned -1\n"); if (error) *error = strerror(errno); return false; } else if (size == 0) { showUpdate("Size of file is zero\n"); if (error) *error = (char *)"Size of file is zero"; return false; } else if (size % _s) { // Something is fishy with this index! showUpdate("Error: index size %zu is not a multiple of vector size %zu\n", (size_t)size, _s); if (error) *error = (char *)"Index size is not a multiple of vector size"; return false; } int flags = MAP_SHARED; if (prefault) { #ifdef MAP_POPULATE flags |= MAP_POPULATE; #else showUpdate("prefault is set to true, but MAP_POPULATE is not defined on this platform"); #endif } _nodes = (Node*)mmap(0, size, PROT_READ, flags, _fd, 0); _n_nodes = (S)(size / _s); // Find the roots by scanning the end of the file and taking the nodes with most descendants _roots.clear(); S m = -1; for (S i = _n_nodes - 1; i >= 0; i--) { S k = _get(i)->n_descendants; if (m == -1 || k == m) { _roots.push_back(i); m = k; } else { break; } } // hacky fix: since the last root precedes the copy of all roots, delete it if (_roots.size() > 1 && _get(_roots.front())->children[0] == _get(_roots.back())->children[0]) _roots.pop_back(); _loaded = true; _built = true; _n_items = m; if (_verbose) showUpdate("found %lu roots with degree %d\n", _roots.size(), m); return true; } T get_distance(S i, S j) const { return D::normalized_distance(D::distance(_get(i), _get(j), _f)); } void get_nns_by_item(S item, size_t n, size_t search_k, vector<S>* result, vector<T>* distances) const { // TODO: handle OOB const Node* m = _get(item); _get_all_nns(m->v, n, search_k, result, 
distances); }  // (tail of the preceding get_nns_by_item overload — continued from above)

  // Find the n approximate nearest neighbors of an arbitrary query vector w.
  void get_nns_by_vector(const T* w, size_t n, size_t search_k, vector<S>* result, vector<T>* distances) const {
    _get_all_nns(w, n, search_k, result, distances);
  }
  // Number of items added to the index.
  S get_n_items() const {
    return _n_items;
  }
  // Number of trees built (roots currently held).
  S get_n_trees() const {
    return _roots.size();
  }
  void verbose(bool v) {
    _verbose = v;
  }
  // Copy item's stored vector (_f components) into caller-provided v.
  void get_item(S item, T* v) const {
    // TODO: handle OOB
    Node* m = _get(item);
    memcpy(v, m->v, (_f) * sizeof(T));
  }
  void set_seed(int seed) {
    _random.set_seed(seed);
  }
 protected:
  // Grow the shared node arena so that at least n node slots exist.
  // Growth is geometric (factor 1.3) to amortize reallocation cost.
  void _allocate_size(S n) {
    if (n > _nodes_size) {
      const double reallocation_factor = 1.3;
      S new_nodes_size = std::max(n, (S) ((_nodes_size + 1) * reallocation_factor));
      void *old = _nodes;

      if (_on_disk) {
        // On-disk index: extend the backing file, then remap it.
        int rc = ftruncate(_fd, _s * new_nodes_size);
        if (_verbose && rc) showUpdate("File truncation error\n");
        _nodes = remap_memory(_nodes, _fd, _s * _nodes_size, _s * new_nodes_size);
      } else {
        _nodes = realloc(_nodes, _s * new_nodes_size);
        // Zero only the newly added region; existing nodes are preserved.
        memset((char *) _nodes + (_nodes_size * _s) / sizeof(char), 0, (new_nodes_size - _nodes_size) * _s);
      }

      _nodes_size = new_nodes_size;
      if (_verbose) showUpdate("Reallocating to %d nodes: old_address=%p, new_address=%p\n", new_nodes_size, old, _nodes);
    }
  }
  // Per-thread variant of _allocate_size, growing a thread-local arena.
  void _allocate_size(S n, ThreadNodes& thread_nodes) {
    if (n > thread_nodes._nodes_size) {
      const double reallocation_factor = 1.3;
      S new_nodes_size = std::max(n, (S) ((thread_nodes._nodes_size + 1) * reallocation_factor));
      void *old = thread_nodes._nodes;

      // Don't support on disk for multi thread tree building now
      // if (_on_disk) {
      //   int rc = ftruncate(_fd, _s * new_nodes_size);
      //   if (_verbose && rc) showUpdate("File truncation error\n");
      //   _nodes = remap_memory(_nodes, _fd, _s * _nodes_size, _s * new_nodes_size);
      // } else {
      thread_nodes._nodes = realloc(thread_nodes._nodes, _s * new_nodes_size);
      // Zero only the newly added region of the thread-local arena.
      memset((char *) thread_nodes._nodes + (thread_nodes._nodes_size * _s) / sizeof(char), 0, (new_nodes_size - thread_nodes._nodes_size) * _s);
      //}

      thread_nodes._nodes_size = new_nodes_size;
      if (_verbose)
        showUpdate("Reallocating to %d nodes: old_address=%p, new_address=%p\n", new_nodes_size, old, thread_nodes._nodes);
    }
  }
  // Node i lives at byte offset i * _s in the arena (_s = per-node size).
  inline Node* _get(const S i) const {
    return get_node_ptr<S, Node>(_nodes, _s, i);
  }
  inline Node* _get(const S i, ThreadNodes& thread_nodes) const {
    return get_node_ptr<S, Node>(thread_nodes._nodes, _s, i);
  }
  // Recursively build a tree over `indices` into the thread-local arena;
  // returns the index of the created (sub)tree root node.
  S _make_tree(const vector<S >& indices, bool is_root, ThreadNodes& thread_nodes) {
    // The basic rule is that if we have <= _K items, then it's a leaf node, otherwise it's a split node.
    // There's some regrettable complications caused by the problem that root nodes have to be "special":
    // 1. We identify root nodes by the arguable logic that _n_items == n->n_descendants, regardless of how many descendants they actually have
    // 2. Root nodes with only 1 child need to be a "dummy" parent
    // 3. Due to the _n_items "hack", we need to be careful with the cases where _n_items <= _K or _n_items > _K
    if (indices.size() == 1 && !is_root)
      return indices[0];

    if (indices.size() <= (size_t)_K && (!is_root || (size_t)_n_items <= (size_t)_K || indices.size() == 1)) {
      _allocate_size(thread_nodes._n_nodes + 1, thread_nodes);
      S item = thread_nodes._n_nodes++;
      Node* m = _get(item, thread_nodes);
      m->n_descendants = is_root ? _n_items : (S)indices.size();

      // Using std::copy instead of a loop seems to resolve issues #3 and #13,
      // probably because gcc 4.8 goes overboard with optimizations.
      // Using memcpy instead of std::copy for MSVC compatibility. #235
      // Only copy when necessary to avoid crash in MSVC 9. #293
      if (!indices.empty())
        memcpy(m->children, &indices[0], indices.size() * sizeof(S));
      return item;
    }

    vector<Node*> children;
    for (size_t i = 0; i < indices.size(); i++) {
      S j = indices[i];
      Node* n = _get(j);
      if (n)
        children.push_back(n);
    }

    Node* m = (Node*)malloc(_s); // TODO: avoid
    D::create_split(children, _f, _s, _random, m);

    vector<S> children_indices[2];
    for (size_t i = 0; i < indices.size(); i++) {
      S j = indices[i];
      Node* n = _get(j);
      if (n) {
        bool side = D::side(m, n->v, _f, _random);
        children_indices[side].push_back(j);
      } else {
        showUpdate("No node for index %d?\n", j);
      }
    }

    // If we didn't find a hyperplane, just randomize sides as a last option
    while (children_indices[0].size() == 0 || children_indices[1].size() == 0) {
      if (_verbose)
        showUpdate("\tNo hyperplane found (left has %ld children, right has %ld children)\n", children_indices[0].size(), children_indices[1].size());
      if (_verbose && indices.size() > 100000)
        showUpdate("Failed splitting %lu items\n", indices.size());

      children_indices[0].clear();
      children_indices[1].clear();

      // Set the vector to 0.0
      for (int z = 0; z < _f; z++)
        m->v[z] = 0.0;

      for (size_t i = 0; i < indices.size(); i++) {
        S j = indices[i];
        // Just randomize...
        children_indices[_random.flip()].push_back(j);
      }
    }

    int flip = (children_indices[0].size() > children_indices[1].size());

    m->n_descendants = is_root ? _n_items : (S)indices.size();
    for (int side = 0; side < 2; side++) {
      // run _make_tree for the smallest child first (for cache locality)
      m->children[side^flip] = _make_tree(children_indices[side^flip], false, thread_nodes);
    }

    _allocate_size(thread_nodes._n_nodes + 1, thread_nodes);
    S item = thread_nodes._n_nodes++;
    memcpy(_get(item, thread_nodes), m, _s);
    free(m);

    return item;
  }
  // Single-threaded tree builder over the shared arena; logic mirrors the
  // ThreadNodes overload above (the two are intentionally kept in sync).
  S _make_tree(const vector<S >& indices, bool is_root) {
    // The basic rule is that if we have <= _K items, then it's a leaf node, otherwise it's a split node.
    // There's some regrettable complications caused by the problem that root nodes have to be "special":
    // 1. We identify root nodes by the arguable logic that _n_items == n->n_descendants, regardless of how many descendants they actually have
    // 2. Root nodes with only 1 child need to be a "dummy" parent
    // 3. Due to the _n_items "hack", we need to be careful with the cases where _n_items <= _K or _n_items > _K
    if (indices.size() == 1 && !is_root)
      return indices[0];

    if (indices.size() <= (size_t)_K && (!is_root || (size_t)_n_items <= (size_t)_K || indices.size() == 1)) {
      _allocate_size(_n_nodes + 1);
      S item = _n_nodes++;
      Node* m = _get(item);
      m->n_descendants = is_root ? _n_items : (S)indices.size();

      // Using std::copy instead of a loop seems to resolve issues #3 and #13,
      // probably because gcc 4.8 goes overboard with optimizations.
      // Using memcpy instead of std::copy for MSVC compatibility. #235
      // Only copy when necessary to avoid crash in MSVC 9. #293
      if (!indices.empty())
        memcpy(m->children, &indices[0], indices.size() * sizeof(S));
      return item;
    }

    vector<Node*> children;
    for (size_t i = 0; i < indices.size(); i++) {
      S j = indices[i];
      Node* n = _get(j);
      if (n)
        children.push_back(n);
    }

    vector<S> children_indices[2];
    Node* m = (Node*)malloc(_s); // TODO: avoid
    D::create_split(children, _f, _s, _random, m);

    for (size_t i = 0; i < indices.size(); i++) {
      S j = indices[i];
      Node* n = _get(j);
      if (n) {
        bool side = D::side(m, n->v, _f, _random);
        children_indices[side].push_back(j);
      } else {
        showUpdate("No node for index %d?\n", j);
      }
    }

    // If we didn't find a hyperplane, just randomize sides as a last option
    while (children_indices[0].size() == 0 || children_indices[1].size() == 0) {
      if (_verbose)
        showUpdate("\tNo hyperplane found (left has %ld children, right has %ld children)\n", children_indices[0].size(), children_indices[1].size());
      if (_verbose && indices.size() > 100000)
        showUpdate("Failed splitting %lu items\n", indices.size());

      children_indices[0].clear();
      children_indices[1].clear();

      // Set the vector to 0.0
      for (int z = 0; z < _f; z++)
        m->v[z] = 0.0;

      for (size_t i = 0; i < indices.size(); i++) {
        S j = indices[i];
        // Just randomize...
        children_indices[_random.flip()].push_back(j);
      }
    }

    int flip = (children_indices[0].size() > children_indices[1].size());

    m->n_descendants = is_root ? _n_items : (S)indices.size();
    for (int side = 0; side < 2; side++) {
      // run _make_tree for the smallest child first (for cache locality)
      m->children[side^flip] = _make_tree(children_indices[side^flip], false);
    }

    _allocate_size(_n_nodes + 1);
    S item = _n_nodes++;
    memcpy(_get(item), m, _s);
    free(m);

    return item;
  }
  // Core ANN search: best-first traversal of all trees collecting candidate
  // ids, then exact distance ranking of the unique candidates.
  void _get_all_nns(const T* v, size_t n, size_t search_k, vector<S>* result, vector<T>* distances) const {
    Node* v_node = (Node *)malloc(_s); // TODO: avoid
    D::template zero_value<Node>(v_node);
    memcpy(v_node->v, v, sizeof(T) * _f);
    D::init_node(v_node, _f);

    std::priority_queue<pair<T, S> > q;

    // Default budget: inspect roughly n candidates per tree.
    if (search_k == (size_t)-1) {
      search_k = n * _roots.size();
    }

    for (size_t i = 0; i < _roots.size(); i++) {
      q.push(make_pair(Distance::template pq_initial_value<T>(), _roots[i]));
    }

    std::vector<S> nns;
    while (nns.size() < search_k && !q.empty()) {
      const pair<T, S>& top = q.top();
      T d = top.first;
      S i = top.second;
      Node* nd = _get(i);
      q.pop();
      if (nd->n_descendants == 1 && i < _n_items) {
        // Leaf holding a single item.
        nns.push_back(i);
      } else if (nd->n_descendants <= _K) {
        // Small leaf: take all its item ids at once.
        const S* dst = nd->children;
        nns.insert(nns.end(), dst, &dst[nd->n_descendants]);
      } else {
        // Split node: enqueue both sides, prioritized by margin distance.
        T margin = D::margin(nd, v, _f);
        q.push(make_pair(D::pq_distance(d, margin, 1), static_cast<S>(nd->children[1])));
        q.push(make_pair(D::pq_distance(d, margin, 0), static_cast<S>(nd->children[0])));
      }
    }

    // Get distances for all items
    // To avoid calculating distance multiple times for any items, sort by id
    std::sort(nns.begin(), nns.end());
    vector<pair<T, S> > nns_dist;
    S last = -1;
    for (size_t i = 0; i < nns.size(); i++) {
      S j = nns[i];
      if (j == last)
        continue;
      last = j;
      if (_get(j)->n_descendants == 1) // This is only to guard a really obscure case, #284
        nns_dist.push_back(make_pair(D::distance(v_node, _get(j), _f), j));
    }

    size_t m = nns_dist.size();
    size_t p = n < m ? n : m; // Return this many items
    std::partial_sort(nns_dist.begin(), nns_dist.begin() + p, nns_dist.end());
    for (size_t i = 0; i < p; i++) {
      if (distances)
        distances->push_back(D::normalized_distance(nns_dist[i].first));
      result->push_back(nns_dist[i].second);
    }
    free(v_node);
  }
};

#endif
// vim: tabstop=2 shiftwidth=2
pgpdisk_fmt_plug.c
/* * Format for brute-forcing PGP Virtual Disk images. * * This software is Copyright (c) 2017, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without modification, * are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_pgpdisk; #elif FMT_REGISTERS_H john_register_one(&fmt_pgpdisk); #else #include <string.h> #include <openssl/cast.h> #include "arch.h" #include "misc.h" #include "memory.h" #include "common.h" #include "formats.h" #include "johnswap.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 1 // this is a slow format #endif #endif #include "sha.h" #include "loader.h" #include "aes.h" #include "twofish.h" #include "pgpdisk_common.h" #include "memdbg.h" #define FORMAT_LABEL "pgpdisk" #define FORMAT_NAME "" #define ALGORITHM_NAME "PGP Disk / Virtual Disk SHA1 " ARCH_BITS_STR #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(int) #define BINARY_SIZE 16 #define BINARY_ALIGN sizeof(uint32_t) #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define FORMAT_TAG "$pgpdisk$" #define FORMAT_TAG_LENGTH (sizeof(FORMAT_TAG) - 1) static struct custom_salt *cur_salt; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE * 2 / sizeof(uint32_t)]; static void init(struct fmt_main *self) { #ifdef _OPENMP static int omp_t = 1; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt); Twofish_initialise(); } static void done(void) { MEM_FREE(saved_key); MEM_FREE(crypt_out) } static void set_salt(void *salt) { cur_salt = (struct 
custom_salt *)salt; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; uint32_t dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '*') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } #undef SHA1_DIGEST_LENGTH #define SHA1_DIGEST_LENGTH 20 // HashSaltSchedulePassphrase in original source code static void pgpdisk_kdf(char *password, unsigned char *salt, unsigned char *key, int key_length) { uint32_t bytesNeeded = key_length; uint32_t offset = 0; unsigned char hash[SHA1_DIGEST_LENGTH]; int plen; int iterations = cur_salt->iterations; SHA_CTX ctx; // SHA1 usage is hardcoded plen = strlen(password); while (bytesNeeded > 0) { uint32_t bytesThisTime = SHA1_DIGEST_LENGTH < bytesNeeded ? SHA1_DIGEST_LENGTH: bytesNeeded; uint32_t j = 0; // "j" has type uint8_t in the original code SHA1_Init(&ctx); if (offset > 0) { SHA1_Update(&ctx, key, SHA1_DIGEST_LENGTH); } SHA1_Update(&ctx, password, plen); SHA1_Final(hash, &ctx); SHA1_Init(&ctx); if (cur_salt->algorithm == 3) SHA1_Update(&ctx, salt, 8); // kNumSaltBytes = 8, for CAST5 else SHA1_Update(&ctx, salt, 16); // kNumSaltBytes = 16, for AES-256, Twofish for (j = 0; j < iterations; j++) { SHA1_Update(&ctx, hash, bytesThisTime); #if ARCH_LITTLE_ENDIAN SHA1_Update(&ctx, (uint8_t*)&j, 1); #else SHA1_Update(&ctx, ((uint8_t*)&j) + 3, 1); #endif } SHA1_Final(key + offset, &ctx); bytesNeeded -= bytesThisTime; offset += bytesThisTime; } } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { int i; for (i = 0; i < MAX_KEYS_PER_CRYPT; i++) { unsigned char key[40]; if (cur_salt->algorithm == 5 || cur_salt->algorithm == 6 || cur_salt->algorithm == 7) { AES_KEY aes_key; pgpdisk_kdf(saved_key[i+index], cur_salt->salt, key, 32); // 
DecryptPassphraseKey in original source code, compute CheckBytes AES_set_encrypt_key(key, 256, &aes_key); AES_ecb_encrypt(key, (unsigned char*)crypt_out[index+i], &aes_key, AES_ENCRYPT); } else if (cur_salt->algorithm == 4) { Twofish_key tkey; pgpdisk_kdf(saved_key[i+index], cur_salt->salt, key, 32); Twofish_prepare_key(key, 32, &tkey); Twofish_encrypt(&tkey, key, (unsigned char*)crypt_out[index+i]); } else if (cur_salt->algorithm == 3) { CAST_KEY ck; pgpdisk_kdf(saved_key[i+index], cur_salt->salt, key, 16); CAST_set_key(&ck, 16, key); memset((unsigned char*)crypt_out[index+i], 0, BINARY_SIZE); CAST_ecb_encrypt(key, (unsigned char*)crypt_out[index+i], &ck, CAST_ENCRYPT); } } } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (((uint32_t*)binary)[0] == crypt_out[index][0]) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void set_key(char *key, int index) { strnzcpy(saved_key[index], key, sizeof(saved_key[index])); } static char *get_key(int index) { return saved_key[index]; } static unsigned int pgpdisk_iteration_count(void *salt) { struct custom_salt *cs = salt; return (unsigned int)cs->iterations; } struct fmt_main fmt_pgpdisk = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG }, pgpdisk_tests }, { init, done, fmt_default_reset, fmt_default_prepare, pgpdisk_common_valid, fmt_default_split, get_binary, pgpdisk_common_get_salt, { pgpdisk_iteration_count, }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; 
#endif /* plugin stanza */
GB_unop__identity_int64_uint64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__identity_int64_uint64
// op(A') function: GB_unop_tran__identity_int64_uint64

// C type: int64_t
// A type: uint64_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int64_t z = (int64_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int64_t z = (int64_t) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (0 here: identity WITH typecasting uint64_t -> int64_t, so no raw memcpy)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_int64_uint64
(
    int64_t *Cx,        // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,        // number of entries in Ax/Cx
    int nthreads        // OpenMP thread count for the parallel loops
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            int64_t z = (int64_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint64_t aij = Ax [p] ;
            int64_t z = (int64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_int64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via textual inclusion
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
attribute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/identify.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/magick.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/segment.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. % % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ typedef struct _EdgeInfo { double left, right, top, bottom; } EdgeInfo; static double GetEdgeBackgroundCensus(const Image *image, const CacheView *image_view,const GravityType gravity,const size_t width, const size_t height,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double census; Image *edge_image; PixelInfo background, pixel; RectangleInfo edge_geometry; const Quantum *p; ssize_t y; /* Determine the percent of image background for this edge. 
*/ switch (gravity) { case NorthWestGravity: case NorthGravity: default: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); break; } case NorthEastGravity: case EastGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); break; } case SouthEastGravity: case SouthGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); break; } case SouthWestGravity: case WestGravity: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); break; } } GetPixelInfoPixel(image,p,&background); artifact=GetImageArtifact(image,"background"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&background,exception); artifact=GetImageArtifact(image,"trim:background-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&background,exception); edge_geometry.width=width; edge_geometry.height=height; edge_geometry.x=x_offset; edge_geometry.y=y_offset; GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) return(0.0); census=0.0; edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { GetPixelInfoPixel(edge_image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse) census++; p+=GetPixelChannels(edge_image); } } census/=((double) edge_image->columns*edge_image->rows); edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); return(census); } static inline double GetMinEdgeBackgroundCensus(const EdgeInfo *edge) { double census; census=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top), 
edge->bottom); return(census); } static RectangleInfo GetEdgeBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double background_census, percent_background; EdgeInfo edge, vertex; Image *edge_image; RectangleInfo bounds; /* Get the image bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); SetGeometry(image,&bounds); edge_image=CloneImage(image,0,0,MagickTrue,exception); if (edge_image == (Image *) NULL) return(bounds); (void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page); (void) memset(&vertex,0,sizeof(vertex)); edge_view=AcquireVirtualCacheView(edge_image,exception); edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,WestGravity, 1,0,0,0,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,EastGravity, 1,0,0,0,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,NorthGravity, 0,1,0,0,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,SouthGravity, 0,1,0,0,exception); percent_background=1.0; artifact=GetImageArtifact(edge_image,"trim:percent-background"); if (artifact != (const char *) NULL) percent_background=StringToDouble(artifact,(char **) NULL)/100.0; percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon), 1.0); background_census=GetMinEdgeBackgroundCensus(&edge); for ( ; background_census < percent_background; background_census=GetMinEdgeBackgroundCensus(&edge)) { if ((bounds.width == 0) || (bounds.height == 0)) break; if (fabs(edge.left-background_census) < MagickEpsilon) { /* Trim left edge. 
*/ vertex.left++; bounds.width--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.right-background_census) < MagickEpsilon) { /* Trim right edge. */ vertex.right++; bounds.width--; edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.top-background_census) < MagickEpsilon) { /* Trim top edge. */ vertex.top++; bounds.height--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); continue; } if (fabs(edge.bottom-background_census) < MagickEpsilon) { /* Trim bottom edge. 
*/ vertex.bottom++; bounds.height--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); bounds.x=(ssize_t) vertex.left; bounds.y=(ssize_t) vertex.top; if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return(bounds); } MagickExport RectangleInfo GetImageBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *image_view; const char *artifact; MagickBooleanType status; PixelInfo target[3], zero; RectangleInfo bounds; const Quantum *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); artifact=GetImageArtifact(image,"trim:percent-background"); if (artifact != (const char *) NULL) return(GetEdgeBoundingBox(image,exception)); bounds.width=0; bounds.height=0; bounds.x=(ssize_t) image->columns; bounds.y=(ssize_t) image->rows; GetPixelInfo(image,&target[0]); image_view=AcquireVirtualCacheView(image,exception); p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); return(bounds); } GetPixelInfoPixel(image,p,&target[0]); GetPixelInfo(image,&target[1]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[1]); GetPixelInfo(image,&target[2]); 
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[2]); status=MagickTrue; GetPixelInfo(image,&zero); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; RectangleInfo bounding_box; const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif bounding_box=bounds; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,p,&pixel); if ((x < bounding_box.x) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.x=x; if ((x > (ssize_t) bounding_box.width) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse)) bounding_box.width=(size_t) x; if ((y < bounding_box.y) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.y=y; if ((y > (ssize_t) bounding_box.height) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse)) bounding_box.height=(size_t) y; p+=GetPixelChannels(image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif { if (bounding_box.x < bounds.x) bounds.x=bounding_box.x; if (bounding_box.y < bounds.y) bounds.y=bounding_box.y; if (bounding_box.width > bounds.width) bounds.width=bounding_box.width; if (bounding_box.height > bounds.height) bounds.height=bounding_box.height; } } image_view=DestroyCacheView(image_view); if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); else { 
bounds.width-=(bounds.x-1); bounds.height-=(bounds.y-1); } return(bounds); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C o n v e x H u l l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageConvexHull() returns the convex hull points of an image canvas. % % The format of the GetImageConvexHull method is: % % PointInfo *GetImageConvexHull(const Image *image, % size_t number_vertices,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_vertices: the number of vertices in the convex hull. % % o exception: return any errors or warnings in this structure. % */ static double LexicographicalOrder(PointInfo *a,PointInfo *b,PointInfo *c) { /* Order by x-coordinate, and in case of a tie, by y-coordinate. */ return((b->x-a->x)*(c->y-a->y)-(b->y-a->y)*(c->x-a->x)); } static PixelInfo GetEdgeBackgroundColor(const Image *image, const CacheView *image_view,ExceptionInfo *exception) { const char *artifact; double census[4], edge_census; PixelInfo background[4], edge_background; ssize_t i; /* Most dominant color of edges/corners is the background color of the image. 
*/ artifact=GetImageArtifact(image,"convex-hull:background-color"); if (artifact == (const char *) NULL) artifact=GetImageArtifact(image,"background"); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i < 4; i++) { CacheView *edge_view; GravityType gravity; Image *edge_image; PixelInfo pixel; RectangleInfo edge_geometry; const Quantum *p; ssize_t y; census[i]=0.0; (void) memset(&edge_geometry,0,sizeof(edge_geometry)); switch (i) { case 0: default: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); gravity=WestGravity; edge_geometry.width=1; edge_geometry.height=0; } case 1: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); gravity=EastGravity; edge_geometry.width=1; edge_geometry.height=0; } case 2: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); gravity=NorthGravity; edge_geometry.width=0; edge_geometry.height=1; } case 3: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); gravity=SouthGravity; edge_geometry.width=0; edge_geometry.height=1; } } GetPixelInfoPixel(image,p,background+i); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,background+i, exception); GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) continue; edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1, exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { GetPixelInfoPixel(edge_image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,background+i) == MagickFalse) census[i]++; p+=GetPixelChannels(edge_image); } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); } 
  /*
    Select the corner background whose edge census is largest, i.e. the color
    that matches the most edge pixels.
  */
  edge_census=(-1.0);
  for (i=0; i < 4; i++)
    if (census[i] > edge_census)
      {
        edge_background=background[i];
        edge_census=census[i];
      }
  return(edge_background);
}

/*
  TraceConvexHull() computes the convex hull of a point set with the monotone
  chain algorithm.  On return, *monotone_chain holds pointers into 'vertices'
  and *chain_length is the number of hull points.
  NOTE(review): appears to assume 'vertices' is ordered (it is scanned front
  to back, then back to front) and that *monotone_chain has room for at least
  2*number_vertices entries -- confirm at callers.
*/
void TraceConvexHull(PointInfo *vertices,size_t number_vertices,
  PointInfo ***monotone_chain,size_t *chain_length)
{
  PointInfo
    **chain;

  ssize_t
    i;

  size_t
    demark,
    n;

  /*
    Construct the upper and lower hulls: rightmost to leftmost
    counterclockwise.
  */
  chain=(*monotone_chain);
  n=0;
  for (i=0; i < (ssize_t) number_vertices; i++)
  {
    /* pop points that do not make a counterclockwise turn */
    while ((n >= 2) &&
           (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0))
      n--;
    chain[n++]=(&vertices[i]);
  }
  /* demark separates the first hull from the second */
  demark=n+1;
  for (i=(ssize_t) number_vertices-2; i >= 0; i--)
  {
    while ((n >= demark) &&
           (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0))
      n--;
    chain[n++]=(&vertices[i]);
  }
  *chain_length=n;
}

/*
  GetImageConvexHull() returns the vertices of the convex hull around the
  image foreground object(s).  The caller owns, and must free, the returned
  PointInfo array; NULL is returned on memory-allocation failure.
*/
MagickExport PointInfo *GetImageConvexHull(const Image *image,
  size_t *number_vertices,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MemoryInfo
    *vertices_info;

  PixelInfo
    background;

  PointInfo
    *convex_hull,
    **monotone_chain,
    *vertices;

  size_t
    n;

  ssize_t
    y;

  /*
    Identify convex hull vertices of image foreground object(s).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *number_vertices=0;
  /* worst case: every pixel is a candidate hull vertex */
  vertices_info=AcquireVirtualMemory(image->columns,image->rows*
    sizeof(*vertices));
  monotone_chain=(PointInfo **) AcquireQuantumMemory(2*image->columns,2*
    image->rows*sizeof(*monotone_chain));
  if ((vertices_info == (MemoryInfo *) NULL) ||
      (monotone_chain == (PointInfo **) NULL))
    {
      /* release whichever allocation succeeded before bailing out */
      if (monotone_chain != (PointInfo **) NULL)
        monotone_chain=(PointInfo **) RelinquishMagickMemory(monotone_chain);
      if (vertices_info != (MemoryInfo *) NULL)
        vertices_info=RelinquishVirtualMemory(vertices_info);
      return((PointInfo *) NULL);
    }
  vertices=(PointInfo *) GetVirtualMemoryBlob(vertices_info);
  image_view=AcquireVirtualCacheView(image,exception);
  background=GetEdgeBackgroundColor(image,image_view,exception);
  status=MagickTrue;
  n=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      GetPixelInfoPixel(image,p,&pixel);
      /* any pixel that differs from the edge background is foreground */
      if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
        {
          vertices[n].x=(double) x;
          vertices[n].y=(double) y;
          n++;
        }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Return the convex hull of the image foreground object(s).
  */
  TraceConvexHull(vertices,n,&monotone_chain,number_vertices);
  convex_hull=(PointInfo *) AcquireQuantumMemory(*number_vertices,
    sizeof(*convex_hull));
  if (convex_hull != (PointInfo *) NULL)
    for (n=0; n < *number_vertices; n++)
      convex_hull[n]=(*monotone_chain[n]);
  monotone_chain=(PointInfo **) RelinquishMagickMemory(monotone_chain);
  vertices_info=RelinquishVirtualMemory(vertices_info);
  return(convex_hull);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e D e p t h                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDepth() returns the depth of a particular image channel.
%
%  The format of the GetImageDepth method is:
%
%      size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    *current_depth,
    depth,
    number_threads;

  ssize_t
    y;

  /*
    Compute image depth.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* one depth accumulator per potential OpenMP thread */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  current_depth=(size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  status=MagickTrue;
  for (i=0; i < (ssize_t) number_threads; i++)
    current_depth[i]=1;
  if ((image->storage_class == PseudoClass) &&
      (image->alpha_trait == UndefinedPixelTrait))
    {
      /*
        Palette image without alpha: the depth is determined by the colormap
        alone, so no pixel scan is needed.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        const int
          id = GetOpenMPThreadId();

        /* grow the candidate depth until all updated channels round-trip */
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          MagickBooleanType
            atDepth;

          QuantumAny
            range;

          atDepth=MagickTrue;
          range=GetQuantumRange(current_depth[id]);
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) &&
              (GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) &&
              (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse))
            break;
          current_depth[id]++;
        }
      }
      /* reduce the per-thread accumulators to the overall maximum */
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
  image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if ((1UL*QuantumRange) <= MaxMap)
    {
      size_t
        *depth_map;

      /*
        Scale pixels to desired (optimized with depth map).
      */
      depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (size_t *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      /* depth_map[q]: smallest depth that represents quantum value q exactly */
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        unsigned int
          depth;

        for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
        {
          Quantum
            pixel;

          QuantumAny
            range;

          range=GetQuantumRange(depth);
          pixel=(Quantum) i;
          if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
            break;
        }
        depth_map[i]=depth;
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        const Quantum
          *magick_restrict p;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        if (p == (const Quantum *) NULL)
          continue;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            if (depth_map[ScaleQuantumToMap(p[i])] > current_depth[id])
              current_depth[id]=depth_map[ScaleQuantumToMap(p[i])];
          }
          p+=GetPixelChannels(image);
        }
        /* this thread already hit the maximum depth: stop scanning rows */
        if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      depth_map=(size_t *) RelinquishMagickMemory(depth_map);
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
#endif
  /*
    Compute pixel depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      continue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* increase the candidate depth until the sample round-trips */
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          QuantumAny
            range;

          range=GetQuantumRange(current_depth[id]);
          if (p[i] == ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),range))
            break;
          current_depth[id]++;
        }
      }
      p+=GetPixelChannels(image);
    }
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  depth=current_depth[0];
  for (i=1; i < (ssize_t) number_threads; i++)
    if (depth < current_depth[i])
      depth=current_depth[i];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e M i n i m u m B o u n d i n g B o x                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMinimumBoundingBox() returns the points that form the minimum
%  bounding box around the image foreground objects with the "Rotating
%  Calipers" algorithm.  The method also returns these properties:
%  minimum-bounding-box:area, minimum-bounding-box:width,
%  minimum-bounding-box:height, and minimum-bounding-box:angle.
%
%  The format of the GetImageMinimumBoundingBox method is:
%
%      PointInfo *GetImageMinimumBoundingBox(Image *image,
%        size_t number_vertices,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_vertices: the number of vertices in the bounding box.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* best (smallest-area) caliper rotation found so far */
typedef struct _CaliperInfo
{
  double
    area,
    width,
    height,
    projection;

  ssize_t
    p,
    q,
    v;
} CaliperInfo;

static inline double getAngle(PointInfo *p,PointInfo *q)
{
  /*
    Get the angle between line (p,q) and horizontal axis, in degrees.
  */
  return(RadiansToDegrees(atan2(q->y-p->y,q->x-p->x)));
}

static inline double getDistance(PointInfo *p,PointInfo *q)
{
  double
    distance;

  /* squared Euclidean distance between p and q */
  distance=hypot(p->x-q->x,p->y-q->y);
  return(distance*distance);
}

static inline double getProjection(PointInfo *p,PointInfo *q,PointInfo *v)
{
  double
    distance;

  /*
    Projection of vector (x,y) - p into a line passing through p and q.
  */
  distance=getDistance(p,q);
  if (distance < MagickEpsilon)
    return(INFINITY);
  return((q->x-p->x)*(v->x-p->x)+(v->y-p->y)*(q->y-p->y))/sqrt(distance);
}

static inline double getFeretDiameter(PointInfo *p,PointInfo *q,PointInfo *v)
{
  double
    distance;

  /*
    Distance from a point (x,y) to a line passing through p and q.
  */
  distance=getDistance(p,q);
  if (distance < MagickEpsilon)
    return(INFINITY);
  return((q->x-p->x)*(v->y-p->y)-(v->x-p->x)*(q->y-p->y))/sqrt(distance);
}

MagickExport PointInfo *GetImageMinimumBoundingBox(Image *image,
  size_t *number_vertices,ExceptionInfo *exception)
{
  CaliperInfo
    caliper_info;

  const char
    *artifact;

  double
    angle,
    diameter,
    distance;

  PointInfo
    *bounding_box,
    *vertices;

  ssize_t
    i;

  size_t
    number_hull_vertices;

  /*
    Generate the minimum bounding box with the "Rotating Calipers" algorithm.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *number_vertices=0;
  vertices=GetImageConvexHull(image,&number_hull_vertices,exception);
  if (vertices == (PointInfo *) NULL)
    return((PointInfo *) NULL);
  *number_vertices=4;
  bounding_box=(PointInfo *) AcquireQuantumMemory(*number_vertices,
    sizeof(*bounding_box));
  if (bounding_box == (PointInfo *) NULL)
    {
      vertices=(PointInfo *) RelinquishMagickMemory(vertices);
      return((PointInfo *) NULL);
    }
  /* seed with an area/width no real box can exceed */
  caliper_info.area=2.0*image->columns*image->rows;
  caliper_info.width=(double) image->columns+image->rows;
  caliper_info.height=0.0;
  caliper_info.projection=0.0;
  caliper_info.p=(-1);
  caliper_info.q=(-1);
  caliper_info.v=(-1);
  for (i=0; i < (ssize_t) number_hull_vertices; i++)
  {
    double
      area = 0.0,
      max_projection = 0.0,
      min_diameter = -1.0,
      min_projection = 0.0;

    ssize_t
      j,
      k;

    ssize_t
      p = -1,
      q = -1,
      v = -1;

    /* widest Feret diameter perpendicular to hull edge (i,i+1) */
    for (j=0; j < (ssize_t) number_hull_vertices; j++)
    {
      double
        diameter;

      diameter=fabs(getFeretDiameter(&vertices[i],
        &vertices[(i+1) % number_hull_vertices],&vertices[j]));
      if (min_diameter < diameter)
        {
          min_diameter=diameter;
          p=i;
          q=(i+1) % number_hull_vertices;
          v=j;
        }
    }
    for (k=0; k < (ssize_t) number_hull_vertices; k++)
    {
      double
        projection;

      /*
        Rotating calipers.
      */
      projection=getProjection(&vertices[p],&vertices[q],&vertices[k]);
      min_projection=MagickMin(min_projection,projection);
      max_projection=MagickMax(max_projection,projection);
    }
    area=min_diameter*(max_projection-min_projection);
    /* keep the rotation yielding the smallest enclosing area */
    if (caliper_info.area > area)
      {
        caliper_info.area=area;
        caliper_info.width=min_diameter;
        caliper_info.height=max_projection-min_projection;
        caliper_info.projection=max_projection;
        caliper_info.p=p;
        caliper_info.q=q;
        caliper_info.v=v;
      }
  }
  /*
    Initialize minimum bounding box.
  */
  diameter=getFeretDiameter(&vertices[caliper_info.p],
    &vertices[caliper_info.q],&vertices[caliper_info.v]);
  angle=atan2(vertices[caliper_info.q].y-vertices[caliper_info.p].y,
    vertices[caliper_info.q].x-vertices[caliper_info.p].x);
  bounding_box[0].x=vertices[caliper_info.p].x+cos(angle)*
    caliper_info.projection;
  bounding_box[0].y=vertices[caliper_info.p].y+sin(angle)*
    caliper_info.projection;
  bounding_box[1].x=floor(bounding_box[0].x+cos(angle+MagickPI/2.0)*diameter+
    0.5);
  bounding_box[1].y=floor(bounding_box[0].y+sin(angle+MagickPI/2.0)*diameter+
    0.5);
  bounding_box[2].x=floor(bounding_box[1].x+cos(angle)*(-caliper_info.height)+
    0.5);
  bounding_box[2].y=floor(bounding_box[1].y+sin(angle)*(-caliper_info.height)+
    0.5);
  bounding_box[3].x=floor(bounding_box[2].x+cos(angle+MagickPI/2.0)*(-diameter)+
    0.5);
  bounding_box[3].y=floor(bounding_box[2].y+sin(angle+MagickPI/2.0)*(-diameter)+
    0.5);
  /*
    Export minimum bounding box properties.
  */
  (void) FormatImageProperty(image,"minimum-bounding-box:area","%.*g",
    GetMagickPrecision(),caliper_info.area);
  (void) FormatImageProperty(image,"minimum-bounding-box:width","%.*g",
    GetMagickPrecision(),caliper_info.width);
  (void) FormatImageProperty(image,"minimum-bounding-box:height","%.*g",
    GetMagickPrecision(),caliper_info.height);
  (void) FormatImageProperty(image,"minimum-bounding-box:_p","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.p].x,
    GetMagickPrecision(),vertices[caliper_info.p].y);
  (void) FormatImageProperty(image,"minimum-bounding-box:_q","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.q].x,
    GetMagickPrecision(),vertices[caliper_info.q].y);
  (void) FormatImageProperty(image,"minimum-bounding-box:_v","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.v].x,
    GetMagickPrecision(),vertices[caliper_info.v].y);
  /*
    Find smallest angle to origin.
  */
  distance=hypot(bounding_box[0].x,bounding_box[0].y);
  angle=getAngle(&bounding_box[0],&bounding_box[1]);
  for (i=1; i < 4; i++)
  {
    double
      d = hypot(bounding_box[i].x,bounding_box[i].y);

    if (d < distance)
      {
        distance=d;
        angle=getAngle(&bounding_box[i],&bounding_box[(i+1) % 4]);
      }
  }
  artifact=GetImageArtifact(image,"minimum-bounding-box:orientation");
  if (artifact != (const char *) NULL)
    {
      double
        length,
        q_length,
        p_length;

      PointInfo
        delta,
        point;

      /*
        Find smallest perpendicular distance from edge to origin.
      */
      point=bounding_box[0];
      for (i=1; i < 4; i++)
      {
        if (bounding_box[i].x < point.x)
          point.x=bounding_box[i].x;
        if (bounding_box[i].y < point.y)
          point.y=bounding_box[i].y;
      }
      /* translate the box so its minimum corner is at the origin */
      for (i=0; i < 4; i++)
      {
        bounding_box[i].x-=point.x;
        bounding_box[i].y-=point.y;
      }
      for (i=0; i < 4; i++)
      {
        double
          d,
          intercept,
          slope;

        delta.x=bounding_box[(i+1) % 4].x-bounding_box[i].x;
        delta.y=bounding_box[(i+1) % 4].y-bounding_box[i].y;
        slope=delta.y*PerceptibleReciprocal(delta.x);
        intercept=bounding_box[(i+1) % 4].y-slope*bounding_box[i].x;
        d=fabs((slope*bounding_box[i].x-bounding_box[i].y+intercept)*
          PerceptibleReciprocal(sqrt(slope*slope+1.0)));
        if ((i == 0) || (d < distance))
          {
            distance=d;
            point=delta;
          }
      }
      angle=RadiansToDegrees(atan(point.y*PerceptibleReciprocal(point.x)));
      length=hypot(point.x,point.y);
      p_length=fabs((double) MagickMax(caliper_info.width,
        caliper_info.height)-length);
      q_length=fabs(length-(double) MagickMin(caliper_info.width,
        caliper_info.height));
      /* snap the angle to the requested landscape/portrait orientation */
      if (LocaleCompare(artifact,"landscape") == 0)
        {
          if (p_length > q_length)
            angle+=(angle < 0.0) ? 90.0 : -90.0;
        }
      else
        if (LocaleCompare(artifact,"portrait") == 0)
          {
            if (p_length < q_length)
              angle+=(angle >= 0.0) ? 90.0 : -90.0;
          }
    }
  (void) FormatImageProperty(image,"minimum-bounding-box:angle","%.*g",
    GetMagickPrecision(),angle);
  (void) FormatImageProperty(image,"minimum-bounding-box:unrotate","%.*g",
    GetMagickPrecision(),-angle);
  vertices=(PointInfo *) RelinquishMagickMemory(vertices);
  return(bounding_box);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e Q u a n t u m D e p t h                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantumDepth() returns the depth of the image rounded to a legal
%  quantum depth: 8, 16, or 32.
%
%  The format of the GetImageQuantumDepth method is:
%
%      size_t GetImageQuantumDepth(const Image *image,
%        const MagickBooleanType constrain)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o constrain: A value other than MagickFalse, constrains the depth to
%      a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  size_t
    depth;

  /* round up to the next legal quantum depth: 8, 16, 32, or 64 */
  depth=image->depth;
  if (depth <= 8)
    depth=8;
  else
    if (depth <= 16)
      depth=16;
    else
      if (depth <= 32)
        depth=32;
      else
        if (depth <= 64)
          depth=64;
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageType() returns the type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  The format of the GetImageType method is:
%
%      ImageType GetImageType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport ImageType GetImageType(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /* classify from the image's declared traits only; no pixel scan here */
  if (image->colorspace == CMYKColorspace)
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        return(ColorSeparationType);
      return(ColorSeparationAlphaType);
    }
  if (IsImageMonochrome(image) != MagickFalse)
    return(BilevelType);
  if (IsImageGray(image) != MagickFalse)
    {
      if (image->alpha_trait != UndefinedPixelTrait)
        return(GrayscaleAlphaType);
      return(GrayscaleType);
    }
  if (IsPaletteImage(image) != MagickFalse)
    {
      if (image->alpha_trait != UndefinedPixelTrait)
        return(PaletteAlphaType);
      return(PaletteType);
    }
  if (image->alpha_trait != UndefinedPixelTrait)
    return(TrueColorAlphaType);
  return(TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e G r a y                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageGray() returns grayscale if all the pixels in the image have
%  the same red, green, and blue intensities, and bi-level if the intensity is
%  either 0 or QuantumRange.  Otherwise undefined is returned.
%
%  The format of the IdentifyImageGray method is:
%
%      ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  const Quantum
    *p;

  ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* trust a previously-identified gray type without rescanning pixels */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(image->type);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  /* start optimistic; demote as non-monochrome/non-gray pixels are found */
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelGray(image,p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) &&
          (IsPixelMonochrome(image,p) == MagickFalse))
        type=GrayscaleType;
      p+=GetPixelChannels(image);
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait))
    type=GrayscaleAlphaType;
  return(type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e M o n o c h r o m e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
%  have the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange.
%
%  The format of the IdentifyImageMonochrome method is:
%
%      MagickBooleanType IdentifyImageMonochrome(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    bilevel;

  ssize_t
    x;

  const Quantum
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* trust a previously-identified bilevel type without rescanning pixels */
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  bilevel=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelMonochrome(image,p) == MagickFalse)
        {
          bilevel=MagickFalse;
          break;
        }
      p+=GetPixelChannels(image);
    }
    if (bilevel == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  return(bilevel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I d e n t i f y I m a g e T y p e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageType() returns the potential type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  To ensure the image type matches its potential, use SetImageType():
%
%      (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
%  The format of the IdentifyImageType method is:
%
%      ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
  ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* like GetImageType(), but scans pixels instead of trusting image->type */
  if (image->colorspace == CMYKColorspace)
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        return(ColorSeparationType);
      return(ColorSeparationAlphaType);
    }
  if (IdentifyImageMonochrome(image,exception) != MagickFalse)
    return(BilevelType);
  if (IdentifyImageGray(image,exception) != UndefinedType)
    {
      if (image->alpha_trait != UndefinedPixelTrait)
        return(GrayscaleAlphaType);
      return(GrayscaleType);
    }
  if (IdentifyPaletteImage(image,exception) != MagickFalse)
    {
      if (image->alpha_trait != UndefinedPixelTrait)
        return(PaletteAlphaType);
      return(PaletteType);
    }
  if (image->alpha_trait != UndefinedPixelTrait)
    return(TrueColorAlphaType);
  return(TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e G r a y                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageGray() returns MagickTrue if the type of the image is grayscale or
%  bi-level.
%
%  The format of the IsImageGray method is:
%
%      MagickBooleanType IsImageGray(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageGray(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /* checks the declared type only; use IdentifyImageGray() to scan pixels */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e M o n o c h r o m e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageMonochrome() returns MagickTrue if type of the image is bi-level.
%
%  The format of the IsImageMonochrome method is:
%
%      MagickBooleanType IsImageMonochrome(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageMonochrome(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /* checks the declared type only; use IdentifyImageMonochrome() to scan */
  if (image->type == BilevelType)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O p a q u e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageOpaque() returns MagickTrue if none of the pixels in the image have
%  an alpha value other than OpaqueAlpha (QuantumRange).
%
%  Will return MagickTrue immediately if the alpha channel is not available.
%
%  The format of the IsImageOpaque method is:
%
%      MagickBooleanType IsImageOpaque(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImageOpaque(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const Quantum
    *p;

  ssize_t
    x;

  ssize_t
    y;

  /*
    Determine if image is opaque.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* no alpha channel means trivially opaque */
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelAlpha(image,p) != OpaqueAlpha)
        break;
      p+=GetPixelChannels(image);
    }
    /* inner break (translucent pixel found) propagates out here */
    if (x < (ssize_t) image->columns)
      break;
  }
  image_view=DestroyCacheView(image_view);
  /* only a full scan (y reached rows) proves the image opaque */
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e D e p t h                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageDepth() sets the depth of the image.
%
%  The format of the SetImageDepth method is:
%
%      MagickBooleanType SetImageDepth(Image *image,const size_t depth,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o depth: the image depth.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* requesting the native depth or more: record it, no requantization */
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        i;

      /*
        Requantize the colormap entries of a palette image.
        NOTE(review): the pragma names 'status' as shared before it is
        assigned below -- harmless since the loop body never reads it, but
        worth confirming against upstream.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->colors,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].red),range),range);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].green),range),range);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].blue),range),range);
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].alpha),range),range);
      }
    }
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  if ((1UL*QuantumRange) <= MaxMap)
    {
      Quantum
        *depth_map;

      ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map).
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      /* precompute the requantized value for every possible quantum */
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=GetPixelChannelChannel(image,i);
            traits=GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=depth_map[ScaleQuantumToMap(q[i])];
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType)
          q[i]),range),range);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageType() sets the type of image.  Choose from these types:
%
%        Bilevel        Grayscale       GrayscaleMatte
%        Palette        PaletteMatte    TrueColor
%        TrueColorMatte ColorSeparation ColorSeparationMatte
%        OptimizeType
%
%  The format of the SetImageType method is:
%
%      MagickBooleanType SetImageType(Image *image,const ImageType type,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: Image type.
%
%    o exception: return any errors or warnings in this structure.
% */
/*
  SetImageType() converts the image to the requested ImageType by combining
  colorspace transforms, quantization, and alpha-channel adjustments.
  Returns MagickTrue on success; on failure the library status is propagated
  and image->type is left unchanged.
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  /* A per-image "dither" artifact overrides the image's dither setting for
     the quantization steps below. */
  image_info=AcquireImageInfo();
  image_info->dither=image->dither;
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /* gray, normalize contrast, then quantize down to 2 colors; alpha is
         dropped.  NOTE(review): status from TransformImageColorspace is
         overwritten by QuantizeImage below — only the quantize result is
         reported. */
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      (void) NormalizeImage(image,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->number_colors=2;
      quantize_info->colorspace=GRAYColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleType:
    {
      /* grayscale without alpha */
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleAlphaType:
    {
      /* grayscale; ensure an (opaque) alpha channel exists */
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case PaletteType:
    {
      /* sRGB palette image; quantize only when not already <= 256 colors */
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image,exception);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case PaletteBilevelAlphaType:
    {
      ChannelType
        channel_mask;

      /* palette image whose alpha channel is thresholded to on/off; the
         channel mask restricts BilevelImage() to the alpha channel only */
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      (void) BilevelImage(image,(double) QuantumRange/2.0,exception);
      (void) SetImageChannelMask(image,channel_mask);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteAlphaType:
    {
      /* palette image quantized in a colorspace that preserves transparency */
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      /* continuous-tone sRGB, no alpha */
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case TrueColorAlphaType:
    {
      /* continuous-tone sRGB with an (opaque) alpha channel */
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case ColorSeparationType:
    {
      /* CMYK separation, no alpha */
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case ColorSeparationAlphaType:
    {
      /* CMYK separation with an (opaque) alpha channel */
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;  /* no conversion requested */
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(status);
  /* only record the new type once every conversion step succeeded */
  image->type=type;
  return(MagickTrue);
}
ba81quad.h
/*
  Copyright 2012-2014 Joshua Nathaniel Pritikin and contributors

  This is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 3 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef _BA81QUAD_H_
#define _BA81QUAD_H_

#include "glue.h"
#include <Eigen/Core>
#include "libifa-rpf.h"

// Normal-distribution quadrature grid used by the BA81 (Bock & Aitkin 1981)
// EM algorithm, with optional two-tier (primary + specific dimension) support.
class ba81NormalQuad {
 private:
	inline void pointToWhere(const int *quad, double *where, int upto);
	inline void decodeLocation(int qx, const int dims, int *quad);
	double One, ReciprocalOfOne;

	// Flat index into a (numSpecific x quadGridSize) table.
	inline int sIndex(int sx, int qx) {
		//if (sx < 0 || sx >= state->numSpecific) Rf_error("Out of domain");
		//if (qx < 0 || qx >= state->quadGridSize) Rf_error("Out of domain");
		return qx * numSpecific + sx;
	};

	inline void mapDenseSpace(double piece, const double *where,
				  const double *whereGram, double *latentDist);
	inline void mapSpecificSpace(int sgroup, double piece, const double *where,
				     const double *whereGram, double *latentDist);

 public:
	int quadGridSize;                     // rename to gridSize TODO
	int maxDims;
	int primaryDims;
	int numSpecific;
	int maxAbilities;
	std::vector<double> Qpoint;           // quadGridSize
	int totalQuadPoints;                  // quadGridSize ^ maxDims
	int totalPrimaryPoints;               // totalQuadPoints except for specific dim
	int weightTableSize;                  // dense: totalQuadPoints; 2tier: totalQuadPoints * numSpecific
	std::vector<double> priQarea;         // totalPrimaryPoints
	std::vector<double> speQarea;         // quadGridSize * numSpecific
	std::vector<double> wherePrep;        // totalQuadPoints * maxDims
	Eigen::MatrixXd whereGram;            // triangleLoc1(maxDims) x totalQuadPoints

	ba81NormalQuad();
	void setOne(double one) { One = one; ReciprocalOfOne = 1/one; }
	void setup0();
	void setup(double Qwidth, int Qpoints, double *means,
		   Eigen::MatrixXd &priCov, Eigen::VectorXd &sVar);
	inline double getReciprocalOfOne() const { return ReciprocalOfOne; };

	// For dense cov, Dweight is size totalQuadPoints
	// For two-tier, Dweight is numSpecific x totalQuadPoints
	inline void EAP(double *thrDweight, double scalingFactor, double *scorePad);
};

// Accumulate one quadrature point's weighted contribution (mean and packed
// lower-triangular second moments) for the primary dimensions into latentDist.
void ba81NormalQuad::mapDenseSpace(double piece, const double *where,
				   const double *whereGram, double *latentDist)
{
	const int pmax = primaryDims;
	int gx = 0;
	int cx = maxAbilities;  // covariance entries start after the means
	for (int d1=0; d1 < pmax; d1++) {
		double piece_w1 = piece * where[d1];
		latentDist[d1] += piece_w1;
		for (int d2=0; d2 <= d1; d2++) {
			double piece_cov = piece * whereGram[gx];
			latentDist[cx] += piece_cov;
			++cx; ++gx;
		}
	}
}

// Accumulate one quadrature point's contribution (mean and variance only)
// for a single specific dimension 'sgroup' into latentDist.
void ba81NormalQuad::mapSpecificSpace(int sgroup, double piece, const double *where,
				      const double *whereGram, double *latentDist)
{
	const int pmax = primaryDims;
	int sdim = pmax + sgroup;
	double piece_w1 = piece * where[pmax];
	latentDist[sdim] += piece_w1;

	double piece_var = piece * whereGram[triangleLoc0(pmax)];
	int to = maxAbilities + triangleLoc0(sdim);
	latentDist[to] += piece_var;
}

// Compute EAP latent means and covariances from per-point density weights
// (thrDweight) into scorePad; the second-moment entries are converted to
// covariances in-place at the end.
void ba81NormalQuad::EAP(double *thrDweight, double scalingFactor, double *scorePad)
{
	if (numSpecific == 0) { // use template to handle this branch at compile time? TODO
		for (int qx=0; qx < totalQuadPoints; ++qx) {
			mapDenseSpace(thrDweight[qx], &wherePrep[qx * maxDims],
				      &whereGram.coeffRef(0, qx), scorePad);
		}
	} else {
		// two-tier: thrDweight is interleaved numSpecific per quad point
		int qloc=0;
		for (int qx=0; qx < totalQuadPoints; qx++) {
			const double *whPrep = &wherePrep[qx * maxDims];
			const double *whGram = &whereGram.coeffRef(0, qx);
			mapDenseSpace(thrDweight[qloc], whPrep, whGram, scorePad);
			for (int Sgroup=0; Sgroup < numSpecific; Sgroup++) {
				mapSpecificSpace(Sgroup, thrDweight[qloc], whPrep, whGram, scorePad);
				++qloc;
			}
		}
	}

	const int padSize = maxAbilities + triangleLoc1(maxAbilities);
	for (int d1=0; d1 < padSize; d1++) {
		scorePad[d1] *= scalingFactor;
	}

	// cov = E[xx'] - E[x]E[x]' for the primary block...
	int cx = maxAbilities;
	for (int a1=0; a1 < primaryDims; ++a1) {
		for (int a2=0; a2 <= a1; ++a2) {
			double ma1 = scorePad[a1];
			double ma2 = scorePad[a2];
			scorePad[cx] -= ma1 * ma2;
			++cx;
		}
	}
	// ...and var = E[x^2] - E[x]^2 for each specific dimension.
	for (int sx=0; sx < numSpecific; sx++) {
		int sdim = primaryDims + sx;
		double ma1 = scorePad[sdim];
		scorePad[maxAbilities + triangleLoc0(sdim)] -= ma1 * ma1;
	}
}

// Item factor analysis group: item specs/parameters, latent distribution,
// data columns, and the per-row likelihood workspace.
class ifaGroup {
 private:
	SEXP Rdata;
	void verifyFactorNames(SEXP mat, const char *matName);
 public:
	const int numThreads;

	// item description related
	std::vector<const double*> spec;
	int maxItemDims;
	int numItems() const { return (int) spec.size(); }
	int impliedParamRows;                 // based on spec set
	int paramRows;
	double *param;                        // itemParam->data
	std::vector<const char*> itemNames;
	std::vector<int> itemOutcomes;
	std::vector<int> cumItemOutcomes;
	int totalOutcomes;
	std::vector<int> Sgroup;              // item's specific group 0..numSpecific-1

	// latent distribution
	double qwidth;
	int qpoints;
	ba81NormalQuad quad;
	ba81NormalQuad &getQuad() { return quad; };
	bool twotier;                         // rename to detectTwoTier TODO
	int maxAbilities;
	int numSpecific;
	double *mean;
	double *cov;
	std::vector<std::string> factorNames;

	// data related
	SEXP dataRowNames;
	std::vector<const int*> dataColumns;
	std::vector<int> rowMap;              // row index into MxData
	int getNumUnique() const { return (int) rowMap.size(); }
	const char *weightColumnName;
	double *rowWeight;
 private:
	int minItemsPerScore;
 public:
	void setMinItemsPerScore(int mips);
	std::vector<bool> rowSkip;            // whether to treat the row as NA

	// workspace
	double *outcomeProb;                  // totalOutcomes * totalQuadPoints
	static const double SmallestPatternLik;
	int excludedPatterns;
	Eigen::ArrayXd patternLik;            // numUnique

	inline static bool validPatternLik(double pl)
	{ return std::isfinite(pl) && pl > SmallestPatternLik; }

	// TODO:
	// scores

	ifaGroup(int cores, bool _twotier);
	~ifaGroup();
	void setGridFineness(double width, int points);
	void import(SEXP Rlist);
	void importSpec(SEXP slotValue);
	void learnMaxAbilities();
	void setLatentDistribution(int dims, double *mean, double *cov);
	inline double *getItemParam(int ix) { return param + paramRows * ix; }
	inline const int *dataColumn(int col) { return dataColumns[col]; };
	void detectTwoTier();
	void buildRowSkip();
	void sanityCheck();
	inline void ba81OutcomeProb(double *param, bool wantLog);
	inline void ba81LikelihoodSlow2(const int px, double *out);
	inline void cai2010EiEis(const int px, double *lxk, double *Eis, double *Ei);
	inline void cai2010part2(double *Qweight, double *Eis, double *Ei);
};

// Depends on item parameters, but not latent distribution.
// Fills outcomeProb with each item's (log-)response probabilities at every
// quadrature point, one item per OpenMP task.
void ifaGroup::ba81OutcomeProb(double *param, bool wantLog)
{
	const int maxDims = quad.maxDims;
	outcomeProb = Realloc(outcomeProb, totalOutcomes * quad.totalQuadPoints, double);

#pragma omp parallel for num_threads(numThreads)
	for (int ix=0; ix < numItems(); ix++) {
		double *qProb = outcomeProb + cumItemOutcomes[ix] * quad.totalQuadPoints;
		const double *ispec = spec[ix];
		int id = ispec[RPF_ISpecID];
		int dims = ispec[RPF_ISpecDims];
		Eigen::VectorXd ptheta(dims);
		double *iparam = param + paramRows * ix;
		rpf_prob_t prob_fn = wantLog? librpf_model[id].logprob : librpf_model[id].prob;

		for (int qx=0; qx < quad.totalQuadPoints; qx++) {
			double *where = quad.wherePrep.data() + qx * maxDims;
			// items with fewer dims than maxDims reuse the last coordinate
			for (int dx=0; dx < dims; dx++) {
				ptheta[dx] = where[std::min(dx, maxDims-1)];
			}
			(*prob_fn)(ispec, iparam, ptheta.data(), qProb);
			qProb += itemOutcomes[ix];
		}
	}
}

// Dense-covariance row likelihood: out[qx] = priQarea[qx] * prod over
// non-missing items of P(observed response | theta at point qx).
void ifaGroup::ba81LikelihoodSlow2(const int px, double *out)
{
	const int totalQuadPoints = quad.totalQuadPoints;
	double *oProb = outcomeProb;
	std::vector<double> &priQarea = quad.priQarea;

	for (int qx=0; qx < totalQuadPoints; ++qx) {
		out[qx] = priQarea[qx];
	}

	const int row = rowMap[px];
	for (int ix=0; ix < numItems(); ix++) {
		int pick = dataColumns[ix][row];
		if (pick == NA_INTEGER) {
			oProb += itemOutcomes[ix] * totalQuadPoints;  // skip this item's whole table
			continue;
		}
		pick -= 1;  // R responses are 1-based

		for (int qx=0; qx < totalQuadPoints; ++qx) {
			out[qx] *= oProb[pick];
			oProb += itemOutcomes[ix];
		}
	}
}

// Two-tier (Cai 2010) E-step part 1: build per-specific-group likelihoods
// lxk, group sums Eis, and the marginal Ei per primary point.
void ifaGroup::cai2010EiEis(const int px, double *lxk, double *Eis, double *Ei)
{
	double *oProb = outcomeProb;
	const int totalQuadPoints = quad.totalQuadPoints;
	const int totalPrimaryPoints = quad.totalPrimaryPoints;
	const int specificPoints = quad.quadGridSize;
	std::vector<double> &speQarea = quad.speQarea;
	std::vector<double> &priQarea = quad.priQarea;

	// seed lxk with the specific-dimension quadrature areas
	for (int qx=0, qloc = 0; qx < totalPrimaryPoints; qx++) {
		for (int sx=0; sx < specificPoints * numSpecific; sx++) {
			lxk[qloc] = speQarea[sx];
			++qloc;
		}
	}

	const int row = rowMap[px];
	for (int ix=0; ix < numItems(); ix++) {
		int pick = dataColumns[ix][row];
		if (pick == NA_INTEGER) {
			oProb += itemOutcomes[ix] * totalQuadPoints;
			continue;
		}
		pick -= 1;  // R responses are 1-based
		int Sgroup1 = Sgroup[ix];
		double *out1 = lxk;
		// multiply only the stripe belonging to this item's specific group
		for (int qx=0; qx < quad.totalQuadPoints; qx++) {
			out1[Sgroup1] *= oProb[pick];
			oProb += itemOutcomes[ix];
			out1 += numSpecific;
		}
	}

	for (int qx=0; qx < totalPrimaryPoints * numSpecific; ++qx) Eis[qx] = 0;
	for (int qx=0; qx < totalPrimaryPoints; ++qx) Ei[qx] = priQarea[qx];

	// Eis = sum of lxk over the specific quadrature; Ei = priQarea * prod(Eis)
	int eisloc = 0;
	for (int qx=0, qloc = 0; qx < totalPrimaryPoints; qx++) {
		for (int sx=0; sx < specificPoints; sx++) {
			for (int sgroup=0; sgroup < numSpecific; ++sgroup) {
				double piece = lxk[qloc];
				Eis[eisloc + sgroup] += piece;
				++qloc;
			}
		}
		for (int sgroup=0; sgroup < numSpecific; ++sgroup) {
			Ei[qx] *= Eis[eisloc + sgroup] * quad.getReciprocalOfOne();
		}
		eisloc += numSpecific;
	}
}

// Two-tier E-step part 2: rescale Qweight by Ei/Eis so the weight table
// integrates over the specific dimensions correctly.
void ifaGroup::cai2010part2(double *Qweight, double *Eis, double *Ei)
{
	const int totalPrimaryPoints = quad.totalPrimaryPoints;
	const int specificPoints = quad.quadGridSize;

	for (int qx=0, qloc = 0; qx < totalPrimaryPoints; qx++) {
		for (int sgroup=0; sgroup < numSpecific; ++sgroup) {
			Eis[qloc] = Ei[qx] / Eis[qloc];
			++qloc;
		}
	}

	for (int qloc=0, eisloc=0; eisloc < totalPrimaryPoints * numSpecific; eisloc += numSpecific) {
		for (int sx=0; sx < specificPoints; sx++) {
			for (int Sgroup=0; Sgroup < numSpecific; Sgroup++) {
				Qweight[qloc] *= Eis[eisloc + Sgroup];
				++qloc;
			}
		}
	}
}

// Tag types selecting the covariance structure at compile time.
struct BA81Dense {};
struct BA81TwoTier {};

struct BA81EngineBase {
	inline int getPrimaryPoints(class ifaGroup *state) { return state->quad.totalPrimaryPoints; };
	inline double getPatLik(class ifaGroup *state, int px, double *lxk);
};

// Sum the per-point likelihood for row px, record it in patternLik, and
// return it; invalid (non-finite/tiny) patterns are excluded and return 0.
double BA81EngineBase::getPatLik(class ifaGroup *state, int px, double *lxk)
{
	const int pts = getPrimaryPoints(state);
	Eigen::ArrayXd &patternLik = state->patternLik;
	double patternLik1 = 0;

	for (int qx=0; qx < pts; qx++) {
		patternLik1 += lxk[qx];
	}

	// This uses the previous iteration's latent distribution.
	// If we recompute patternLikelihood to get the current
	// iteration's expected scores then it speeds up convergence.
	// However, recomputing patternLikelihood and dependent
	// math takes much longer than simply using the data
	// we have available here. This is even more true for the
	// two-tier model.
	if (!ifaGroup::validPatternLik(patternLik1)) {
#pragma omp atomic
		state->excludedPatterns += 1;
		patternLik[px] = 0;
		return 0;
	}
	patternLik[px] = patternLik1;
	return patternLik1;
}

// No-op E-step policy: plug in when only the latent policy's work is needed.
template <typename T, typename CovType>
struct BA81OmitEstep {
	void begin(class ifaGroup *state, T extraData) {};
	void addRow(class ifaGroup *state, T extraData, int px, double *Qweight, int thrId) {};
	void recordTable(class ifaGroup *state, T extraData) {};
	bool hasEnd() { return false; }
};

// Policy-based E-step engine; specialized below per covariance structure.
template <
	typename T,
	typename CovTypePar,
	template <typename> class LatentPolicy,
	template <typename, typename> class EstepPolicy
>
struct BA81Engine : LatentPolicy<T>, EstepPolicy<T, CovTypePar>, BA81EngineBase {
	void ba81Estep1(class ifaGroup *state, T extraData);
};

template <
	typename T,
	template <typename> class LatentPolicy,
	template <typename, typename> class EstepPolicy
>
struct BA81Engine<T, BA81Dense, LatentPolicy, EstepPolicy> :
	LatentPolicy<T>, EstepPolicy<T, BA81Dense>, BA81EngineBase {
	typedef BA81Dense CovType;
	void ba81Estep1(class ifaGroup *state, T extraData);
};

// Dense-covariance E-step: per row, compute the likelihood table, record the
// pattern likelihood, then hand normalized weights to the policies.
template <
	typename T,
	template <typename> class LatentPolicy,
	template <typename, typename> class EstepPolicy
>
void BA81Engine<T, BA81Dense, LatentPolicy, EstepPolicy>::ba81Estep1(class ifaGroup *state, T extraData)
{
	ba81NormalQuad &quad = state->getQuad();
	const int numUnique = state->getNumUnique();
	const int numThreads = state->numThreads;
	Eigen::VectorXd thrQweight;
	thrQweight.resize(quad.weightTableSize * numThreads);  // one weight table per thread
	state->excludedPatterns = 0;
	state->patternLik.resize(numUnique);
	Eigen::ArrayXd &patternLik = state->patternLik;
	std::vector<bool> &rowSkip = state->rowSkip;

	EstepPolicy<T, CovType>::begin(state, extraData);
	LatentPolicy<T>::begin(state, extraData);

#pragma omp parallel for num_threads(numThreads)
	for (int px=0; px < numUnique; px++) {
		if (rowSkip[px]) {
			patternLik[px] = 0;
			continue;
		}

		int thrId = omp_get_thread_num();
		double *Qweight = thrQweight.data() + quad.weightTableSize * thrId;
		state->ba81LikelihoodSlow2(px, Qweight);

		double patternLik1 = getPatLik(state, px, Qweight);
		if (patternLik1 == 0) continue;

		LatentPolicy<T>::normalizeWeights(state, extraData, px, Qweight, patternLik1, thrId);
		EstepPolicy<T, CovType>::addRow(state, extraData, px, Qweight, thrId);
	}

	// run the two finalization steps concurrently when both policies have one
	if (EstepPolicy<T, CovType>::hasEnd() && LatentPolicy<T>::hasEnd()) {
#pragma omp parallel sections
		{
			{ EstepPolicy<T, CovType>::recordTable(state, extraData); }
#pragma omp section
			{ LatentPolicy<T>::end(state, extraData); }
		}
	} else {
		EstepPolicy<T, CovType>::recordTable(state, extraData);
		LatentPolicy<T>::end(state, extraData);
	}
}

template <
	typename T,
	template <typename> class LatentPolicy,
	template <typename, typename> class EstepPolicy
>
struct BA81Engine<T, BA81TwoTier, LatentPolicy, EstepPolicy> :
	LatentPolicy<T>, EstepPolicy<T, BA81TwoTier>, BA81EngineBase {
	typedef BA81TwoTier CovType;
	void ba81Estep1(class ifaGroup *state, T extraData);
};

// Two-tier E-step: uses cai2010EiEis/cai2010part2 for the specific-dimension
// reduction; otherwise mirrors the dense version above.
template <
	typename T,
	template <typename> class LatentPolicy,
	template <typename, typename> class EstepPolicy
>
void BA81Engine<T, BA81TwoTier, LatentPolicy, EstepPolicy>::ba81Estep1(class ifaGroup *state, T extraData)
{
	ba81NormalQuad &quad = state->getQuad();
	const int numSpecific = quad.numSpecific;
	const int numUnique = state->getNumUnique();
	const int numThreads = state->numThreads;
	Eigen::VectorXd thrQweight;
	thrQweight.resize(quad.weightTableSize * numThreads);
	state->excludedPatterns = 0;
	state->patternLik.resize(numUnique);
	Eigen::ArrayXd &patternLik = state->patternLik;
	std::vector<bool> &rowSkip = state->rowSkip;

	EstepPolicy<T, CovType>::begin(state, extraData);
	LatentPolicy<T>::begin(state, extraData);

	const int totalPrimaryPoints = quad.totalPrimaryPoints;
	Eigen::ArrayXXd thrEi(totalPrimaryPoints, numThreads);
	Eigen::ArrayXXd thrEis(totalPrimaryPoints * numSpecific, numThreads);

#pragma omp parallel for num_threads(numThreads)
	for (int px=0; px < numUnique; px++) {
		if (rowSkip[px]) {
			patternLik[px] = 0;
			continue;
		}

		int thrId = omp_get_thread_num();
		double *Qweight = thrQweight.data() + quad.weightTableSize * thrId;
		double *Ei = &thrEi.coeffRef(0, thrId);
		double *Eis = &thrEis.coeffRef(0, thrId);
		state->cai2010EiEis(px, Qweight, Eis, Ei);

		double patternLik1 = getPatLik(state, px, Ei);
		if (patternLik1 == 0) continue;

		// part2 is only needed when some policy consumes the weights
		if (!EstepPolicy<T, CovType>::hasEnd() && !LatentPolicy<T>::hasEnd()) continue;

		state->cai2010part2(Qweight, Eis, Ei);
		LatentPolicy<T>::normalizeWeights(state, extraData, px, Qweight, patternLik1, thrId);
		EstepPolicy<T, CovType>::addRow(state, extraData, px, Qweight, thrId);
	}

	if (EstepPolicy<T, CovType>::hasEnd() && LatentPolicy<T>::hasEnd()) {
#pragma omp parallel sections
		{
			{ EstepPolicy<T, CovType>::recordTable(state, extraData); }
#pragma omp section
			{ LatentPolicy<T>::end(state, extraData); }
		}
	} else {
		EstepPolicy<T, CovType>::recordTable(state, extraData);
		LatentPolicy<T>::end(state, extraData);
	}
}

#endif
bitcoin_fmt_plug.c
/* bitcoin-qt (bitcoin) wallet cracker patch for JtR. Hacked together during
 * April of 2013 by Dhiru Kholia <dhiru at openwall dot com>.
 *
 * Also works for Litecoin-Qt (litecoin) wallet files!
 *
 * This software is Copyright (c) 2013, Dhiru Kholia <dhiru at openwall dot com>,
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted.
 *
 * This cracks password protected bitcoin (bitcoin-qt) "wallet" files.
 *
 * bitcoin => https://github.com/bitcoin/bitcoin
 *
 * Thanks to Solar for asking to add support for bitcoin wallet files.
 *
 * Works fine with bitcoin-core-0.14.0 from March, 2017.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_bitcoin;
#elif FMT_REGISTERS_H
john_register_one(&fmt_bitcoin);
#else

#include <stdint.h>
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1
#endif
static int omp_t = 1;
#endif

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "sha2.h"
#include "aes.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#include "jumbo.h"
#include "memdbg.h"

#define FORMAT_LABEL            "Bitcoin"
#define FORMAT_NAME             "Bitcoin Core"
#define FORMAT_TAG              "$bitcoin$"
#define FORMAT_TAG_LEN          (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME          "SHA512 AES " SHA512_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME          "SHA512 AES 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME          "SHA512 AES 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif
#if !defined (SHA512_DIGEST_LENGTH)
#define SHA512_DIGEST_LENGTH    64
#endif
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        -1
#define PLAINTEXT_LENGTH        125
#define BINARY_SIZE             0
#define BINARY_ALIGN            1
#define SALT_ALIGN              sizeof(int)
#define SALT_SIZE               sizeof(struct custom_salt)
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT      (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT      (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

/* maximum byte length of each variable-length salt field */
#define SZ                      128

static struct fmt_tests bitcoin_tests[] = {
	/* bitcoin wallet hashes */
	{"$bitcoin$96$169ce74743c260678fbbba92e926198702fd84e46ba555190f6f3d82f6852e4adeaa340d2ac065288e8605f13d1d7c86$16$26049c64dda292d5$177864$96$62aee49c1967b5635b663fc3b047d8bc562f7000921453ab15b98e5a5f2d2adc74393e789fe15c5a3fbc4625536be98a$66$020027f255fbfa6d4c010a1a5984e487443c68e1b32869ccfde92e92005814fd27", "openwall"},
	{"$bitcoin$96$bd97a08e00e38910550e76848949285b9702fe64460f70d464feb2b63f83e1194c745e58fa4a0f09ac35e5777c507839$16$26049c64dda292d5$258507$96$62aee49c1967b5635b663fc3b047d8bc562f7000921453ab15b98e5a5f2d2adc74393e789fe15c5a3fbc4625536be98a$66$020027f255fbfa6d4c010a1a5984e487443c68e1b32869ccfde92e92005814fd27", "password"},
	{"$bitcoin$96$4eca412eeb04971428efec70c9e18fb9375be0aa105e7eec55e528d0ba33a07eb6302add36da86736054dee9140ec9b8$16$26049c64dda292d5$265155$96$62aee49c1967b5635b663fc3b047d8bc562f7000921453ab15b98e5a5f2d2adc74393e789fe15c5a3fbc4625536be98a$66$020027f255fbfa6d4c010a1a5984e487443c68e1b32869ccfde92e92005814fd27", "strongpassword"},
	/* litecoin wallet hash */
	{"$bitcoin$96$54401984b32448917b6d18b7a11debe91d62aaa343ab62ed98e1d3063f30817832c744360331df94cbf1dcececf6d00e$16$bfbc8ee2c07bbb4b$194787$96$07a206d5422640cfa65a8482298ad8e8598b94d99e2c4ce09c9d015b734632778cb46541b8c10284b9e14e5468b654b9$66$03fe6587bf580ee38b719f0b8689c80d300840bbc378707dce51e6f1fe20f49c20", "isyourpasswordstronger"},
	/* bitcoin-core-0.14.0 wallet */
	{"$bitcoin$96$8e7be42551c822c7e55a384e15b4fbfec69ceaed000925870dfb262d3381ed4405507f6c94defbae174a218eed0b5ce8$16$b469e6dbd76926cf$244139$96$ec03604094ada8a5d76bbdb455d260ac8b202ec475d5362d334314c4e7012a2f4b8f9cf8761c9862cd20892e138cd29e$66$03fdd0341a72d1a119ea1de51e477f0687a2bf601c07c032cc87ef82e0f8f49b19", "password@12345"},
	/* bitcoin-core-0.14.0 wallet */
	{"$bitcoin$96$2559c50151aeec013a9820c571fbee02e5892a3ead07607ee8de9d0ff55798cff6fe60dbd71d7873cb794a03e0d63b70$16$672204f8ab168ff6$136157$96$a437e8bd884c928603ee00cf85eaaf9245a071efa763db03ab485cb757f155976edc7294a6a731734f383850fcac4316$66$03ff84bb48f454662b91a6e588af8752da0674efa5dae82e7340152afcc38f4ba4", "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int any_cracked, *cracked;
static size_t cracked_size;

/* decoded (binary) fields parsed out of one "$bitcoin$..." ciphertext */
static struct custom_salt {
	unsigned char cry_master[SZ];
	int cry_master_length;
	unsigned char cry_salt[SZ];
	int cry_salt_length;
	int cry_rounds;
	unsigned char ckey[SZ];
	int ckey_length;
	unsigned char public_key[SZ];
	int public_key_length;
} *cur_salt;

/* Allocate per-candidate buffers, scaled by the OpenMP thread count. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_align(sizeof(*saved_key),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	any_cracked = 0;
	cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
	cracked = mem_calloc_align(sizeof(*cracked),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}

// #define BTC_DEBUG
#ifdef BTC_DEBUG
/* Debug helper: dump len bytes of str as lowercase hex. */
static void print_hex(unsigned char *str, int len)
{
	int i;
	for (i = 0; i < len; ++i)
		printf("%02x", str[i]);
	printf("\n");
}
#endif

/* Validate the ciphertext syntax: tag followed by length/hex pairs for
 * cry_master, cry_salt, rounds, ckey and public_key.  Each stated length
 * must match the hex string's actual length and fit in SZ bytes. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p = NULL;
	int res;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "$")) == NULL) /* cry_master_length (of the hex string) */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* cry_master */
		goto err;
	if (strlen(p) != res || strlen(p) > SZ * 2) /* validates atoi() and cry_master */
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* cry_salt_length (length of hex string) */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* cry_salt */
		goto err;
	if (strlen(p) != res || strlen(p) > SZ * 2) /* validates atoi() and cry_salt */
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* cry_rounds */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* ckey_length (of hex) */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* ckey */
		goto err;
	if (strlen(p) != res || strlen(p) > SZ * 2) /* validates atoi() and ckey */
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) /* public_key_length */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if ((p = strtokm(NULL, "$")) == NULL) /* public_key */
		goto err;
	if (strlen(p) != res || strlen(p) > SZ * 2) /* validates atoi() and public_key */
		goto err;
	if (!ishexlc(p))
		goto err;

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/* Parse the (already validated) ciphertext and hex-decode each field into
 * a static custom_salt.  The stated lengths count hex digits, hence / 2. */
static void *get_salt(char *ciphertext)
{
	int i;
	char *p;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;
	p = strtokm(ctcopy, "$");
	cs.cry_master_length = atoi(p) / 2;
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.cry_master_length; i++)
		cs.cry_master[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$");
	cs.cry_salt_length = atoi(p) / 2;
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.cry_salt_length; i++)
		cs.cry_salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$");
	cs.cry_rounds = atoi(p);
	p = strtokm(NULL, "$");
	cs.ckey_length = atoi(p) / 2;
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.ckey_length; i++)
		cs.ckey[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "$");
	cs.public_key_length = atoi(p) / 2;
	p = strtokm(NULL, "$");
	for (i = 0; i < cs.public_key_length; i++)
		cs.public_key[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Core loop: derive key||iv = SHA512^rounds(password || cry_salt) — the
 * first 32 bytes are the AES-256 key, the next 16 the CBC IV — then decrypt
 * cry_master and accept the candidate when the PKCS#7 padding check shows a
 * 32-byte plaintext (the encrypted master key). */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
		unsigned char output[SZ];
		SHA512_CTX sha_ctx;
		int i;
#ifdef SIMD_COEF_64
		char unaligned_buf[MAX_KEYS_PER_CRYPT*SHA_BUF_SIZ*sizeof(uint64_t)+MEM_ALIGN_SIMD];
		uint64_t *key_iv = (uint64_t*)mem_align(unaligned_buf, MEM_ALIGN_SIMD);
		JTR_ALIGN(8) unsigned char hash1[SHA512_DIGEST_LENGTH]; // 512 bits
		int index2;

		for (index2 = 0; index2 < MAX_KEYS_PER_CRYPT; index2++) {
			// The first hash for this password
			SHA512_Init(&sha_ctx);
			SHA512_Update(&sha_ctx, saved_key[index+index2], strlen(saved_key[index+index2]));
			SHA512_Update(&sha_ctx, cur_salt->cry_salt, cur_salt->cry_salt_length);
			SHA512_Final(hash1, &sha_ctx);
			// Now copy and convert hash1 from flat into SIMD_COEF_64 buffers.
			for (i = 0; i < SHA512_DIGEST_LENGTH/sizeof(uint64_t); ++i) {
#if COMMON_DIGEST_FOR_OPENSSL
				key_iv[SIMD_COEF_64*i + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = sha_ctx.hash[i]; // this is in BE format
#else
				key_iv[SIMD_COEF_64*i + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = sha_ctx.h[i];
#endif
			}
			// We need to set ONE time, the upper half of the data buffer. We put the 0x80 byte (in BE format), at offset
			// 512-bits (SHA512_DIGEST_LENGTH) multiplied by the SIMD_COEF_64 (same as MAX_KEYS_PER_CRYPT), then zero
			// out the rest of the buffer, putting 512 (#bits) at the end. Once this part of the buffer is set up, we never
			// touch it again, for the rest of the crypt. We simply overwrite the first half of this buffer, over and over
			// again, with BE results of the prior hash.
			key_iv[ SHA512_DIGEST_LENGTH/sizeof(uint64_t) * SIMD_COEF_64 + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64 ] = 0x8000000000000000ULL;
			for (i = (SHA512_DIGEST_LENGTH/sizeof(uint64_t)+1); i < 15; i++)
				key_iv[i*SIMD_COEF_64 + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = 0;
			key_iv[15*SIMD_COEF_64 + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64] = (SHA512_DIGEST_LENGTH << 3);
		}

		for (i = 1; i < cur_salt->cry_rounds; i++)  // start at 1; the first iteration is already done
			SIMDSHA512body(key_iv, key_iv, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);

		for (index2 = 0; index2 < MAX_KEYS_PER_CRYPT; index2++) {
			AES_KEY aes_key;
			unsigned char key[32];
			unsigned char iv[16];
			// Copy and convert from SIMD_COEF_64 buffers back into flat buffers, in little-endian
			for (i = 0; i < sizeof(key)/sizeof(uint64_t); i++)  // the derived key
				((uint64_t *)key)[i] = JOHNSWAP64(key_iv[SIMD_COEF_64*i + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64]);
			for (i = 0; i < sizeof(iv)/sizeof(uint64_t); i++)  // the derived iv
				((uint64_t *)iv)[i] = JOHNSWAP64(key_iv[SIMD_COEF_64*(sizeof(key)/sizeof(uint64_t) + i) + (index2&(SIMD_COEF_64-1)) + index2/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64]);
			AES_set_decrypt_key(key, 256, &aes_key);
			AES_cbc_encrypt(cur_salt->cry_master, output, cur_salt->cry_master_length, &aes_key, iv, AES_DECRYPT);
			if (check_pkcs_pad(output, cur_salt->cry_master_length, 16) == 32) {
				cracked[index + index2] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
				any_cracked |= 1;
			}
		}
#else
		AES_KEY aes_key;
		unsigned char key_iv[SHA512_DIGEST_LENGTH];  // buffer for both the derived key and iv
		SHA512_Init(&sha_ctx);
		SHA512_Update(&sha_ctx, saved_key[index], strlen(saved_key[index]));
		SHA512_Update(&sha_ctx, cur_salt->cry_salt, cur_salt->cry_salt_length);
		SHA512_Final(key_iv, &sha_ctx);
		for (i = 1; i < cur_salt->cry_rounds; i++) {  // start at 1; the first iteration is already done
			SHA512_Init(&sha_ctx);
			SHA512_Update(&sha_ctx, key_iv, SHA512_DIGEST_LENGTH);
			SHA512_Final(key_iv, &sha_ctx);
		}
		AES_set_decrypt_key(key_iv, 256, &aes_key);
		AES_cbc_encrypt(cur_salt->cry_master, output, cur_salt->cry_master_length, &aes_key, key_iv + 32, AES_DECRYPT);
		if (check_pkcs_pad(output, cur_salt->cry_master_length, 16) == 32) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
#endif
	}
	return count;
}

static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

static int cmp_exact(char *source, int index)
{
	return cracked[index];
}

/* Store a candidate password, truncated to PLAINTEXT_LENGTH. */
static void bitcoin_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Tunable-cost report: number of SHA-512 key-stretching rounds. */
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->cry_rounds;
}

struct fmt_main fmt_bitcoin = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"iteration count",
		},
		{ FORMAT_TAG },
		bitcoin_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		bitcoin_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
iwork_fmt_plug.c
/* JtR format to crack iWork '09, and '13 / '14 files. * * This software is Copyright (c) 2015, Dhiru Kholia <kholia at kth.se> and * Maxime Hulliger <hulliger at kth.se>, and it is hereby released to the * general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * This code may be freely used and modified for any purpose. * * Big thanks to Sean Patrick O'Brien for making this format possible. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_iwork; #elif FMT_REGISTERS_H john_register_one(&fmt_iwork); #else #include <string.h> #include <assert.h> #include <errno.h> #include <openssl/des.h> #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 64 #endif #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "pbkdf2_hmac_sha1.h" #include "jumbo.h" #include "memdbg.h" #include "iwork_common.h" #define FORMAT_LABEL "iwork" #define FORMAT_NAME "Apple iWork '09 / '13 / '14" #ifdef SIMD_COEF_32 #define ALGORITHM_NAME "PBKDF2-SHA1 AES " SHA1_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA1 AES 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 0 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(*fctx) #define BINARY_ALIGN 1 #define SALT_ALIGN sizeof(int) #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #if defined (_OPENMP) static int omp_t = 1; #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked, cracked_count; static struct format_context *fctx; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key 
= mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt); cracked_count = self->params.max_keys_per_crypt; } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); } static void set_salt(void *salt) { fctx = (struct format_context *)salt; } static void iwork_set_key(char *key, int index) { strnzcpy(saved_key[index], key, sizeof(*saved_key)); } static char *get_key(int index) { return saved_key[index]; } static int iwork_decrypt(struct format_context *fctx, unsigned char *key, unsigned char *iv, unsigned char *data) { unsigned char out[BLOBLEN]; unsigned char ivec[IVLEN]; uint8_t hash[32]; SHA256_CTX ctx; AES_KEY aes_decrypt_key; AES_set_decrypt_key(key, 128, &aes_decrypt_key); memcpy(ivec, iv, 16); AES_cbc_encrypt(fctx->blob, out, BLOBLEN, &aes_decrypt_key, ivec, AES_DECRYPT); // The last 32 bytes should be equal to the SHA256 of the first 32 bytes (IWPasswordVerifier.m) SHA256_Init(&ctx); SHA256_Update(&ctx, out, 32); SHA256_Final(hash, &ctx); return memcmp(hash, &out[32], 32) == 0; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; memset(cracked, 0, sizeof(cracked[0])*cracked_count); #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { unsigned char master[MAX_KEYS_PER_CRYPT][16]; int i; #ifdef SIMD_COEF_32 int lens[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { lens[i] = strlen(saved_key[index+i]); pin[i] = (unsigned char*)saved_key[index+i]; pout[i] = master[i]; } pbkdf2_sha1_sse((const unsigned char**)pin, lens, fctx->salt, fctx->salt_length, fctx->iterations, pout, 16, 0); #else for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) pbkdf2_sha1((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), fctx->salt, fctx->salt_length, fctx->iterations, master[i], 16, 0); #endif for (i = 0; 
i < MAX_KEYS_PER_CRYPT; ++i) { cracked[index+i] = iwork_decrypt(fctx, master[i], fctx->iv, fctx->blob); } } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_iwork = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG }, iwork_tests }, { init, done, fmt_default_reset, fmt_default_prepare, iwork_common_valid, fmt_default_split, fmt_default_binary, iwork_common_get_salt, { iwork_common_iteration_count, }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, iwork_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
GB_unop__identity_int16_uint8.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_int16_uint8)
// op(A') function:  GB (_unop_tran__identity_int16_uint8)

// C type:   int16_t
// A type:   uint8_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = aij

// The "operation" here is the IDENTITY op: each entry is only typecast from
// uint8_t to int16_t (a widening conversion, so no values change).

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = (int16_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_int16_uint8)
(
    int16_t *Cx,        // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: every one of the anz entries is
        // present, so the loop body has no test on Ab.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel is entirely defined by the template below, driven by
// the GB_* macros defined above.

GrB_Info GB (_unop_tran__identity_int16_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 8; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
GB_binop__isge_fp32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isge_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__isge_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__isge_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__isge_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isge_fp32)
// A*D function (colscale):         GB (_AxD__isge_fp32)
// D*A function (rowscale):         GB (_DxB__isge_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__isge_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__isge_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isge_fp32)
// C=scalar+B                       GB (_bind1st__isge_fp32)
// C=scalar+B'                      GB (_bind1st_tran__isge_fp32)
// C=A+scalar                       GB (_bind2nd__isge_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__isge_fp32)

// C type:   float
// A type:   float
// A pattern? 0
// B type:   float
// B pattern? 0

// BinaryOp: cij = (aij >= bij)

// ISGE: "is greater or equal"; the C comparison yields 0 or 1, stored as
// float (the result type of ISGE_FP32 is float, not bool).

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x >= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGE || GxB_NO_FP32 || GxB_NO_ISGE_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISGE is not in that list, so this variant is compiled out (#if 0) and the
// generator emits the placeholder name GB ((none)).

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__isge_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isge_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isge_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isge_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isge_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isge_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only used by GxB_eWiseUnion (entries present in only
    // one input are paired with the given scalar instead of being copied).
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar  = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isge_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isge_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isge_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isge_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isge_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // GBB handles the bitmap case: skip entries not present in B
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isge_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = (x >= aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isge_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE after the template (identical here since A is float)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    float aij = GBX (Ax, pA, false) ;           \
    Cx [pC] = (aij >= y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isge_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__isge_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__isge_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint16) // A*D function (colscale): GB (_AxD__isge_uint16) // D*A function (rowscale): GB (_DxB__isge_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__isge_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__isge_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint16) // C=scalar+B GB (_bind1st__isge_uint16) // C=scalar+B' GB (_bind1st_tran__isge_uint16) // C=A+scalar GB (_bind2nd__isge_uint16) // C=A'+scalar GB (_bind2nd_tran__isge_uint16) // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types 
of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x >= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGE || GxB_NO_UINT16 || GxB_NO_ISGE_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isge_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isge_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isge_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isge_uint16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
//------------------------------------------------------------------------------
// (tail of the preceding colscale kernel: its signature begins before this
// chunk; only the trailing parameters and the body are visible here)
//------------------------------------------------------------------------------

    int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    // kernel body is supplied by the shared template; it presumably applies
    // the same z = (x >= y) operator seen in the bind1st/bind2nd kernels below
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Generated factory kernel: row-scale B by the diagonal matrix D, writing
// uint16_t results into C->x.  The loop itself lives in the included template.
GrB_Info GB (_DxB__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// Generated factory kernel for element-wise add.  The workspaces declared with
// GB_WERK_DECLARE are released by GB_FREE_WORK after the template runs.
GrB_Info GB (_AaddB__isge_uint16)
(
    GrB_Matrix C, const int C_sparsity,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isge_uint16)
(
    GrB_Matrix C, const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isge_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = (x >= Bx [p]) for every present entry p of B, in parallel.
// GBB tests the bitmap pattern Bb; GBX fetches the entry value.
GrB_Info GB (_bind1st__isge_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = (Ax [p] >= y) for every present entry p of A, in parallel.
GrB_Info GB (_bind2nd__isge_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x >= aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isge_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent kernels
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij >= y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isge_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
tinyexr.h
/* Copyright (c) 2014 - 2018, Syoyo Fujita and many contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////// // End of OpenEXR license ------------------------------------------------- #ifndef TINYEXR_H_ #define TINYEXR_H_ // // // Do this: // #define TINYEXR_IMPLEMENTATION // before you include this file in *one* C or C++ file to create the // implementation. // // // i.e. it should look like this: // #include ... // #include ... // #include ... 
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//

#include <stddef.h>  // for size_t
#include <stdint.h>  // guess stdint.h is available(C99)

#ifdef __cplusplus
extern "C" {
#endif

// Use embedded miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (0)
#endif

// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif

#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0)  // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif

// Error codes returned by the TinyEXR API (0 on success, negative on failure).
#define TINYEXR_SUCCESS (0)
#define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1)
#define TINYEXR_ERROR_INVALID_EXR_VERSION (-2)
#define TINYEXR_ERROR_INVALID_ARGUMENT (-3)
#define TINYEXR_ERROR_INVALID_DATA (-4)
#define TINYEXR_ERROR_INVALID_FILE (-5)
// NOTE(review): INVALID_PARAMETER shares the value -5 with INVALID_FILE, so
// callers cannot tell these two errors apart; renumbering would change the
// public ABI, so it is only flagged here.
#define TINYEXR_ERROR_INVALID_PARAMETER (-5)
#define TINYEXR_ERROR_CANT_OPEN_FILE (-6)
#define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-7)
#define TINYEXR_ERROR_INVALID_HEADER (-8)
#define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-9)

// @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf }

// pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2
#define TINYEXR_PIXELTYPE_UINT (0)
#define TINYEXR_PIXELTYPE_HALF (1)
#define TINYEXR_PIXELTYPE_FLOAT (2)

#define TINYEXR_MAX_ATTRIBUTES (128)

#define TINYEXR_COMPRESSIONTYPE_NONE (0)
#define TINYEXR_COMPRESSIONTYPE_RLE (1)
#define TINYEXR_COMPRESSIONTYPE_ZIPS (2)
#define TINYEXR_COMPRESSIONTYPE_ZIP (3)
#define TINYEXR_COMPRESSIONTYPE_PIZ (4)
#define TINYEXR_COMPRESSIONTYPE_ZFP (128)  // TinyEXR extension

#define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0)
#define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1)
#define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2)

#define TINYEXR_TILE_ONE_LEVEL (0)
#define TINYEXR_TILE_MIPMAP_LEVELS (1)
#define TINYEXR_TILE_RIPMAP_LEVELS (2)

#define TINYEXR_TILE_ROUND_DOWN (0)
#define TINYEXR_TILE_ROUND_UP (1)

// Version/feature flags parsed from the EXR file header.
typedef struct _EXRVersion {
  int version;    // this must be 2
  int tiled;      // tile format image
  int long_name;  // long name attribute
  int non_image;  // deep image(EXR 2.0)
  int multipart;  // multi-part(EXR 2.0)
} EXRVersion;

// A single custom header attribute (raw bytes plus name/type strings).
typedef struct _EXRAttribute {
  char name[256];  // name and type are up to 255 chars long.
  char type[256];
  unsigned char *value;  // uint8_t*
  int size;
  int pad0;
} EXRAttribute;

typedef struct _EXRChannelInfo {
  char name[256];  // less than 255 bytes long
  int pixel_type;
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} EXRChannelInfo;

typedef struct _EXRTile {
  int offset_x;
  int offset_y;
  int level_x;
  int level_y;

  int width;   // actual width in a tile.
  int height;  // actual height in a tile.

  unsigned char **images;  // image[channels][pixels]
} EXRTile;

// Parsed OpenEXR header: geometry, tiling info, channels, compression, and
// any custom attributes.
typedef struct _EXRHeader {
  float pixel_aspect_ratio;
  int line_order;
  int data_window[4];
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;

  int chunk_count;

  // Properties for tiled format(`tiledesc`).
  int tiled;
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  int long_name;
  int non_image;
  int multipart;
  unsigned int header_len;

  // Custom attributes(excludes required attributes(e.g. `channels`,
  // `compression`, etc)
  int num_custom_attributes;
  EXRAttribute custom_attributes[TINYEXR_MAX_ATTRIBUTES];

  EXRChannelInfo *channels;  // [num_channels]

  int *pixel_types;  // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for
                     // each channel. This is overwritten with
                     // `requested_pixel_types` when loading.
  int num_channels;

  int compression_type;        // compression type(TINYEXR_COMPRESSIONTYPE_*)
  int *requested_pixel_types;  // Filled initially by
                               // ParseEXRHeaderFrom(Memory|File), then users
                               // can edit it(only valid for HALF pixel type
                               // channel)
} EXRHeader;

typedef struct _EXRMultiPartHeader {
  int num_headers;
  EXRHeader *headers;
} EXRMultiPartHeader;

// Decoded image data: either `images` (scanline) or `tiles` (tiled) is set,
// never both.
typedef struct _EXRImage {
  EXRTile *tiles;  // Tiled pixel data. The application must reconstruct image
                   // from tiles manually. NULL if scanline format.
  unsigned char **images;  // image[channels][pixels]. NULL if tiled format.

  int width;
  int height;
  int num_channels;

  // Properties for tile format.
  int num_tiles;
} EXRImage;

typedef struct _EXRMultiPartImage {
  int num_images;
  EXRImage *images;
} EXRMultiPartImage;

typedef struct _DeepImage {
  const char **channel_names;
  float ***image;      // image[channels][scanlines][samples]
  int **offset_table;  // offset_table[scanline][offsets]
  int num_channels;
  int width;
  int height;
  int pad0;
} DeepImage;

// @deprecated { to be removed. }
// Loads single-frame OpenEXR image. Assume EXR image contains A(single channel
// alpha) or RGB(A) channels.
// Application must free image data as returned by `out_rgba`
// Result image format is: float x RGBA x width x height
// Returns negative value and may set error string in `err` when there's an
// error
extern int LoadEXR(float **out_rgba, int *width, int *height,
                   const char *filename, const char **err);

// @deprecated { to be removed. }
// Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels.
// components must be 1(Grayscale), 3(RGB) or 4(RGBA).
// Input image format is: `float x width x height`, or `float x RGB(A) x width x
// height`
// Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero
// value.
// Save image as fp32(FLOAT) format when `save_as_fp16` is 0.
extern int SaveEXR(const float *data, const int width, const int height,
                   const int components, const int save_as_fp16,
                   const char *filename);

// Initialize EXRHeader struct
extern void InitEXRHeader(EXRHeader *exr_header);

// Initialize EXRImage struct
extern void InitEXRImage(EXRImage *exr_image);

// Free's internal data of EXRHeader struct
extern int FreeEXRHeader(EXRHeader *exr_header);

// Free's internal data of EXRImage struct
extern int FreeEXRImage(EXRImage *exr_image);

// Parse EXR version header of a file.
extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename); // Parse EXR version header from memory-mapped EXR data. extern int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size); // Parse single-part OpenEXR header from a file and initialize `EXRHeader`. extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version, const char *filename, const char **err); // Parse single-part OpenEXR header from a memory and initialize `EXRHeader`. extern int ParseEXRHeaderFromMemory(EXRHeader *header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*` // array. extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const char *filename, const char **err); // Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*` // array extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Loads single-part OpenEXR image from a file. // Application must setup `ParseEXRHeaderFromFile` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header, const char *filename, const char **err); // Loads single-part OpenEXR image from a memory. // Application must setup `EXRHeader` with // `ParseEXRHeaderFromMemory` before calling this function. 
// Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header, const unsigned char *memory, const size_t size, const char **err); // Loads multi-part OpenEXR image from a file. // Application must setup `ParseEXRMultipartHeaderFromFile` before calling this // function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXRMultipartImageFromFile(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const char *filename, const char **err); // Loads multi-part OpenEXR image from a memory. // Application must setup `EXRHeader*` array with // `ParseEXRMultipartHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXRMultipartImageFromMemory(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err); // Saves multi-channel, single-frame OpenEXR image to a file. // Returns negative value and may set error string in `err` when there's an // error extern int SaveEXRImageToFile(const EXRImage *image, const EXRHeader *exr_header, const char *filename, const char **err); // Saves multi-channel, single-frame OpenEXR image to a memory. // Image is compressed using EXRImage.compression value. // Return the number of bytes if succes. // Returns negative value and may set error string in `err` when there's an // error extern size_t SaveEXRImageToMemory(const EXRImage *image, const EXRHeader *exr_header, unsigned char **memory, const char **err); // Loads single-frame OpenEXR deep image. 
// Application must free memory of variables in DeepImage(image, offset_table) // Returns negative value and may set error string in `err` when there's an // error extern int LoadDeepEXR(DeepImage *out_image, const char *filename, const char **err); // NOT YET IMPLEMENTED: // Saves single-frame OpenEXR deep image. // Returns negative value and may set error string in `err` when there's an // error // extern int SaveDeepEXR(const DeepImage *in_image, const char *filename, // const char **err); // NOT YET IMPLEMENTED: // Loads multi-part OpenEXR deep image. // Application must free memory of variables in DeepImage(image, offset_table) // extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const // char *filename, // const char **err); // For emscripten. // Loads single-frame OpenEXR image from memory. Assume EXR image contains // RGB(A) channels. // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err); #ifdef __cplusplus } #endif #endif // TINYEXR_H_ #ifdef TINYEXR_IMPLEMENTATION #ifndef TINYEXR_IMPLEMENTATION_DEIFNED #define TINYEXR_IMPLEMENTATION_DEIFNED #include <algorithm> #include <cassert> #include <cstdio> #include <cstdlib> #include <cstring> #include <sstream> #include <limits> #include <string> #include <vector> #if __cplusplus > 199711L // C++11 #include <cstdint> #endif // __cplusplus > 199711L #ifdef _OPENMP #include <omp.h> #endif #if TINYEXR_USE_MINIZ #else // Issue #46. Please include your own zlib-compatible API header before // including `tinyexr.h` //#include "zlib.h" #endif #if TINYEXR_USE_ZFP #include "zfp.h" #endif namespace tinyexr { #if __cplusplus > 199711L // C++11 typedef uint64_t tinyexr_uint64; typedef int64_t tinyexr_int64; #else // Although `long long` is not a standard type pre C++11, assume it is defined // as a compiler's extension. 
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
// Pre-C++11 fallback: rely on the compiler's `long long` extension.
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif

#if TINYEXR_USE_MINIZ
namespace miniz {

// Embedded third-party miniz code follows; silence its warnings under clang.
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wunused-function"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#pragma clang diagnostic ignored "-Wundef"

#if __has_warning("-Wcomma")
#pragma clang diagnostic ignored "-Wcomma"
#endif

#if __has_warning("-Wmacro-redefined")
#pragma clang diagnostic ignored "-Wmacro-redefined"
#endif

#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif

#endif

/* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP
   reading/writing/appending, PNG writing
   See "unlicense" statement at the end of this file.
   Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
   Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951:
   http://www.ietf.org/rfc/rfc1951.txt

   Most API's defined in miniz.c are optional. For example, to disable the
   archive related functions just define
   MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO
   (see the list below for more macros).
* Change History 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major release with Zip64 support (almost there!): - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug (thanks kahmyong.moon@hp.com) which could cause locate files to not find files. This bug would only have occured in earlier versions if you explicitly used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you can't switch to v1.15 but want to fix this bug, just remove the uses of this flag from both helper funcs (and of course don't use the flag). - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when pUser_read_buf is not NULL and compressed size is > uncompressed size - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract compressed data from directory entries, to account for weird zipfiles which contain zero-size compressed data on dir entries. Hopefully this fix won't cause any issues on weird zip archives, because it assumes the low 16-bits of zip external attributes are DOS attributes (which I believe they always are in practice). - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the internal attributes, just the filename and external attributes - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed - Added cmake support for Linux builds which builds all the examples, tested with clang v3.3 and gcc v4.6. - Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti - Merged MZ_FORCEINLINE fix from hdeanclark - Fix <time.h> include before config #ifdef, thanks emil.brink - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping (super useful for OpenGL apps), and explicit control over the compression level (so you can set it to 1 for real-time compression). - Merged in some compiler fixes from paulharris's github repro. 
- Retested this build under Windows (VS 2010, including static analysis), tcc 0.9.26, gcc v4.6 and clang v3.3. - Added example6.c, which dumps an image of the mandelbrot set to a PNG file. - Modified example2 to help test the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix possible src file fclose() leak if alignment bytes+local header file write faiiled - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the wrong central dir header offset, appears harmless in this release, but it became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect). 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files. - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning). - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. The codeblocks workspace is compatible with Linux+Win32/x64. - Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test. - Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives. - Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.) 
- Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's. level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson <bruced@valvesoftware.com> for the feedback/bug report. 5/28/11 v1.11 - Added statement from unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations: - Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a - Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86). - Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types. - Refactored the compression code for better readability and maintainability. - Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large drop in throughput on some files). 5/15/11 v1.09 - Initial stable release. * Low-level Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. 
There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines. Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's where designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. 
- Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. This function uses a simple linear search through the central directory, so it's not very fast. Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized. The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. - Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. Note the appending is done in-place and is not an atomic operation, so if something goes wrong during the operation it's possible the archive could be left without a central directory (although the local file headers and file data will be fine, so the archive will be recoverable). 
For more complex archive modification scenarios: 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to preserve into a new archive using using the mz_zip_writer_add_from_zip_reader() function (which compiles the compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and you're done. This is safe but requires a bunch of temporary disk space or heap memory. 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(), append new files as needed, then finalize the archive which will write an updated central directory to the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility that the archive's central directory could be lost with this method if anything goes wrong, though. - ZIP archive support limitations: No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files. Requires streams capable of seeking. * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. * Important: For best perf. be sure to customize the below macros for your target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1 * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes). 
*/ #ifndef MINIZ_HEADER_INCLUDED #define MINIZ_HEADER_INCLUDED //#include <stdlib.h> // Defines to completely disable specific portions of miniz.c: // If all macros here are defined the only functionality remaining will be // CRC-32, adler-32, tinfl, and tdefl. // Define MINIZ_NO_STDIO to disable all usage and any functions which rely on // stdio for file I/O. //#define MINIZ_NO_STDIO // If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able // to get the current time, or // get/set file times, and the C run-time funcs that get/set times won't be // called. // The current downside is the times written to your archives will be from 1979. #define MINIZ_NO_TIME // Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's. #define MINIZ_NO_ARCHIVE_APIS // Define MINIZ_NO_ARCHIVE_APIS to disable all writing related ZIP archive // API's. //#define MINIZ_NO_ARCHIVE_WRITING_APIS // Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression // API's. //#define MINIZ_NO_ZLIB_APIS // Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent // conflicts against stock zlib. //#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES // Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc. // Note if MINIZ_NO_MALLOC is defined then the user must always provide custom // user alloc/free/realloc // callbacks to the zlib and archive API's, and a few stand-alone helper API's // which don't provide custom user // functions (such as tdefl_compress_mem_to_heap() and // tinfl_decompress_mem_to_heap()) won't work. 
//#define MINIZ_NO_MALLOC #if defined(__TINYC__) && (defined(__linux) || defined(__linux__)) // TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc // on Linux #define MINIZ_NO_TIME #endif #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS) //#include <time.h> #endif #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__i386) || defined(__i486__) || defined(__i486) || \ defined(i386) || defined(__ia64__) || defined(__x86_64__) // MINIZ_X86_OR_X64_CPU is only used to help set the below macros. #define MINIZ_X86_OR_X64_CPU 1 #endif #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. #define MINIZ_LITTLE_ENDIAN 1 #endif #endif #if MINIZ_X86_OR_X64_CPU // Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient // integer loads and stores from unaligned addresses. //#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \ 0 // disable to suppress compiler warnings #endif #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \ defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \ defined(__x86_64__) // Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are // reasonably fast (and don't involve compiler generated calls to helper // functions). #define MINIZ_HAS_64BIT_REGISTERS 1 #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API Definitions. // For more compatibility with zlib, miniz.c uses unsigned long for some // parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits! typedef unsigned long mz_ulong; // mz_free() internally uses the MZ_FREE() macro (which by default calls free() // unless you've modified the MZ_MALLOC macro) to release a block allocated from // the heap. 
void mz_free(void *p);

#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);

#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);

// Compression strategies.
enum {
  MZ_DEFAULT_STRATEGY = 0,
  MZ_FILTERED = 1,
  MZ_HUFFMAN_ONLY = 2,
  MZ_RLE = 3,
  MZ_FIXED = 4
};

// Method
#define MZ_DEFLATED 8

#ifndef MINIZ_NO_ZLIB_APIS

// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
                                 size_t size);

#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0

// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
  MZ_NO_FLUSH = 0,
  MZ_PARTIAL_FLUSH = 1,
  MZ_SYNC_FLUSH = 2,
  MZ_FULL_FLUSH = 3,
  MZ_FINISH = 4,
  MZ_BLOCK = 5
};

// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
  MZ_OK = 0,
  MZ_STREAM_END = 1,
  MZ_NEED_DICT = 2,
  MZ_ERRNO = -1,
  MZ_STREAM_ERROR = -2,
  MZ_DATA_ERROR = -3,
  MZ_MEM_ERROR = -4,
  MZ_BUF_ERROR = -5,
  MZ_VERSION_ERROR = -6,
  MZ_PARAM_ERROR = -10000
};

// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum { MZ_NO_COMPRESSION = 0, MZ_BEST_SPEED = 1, MZ_BEST_COMPRESSION = 9, MZ_UBER_COMPRESSION = 10, MZ_DEFAULT_LEVEL = 6, MZ_DEFAULT_COMPRESSION = -1 }; // Window bits #define MZ_DEFAULT_WINDOW_BITS 15 struct mz_internal_state; // Compression/decompression stream struct. typedef struct mz_stream_s { const unsigned char *next_in; // pointer to next byte to read unsigned int avail_in; // number of bytes available at next_in mz_ulong total_in; // total number of bytes consumed so far unsigned char *next_out; // pointer to next byte to write unsigned int avail_out; // number of bytes that can be written to next_out mz_ulong total_out; // total number of bytes produced so far char *msg; // error msg (unused) struct mz_internal_state *state; // internal state, allocated by zalloc/zfree mz_alloc_func zalloc; // optional heap allocation function (defaults to malloc) mz_free_func zfree; // optional heap free function (defaults to free) void *opaque; // heap alloc function user pointer int data_type; // data_type (unused) mz_ulong adler; // adler32 of the source or uncompressed data mz_ulong reserved; // not used } mz_stream; typedef mz_stream *mz_streamp; // Returns the version string of miniz.c. const char *mz_version(void); // mz_deflateInit() initializes a compressor with default options: // Parameters: // pStream must point to an initialized mz_stream struct. // level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. // level 1 enables a specially optimized compression function that's been // optimized purely for performance, not ratio. // (This special func. is currently only enabled when // MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if the input parameters are bogus. // MZ_MEM_ERROR on out of memory. 
int mz_deflateInit(mz_streamp pStream, int level);

// mz_deflateInit2() is like mz_deflate(), except with more control:
// Additional parameters:
//   method must be MZ_DEFLATED
//   window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with
//   zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no
//   header or footer)
//   mem_level must be between [1, 9] (it's checked but ignored by miniz.c)
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy);

// Quickly resets a compressor without having to reallocate anything. Same as
// calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2().
int mz_deflateReset(mz_streamp pStream);

// mz_deflate() compresses the input to output, consuming as much of the input
// and producing as much output as possible.
// Parameters:
//   pStream is the stream to read from and write to. You must initialize/update
//   the next_in, avail_in, next_out, and avail_out members.
//   flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or
//   MZ_FINISH.
// Return values:
//   MZ_OK on success (when flushing, or if more input is needed but not
//   available, and/or there's more output to be written but the output buffer
//   is full).
//   MZ_STREAM_END if all input has been consumed and all output bytes have been
//   written. Don't call mz_deflate() on the stream anymore.
//   MZ_STREAM_ERROR if the stream is bogus.
//   MZ_PARAM_ERROR if one of the parameters is invalid.
//   MZ_BUF_ERROR if no forward progress is possible because the input and/or
//   output buffers are empty. (Fill up the input buffer or free up some output
//   space and try again.)
int mz_deflate(mz_streamp pStream, int flush);

// mz_deflateEnd() deinitializes a compressor:
// Return values:
//  MZ_OK on success.
//  MZ_STREAM_ERROR if the stream is bogus.
int mz_deflateEnd(mz_streamp pStream);

// mz_deflateBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by deflate(), assuming flush is set to only
// MZ_NO_FLUSH or MZ_FINISH.
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);

// Single-call compression functions mz_compress() and mz_compress2():
// Returns MZ_OK on success, or one of the error codes from mz_deflate() on
// failure.
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len, int level);

// mz_compressBound() returns a (very) conservative upper bound on the amount of
// data that could be generated by calling mz_compress().
mz_ulong mz_compressBound(mz_ulong source_len);

// Initializes a decompressor.
int mz_inflateInit(mz_streamp pStream);

// mz_inflateInit2() is like mz_inflateInit() with an additional option that
// controls the window size and whether or not the stream has been wrapped with
// a zlib header/footer:
// window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or
// -MZ_DEFAULT_WINDOW_BITS (raw deflate).
int mz_inflateInit2(mz_streamp pStream, int window_bits);

// Decompresses the input stream to the output, consuming only as much of the
// input as needed, and writing as much to the output as possible.
// Parameters:
//   pStream is the stream to read from and write to. You must initialize/update
//   the next_in, avail_in, next_out, and avail_out members.
//   flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH.
//   On the first call, if flush is MZ_FINISH it's assumed the input and output
//   buffers are both sized large enough to decompress the entire stream in a
//   single call (this is slightly faster).
// MZ_FINISH implies that there are no more source bytes available beside // what's already in the input buffer, and that the output buffer is large // enough to hold the rest of the decompressed data. // Return values: // MZ_OK on success. Either more input is needed but not available, and/or // there's more output to be written but the output buffer is full. // MZ_STREAM_END if all needed input has been consumed and all output bytes // have been written. For zlib streams, the adler-32 of the decompressed data // has also been verified. // MZ_STREAM_ERROR if the stream is bogus. // MZ_DATA_ERROR if the deflate stream is invalid. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input buffer is // empty but the inflater needs more input to continue, or if the output // buffer is not large enough. Call mz_inflate() again // with more input data, or with more room in the output buffer (except when // using single call decompression, described above). int mz_inflate(mz_streamp pStream, int flush); // Deinitializes a decompressor. int mz_inflateEnd(mz_streamp pStream); // Single-call decompression. // Returns MZ_OK on success, or one of the error codes from mz_inflate() on // failure. int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); // Returns a string description of the specified error code, or NULL if the // error code is invalid. const char *mz_error(int err); // Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used // as a drop-in replacement for the subset of zlib that miniz.c supports. // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you // use zlib in the same project. 
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES

#endif // MINIZ_NO_ZLIB_APIS

// ------------------- Types and macros

typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef long long mz_int64;
typedef unsigned long long mz_uint64;
typedef int mz_bool;

#define MZ_FALSE (0)
#define MZ_TRUE (1)

// An attempt to work around MSVC's spammy "warning C4127: conditional
// expression is constant" message.
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif

// ------------------- ZIP archive reading/writing

#ifndef MINIZ_NO_ARCHIVE_APIS

enum {
  MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
  MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260,
  MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256
};

// Per-entry metadata returned by mz_zip_reader_file_stat().
typedef struct {
  mz_uint32 m_file_index;
  mz_uint32 m_central_dir_ofs;
  mz_uint16 m_version_made_by;
  mz_uint16 m_version_needed;
  mz_uint16 m_bit_flag;
  mz_uint16 m_method;
#ifndef MINIZ_NO_TIME
  time_t m_time;
#endif
  mz_uint32 m_crc32;
  mz_uint64 m_comp_size;
  mz_uint64 m_uncomp_size;
  mz_uint16 m_internal_attr;
  mz_uint32 m_external_attr;
  mz_uint64 m_local_header_ofs;
  mz_uint32 m_comment_size;
  char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];
  char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];
} mz_zip_archive_file_stat;

typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n);

struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;

typedef enum {
  MZ_ZIP_MODE_INVALID = 0,
  MZ_ZIP_MODE_READING = 1,
  MZ_ZIP_MODE_WRITING = 2,
  MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;

typedef struct mz_zip_archive_tag {
  mz_uint64 m_archive_size;
  mz_uint64 m_central_directory_file_ofs;
  mz_uint m_total_files;
  mz_zip_mode m_zip_mode;

  mz_uint m_file_offset_alignment;

  mz_alloc_func m_pAlloc;
  mz_free_func m_pFree;
  mz_realloc_func m_pRealloc;
  void *m_pAlloc_opaque;

  mz_file_read_func m_pRead;
  mz_file_write_func m_pWrite;
  void *m_pIO_opaque;

  mz_zip_internal_state *m_pState;

} mz_zip_archive;

typedef enum {
  MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
  MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
  MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
  MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800
} mz_zip_flags;

// ZIP archive reading

// Inits a ZIP archive reader.
// These functions read and validate the archive's central directory.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags);
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags);

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags);
#endif

// Returns the total number of files in the archive.
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);

// Returns detailed information about an archive file entry.
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat);

// Determines if an archive file entry is a directory entry.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index);
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index);

// Retrieves the filename of an archive file entry.
// Returns the number of bytes written to pFilename, or if filename_buf_size is
// 0 this function returns the number of bytes needed to fully store the
// filename.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename, mz_uint filename_buf_size);

// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags);

// Extracts an archive file to a memory buffer using no memory allocation.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);

// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags);

// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags);

// Extracts an archive file using a callback function to output the file's
// data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags);

#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags);
#endif

// Ends archive reading, freeing all allocations, and closing the input archive
// file if mz_zip_reader_init_file() was used.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);

// ZIP archive writing

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

// Inits a ZIP archive writer.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size);

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning);
#endif

// Converts a ZIP archive reader object into a writer object, to allow efficient
// in-place file appends to occur on an existing archive.
// For archives opened using mz_zip_reader_init_file, pFilename must be the
// archive's filename so it can be reopened for writing. If the file can't be
// reopened, mz_zip_reader_end() will be called.
// For archives opened using mz_zip_reader_init_mem, the memory block must be
// growable using the realloc callback (which defaults to realloc unless you've
// overridden it).
// Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's
// user provided m_pWrite function cannot be NULL.
// Note: In-place archive modification is not recommended unless you know what
// you're doing, because if execution stops or something goes wrong before
// the archive is finalized the file's central directory will be hosed.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename);

// Adds the contents of a memory buffer to an archive. These functions record
// the current local time into the archive.
// To add a directory entry, call this method with an archive name ending in a
// forward slash with empty buffer.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags);
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32);

#ifndef MINIZ_NO_STDIO
// Adds the contents of a disk file to an archive. This function also records
// the disk file's modified time into the archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size, mz_uint level_and_flags);
#endif

// Adds a file to an archive by fully cloning the data from another archive.
// This function fully clones the source file's compressed data (no
// recompression), along with its full filename, extra data, and comment fields.
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip,
                                          mz_zip_archive *pSource_zip,
                                          mz_uint file_index);

// Finalizes the archive by writing the central directory records followed by
// the end of central directory record.
// After an archive is finalized, the only valid call on the mz_zip_archive
// struct is mz_zip_writer_end().
// An archive must be manually finalized by calling this function for it to be
// valid.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize);

// Ends archive writing, freeing all allocations, and closing the output file if
// mz_zip_writer_init_file() was used.
// Note for the archive to be valid, it must have been finalized before ending.
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);

// Misc. high-level helper functions:

// mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically)
// appends a memory blob to a ZIP archive.
// level_and_flags - compression level (0-10, see MZ_BEST_SPEED,
// MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or
// just set to MZ_DEFAULT_COMPRESSION.
mz_bool mz_zip_add_mem_to_archive_file_in_place(
    const char *pZip_filename, const char *pArchive_name, const void *pBuf,
    size_t buf_size, const void *pComment, mz_uint16 comment_size,
    mz_uint level_and_flags);

// Reads a single file from an archive into a heap block.
// Returns NULL on failure.
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename,
                                          const char *pArchive_name,
                                          size_t *pSize, mz_uint zip_flags);

#endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

#endif // #ifndef MINIZ_NO_ARCHIVE_APIS

// ------------------- Low-level Decompression API Definitions

// Decompression flags used by tinfl_decompress().
// TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and
// ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the
// input is a raw deflate stream.
// TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available
// beyond the end of the supplied input buffer. If clear, the input buffer
// contains all remaining input.
// TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large
// enough to hold the entire decompressed stream. If clear, the output buffer is
// at least the size of the dictionary (typically 32KB).
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the // decompressed bytes. enum { TINFL_FLAG_PARSE_ZLIB_HEADER = 1, TINFL_FLAG_HAS_MORE_INPUT = 2, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, TINFL_FLAG_COMPUTE_ADLER32 = 8 }; // High level decompression functions: // tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data // to decompress. // On return: // Function returns a pointer to the decompressed data, or NULL on failure. // *pOut_len will be set to the decompressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must call mz_free() on the returned block when it's no longer // needed. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block // in memory. // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes // written on success. #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1)) size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // tinfl_decompress_mem_to_callback() decompresses a block in memory to an // internal 32KB buffer, and a user provided callback function will be called to // flush the buffer. // Returns 1 on success or 0 on failure. typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor; // Max size of LZ dictionary. #define TINFL_LZ_DICT_SIZE 32768 // Return status. 
typedef enum { TINFL_STATUS_BAD_PARAM = -3, TINFL_STATUS_ADLER32_MISMATCH = -2, TINFL_STATUS_FAILED = -1, TINFL_STATUS_DONE = 0, TINFL_STATUS_NEEDS_MORE_INPUT = 1, TINFL_STATUS_HAS_MORE_OUTPUT = 2 } tinfl_status; // Initializes the decompressor to its initial state. #define tinfl_init(r) \ do { \ (r)->m_state = 0; \ } \ MZ_MACRO_END #define tinfl_get_adler32(r) (r)->m_check_adler32 // Main low-level decompressor coroutine function. This is the only function // actually needed for decompression. All the other functions are just // high-level helpers for improved usability. // This is a universal API, i.e. it can be used as a building block to build any // desired higher level decompression API. In the limit case, it can be called // once per every byte input or output. tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags); // Internal/private bits follow. 
enum {
  TINFL_MAX_HUFF_TABLES = 3,
  TINFL_MAX_HUFF_SYMBOLS_0 = 288,
  TINFL_MAX_HUFF_SYMBOLS_1 = 32,
  TINFL_MAX_HUFF_SYMBOLS_2 = 19,
  TINFL_FAST_LOOKUP_BITS = 10,
  TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};

typedef struct {
  mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
  mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE],
      m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;

#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#endif

#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif

struct tinfl_decompressor_tag {
  mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type,
      m_check_adler32, m_dist, m_counter, m_num_extra,
      m_table_sizes[TINFL_MAX_HUFF_TABLES];
  tinfl_bit_buf_t m_bit_buf;
  size_t m_dist_from_out_buf_start;
  tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
  mz_uint8 m_raw_header[4],
      m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};

// ------------------- Low-level Compression API Definitions

// Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly
// slower, and raw/dynamic blocks will be output more frequently).
#define TDEFL_LESS_MEMORY 0

// tdefl_init() compression flags logically OR'd together (low 12 bits contain
// the max. number of probes per dictionary search):
// TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes
// per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap
// compression), 4095=Huffman+LZ (slowest/best compression).
enum {
  TDEFL_HUFFMAN_ONLY = 0,
  TDEFL_DEFAULT_MAX_PROBES = 128,
  TDEFL_MAX_PROBES_MASK = 0xFFF
};

// TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before
// the deflate data, and the Adler-32 of the source data at the end. Otherwise,
// you'll get raw deflate data.
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even // when not writing zlib headers). // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more // efficient lazy parsing. // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's // initialization time to the minimum, but the output may vary from run to run // given the same input (depending on the contents of memory). // TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) // TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. // TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. // The low 12 bits are reserved to control the max # of hash probes per // dictionary lookup (see TDEFL_MAX_PROBES_MASK). enum { TDEFL_WRITE_ZLIB_HEADER = 0x01000, TDEFL_COMPUTE_ADLER32 = 0x02000, TDEFL_GREEDY_PARSING_FLAG = 0x04000, TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, TDEFL_RLE_MATCHES = 0x10000, TDEFL_FILTER_MATCHES = 0x20000, TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000 }; // High level compression functions: // tdefl_compress_mem_to_heap() compresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of source block to compress. // flags: The max match finder probes (default is 128) logically OR'd against // the above flags. Higher probes are slower but improve compression. // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pOut_len will be set to the compressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must free() the returned block when it's no longer needed. 
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tdefl_compress_mem_to_mem() compresses a block in memory to another block in // memory. // Returns 0 on failure. size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // Compresses an image to a compressed PNG file in memory. // On entry: // pImage, w, h, and num_chans describe the image to compress. num_chans may be // 1, 2, 3, or 4. // The image pitch in bytes per scanline will be w*num_chans. The leftmost // pixel on the top scanline is stored first in memory. // level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL // If flip is true, the image will be flipped on the Y axis (useful for OpenGL // apps). // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pLen_out will be set to the size of the PNG image file. // The caller must mz_free() the returned heap block (which will typically be // larger than *pLen_out) when it's no longer needed. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip); void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out); // Output stream interface. The compressor uses this interface to write // compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time. typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); // tdefl_compress_mem_to_output() compresses a block to an output stream. The // above helpers use this function internally. 
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); enum { TDEFL_MAX_HUFF_TABLES = 3, TDEFL_MAX_HUFF_SYMBOLS_0 = 288, TDEFL_MAX_HUFF_SYMBOLS_1 = 32, TDEFL_MAX_HUFF_SYMBOLS_2 = 19, TDEFL_LZ_DICT_SIZE = 32768, TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1, TDEFL_MIN_MATCH_LEN = 3, TDEFL_MAX_MATCH_LEN = 258 }; // TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed // output block (using static/fixed Huffman codes). #if TDEFL_LESS_MEMORY enum { TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 12, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #else enum { TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 15, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #endif // The low-level tdefl functions below may be used directly if the above helper // functions aren't flexible enough. The low-level functions don't make any heap // allocations, unlike the above helper functions. typedef enum { TDEFL_STATUS_BAD_PARAM = -2, TDEFL_STATUS_PUT_BUF_FAILED = -1, TDEFL_STATUS_OKAY = 0, TDEFL_STATUS_DONE = 1 } tdefl_status; // Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums typedef enum { TDEFL_NO_FLUSH = 0, TDEFL_SYNC_FLUSH = 2, TDEFL_FULL_FLUSH = 3, TDEFL_FINISH = 4 } tdefl_flush; // tdefl's compression state structure. 
typedef struct { tdefl_put_buf_func_ptr m_pPut_buf_func; void *m_pPut_buf_user; mz_uint m_flags, m_max_probes[2]; int m_greedy_parsing; mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size; mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end; mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in, m_bit_buffer; mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit, m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index, m_wants_to_finish; tdefl_status m_prev_return_status; const void *m_pIn_buf; void *m_pOut_buf; size_t *m_pIn_buf_size, *m_pOut_buf_size; tdefl_flush m_flush; const mz_uint8 *m_pSrc; size_t m_src_buf_left, m_out_buf_ofs; mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1]; mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE]; mz_uint16 m_next[TDEFL_LZ_DICT_SIZE]; mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE]; mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE]; } tdefl_compressor; // Initializes the compressor. // There is no corresponding deinit() function because the tdefl API's do not // dynamically allocate memory. // pBut_buf_func: If NULL, output data will be supplied to the specified // callback. In this case, the user should call the tdefl_compress_buffer() API // for compression. // If pBut_buf_func is NULL the user should always call the tdefl_compress() // API. // flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER, // etc.) tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); // Compresses a block of data, consuming as much of the specified input buffer // as possible, and writing as much compressed data to the specified output // buffer as possible. 
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush); // tdefl_compress_buffer() is only usable when the tdefl_init() is called with a // non-NULL tdefl_put_buf_func_ptr. // tdefl_compress_buffer() always consumes the entire input buffer. tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush); tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d); mz_uint32 tdefl_get_adler32(tdefl_compressor *d); // Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS isn't // defined, because it uses some of its macros. #ifndef MINIZ_NO_ZLIB_APIS // Create tdefl_compress() flags given zlib-style compression parameters. // level may range from [0,10] (where 10 is absolute max compression, but may be // much slower on some files) // window_bits may be -15 (raw deflate) or 15 (zlib) // strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY, // MZ_RLE, or MZ_FIXED mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy); #endif // #ifndef MINIZ_NO_ZLIB_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_INCLUDED // ------------------- End of Header: Implementation follows. (If you only want // the header, define MINIZ_HEADER_FILE_ONLY.) #ifndef MINIZ_HEADER_FILE_ONLY typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1]; typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1]; typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1]; //#include <assert.h> //#include <string.h> #define MZ_ASSERT(x) assert(x) #ifdef MINIZ_NO_MALLOC #define MZ_MALLOC(x) NULL #define MZ_FREE(x) (void)x, ((void)0) #define MZ_REALLOC(p, x) NULL #else #define MZ_MALLOC(x) malloc(x) #define MZ_FREE(x) free(x) #define MZ_REALLOC(p, x) realloc(p, x) #endif #define MZ_MAX(a, b) (((a) > (b)) ? 
(a) : (b)) #define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b)) #define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj)) #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN #define MZ_READ_LE16(p) *((const mz_uint16 *)(p)) #define MZ_READ_LE32(p) *((const mz_uint32 *)(p)) #else #define MZ_READ_LE16(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U)) #define MZ_READ_LE32(p) \ ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \ ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \ ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U)) #endif #ifdef _MSC_VER #define MZ_FORCEINLINE __forceinline #elif defined(__GNUC__) #define MZ_FORCEINLINE inline __attribute__((__always_inline__)) #else #define MZ_FORCEINLINE inline #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API's mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) { mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16); size_t block_len = buf_len % 5552; if (!ptr) return MZ_ADLER32_INIT; while (buf_len) { for (i = 0; i + 7 < block_len; i += 8, ptr += 8) { s1 += ptr[0], s2 += s1; s1 += ptr[1], s2 += s1; s1 += ptr[2], s2 += s1; s1 += ptr[3], s2 += s1; s1 += ptr[4], s2 += s1; s1 += ptr[5], s2 += s1; s1 += ptr[6], s2 += s1; s1 += ptr[7], s2 += s1; } for (; i < block_len; ++i) s1 += *ptr++, s2 += s1; s1 %= 65521U, s2 %= 65521U; buf_len -= block_len; block_len = 5552; } return (s2 << 16) + s1; } // Karl Malbrain's compact CRC-32. 
See "A compact CCITT crc16 and crc32 C // implementation that balances processor cache usage against speed": // http://www.geocities.com/malbrain/ mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) { static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c, 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c}; mz_uint32 crcu32 = (mz_uint32)crc; if (!ptr) return MZ_CRC32_INIT; crcu32 = ~crcu32; while (buf_len--) { mz_uint8 b = *ptr++; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)]; crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)]; } return ~crcu32; } void mz_free(void *p) { MZ_FREE(p); } #ifndef MINIZ_NO_ZLIB_APIS static void *def_alloc_func(void *opaque, size_t items, size_t size) { (void)opaque, (void)items, (void)size; return MZ_MALLOC(items * size); } static void def_free_func(void *opaque, void *address) { (void)opaque, (void)address; MZ_FREE(address); } // static void *def_realloc_func(void *opaque, void *address, size_t items, // size_t size) { // (void)opaque, (void)address, (void)items, (void)size; // return MZ_REALLOC(address, items * size); //} const char *mz_version(void) { return MZ_VERSION; } int mz_deflateInit(mz_streamp pStream, int level) { return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY); } int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy) { tdefl_compressor *pComp; mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy); if (!pStream) return MZ_STREAM_ERROR; if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = MZ_ADLER32_INIT; pStream->msg = NULL; pStream->reserved 
= 0; pStream->total_in = 0; pStream->total_out = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor)); if (!pComp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pComp; if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) { mz_deflateEnd(pStream); return MZ_PARAM_ERROR; } return MZ_OK; } int mz_deflateReset(mz_streamp pStream) { if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree)) return MZ_STREAM_ERROR; pStream->total_in = pStream->total_out = 0; tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, ((tdefl_compressor *)pStream->state)->m_flags); return MZ_OK; } int mz_deflate(mz_streamp pStream, int flush) { size_t in_bytes, out_bytes; mz_ulong orig_total_in, orig_total_out; int mz_status = MZ_OK; if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out)) return MZ_STREAM_ERROR; if (!pStream->avail_out) return MZ_BUF_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if (((tdefl_compressor *)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE) return (flush == MZ_FINISH) ? 
MZ_STREAM_END : MZ_BUF_ERROR; orig_total_in = pStream->total_in; orig_total_out = pStream->total_out; for (;;) { tdefl_status defl_status; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush); pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (defl_status < 0) { mz_status = MZ_STREAM_ERROR; break; } else if (defl_status == TDEFL_STATUS_DONE) { mz_status = MZ_STREAM_END; break; } else if (!pStream->avail_out) break; else if ((!pStream->avail_in) && (flush != MZ_FINISH)) { if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out)) break; return MZ_BUF_ERROR; // Can't make forward progress without some input. } } return mz_status; } int mz_deflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) { (void)pStream; // This is really over conservative. (And lame, but it's actually pretty // tricky to compute a true upper bound given the way tdefl's blocking works.) return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5); } int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level) { int status; mz_stream stream; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). 
if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_deflateInit(&stream, level); if (status != MZ_OK) return status; status = mz_deflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_deflateEnd(&stream); return (status == MZ_OK) ? MZ_BUF_ERROR : status; } *pDest_len = stream.total_out; return mz_deflateEnd(&stream); } int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION); } mz_ulong mz_compressBound(mz_ulong source_len) { return mz_deflateBound(NULL, source_len); } typedef struct { tinfl_decompressor m_decomp; mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed; int m_window_bits; mz_uint8 m_dict[TINFL_LZ_DICT_SIZE]; tinfl_status m_last_status; } inflate_state; int mz_inflateInit2(mz_streamp pStream, int window_bits) { inflate_state *pDecomp; if (!pStream) return MZ_STREAM_ERROR; if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)) return MZ_PARAM_ERROR; pStream->data_type = 0; pStream->adler = 0; pStream->msg = NULL; pStream->total_in = 0; pStream->total_out = 0; pStream->reserved = 0; if (!pStream->zalloc) pStream->zalloc = def_alloc_func; if (!pStream->zfree) pStream->zfree = def_free_func; pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state)); if (!pDecomp) return MZ_MEM_ERROR; pStream->state = (struct mz_internal_state *)pDecomp; tinfl_init(&pDecomp->m_decomp); pDecomp->m_dict_ofs = 0; pDecomp->m_dict_avail = 0; pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT; pDecomp->m_first_call = 1; pDecomp->m_has_flushed = 0; pDecomp->m_window_bits = window_bits; return MZ_OK; } int mz_inflateInit(mz_streamp pStream) { return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS); } int 
mz_inflate(mz_streamp pStream, int flush) { inflate_state *pState; mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32; size_t in_bytes, out_bytes, orig_avail_in; tinfl_status status; if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState = (inflate_state *)pStream->state; if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER; orig_avail_in = pStream->avail_in; first_call = pState->m_first_call; pState->m_first_call = 0; if (pState->m_last_status < 0) return MZ_DATA_ERROR; if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState->m_has_flushed |= (flush == MZ_FINISH); if ((flush == MZ_FINISH) && (first_call)) { // MZ_FINISH on the first call implies that the input and output buffers are // large enough to hold the entire compressed/decompressed file. decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (status < 0) return MZ_DATA_ERROR; else if (status != TINFL_STATUS_DONE) { pState->m_last_status = TINFL_STATUS_FAILED; return MZ_BUF_ERROR; } return MZ_STREAM_END; } // flush != MZ_FINISH then we must assume there's more input. 
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT; if (pState->m_dict_avail) { n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); return ((pState->m_last_status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } for (;;) { in_bytes = pStream->avail_in; out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs; status = tinfl_decompress( &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict, pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pState->m_dict_avail = (mz_uint)out_bytes; n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); if (status < 0) return MZ_DATA_ERROR; // Stream is corrupted (there could be some // uncompressed data left in the output dictionary - // oh well). else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in)) return MZ_BUF_ERROR; // Signal caller that we can't make forward progress // without supplying more input or by setting flush // to MZ_FINISH. else if (flush == MZ_FINISH) { // The output buffer MUST be large to hold the remaining uncompressed data // when flush==MZ_FINISH. if (status == TINFL_STATUS_DONE) return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END; // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's // at least 1 more byte on the way. 
If there's no more room left in the // output buffer then something is wrong. else if (!pStream->avail_out) return MZ_BUF_ERROR; } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) || (!pStream->avail_out) || (pState->m_dict_avail)) break; } return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } int mz_inflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { mz_stream stream; int status; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_inflateInit(&stream); if (status != MZ_OK) return status; status = mz_inflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_inflateEnd(&stream); return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? 
MZ_DATA_ERROR : status; } *pDest_len = stream.total_out; return mz_inflateEnd(&stream); } const char *mz_error(int err) { static struct { int m_err; const char *m_pDesc; } s_error_descs[] = {{MZ_OK, ""}, {MZ_STREAM_END, "stream end"}, {MZ_NEED_DICT, "need dictionary"}, {MZ_ERRNO, "file error"}, {MZ_STREAM_ERROR, "stream error"}, {MZ_DATA_ERROR, "data error"}, {MZ_MEM_ERROR, "out of memory"}, {MZ_BUF_ERROR, "buf error"}, {MZ_VERSION_ERROR, "version error"}, {MZ_PARAM_ERROR, "parameter error"}}; mz_uint i; for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc; return NULL; } #endif // MINIZ_NO_ZLIB_APIS // ------------------- Low-level Decompression (completely independent from all // compression API's) #define TINFL_MEMCPY(d, s, l) memcpy(d, s, l) #define TINFL_MEMSET(p, c, l) memset(p, c, l) #define TINFL_CR_BEGIN \ switch (r->m_state) { \ case 0: #define TINFL_CR_RETURN(state_index, result) \ do { \ status = result; \ r->m_state = state_index; \ goto common_exit; \ case state_index:; \ } \ MZ_MACRO_END #define TINFL_CR_RETURN_FOREVER(state_index, result) \ do { \ for (;;) { \ TINFL_CR_RETURN(state_index, result); \ } \ } \ MZ_MACRO_END #define TINFL_CR_FINISH } // TODO: If the caller has indicated that there's no more input, and we attempt // to read beyond the input buf, then something is wrong with the input because // the inflator never // reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of // the stream with 0's in this scenario. 
#define TINFL_GET_BYTE(state_index, c) \ do { \ if (pIn_buf_cur >= pIn_buf_end) { \ for (;;) { \ if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \ TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \ if (pIn_buf_cur < pIn_buf_end) { \ c = *pIn_buf_cur++; \ break; \ } \ } else { \ c = 0; \ break; \ } \ } \ } else \ c = *pIn_buf_cur++; \ } \ MZ_MACRO_END #define TINFL_NEED_BITS(state_index, n) \ do { \ mz_uint c; \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < (mz_uint)(n)) #define TINFL_SKIP_BITS(state_index, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END #define TINFL_GET_BITS(state_index, b, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ b = bit_buf & ((1 << (n)) - 1); \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END // TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes // remaining in the input buffer falls below 2. // It reads just enough bytes from the input stream that are needed to decode // the next Huffman code (and absolutely no more). It works by trying to fully // decode a // Huffman code by using whatever bits are currently present in the bit buffer. // If this fails, it reads another byte, and tries again until it succeeds or // until the // bit buffer contains >=15 bits (deflate's max. Huffman code size). 
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \ do { \ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \ if (temp >= 0) { \ code_len = temp >> 9; \ if ((code_len) && (num_bits >= code_len)) break; \ } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while ((temp < 0) && (num_bits >= (code_len + 1))); \ if (temp >= 0) break; \ } \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < 15); // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex // than you would initially expect because the zlib API expects the decompressor // to never read // beyond the final byte of the deflate stream. (In other words, when this macro // wants to read another byte from the input, it REALLY needs another byte in // order to fully // decode the next Huffman code.) Handling this properly is particularly // important on raw deflate (non-zlib) streams, which aren't followed by a byte // aligned adler-32. // The slow path is only executed at the very end of the input buffer. 
#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \ do { \ int temp; \ mz_uint code_len, c; \ if (num_bits < 15) { \ if ((pIn_buf_end - pIn_buf_cur) < 2) { \ TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \ } else { \ bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | \ (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \ pIn_buf_cur += 2; \ num_bits += 16; \ } \ } \ if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \ 0) \ code_len = temp >> 9, temp &= 511; \ else { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while (temp < 0); \ } \ sym = temp; \ bit_buf >>= code_len; \ num_bits -= code_len; \ } \ MZ_MACRO_END tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags) { static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0}; static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; static const int s_dist_extra[32] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; static const int s_min_table_sizes[3] = {257, 1, 4}; tinfl_status status = TINFL_STATUS_FAILED; mz_uint32 num_bits, dist, counter, num_extra; tinfl_bit_buf_t bit_buf; const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size; mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = 
pOut_buf_next + *pOut_buf_size; size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? (size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start; // Ensure the output buffer's size is a power of 2, unless the output buffer // is large enough to hold the entire output file (in which case it doesn't // matter). if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start)) { *pIn_buf_size = *pOut_buf_size = 0; return TINFL_STATUS_BAD_PARAM; } num_bits = r->m_num_bits; bit_buf = r->m_bit_buf; dist = r->m_dist; counter = r->m_counter; num_extra = r->m_num_extra; dist_from_out_buf_start = r->m_dist_from_out_buf_start; TINFL_CR_BEGIN bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0; r->m_z_adler32 = r->m_check_adler32 = 1; if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_GET_BYTE(1, r->m_zhdr0); TINFL_GET_BYTE(2, r->m_zhdr1); counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8)); if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4))))); if (counter) { TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED); } } do { TINFL_GET_BITS(3, r->m_final, 3); r->m_type = r->m_final >> 1; if (r->m_type == 0) { TINFL_SKIP_BITS(5, num_bits & 7); for (counter = 0; counter < 4; ++counter) { if (num_bits) TINFL_GET_BITS(6, r->m_raw_header[counter], 8); else TINFL_GET_BYTE(7, r->m_raw_header[counter]); } if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) { TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED); } while ((counter) && (num_bits)) { TINFL_GET_BITS(51, dist, 8); while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)dist; 
counter--; } while (counter) { size_t n; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT); } while (pIn_buf_cur >= pIn_buf_end) { if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT); } else { TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED); } } n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter); TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n); pIn_buf_cur += n; pOut_buf_cur += n; counter -= (mz_uint)n; } } else if (r->m_type == 3) { TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED); } else { if (r->m_type == 1) { mz_uint8 *p = r->m_tables[0].m_code_size; mz_uint i; r->m_table_sizes[0] = 288; r->m_table_sizes[1] = 32; TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32); for (i = 0; i <= 143; ++i) *p++ = 8; for (; i <= 255; ++i) *p++ = 9; for (; i <= 279; ++i) *p++ = 7; for (; i <= 287; ++i) *p++ = 8; } else { for (counter = 0; counter < 3; counter++) { TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]); r->m_table_sizes[counter] += s_min_table_sizes[counter]; } MZ_CLEAR_OBJ(r->m_tables[2].m_code_size); for (counter = 0; counter < r->m_table_sizes[2]; counter++) { mz_uint s; TINFL_GET_BITS(14, s, 3); r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s; } r->m_table_sizes[2] = 19; } for (; (int)r->m_type >= 0; r->m_type--) { int tree_next, tree_cur; tinfl_huff_table *pTable; mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16]; pTable = &r->m_tables[r->m_type]; MZ_CLEAR_OBJ(total_syms); MZ_CLEAR_OBJ(pTable->m_look_up); MZ_CLEAR_OBJ(pTable->m_tree); for (i = 0; i < r->m_table_sizes[r->m_type]; ++i) total_syms[pTable->m_code_size[i]]++; used_syms = 0, total = 0; next_code[0] = next_code[1] = 0; for (i = 1; i <= 15; ++i) { used_syms += total_syms[i]; next_code[i + 1] = (total = ((total + total_syms[i]) << 1)); } if ((65536 != total) && (used_syms > 1)) { TINFL_CR_RETURN_FOREVER(35, 
TINFL_STATUS_FAILED); } for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index) { mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index]; if (!code_size) continue; cur_code = next_code[code_size]++; for (l = code_size; l > 0; l--, cur_code >>= 1) rev_code = (rev_code << 1) | (cur_code & 1); if (code_size <= TINFL_FAST_LOOKUP_BITS) { mz_int16 k = (mz_int16)((code_size << 9) | sym_index); while (rev_code < TINFL_FAST_LOOKUP_SIZE) { pTable->m_look_up[rev_code] = k; rev_code += (1 << code_size); } continue; } if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)])) { pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1); for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) { tree_cur -= ((rev_code >>= 1) & 1); if (!pTable->m_tree[-tree_cur - 1]) { pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next; tree_cur = tree_next; tree_next -= 2; } else tree_cur = pTable->m_tree[-tree_cur - 1]; } tree_cur -= ((rev_code >>= 1) & 1); pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index; } if (r->m_type == 2) { for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) { mz_uint s; TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]); if (dist < 16) { r->m_len_codes[counter++] = (mz_uint8)dist; continue; } if ((dist == 16) && (!counter)) { TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED); } num_extra = "\02\03\07"[dist - 16]; TINFL_GET_BITS(18, s, num_extra); s += "\03\03\013"[dist - 16]; TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? 
r->m_len_codes[counter - 1] : 0, s); counter += s; } if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) { TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED); } TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]); TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]); } } for (;;) { mz_uint8 *pSrc; for (;;) { if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2)) { TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]); if (counter >= 256) break; while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = (mz_uint8)counter; } else { int sym2; mz_uint code_len; #if TINFL_USE_64BIT_BITBUF if (num_bits < 30) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits); pIn_buf_cur += 4; num_bits += 32; } #else if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } counter = sym2; bit_buf >>= code_len; num_bits -= code_len; if (counter & 256) break; #if !TINFL_USE_64BIT_BITBUF if (num_bits < 15) { bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits); pIn_buf_cur += 2; num_bits += 16; } #endif if ((sym2 = r->m_tables[0] .m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) code_len = sym2 >> 9; else { code_len = TINFL_FAST_LOOKUP_BITS; do { sym2 = r->m_tables[0] .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)]; } while (sym2 < 0); } bit_buf >>= code_len; num_bits -= code_len; pOut_buf_cur[0] = (mz_uint8)counter; if (sym2 & 256) { pOut_buf_cur++; counter = sym2; break; } pOut_buf_cur[1] = (mz_uint8)sym2; pOut_buf_cur += 2; } } if ((counter &= 511) == 256) break; num_extra = 
s_length_extra[counter - 257]; counter = s_length_base[counter - 257]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(25, extra_bits, num_extra); counter += extra_bits; } TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]); num_extra = s_dist_extra[dist]; dist = s_dist_base[dist]; if (num_extra) { mz_uint extra_bits; TINFL_GET_BITS(27, extra_bits, num_extra); dist += extra_bits; } dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start; if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) { TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED); } pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask); if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) { while (counter--) { while (pOut_buf_cur >= pOut_buf_end) { TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT); } *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask]; } continue; } #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES else if ((counter >= 9) && (counter <= dist)) { const mz_uint8 *pSrc_end = pSrc + (counter & ~7); do { ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0]; ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1]; pOut_buf_cur += 8; } while ((pSrc += 8) < pSrc_end); if ((counter &= 7) < 3) { if (counter) { pOut_buf_cur[0] = pSrc[0]; if (counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } continue; } } #endif do { pOut_buf_cur[0] = pSrc[0]; pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur[2] = pSrc[2]; pOut_buf_cur += 3; pSrc += 3; } while ((int)(counter -= 3) > 2); if ((int)counter > 0) { pOut_buf_cur[0] = pSrc[0]; if ((int)counter > 1) pOut_buf_cur[1] = pSrc[1]; pOut_buf_cur += counter; } } } } while (!(r->m_final & 1)); if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) { TINFL_SKIP_BITS(32, num_bits & 7); for (counter = 0; counter < 4; ++counter) { mz_uint s; if (num_bits) TINFL_GET_BITS(41, s, 8); else TINFL_GET_BYTE(42, s); r->m_z_adler32 = (r->m_z_adler32 << 8) | s; } } 
  /* Stream fully decoded: park the coroutine in a terminal state. */
  TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
  TINFL_CR_FINISH

common_exit:
  /* Persist all decoder state back into the decompressor object so a later
     call can resume exactly where this one stopped. */
  r->m_num_bits = num_bits;
  r->m_bit_buf = bit_buf;
  r->m_dist = dist;
  r->m_counter = counter;
  r->m_num_extra = num_extra;
  r->m_dist_from_out_buf_start = dist_from_out_buf_start;
  /* Report input consumed and output produced on this call. */
  *pIn_buf_size = pIn_buf_cur - pIn_buf_next;
  *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
  if ((decomp_flags &
       (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
      (status >= 0)) {
    /* Fold the bytes just written into the running Adler-32. The 5552-byte
       inner block is the standard largest run for which the sums cannot
       overflow 32 bits, so the expensive modulo is deferred. */
    const mz_uint8 *ptr = pOut_buf_next;
    size_t buf_len = *pOut_buf_size;
    mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
                 s2 = r->m_check_adler32 >> 16;
    size_t block_len = buf_len % 5552;
    while (buf_len) {
      for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
        s1 += ptr[0], s2 += s1;
        s1 += ptr[1], s2 += s1;
        s1 += ptr[2], s2 += s1;
        s1 += ptr[3], s2 += s1;
        s1 += ptr[4], s2 += s1;
        s1 += ptr[5], s2 += s1;
        s1 += ptr[6], s2 += s1;
        s1 += ptr[7], s2 += s1;
      }
      for (; i < block_len; ++i) s1 += *ptr++, s2 += s1;
      s1 %= 65521U, s2 %= 65521U;
      buf_len -= block_len;
      block_len = 5552;
    }
    r->m_check_adler32 = (s2 << 16) + s1;
    /* When a zlib header was parsed, compare the computed checksum against
       the one stored in the stream. */
    if ((status == TINFL_STATUS_DONE) &&
        (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
        (r->m_check_adler32 != r->m_z_adler32))
      status = TINFL_STATUS_ADLER32_MISMATCH;
  }
  return status;
}

// Higher level helper functions.

/* Decompresses all of pSrc_buf into a heap buffer that is grown (doubled) on
   demand. On success returns the buffer (caller releases it with MZ_FREE) and
   stores the decompressed size in *pOut_len; returns NULL on failure or if
   the stream needs more input than was supplied. */
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags) {
  tinfl_decompressor decomp;
  void *pBuf = NULL, *pNew_buf;
  size_t src_buf_ofs = 0, out_buf_capacity = 0;
  *pOut_len = 0;
  tinfl_init(&decomp);
  for (;;) {
    size_t src_buf_size = src_buf_len - src_buf_ofs,
           dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
        (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
        &dst_buf_size,
        (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
    if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
      /* Corrupt stream, or the whole input was given and it still wants
         more: give up and release everything. */
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    src_buf_ofs += src_buf_size;
    *pOut_len += dst_buf_size;
    if (status == TINFL_STATUS_DONE) break;
    /* Output buffer exhausted: double the capacity (128-byte floor). */
    new_out_buf_capacity = out_buf_capacity * 2;
    if (new_out_buf_capacity < 128) new_out_buf_capacity = 128;
    pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
    if (!pNew_buf) {
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    pBuf = pNew_buf;
    out_buf_capacity = new_out_buf_capacity;
  }
  return pBuf;
}

/* Single-call decompression into a caller-supplied buffer. Returns the number
   of bytes written, or TINFL_DECOMPRESS_MEM_TO_MEM_FAILED if the stream did
   not decode to completion within out_buf_len bytes. */
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                   const void *pSrc_buf, size_t src_buf_len,
                                   int flags) {
  tinfl_decompressor decomp;
  tinfl_status status;
  tinfl_init(&decomp);
  status =
      tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
                       (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
                       (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
                           TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
  return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
                                       : out_buf_len;
}

/* Decompresses while streaming output through pPut_buf_func, using a heap
   allocated TINFL_LZ_DICT_SIZE-byte circular dictionary as the LZ window.
   Returns 1 on success, 0 if the callback aborted or the stream failed, and
   TINFL_STATUS_FAILED if the dictionary could not be allocated. On return
   *pIn_buf_size holds the number of input bytes consumed. */
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  int result = 0;
  tinfl_decompressor decomp;
  mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
  size_t in_buf_ofs = 0, dict_ofs = 0;
  if (!pDict) return TINFL_STATUS_FAILED;
  tinfl_init(&decomp);
  for (;;) {
    size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
           dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict,
        pDict + dict_ofs, &dst_buf_size,
        (flags & ~(TINFL_FLAG_HAS_MORE_INPUT |
                   TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
    in_buf_ofs += in_buf_size;
    /* Hand the newly produced bytes to the callback; a zero return from the
       callback aborts the whole operation. */
    if ((dst_buf_size) &&
        (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
      break;
    if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
      result = (status == TINFL_STATUS_DONE);
      break;
    }
    /* Wrap the write position inside the circular dictionary. */
    dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
  }
  MZ_FREE(pDict);
  *pIn_buf_size = in_buf_ofs;
  return result;
}

// ------------------- Low-level Compression (independent from all decompression
// API's)

// Purposely making these tables static for faster init and thread safety.
static const mz_uint16 s_tdefl_len_sym[256] = { 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285}; static const mz_uint8 s_tdefl_len_extra[256] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0}; static const mz_uint8 s_tdefl_small_dist_sym[512] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 
17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17}; static const mz_uint8 s_tdefl_small_dist_extra[512] = { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}; static const mz_uint8 s_tdefl_large_dist_sym[128] = { 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 
26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29}; static const mz_uint8 s_tdefl_large_dist_extra[128] = { 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13}; // Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted // values. 
typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq; static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1) { mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist); for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; } while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { const mz_uint32 *pHist = &hist[pass << 8]; mz_uint offsets[256], cur_ofs = 0; for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; { tdefl_sym_freq *t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; } } return pCur_syms; } // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, // alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. 
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) { int root, leaf, next, avbl, used, dpth; if (n == 0) return; else if (n == 1) { A[0].m_key = 1; return; } A[0].m_key += A[1].m_key; root = 0; leaf = 2; for (next = 1; next < n - 1; next++) { if (leaf >= n || A[root].m_key < A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key; if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key); } A[n - 2].m_key = 0; for (next = n - 3; next >= 0; next--) A[next].m_key = A[A[next].m_key].m_key + 1; avbl = 1; used = dpth = 0; root = n - 2; next = n - 1; while (avbl > 0) { while (root >= 0 && (int)A[root].m_key == dpth) { used++; root--; } while (avbl > used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; } avbl = 2 * used; dpth++; used = 0; } } // Limits canonical Huffman code table's max code size. 
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };

/* Clamps a code-length histogram (pNum_codes[len] = number of symbols with
   that length) so no length exceeds max_code_size, then repairs the Kraft
   sum until it is exactly 2^max_code_size (a complete prefix code): each
   repair step shortens one over-long code and lengthens a shorter one. */
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
                                                int code_list_len,
                                                int max_code_size) {
  int i;
  mz_uint32 total = 0;
  if (code_list_len <= 1) return;
  /* Fold all lengths beyond the limit into the limit bucket. */
  for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
    pNum_codes[max_code_size] += pNum_codes[i];
  /* Kraft sum scaled by 2^max_code_size. */
  for (i = max_code_size; i > 0; i--)
    total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
  while (total != (1UL << max_code_size)) {
    pNum_codes[max_code_size]--;
    for (i = max_code_size - 1; i > 0; i--)
      if (pNum_codes[i]) {
        pNum_codes[i]--;
        pNum_codes[i + 1] += 2;
        break;
      }
    total--;
  }
}

/* Builds the canonical Huffman code for table table_num of compressor d.
   If static_table is true the code sizes in d->m_huff_code_sizes are taken
   as given; otherwise they are derived from the symbol frequencies in
   d->m_huff_count (radix sort + minimum-redundancy + length limiting).
   Either way, the bit-reversed canonical codes end up in d->m_huff_codes. */
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
                                         int table_len, int code_size_limit,
                                         int static_table) {
  int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
  mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
  MZ_CLEAR_OBJ(num_codes);
  if (static_table) {
    for (i = 0; i < table_len; i++)
      num_codes[d->m_huff_code_sizes[table_num][i]]++;
  } else {
    tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
        *pSyms;
    int num_used_syms = 0;
    const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
    /* Collect only symbols that actually occurred. */
    for (i = 0; i < table_len; i++)
      if (pSym_count[i]) {
        syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
        syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
      }
    pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
    tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
    /* Histogram the resulting code lengths and cap them at the limit. */
    for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
    tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
                                        code_size_limit);
    MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
    MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
    /* Reassign lengths: shortest codes go to the most frequent symbols
       (pSyms is sorted ascending, so walk it from the back). */
    for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
      for (l = num_codes[i]; l > 0; l--)
        d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] =
            (mz_uint8)(i);
  }
  /* Canonical code assignment: first code of each length follows from the
     counts of the previous length. */
  next_code[1] = 0;
  for (j = 0, i = 2; i <= code_size_limit; i++)
    next_code[i] = j = ((j + num_codes[i - 1]) << 1);
  for (i = 0; i < table_len; i++) {
    mz_uint rev_code = 0, code, code_size;
    if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0) continue;
    code = next_code[code_size]++;
    /* DEFLATE emits Huffman codes LSB-first, so store them bit-reversed. */
    for (l = code_size; l > 0; l--, code >>= 1)
      rev_code = (rev_code << 1) | (code & 1);
    d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
  }
}

/* Appends l bits (value b) to the compressor's bit buffer and flushes whole
   bytes to the output buffer. Expands inside functions where 'd' is the
   tdefl_compressor — not a hygienic macro. */
#define TDEFL_PUT_BITS(b, l)                                  \
  do {                                                        \
    mz_uint bits = b;                                         \
    mz_uint len = l;                                          \
    MZ_ASSERT(bits <= ((1U << len) - 1U));                    \
    d->m_bit_buffer |= (bits << d->m_bits_in);                \
    d->m_bits_in += len;                                      \
    while (d->m_bits_in >= 8) {                               \
      if (d->m_pOutput_buf < d->m_pOutput_buf_end)            \
        *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer);    \
      d->m_bit_buffer >>= 8;                                  \
      d->m_bits_in -= 8;                                      \
    }                                                         \
  }                                                           \
  MZ_MACRO_END

/* Flushes a pending run of repeated code sizes using DEFLATE's "repeat
   previous" meta-symbol 16 (runs of 3-6); shorter runs are emitted
   literally. Uses the caller's rle_repeat_count / prev_code_size /
   packed_code_sizes locals. */
#define TDEFL_RLE_PREV_CODE_SIZE()                                            \
  {                                                                           \
    if (rle_repeat_count) {                                                   \
      if (rle_repeat_count < 3) {                                             \
        d->m_huff_count[2][prev_code_size] = (mz_uint16)(                     \
            d->m_huff_count[2][prev_code_size] + rle_repeat_count);           \
        while (rle_repeat_count--)                                            \
          packed_code_sizes[num_packed_code_sizes++] = prev_code_size;        \
      } else {                                                                \
        d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1);     \
        packed_code_sizes[num_packed_code_sizes++] = 16;                      \
        packed_code_sizes[num_packed_code_sizes++] =                          \
            (mz_uint8)(rle_repeat_count - 3);                                 \
      }                                                                       \
      rle_repeat_count = 0;                                                   \
    }                                                                         \
  }

/* Flushes a pending run of zero code sizes using DEFLATE's meta-symbols
   17 (3-10 zeros) and 18 (11-138 zeros); runs under 3 are emitted
   literally. Uses the caller's rle_z_count / packed_code_sizes locals. */
#define TDEFL_RLE_ZERO_CODE_SIZE()                                            \
  {                                                                           \
    if (rle_z_count) {                                                        \
      if (rle_z_count < 3) {                                                  \
        d->m_huff_count[2][0] =                                               \
            (mz_uint16)(d->m_huff_count[2][0] + rle_z_count);                 \
        while (rle_z_count--) packed_code_sizes[num_packed_code_sizes++] = 0; \
      } else if (rle_z_count <= 10) {                                         \
        d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1);     \
        packed_code_sizes[num_packed_code_sizes++] = 17;                      \
        packed_code_sizes[num_packed_code_sizes++] =                          \
            (mz_uint8)(rle_z_count - 3);                                      \
      } else {                                                                \
        d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1);     \
        packed_code_sizes[num_packed_code_sizes++] = 18;                      \
        packed_code_sizes[num_packed_code_sizes++] =                          \
            (mz_uint8)(rle_z_count - 11);                                     \
      }                                                                       \
      rle_z_count = 0;                                                        \
    }                                                                         \
  }

/* Order in which code-length code lengths are transmitted (RFC 1951 §3.2.7:
   16, 17, 18, 0, 8, 7, ...). */
static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16,
    17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};

/* Emits the header of a dynamic-Huffman DEFLATE block (block type 2):
   optimizes the literal/length and distance tables from the gathered symbol
   counts, run-length encodes the combined code-size list, builds the
   code-length code, and writes HLIT/HDIST/HCLEN plus the packed code sizes
   to the output bitstream. */
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
  int num_lit_codes, num_dist_codes, num_bit_lengths;
  mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
      rle_repeat_count, packed_code_sizes_index;
  mz_uint8
      code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      prev_code_size = 0xFF;

  /* The end-of-block symbol (256) must always have a code. */
  d->m_huff_count[0][256] = 1;

  tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
  tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);

  /* Trim trailing unused codes (but at least 257 literal / 1 distance). */
  for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
    if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
  for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
    if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;

  /* Concatenate both size lists, then RLE-compress them into
     packed_code_sizes while counting meta-symbol frequencies in table 2. */
  memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
  memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
         num_dist_codes);
  total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
  num_packed_code_sizes = 0;
  rle_z_count = 0;
  rle_repeat_count = 0;

  memset(&d->m_huff_count[2][0], 0,
         sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
  for (i = 0; i < total_code_sizes_to_pack; i++) {
    mz_uint8 code_size = code_sizes_to_pack[i];
    if (!code_size) {
      TDEFL_RLE_PREV_CODE_SIZE();
      /* 138 is the longest run symbol 18 can express. */
      if (++rle_z_count == 138) {
        TDEFL_RLE_ZERO_CODE_SIZE();
      }
    } else {
      TDEFL_RLE_ZERO_CODE_SIZE();
      if (code_size != prev_code_size) {
        TDEFL_RLE_PREV_CODE_SIZE();
        d->m_huff_count[2][code_size] =
            (mz_uint16)(d->m_huff_count[2][code_size] + 1);
        packed_code_sizes[num_packed_code_sizes++] = code_size;
      } else if (++rle_repeat_count == 6) {
        /* 6 is the longest run symbol 16 can express. */
        TDEFL_RLE_PREV_CODE_SIZE();
      }
    }
    prev_code_size = code_size;
  }
  /* Flush whichever run is still pending. */
  if (rle_repeat_count) {
    TDEFL_RLE_PREV_CODE_SIZE();
  } else {
    TDEFL_RLE_ZERO_CODE_SIZE();
  }

  /* Code-length code itself is limited to 7-bit codes. */
  tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);

  /* BTYPE = 2 (dynamic Huffman). */
  TDEFL_PUT_BITS(2, 2);

  TDEFL_PUT_BITS(num_lit_codes - 257, 5);
  TDEFL_PUT_BITS(num_dist_codes - 1, 5);

  /* HCLEN: number of code-length code lengths transmitted, in swizzled
     order, trailing zeros trimmed (minimum 4). */
  for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
    if (d->m_huff_code_sizes
            [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
      break;
  num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
  TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
  for (i = 0; (int)i < num_bit_lengths; i++)
    TDEFL_PUT_BITS(
        d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);

  /* Emit the RLE'd code sizes; meta-symbols >= 16 carry extra repeat bits
     ("\02\03\07" = extra-bit counts for symbols 16/17/18). */
  for (packed_code_sizes_index = 0;
       packed_code_sizes_index < num_packed_code_sizes;) {
    mz_uint code = packed_code_sizes[packed_code_sizes_index++];
    MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
    TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
    if (code >= 16)
      TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
                     "\02\03\07"[code - 16]);
  }
}

/* Emits the header of a static-Huffman DEFLATE block (block type 1) and
   installs the fixed code sizes from RFC 1951 §3.2.6. */
static void tdefl_start_static_block(tdefl_compressor *d) {
  mz_uint i;
  mz_uint8 *p = &d->m_huff_code_sizes[0][0];

  for (i = 0; i <= 143; ++i) *p++ = 8;
  for (; i <= 255; ++i) *p++ = 9;
  for (; i <= 279; ++i) *p++ = 7;
  for (; i <= 287; ++i) *p++ = 8;

  memset(d->m_huff_code_sizes[1], 5, 32);

  tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
  tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);

  /* BTYPE = 1 (fixed Huffman). */
  TDEFL_PUT_BITS(1, 2);
}

/* mz_bitmasks[n] = lowest n bits set. */
static const mz_uint mz_bitmasks[17] = {
    0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
    0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
    MINIZ_HAS_64BIT_REGISTERS
/* Fast path: encodes the buffered LZ codes into the output bitstream using a
   64-bit bit buffer with unaligned 64-bit stores (little-endian only).
   Returns MZ_FALSE if the output buffer filled up. */
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  mz_uint8 *pOutput_buf = d->m_pOutput_buf;
  mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
  mz_uint64 bit_buffer = d->m_bit_buffer;
  mz_uint bits_in = d->m_bits_in;

/* Accumulate bits locally; flushed to memory once per flag group below. */
#define TDEFL_PUT_BITS_FAST(b, l)                    \
  {                                                  \
    bit_buffer |= (((mz_uint64)(b)) << bits_in);     \
    bits_in += (l);                                  \
  }

  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
       flags >>= 1) {
    /* Reload the 8-bit literal/match flag byte; the 0x100 sentinel marks
       when all 8 flags are consumed. */
    if (flags == 1)
      flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      /* Match: 1 length byte + 2 distance bytes in the LZ code buffer. */
      mz_uint s0, s1, n0, n1, sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
      pLZ_codes += 3;

      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                          d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                          s_tdefl_len_extra[match_len]);

      // This sequence coaxes MSVC into using cmov's vs. jmp's.
      s0 = s_tdefl_small_dist_sym[match_dist & 511];
      n0 = s_tdefl_small_dist_extra[match_dist & 511];
      s1 = s_tdefl_large_dist_sym[match_dist >> 8];
      n1 = s_tdefl_large_dist_extra[match_dist >> 8];
      sym = (match_dist < 512) ? s0 : s1;
      num_extra_bits = (match_dist < 512) ? n0 : n1;

      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
                          d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
                          num_extra_bits);
    } else {
      /* Literal — and opportunistically emit up to two more literals while
         the flag bits say the next codes are also literals. */
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                          d->m_huff_code_sizes[0][lit]);

      if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
        flags >>= 1;
        lit = *pLZ_codes++;
        MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
        TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                            d->m_huff_code_sizes[0][lit]);

        if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
          flags >>= 1;
          lit = *pLZ_codes++;
          MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
          TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                              d->m_huff_code_sizes[0][lit]);
        }
      }
    }

    if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;

    /* Flush whole bytes of the 64-bit bit buffer with one unaligned store. */
    *(mz_uint64 *)pOutput_buf = bit_buffer;
    pOutput_buf += (bits_in >> 3);
    bit_buffer >>= (bits_in & ~7);
    bits_in &= 7;
  }

#undef TDEFL_PUT_BITS_FAST

  /* Drain the leftover bits through the byte-oriented TDEFL_PUT_BITS. */
  d->m_pOutput_buf = pOutput_buf;
  d->m_bits_in = 0;
  d->m_bit_buffer = 0;

  while (bits_in) {
    mz_uint32 n = MZ_MIN(bits_in, 16);
    TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
    bit_buffer >>= n;
    bits_in -= n;
  }

  /* End-of-block symbol. */
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);

  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
/* Portable path: same LZ-code-to-bitstream encoding using the byte-at-a-time
   TDEFL_PUT_BITS macro. Returns MZ_FALSE if the output buffer filled up. */
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;

  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
       flags >>= 1) {
    if (flags == 1) flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      /* Match: length byte + 16-bit distance (read byte-wise: portable). */
      mz_uint sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
      pLZ_codes += 3;

      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                     d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                     s_tdefl_len_extra[match_len]);

      if (match_dist < 512) {
        sym = s_tdefl_small_dist_sym[match_dist];
        num_extra_bits = s_tdefl_small_dist_extra[match_dist];
      } else {
        sym = s_tdefl_large_dist_sym[match_dist >> 8];
        num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
      }
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
    } else {
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
    }
  }

  /* End-of-block symbol. */
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);

  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif  // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
        // MINIZ_HAS_64BIT_REGISTERS

/* Writes one complete DEFLATE block: header (static or dynamic) followed by
   the encoded LZ codes. */
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
  if (static_block)
    tdefl_start_static_block(d);
  else
    tdefl_start_dynamic_block(d);
  return tdefl_compress_lz_codes(d);
}

/* Flushes the buffered LZ codes as one DEFLATE block, falling back to a
   stored (raw) block if compression would expand the data. */
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
  mz_uint saved_bit_buf, saved_bits_in;
  mz_uint8 *pSaved_output_buf;
  mz_bool comp_block_succeeded = MZ_FALSE;
  int n,
      /* Raw blocks are only possible while the source bytes are still
         resident in the dictionary. */
      use_raw_block =
          ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
          (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
  /* Write straight into the caller's buffer when it is large enough;
     otherwise stage through the internal output buffer. */
  mz_uint8 *pOutput_buf_start =
      ((d->m_pPut_buf_func == NULL) &&
       ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
          ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
          : d->m_output_buf;

  d->m_pOutput_buf = pOutput_buf_start;
  d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;

  MZ_ASSERT(!d->m_output_flush_remaining);
  d->m_output_flush_ofs = 0;
  d->m_output_flush_remaining = 0;

  /* Right-align the partial flag byte and drop it if it holds no flags. */
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
  d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);

  /* zlib header (0x78 0x01) goes in front of the very first block. */
  if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
    TDEFL_PUT_BITS(0x78, 8);
    TDEFL_PUT_BITS(0x01, 8);
  }

  /* BFINAL bit. */
  TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);

  /* Remember the output position so we can back out if the compressed block
     turns out larger than a stored block. */
  pSaved_output_buf = d->m_pOutput_buf;
  saved_bit_buf = d->m_bit_buffer;
  saved_bits_in = d->m_bits_in;

  if (!use_raw_block)
    comp_block_succeeded =
        tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
                                    (d->m_total_lz_bytes < 48));

  // If the block gets expanded, forget the current contents of the output
  // buffer and send a raw block instead.
  if (((use_raw_block) ||
       ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
                                  d->m_total_lz_bytes))) &&
      ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
    mz_uint i;
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    /* BTYPE = 0 (stored), then byte-align, then LEN and NLEN (= ~LEN). */
    TDEFL_PUT_BITS(0, 2);
    if (d->m_bits_in) {
      TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
    }
    for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
      TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
    }
    /* Emit the source bytes verbatim out of the dictionary window. */
    for (i = 0; i < d->m_total_lz_bytes; ++i) {
      TDEFL_PUT_BITS(
          d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
          8);
    }
  }
  // Check for the extremely unlikely (if not impossible) case of the compressed
  // block not fitting into the output buffer when using dynamic codes.
  else if (!comp_block_succeeded) {
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    tdefl_compress_block(d, MZ_TRUE);
  }

  if (flush) {
    if (flush == TDEFL_FINISH) {
      /* Final block: byte-align and append the Adler-32 trailer (big-endian)
         if we wrote a zlib header. */
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
        mz_uint i, a = d->m_adler32;
        for (i = 0; i < 4; i++) {
          TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
          a <<= 8;
        }
      }
    } else {
      /* Full flush: append an empty stored block so the decompressor can
         sync to a byte boundary. */
      mz_uint i, z = 0;
      TDEFL_PUT_BITS(0, 3);
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      for (i = 2; i; --i, z ^= 0xFFFF) {
        TDEFL_PUT_BITS(z & 0xFFFF, 16);
      }
    }
  }

  MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);

  /* Reset the per-block symbol statistics and LZ code buffer. */
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);

  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
  d->m_total_lz_bytes = 0;
  d->m_block_index++;

  /* Deliver the produced bytes: via callback, or copy what fits from the
     staging buffer (remembering any overflow for a later flush), or simply
     advance the offset when we wrote into the caller's buffer directly. */
  if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
    if (d->m_pPut_buf_func) {
      *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
      if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
        return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
    } else if (pOutput_buf_start == d->m_output_buf) {
      int bytes_to_copy = (int)MZ_MIN(
          (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
      memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
             bytes_to_copy);
      d->m_out_buf_ofs += bytes_to_copy;
      if ((n -= bytes_to_copy) != 0) {
        d->m_output_flush_ofs = bytes_to_copy;
        d->m_output_flush_remaining = n;
      }
    } else {
      d->m_out_buf_ofs += n;
    }
  }

  return d->m_output_flush_remaining;
}

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
/* Fast match finder: follows the hash chain comparing two bytes at a time
   via unaligned 16-bit loads. Updates *pMatch_dist / *pMatch_len in place
   (they enter holding the best match found so far). */
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint
max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q; mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD(s); MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; q = (const mz_uint16 *)(d->m_dict + probe_pos); if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue; p = s; probe_len = 32; do { } while ( (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); if (!probe_len) { *pMatch_dist = dist; *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN); break; } else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q)) > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len) break; c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]); } } } #else static MZ_FORCEINLINE void tdefl_find_match( tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) { mz_uint dist, pos = 
lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len; mz_uint num_probes_left = d->m_max_probes[match_len >= 32]; const mz_uint8 *s = d->m_dict + pos, *p, *q; mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1]; MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN); if (max_match_len <= match_len) return; for (;;) { for (;;) { if (--num_probes_left == 0) return; #define TDEFL_PROBE \ next_probe_pos = d->m_next[probe_pos]; \ if ((!next_probe_pos) || \ ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \ return; \ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \ if ((d->m_dict[probe_pos + match_len] == c0) && \ (d->m_dict[probe_pos + match_len - 1] == c1)) \ break; TDEFL_PROBE; TDEFL_PROBE; TDEFL_PROBE; } if (!dist) break; p = s; q = d->m_dict + probe_pos; for (probe_len = 0; probe_len < max_match_len; probe_len++) if (*p++ != *q++) break; if (probe_len > match_len) { *pMatch_dist = dist; if ((*pMatch_len = match_len = probe_len) == max_match_len) return; c0 = d->m_dict[pos + match_len]; c1 = d->m_dict[pos + match_len - 1]; } } } #endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN static mz_bool tdefl_compress_fast(tdefl_compressor *d) { // Faster, minimally featured LZRW1-style match+parse loop with better // register utilization. Intended for applications where raw throughput is // valued more highly than ratio. 
  // Shadow the compressor state in locals for the duration of the loop;
  // it is written back before any tdefl_flush_block() call and on exit.
  mz_uint lookahead_pos = d->m_lookahead_pos,
          lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size,
          total_lz_bytes = d->m_total_lz_bytes,
          num_flags_left = d->m_num_flags_left;
  mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
  mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
  while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) {
    const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
    mz_uint dst_pos =
        (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
    mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
        d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
    d->m_src_buf_left -= num_bytes_to_process;
    lookahead_size += num_bytes_to_process;
    // Copy input into the circular dictionary (mirroring the first
    // TDEFL_MAX_MATCH_LEN-1 bytes past the end so matches can run off it).
    while (num_bytes_to_process) {
      mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
      memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
      if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
        memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc,
               MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
      d->m_pSrc += n;
      dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
      num_bytes_to_process -= n;
    }
    dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
    if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
      break;
    while (lookahead_size >= 4) {
      mz_uint cur_match_dist, cur_match_len = 1;
      mz_uint8 *pCur_dict = d->m_dict + cur_pos;
      // Hash the next 3 bytes into the single-probe level-1 hash table.
      mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
      mz_uint hash =
          (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) &
          TDEFL_LEVEL1_HASH_SIZE_MASK;
      mz_uint probe_pos = d->m_hash[hash];
      d->m_hash[hash] = (mz_uint16)lookahead_pos;
      if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <=
           dict_size) &&
          ((*(const mz_uint32 *)(d->m_dict +
                                 (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) &
            0xFFFFFF) == first_trigram)) {
        const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
        const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
        mz_uint32 probe_len = 32;
        do {
        } while ((TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD(++p) ==
                  TDEFL_READ_UNALIGNED_WORD(++q)) &&
                 (--probe_len > 0));
        cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) +
                        (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
        if (!probe_len)
          cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
        if ((cur_match_len < TDEFL_MIN_MATCH_LEN) ||
            ((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
             (cur_match_dist >= 8U * 1024U))) {
          // Too short (or a len-3 match too far away to be worth it):
          // emit a literal instead.
          cur_match_len = 1;
          *pLZ_code_buf++ = (mz_uint8)first_trigram;
          *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
          d->m_huff_count[0][(mz_uint8)first_trigram]++;
        } else {
          // Emit a match: 1 length byte + 2 distance bytes, flag bit set.
          mz_uint32 s0, s1;
          cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
          MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) &&
                    (cur_match_dist >= 1) &&
                    (cur_match_dist <= TDEFL_LZ_DICT_SIZE));
          cur_match_dist--;
          pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
          *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
          pLZ_code_buf += 3;
          *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
          s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
          s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
          d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
          d->m_huff_count[0]
                         [s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]
                             ++;
        }
      } else {
        // No trigram hit: plain literal.
        *pLZ_code_buf++ = (mz_uint8)first_trigram;
        *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
        d->m_huff_count[0][(mz_uint8)first_trigram]++;
      }
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      total_lz_bytes += cur_match_len;
      lookahead_pos += cur_match_len;
      dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
      MZ_ASSERT(lookahead_size >= cur_match_len);
      lookahead_size -= cur_match_len;
      // LZ code buffer nearly full: write back state and flush a block.
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
    // Fewer than 4 bytes left in the lookahead: emit them as literals.
    while (lookahead_size) {
      mz_uint8 lit = d->m_dict[cur_pos];
      total_lz_bytes++;
      *pLZ_code_buf++ = lit;
      *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
      if (--num_flags_left == 0) {
        num_flags_left = 8;
        pLZ_flags = pLZ_code_buf++;
      }
      d->m_huff_count[0][lit]++;
      lookahead_pos++;
      dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE);
      cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
      lookahead_size--;
      if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) {
        int n;
        d->m_lookahead_pos = lookahead_pos;
        d->m_lookahead_size = lookahead_size;
        d->m_dict_size = dict_size;
        d->m_total_lz_bytes = total_lz_bytes;
        d->m_pLZ_code_buf = pLZ_code_buf;
        d->m_pLZ_flags = pLZ_flags;
        d->m_num_flags_left = num_flags_left;
        if ((n = tdefl_flush_block(d, 0)) != 0)
          return (n < 0) ? MZ_FALSE : MZ_TRUE;
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
  }
  // Write the shadowed state back into the compressor.
  d->m_lookahead_pos = lookahead_pos;
  d->m_lookahead_size = lookahead_size;
  d->m_dict_size = dict_size;
  d->m_total_lz_bytes = total_lz_bytes;
  d->m_pLZ_code_buf = pLZ_code_buf;
  d->m_pLZ_flags = pLZ_flags;
  d->m_num_flags_left = num_flags_left;
  return MZ_TRUE;
}
#endif  // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN

// Appends one literal to the LZ code buffer and bumps its Huffman count.
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
                                                mz_uint8 lit) {
  d->m_total_lz_bytes++;
  *d->m_pLZ_code_buf++ = lit;
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  d->m_huff_count[0][lit]++;
}

// Appends one (len, dist) match (3 bytes: len-3, dist lo, dist hi) to the LZ
// code buffer and updates the length/distance Huffman counts.
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
                                              mz_uint match_len,
                                              mz_uint match_dist) {
  mz_uint32 s0, s1;
  MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
            (match_dist <= TDEFL_LZ_DICT_SIZE));
  d->m_total_lz_bytes += match_len;
  d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
  match_dist -= 1;
  d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
  d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
  d->m_pLZ_code_buf += 3;
  *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  s0 = s_tdefl_small_dist_sym[match_dist & 511];
  s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
  d->m_huff_count[1][(match_dist < 512) ?
                                          s0 : s1]++;
  if (match_len >= TDEFL_MIN_MATCH_LEN)
    d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}

// Full-featured match+parse loop (lazy or greedy depending on flags).
static mz_bool tdefl_compress_normal(tdefl_compressor *d) {
  const mz_uint8 *pSrc = d->m_pSrc;
  size_t src_buf_left = d->m_src_buf_left;
  tdefl_flush flush = d->m_flush;
  while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) {
    mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
    // Update dictionary and hash chains. Keeps the lookahead size equal to
    // TDEFL_MAX_MATCH_LEN.
    if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) {
      mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                        TDEFL_LZ_DICT_SIZE_MASK,
              ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
      // Rolling 2-byte seed hash; each new byte is folded in below.
      mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                      << TDEFL_LZ_HASH_SHIFT) ^
                     d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
      mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(
          src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
      const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
      src_buf_left -= num_bytes_to_process;
      d->m_lookahead_size += num_bytes_to_process;
      while (pSrc != pSrc_end) {
        mz_uint8 c = *pSrc++;
        d->m_dict[dst_pos] = c;
        // Mirror the window start so matches may run past the wrap point.
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
        d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
        d->m_hash[hash] = (mz_uint16)(ins_pos);
        dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
        ins_pos++;
      }
    } else {
      // Cold start: not enough data yet to maintain the rolling hash.
      while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) {
        mz_uint8 c = *pSrc++;
        mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) &
                          TDEFL_LZ_DICT_SIZE_MASK;
        src_buf_left--;
        d->m_dict[dst_pos] = c;
        if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
          d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
        if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) {
          mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
          mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK]
                           << (TDEFL_LZ_HASH_SHIFT * 2)) ^
                          (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]
                           << TDEFL_LZ_HASH_SHIFT) ^
                          c) &
                         (TDEFL_LZ_HASH_SIZE - 1);
          d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
          d->m_hash[hash] = (mz_uint16)(ins_pos);
        }
      }
    }
    d->m_dict_size =
        MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
    if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break;
    // Simple lazy/greedy parsing state machine.
    len_to_move = 1;
    cur_match_dist = 0;
    cur_match_len =
        d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
    cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
    if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) {
      // RLE mode: only look for a run of the previous byte at distance 1.
      if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) {
        mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
        cur_match_len = 0;
        while (cur_match_len < d->m_lookahead_size) {
          if (d->m_dict[cur_pos + cur_match_len] != c) break;
          cur_match_len++;
        }
        if (cur_match_len < TDEFL_MIN_MATCH_LEN)
          cur_match_len = 0;
        else
          cur_match_dist = 1;
      }
    } else {
      tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size,
                       d->m_lookahead_size, &cur_match_dist, &cur_match_len);
    }
    // Reject marginal matches (len-3 at long distance, self-overlap at the
    // window start, or short matches in filter mode).
    if (((cur_match_len == TDEFL_MIN_MATCH_LEN) &&
         (cur_match_dist >= 8U * 1024U)) ||
        (cur_pos == cur_match_dist) ||
        ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) {
      cur_match_dist = cur_match_len = 0;
    }
    if (d->m_saved_match_len) {
      // Lazy parsing: decide between the match deferred from the previous
      // position and the one found here.
      if (cur_match_len > d->m_saved_match_len) {
        tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
        if (cur_match_len >= 128) {
          tdefl_record_match(d, cur_match_len, cur_match_dist);
          d->m_saved_match_len = 0;
          len_to_move = cur_match_len;
        } else {
          d->m_saved_lit = d->m_dict[cur_pos];
          d->m_saved_match_dist = cur_match_dist;
          d->m_saved_match_len = cur_match_len;
        }
      } else {
        tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
        len_to_move = d->m_saved_match_len - 1;
        d->m_saved_match_len = 0;
      }
    } else if (!cur_match_dist)
      tdefl_record_literal(d,
                           d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
    else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
             (cur_match_len >= 128)) {
      tdefl_record_match(d, cur_match_len, cur_match_dist);
      len_to_move = cur_match_len;
    } else {
      // Defer this match: a longer one may start at the next byte.
      d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
      d->m_saved_match_dist = cur_match_dist;
      d->m_saved_match_len = cur_match_len;
    }
    // Move the lookahead forward by len_to_move bytes.
    d->m_lookahead_pos += len_to_move;
    MZ_ASSERT(d->m_lookahead_size >= len_to_move);
    d->m_lookahead_size -= len_to_move;
    d->m_dict_size =
        MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
    // Check if it's time to flush the current LZ codes to the internal output
    // buffer.
    if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
        ((d->m_total_lz_bytes > 31 * 1024) &&
         (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
           d->m_total_lz_bytes) ||
          (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
      int n;
      d->m_pSrc = pSrc;
      d->m_src_buf_left = src_buf_left;
      if ((n = tdefl_flush_block(d, 0)) != 0)
        return (n < 0) ? MZ_FALSE : MZ_TRUE;
    }
  }
  d->m_pSrc = pSrc;
  d->m_src_buf_left = src_buf_left;
  return MZ_TRUE;
}

// Drains any pending (previously overflowed) output into the caller's buffer
// and reports how much input was consumed.
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
  if (d->m_pIn_buf_size) {
    *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
  }
  if (d->m_pOut_buf_size) {
    size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
                      d->m_output_flush_remaining);
    memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
           d->m_output_buf + d->m_output_flush_ofs, n);
    d->m_output_flush_ofs += (mz_uint)n;
    d->m_output_flush_remaining -= (mz_uint)n;
    d->m_out_buf_ofs += n;
    *d->m_pOut_buf_size = d->m_out_buf_ofs;
  }
  return (d->m_finished && !d->m_output_flush_remaining) ?
                                                           TDEFL_STATUS_DONE
                                                         : TDEFL_STATUS_OKAY;
}

// Main low-level streaming entry point: validates arguments, runs the fast or
// normal parse loop, maintains the adler32, and flushes finished data.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush) {
  if (!d) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return TDEFL_STATUS_BAD_PARAM;
  }
  d->m_pIn_buf = pIn_buf;
  d->m_pIn_buf_size = pIn_buf_size;
  d->m_pOut_buf = pOut_buf;
  d->m_pOut_buf_size = pOut_buf_size;
  d->m_pSrc = (const mz_uint8 *)(pIn_buf);
  d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
  d->m_out_buf_ofs = 0;
  d->m_flush = flush;
  // A put-buf callback and a caller output buffer are mutually exclusive.
  if (((d->m_pPut_buf_func != NULL) ==
       ((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
      (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
      (d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
      (pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
      (pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
    if (pIn_buf_size) *pIn_buf_size = 0;
    if (pOut_buf_size) *pOut_buf_size = 0;
    return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
  }
  d->m_wants_to_finish |= (flush == TDEFL_FINISH);
  if ((d->m_output_flush_remaining) || (d->m_finished))
    return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  // The fast path only applies to greedy single-probe configs with no
  // filtering/RLE/raw-block modes.
  if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
      ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
      ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
                      TDEFL_RLE_MATCHES)) == 0)) {
    if (!tdefl_compress_fast(d)) return d->m_prev_return_status;
  } else
#endif  // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  {
    if (!tdefl_compress_normal(d)) return d->m_prev_return_status;
  }
  if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
      (pIn_buf))
    d->m_adler32 =
        (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
                              d->m_pSrc - (const mz_uint8 *)pIn_buf);
  if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
      (!d->m_output_flush_remaining)) {
    if (tdefl_flush_block(d, flush) < 0) return d->m_prev_return_status;
    d->m_finished = (flush == TDEFL_FINISH);
    if (flush == TDEFL_FULL_FLUSH) {
      // Full flush resets the dictionary so decompression can restart here.
      MZ_CLEAR_OBJ(d->m_hash);
      MZ_CLEAR_OBJ(d->m_next);
      d->m_dict_size = 0;
    }
  }
  return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}

// Convenience wrapper for callback-based output (no output buffer args).
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush) {
  MZ_ASSERT(d->m_pPut_buf_func);
  return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}

// Initializes a compressor; flags is a combination of TDEFL_* flags with the
// low 12 bits holding the max probe count.
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags) {
  d->m_pPut_buf_func = pPut_buf_func;
  d->m_pPut_buf_user = pPut_buf_user;
  d->m_flags = (mz_uint)(flags);
  d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
  d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
  d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
  if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG)) MZ_CLEAR_OBJ(d->m_hash);
  d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
      d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
  d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
      d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_pOutput_buf = d->m_output_buf;
  d->m_pOutput_buf_end = d->m_output_buf;
  d->m_prev_return_status = TDEFL_STATUS_OKAY;
  d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
  d->m_adler32 = 1;
  d->m_pIn_buf = NULL;
  d->m_pOut_buf = NULL;
  d->m_pIn_buf_size = NULL;
  d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}

mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }

// One-shot: compresses pBuf through a user callback; the compressor itself is
// heap-allocated and freed before returning.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool succeeded;
  if (((buf_len) && (!pBuf)) || (!pPut_buf_func)) return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp) return MZ_FALSE;
  succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
               TDEFL_STATUS_OKAY);
  succeeded = succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len,
                                                  TDEFL_FINISH) ==
                            TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return succeeded;
}

// Growable (or fixed) output sink used by the mem-to-heap/mem helpers.
typedef struct {
  size_t m_size, m_capacity;
  mz_uint8 *m_pBuf;
  mz_bool m_expandable;  // when false, overflow fails instead of reallocating
} tdefl_output_buffer;

// tdefl put-buf callback that appends to a tdefl_output_buffer, doubling its
// capacity (min 128 bytes) as needed when expandable.
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t new_size = p->m_size + len;
  if (new_size > p->m_capacity) {
    size_t new_capacity = p->m_capacity;
    mz_uint8 *pNew_buf;
    if (!p->m_expandable) return MZ_FALSE;
    do {
      new_capacity = MZ_MAX(128U, new_capacity << 1U);
    } while (new_size > new_capacity);
    pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
    if (!pNew_buf) return MZ_FALSE;
    p->m_pBuf = pNew_buf;
    p->m_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = new_size;
  return MZ_TRUE;
}

// Compresses src into a freshly-allocated buffer; caller owns (and must free)
// the returned pointer. Returns NULL on failure.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len)
    return MZ_FALSE;
  else
    *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return NULL;
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}

// Compresses src into the caller-supplied fixed buffer; returns the number of
// bytes written, or 0 on failure/overflow.
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_buf) return 0;
  out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
  out_buf.m_capacity = out_buf_len;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return 0;
  return out_buf.m_size;
}

#ifndef MINIZ_NO_ZLIB_APIS
// Probe counts indexed by compression level 0..10.
static const mz_uint s_tdefl_num_probes[11] = {0,   1,   6,   32,  16, 32,
                                               128, 256, 512, 768, 1500};

// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput to fall off a cliff
// on some files).
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  mz_uint comp_flags =
      s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
      ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
  if (window_bits > 0) comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
  if (!level)
    comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
  else if (strategy == MZ_FILTERED)
    comp_flags |= TDEFL_FILTER_MATCHES;
  else if (strategy == MZ_HUFFMAN_ONLY)
    comp_flags &= ~TDEFL_MAX_PROBES_MASK;
  else if (strategy == MZ_FIXED)
    comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
  else if (strategy == MZ_RLE)
    comp_flags |= TDEFL_RLE_MATCHES;
  return comp_flags;
}
#endif  // MINIZ_NO_ZLIB_APIS

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204)  // nonstandard extension used : non-constant
                                 // aggregate initializer (also supported by GNU
                                 // C and C99, so no big deal)
#pragma warning(disable : 4244)  // 'initializing': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4267)  // 'argument': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4996)  // 'strdup': The POSIX name for this item is
                                 // deprecated. Instead, use the ISO C and C++
                                 // conformant name: _strdup.
#endif

// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init( pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH); } if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE) { MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } // write real header *pLen_out = out_buf.m_size - 41; { static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06}; mz_uint8 pnghdr[41] = {0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0, 0, (mz_uint8)(w >> 8), (mz_uint8)w, 0, 0, (mz_uint8)(h >> 8), (mz_uint8)h, 8, chans[num_chans], 0, 0, 0, 0, 0, 0, 0, (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16), (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out, 0x49, 0x44, 0x41, 0x54}; c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17); for (i = 0; i < 4; ++i, c <<= 8) ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24); memcpy(out_buf.m_pBuf, pnghdr, 41); } // write footer (IDAT CRC-32, followed by IEND chunk) if (!tdefl_output_buffer_putter( "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) { *pLen_out = 0; MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4); for (i = 0; i < 4; ++i, c <<= 8) (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24); // compute final size of file, grab compressed data buffer and return *pLen_out += 57; MZ_FREE(pComp); return out_buf.m_pBuf; } void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out) { // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's // where #defined out) return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE); } // ------------------- .ZIP archive reading #ifndef MINIZ_NO_ARCHIVE_APIS #error "No arvhive APIs" #ifdef MINIZ_NO_STDIO #define MZ_FILE void * #else #include <stdio.h> #include <sys/stat.h> #if defined(_MSC_VER) || 
defined(__MINGW64__) static FILE *mz_fopen(const char *pFilename, const char *pMode) { FILE *pFile = NULL; fopen_s(&pFile, pFilename, pMode); return pFile; } static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) { FILE *pFile = NULL; if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL; return pFile; } #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN mz_fopen #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 _ftelli64 #define MZ_FSEEK64 _fseeki64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN mz_freopen #define MZ_DELETE_FILE remove #elif defined(__MINGW32__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__TINYC__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftell #define MZ_FSEEK64 fseek #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen64(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT stat64 #define MZ_FILE_STAT stat64 #define MZ_FFLUSH fflush #define MZ_FREOPEN(p, m, s) freopen64(p, m, s) 
#define MZ_DELETE_FILE remove
#else
// Generic POSIX fallback (plain ftello/fseeko, no explicit 64-bit variants).
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FILE FILE
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif  // #ifdef _MSC_VER
#endif  // #ifdef MINIZ_NO_STDIO

// ASCII-only lowercasing; avoids tolower()'s locale dependence and its
// undefined behavior on negative char values.
#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))

// Various ZIP archive enums. To completely avoid cross platform compiler
// alignment and platform endian issues, miniz.c doesn't use structs for any of
// this stuff.
enum {
  // ZIP archive identifiers and record sizes
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
  MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
  MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
  MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
  MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,
  // Central directory header record offsets
  MZ_ZIP_CDH_SIG_OFS = 0,
  MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
  MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
  MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
  MZ_ZIP_CDH_METHOD_OFS = 10,
  MZ_ZIP_CDH_FILE_TIME_OFS = 12,
  MZ_ZIP_CDH_FILE_DATE_OFS = 14,
  MZ_ZIP_CDH_CRC32_OFS = 16,
  MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
  MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
  MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
  MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
  MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
  MZ_ZIP_CDH_DISK_START_OFS = 34,
  MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
  MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
  MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,
  // Local directory header offsets
  MZ_ZIP_LDH_SIG_OFS = 0,
  MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
  MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
  MZ_ZIP_LDH_METHOD_OFS = 8,
  MZ_ZIP_LDH_FILE_TIME_OFS = 10,
  MZ_ZIP_LDH_FILE_DATE_OFS = 12,
  MZ_ZIP_LDH_CRC32_OFS = 14,
  MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
  MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
  MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
  MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
  // End of central directory offsets
  MZ_ZIP_ECDH_SIG_OFS = 0,
  MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
  MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
  MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
  MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
  MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
  MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
  MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,
};

// Minimal growable array; element size is set once via
// MZ_ZIP_ARRAY_SET_ELEMENT_SIZE and elements are accessed untyped through
// MZ_ZIP_ARRAY_ELEMENT.
typedef struct {
  void *m_p;
  size_t m_size, m_capacity;
  mz_uint m_element_size;
} mz_zip_array;

// Per-archive mutable state: raw central directory bytes, per-entry offsets
// into it, optional filename-sorted index, and the backing FILE*/memory blob.
struct mz_zip_internal_state_tag {
  mz_zip_array m_central_dir;
  mz_zip_array m_central_dir_offsets;
  mz_zip_array m_sorted_central_dir_offsets;
  MZ_FILE *m_pFile;
  void *m_pMem;
  size_t m_mem_size;
  size_t m_mem_capacity;
};

#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \
  (array_ptr)->m_element_size = element_size
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \
  ((element_type *)((array_ptr)->m_p))[index]

// Frees the array storage and resets it to an empty, zeroed state.
static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip,
                                              mz_zip_array *pArray) {
  pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
  memset(pArray, 0, sizeof(mz_zip_array));
}

// Grows capacity to at least min_new_capacity; when `growing` is set the
// capacity is doubled geometrically to amortize repeated push_backs.
static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip,
                                            mz_zip_array *pArray,
                                            size_t min_new_capacity,
                                            mz_uint growing) {
  void *pNew_p;
  size_t new_capacity = min_new_capacity;
  MZ_ASSERT(pArray->m_element_size);
  if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE;
  if (growing) {
    new_capacity = MZ_MAX(1, pArray->m_capacity);
    while (new_capacity < min_new_capacity) new_capacity *= 2;
  }
  if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p,
                                         pArray->m_element_size, new_capacity)))
    return MZ_FALSE;
  pArray->m_p = pNew_p;
  pArray->m_capacity = new_capacity;
  return MZ_TRUE;
}

// Reserves capacity without changing m_size.
static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip,
                                                   mz_zip_array *pArray,
                                                   size_t new_capacity,
                                                   mz_uint growing) {
  if (new_capacity > pArray->m_capacity) {
    if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
      return MZ_FALSE;
  }
  return MZ_TRUE;
}

static MZ_FORCEINLINE mz_bool
mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray,
                    size_t new_size, mz_uint growing) {
  // Grows storage if needed, then sets the logical size (new elements are
  // left uninitialized).
  if (new_size > pArray->m_capacity) {
    if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
      return MZ_FALSE;
  }
  pArray->m_size = new_size;
  return MZ_TRUE;
}

// Makes room for n additional elements beyond the current size.
static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip,
                                                       mz_zip_array *pArray,
                                                       size_t n) {
  return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}

// Appends n raw elements (memcpy'd) to the end of the array.
static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip,
                                                     mz_zip_array *pArray,
                                                     const void *pElements,
                                                     size_t n) {
  size_t orig_size = pArray->m_size;
  if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
    return MZ_FALSE;
  memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size,
         pElements, n * pArray->m_element_size);
  return MZ_TRUE;
}

#ifndef MINIZ_NO_TIME
// Converts a packed MS-DOS date/time pair (as stored in ZIP headers) to a
// local time_t. Note DOS times have 2-second resolution (tm_sec = bits<<1).
static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) {
  struct tm tm;
  memset(&tm, 0, sizeof(tm));
  tm.tm_isdst = -1;
  tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
  tm.tm_mon = ((dos_date >> 5) & 15) - 1;
  tm.tm_mday = dos_date & 31;
  tm.tm_hour = (dos_time >> 11) & 31;
  tm.tm_min = (dos_time >> 5) & 63;
  tm.tm_sec = (dos_time << 1) & 62;
  return mktime(&tm);
}

// Converts a time_t (via local time) to packed MS-DOS time and date words.
// On localtime_s failure (MSVC) both outputs are zeroed.
static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time,
                                    mz_uint16 *pDOS_date) {
#ifdef _MSC_VER
  struct tm tm_struct;
  struct tm *tm = &tm_struct;
  errno_t err = localtime_s(tm, &time);
  if (err) {
    *pDOS_date = 0;
    *pDOS_time = 0;
    return;
  }
#else
  struct tm *tm = localtime(&time);
#endif
  *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) +
                           ((tm->tm_sec) >> 1));
  *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) +
                           ((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif

#ifndef MINIZ_NO_STDIO
// Reads a file's mtime and converts it to DOS time/date; returns MZ_FALSE if
// stat fails. With MINIZ_NO_TIME both outputs are simply zeroed.
static mz_bool mz_zip_get_file_modified_time(const char *pFilename,
                                             mz_uint16 *pDOS_time,
                                             mz_uint16 *pDOS_date) {
#ifdef MINIZ_NO_TIME
  (void)pFilename;
  *pDOS_date = *pDOS_time = 0;
#else
  struct MZ_FILE_STAT_STRUCT file_stat;
  // On Linux with x86 glibc, this call will fail on large files (>= 0x80000000
  // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh.
  if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE;
  mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date);
#endif  // #ifdef MINIZ_NO_TIME
  return MZ_TRUE;
}

#ifndef MINIZ_NO_TIME
// Sets a file's access/modification times via utime(); returns MZ_TRUE on
// success.
static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time,
                                     time_t modified_time) {
  struct utimbuf t;
  t.actime = access_time;
  t.modtime = modified_time;
  return !utime(pFilename, &t);
}
#endif  // #ifndef MINIZ_NO_TIME
#endif  // #ifndef MINIZ_NO_STDIO

// Common reader setup: installs default allocators if none were supplied,
// switches the archive into READING mode and allocates + zeroes the internal
// state (central dir byte array plus two mz_uint32 offset arrays).
static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip,
                                           mz_uint32 flags) {
  (void)flags;
  if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;

  if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree) pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func;

  pZip->m_zip_mode = MZ_ZIP_MODE_READING;
  pZip->m_archive_size = 0;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;

  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}

// Case-insensitive "less than" comparison of two central-dir entries'
// filenames, used as the ordering predicate for the heap sort below.
static MZ_FORCEINLINE mz_bool
mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array,
                            const mz_zip_array *pCentral_dir_offsets,
                            mz_uint l_index, mz_uint r_index) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(
      pCentral_dir_array, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets,
                           mz_uint32, r_index));
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS),
          r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  // Filenames immediately follow the fixed-size central dir header.
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
    pL++;
    pR++;
  }
  // Equal common prefix: the shorter name sorts first.
  return (pL == pE) ? (l_len < r_len) : (l < r);
}

#define MZ_SWAP_UINT32(a, b) \
  do {                       \
    mz_uint32 t = a;         \
    a = b;                   \
    b = t;                   \
  }                          \
  MZ_MACRO_END

// Heap sort of lowercased filenames, used to help accelerate plain central
// directory searches by mz_zip_reader_locate_file(). (Could also use qsort(),
// but it could allocate memory.)
static void mz_zip_reader_sort_central_dir_offsets_by_filename(
    mz_zip_archive *pZip) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  // Phase 1: heapify (sift-down from the last interior node).
  int start = (size - 2) >> 1, end;
  while (start >= 0) {
    int child, root = start;
    for (;;) {
      if ((child = (root << 1) + 1) >= size) break;
      child +=
          (((child + 1) < size) &&
           (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                        pIndices[child], pIndices[child + 1])));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    start--;
  }

  // Phase 2: repeatedly pop the max to the end and re-sift the root.
  end = size - 1;
  while (end > 0) {
    int child, root = 0;
    MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
    for (;;) {
      if ((child = (root << 1) + 1) >= end) break;
      child +=
          (((child + 1) < end) &&
           mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[child], pIndices[child + 1]));
      if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets,
                                       pIndices[root], pIndices[child]))
        break;
      MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
      root = child;
    }
    end--;
  }
}

// Locates and validates the end-of-central-directory record, then reads the
// whole central directory into memory and builds the per-entry offset index
// (and optionally the filename-sorted index).
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
                                              mz_uint32 flags) {
  mz_uint cdir_size, num_this_disk, cdir_disk_index;
  mz_uint64 cdir_ofs;
  mz_int64 cur_file_ofs;
  const mz_uint8 *p;
  mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
  mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
  mz_bool sort_central_dir =
      ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
  // Basic sanity checks - reject files which are too small, and check the first
  // 4 bytes of the file to make sure a local header is there.
  if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  // Find the end of central directory record by scanning the file from the end
  // towards the beginning.
  cur_file_ofs =
      MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
  for (;;) {
    int i,
        n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
      return MZ_FALSE;
    for (i = n - 4; i >= 0; --i)
      if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) break;
    if (i >= 0) {
      cur_file_ofs += i;
      break;
    }
    // Give up past the maximum possible EOCD distance (64K comment + header).
    if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
                            (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
      return MZ_FALSE;
    // Overlap windows by 3 bytes so a signature spanning buffers isn't missed.
    cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
  }
  // Read and verify the end of central directory record.
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                    MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  // Signature must match and the on-disk entry count must equal the total
  // (spanned/multi-disk archives are rejected below).
  if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
       MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
      ((pZip->m_total_files =
            MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
       MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
    return MZ_FALSE;

  num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
  cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
  if (((num_this_disk | cdir_disk_index) != 0) &&
      ((num_this_disk != 1) || (cdir_disk_index != 1)))
    return MZ_FALSE;

  // The central dir must be large enough to hold all declared entries and lie
  // entirely within the archive.
  if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
      pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;

  cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
  if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size)
    return MZ_FALSE;

  pZip->m_central_directory_file_ofs = cdir_ofs;

  if (pZip->m_total_files) {
    mz_uint i, n;
    // Read the entire central directory into a heap block, and allocate another
    // heap block to hold the unsorted central dir file record offsets, and
    // another to hold the sorted indices.
    if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
                              MZ_FALSE)) ||
        (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
                              pZip->m_total_files, MZ_FALSE)))
      return MZ_FALSE;

    if (sort_central_dir) {
      if (!mz_zip_array_resize(pZip,
                               &pZip->m_pState->m_sorted_central_dir_offsets,
                               pZip->m_total_files, MZ_FALSE))
        return MZ_FALSE;
    }

    if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
                      pZip->m_pState->m_central_dir.m_p,
                      cdir_size) != cdir_size)
      return MZ_FALSE;

    // Now create an index into the central directory file records, do some
    // basic sanity checking on each record, and check for zip64 entries (which
    // are not yet supported).
    p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
    for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
      mz_uint total_header_size, comp_size, decomp_size, disk_index;
      if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
          (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
        return MZ_FALSE;
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           i) =
          (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
      if (sort_central_dir)
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
                             mz_uint32, i) = i;
      comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
      decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
      // 0xFFFFFFFF sizes indicate zip64, which is rejected here.
      if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
           (decomp_size != comp_size)) ||
          (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
          (comp_size == 0xFFFFFFFF))
        return MZ_FALSE;
      disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
      if ((disk_index != num_this_disk) && (disk_index != 1)) return MZ_FALSE;
      if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
           MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
        return MZ_FALSE;
      if ((total_header_size =
               MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
               MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
               MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
               MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n)
        return MZ_FALSE;
      n -= total_header_size;
      p += total_header_size;
    }
  }

  if (sort_central_dir)
    mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);

  return MZ_TRUE;
}

// Initializes a reader over a caller-supplied m_pRead callback; `size` is the
// total archive size in bytes.
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags) {
  if ((!pZip) || (!pZip->m_pRead)) return MZ_FALSE;
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  pZip->m_archive_size = size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

// m_pRead implementation for in-memory archives: clamped memcpy from m_pMem.
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
                                   void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  size_t s = (file_ofs >= pZip->m_archive_size)
                 ? 0
                 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
  memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
  return s;
}

// Initializes a reader over an archive already resident in memory. The caller
// retains ownership of pMem, which must stay valid for the reader's lifetime.
mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem,
                               size_t size, mz_uint32 flags) {
  if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE;
  pZip->m_archive_size = size;
  pZip->m_pRead = mz_zip_mem_read_func;
  pZip->m_pIO_opaque = pZip;
#ifdef __cplusplus
  pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
  pZip->m_pState->m_pMem = (void *)pMem;
#endif
  pZip->m_pState->m_mem_size = size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// m_pRead implementation for file-backed archives; seeks only when the
// current position differs from the requested offset.
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs,
                                    void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if (((mz_int64)file_ofs < 0) ||
      (((cur_ofs != (mz_int64)file_ofs)) &&
       (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    return 0;
  return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}

// Opens pFilename and initializes a reader over it; the FILE* is owned by the
// reader and closed by mz_zip_reader_end().
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint32 flags) {
  mz_uint64 file_size;
  MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb");
  if (!pFile) return MZ_FALSE;
  if (MZ_FSEEK64(pFile, 0, SEEK_END)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  file_size = MZ_FTELL64(pFile);
  if (!mz_zip_reader_init_internal(pZip, flags)) {
    MZ_FCLOSE(pFile);
    return MZ_FALSE;
  }
  pZip->m_pRead = mz_zip_file_read_func;
  pZip->m_pIO_opaque = pZip;
  pZip->m_pState->m_pFile = pFile;
  pZip->m_archive_size = file_size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}
#endif  // #ifndef MINIZ_NO_STDIO

// Returns the number of entries in the archive (0 for a NULL archive).
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) {
  return pZip ? pZip->m_total_files : 0;
}

// Returns a pointer to file_index's central directory header record, or NULL
// if the index/archive state is invalid.
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
    mz_zip_archive *pZip, mz_uint file_index) {
  if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return NULL;
  return &MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           file_index));
}

// Reports whether the entry's general-purpose bit 0 (encryption) is set.
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index) {
  mz_uint m_bit_flag;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) return MZ_FALSE;
  m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  return (m_bit_flag & 1);
}

// Heuristically reports whether the entry is a directory: trailing '/' in the
// filename, or the DOS directory bit in the external attributes.
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index) {
  mz_uint filename_len, external_attr;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) return MZ_FALSE;

  // First see if the filename ends with a '/' character.
  filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_len) {
    if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
      return MZ_TRUE;
  }

  // Bugfix: This code was also checking if the internal attribute was non-zero,
  // which wasn't correct.
  // Most/all zip writers (hopefully) set DOS file/directory attributes in the
  // low 16-bits, so check for the DOS directory flag and ignore the source OS
  // ID in the created by field.
  // FIXME: Remove this check? Is it necessary - we already check the filename.
  external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  if ((external_attr & 0x10) != 0) return MZ_TRUE;

  return MZ_FALSE;
}

// Fills pStat with all metadata from the entry's central directory record
// (times, CRC, sizes, attributes, filename and comment).
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if ((!p) || (!pStat)) return MZ_FALSE;

  // Unpack the central directory record.
  pStat->m_file_index = file_index;
  pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
  pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
  pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
  pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
  pStat->m_time =
      mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
                           MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
  pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
  pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
  pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);

  // Copy as much of the filename and comment as possible.
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
  memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
  pStat->m_filename[n] = '\0';

  n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
  pStat->m_comment_size = n;
  // The comment follows the filename and the extra field in the record.
  memcpy(pStat->m_comment,
         p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
             MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
             MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
         n);
  pStat->m_comment[n] = '\0';

  return MZ_TRUE;
}

// Copies the entry's filename into pFilename (truncated and NUL-terminated to
// filename_buf_size); returns the full length needed including the NUL.
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename,
                                   mz_uint filename_buf_size) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) {
    if (filename_buf_size) pFilename[0] = '\0';
    return 0;
  }
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_buf_size) {
    n = MZ_MIN(n, filename_buf_size - 1);
    memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
    pFilename[n] = '\0';
  }
  return n + 1;
}

// Fixed-length string comparison; case-sensitive only when the
// MZ_ZIP_FLAG_CASE_SENSITIVE flag is set.
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
                                                         const char *pB,
                                                         mz_uint len,
                                                         mz_uint flags) {
  mz_uint i;
  if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE) return 0 == memcmp(pA, pB, len);
  for (i = 0; i < len; ++i)
    if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i])) return MZ_FALSE;
  return MZ_TRUE;
}

// Three-way, case-insensitive compare of central-dir entry l_index's filename
// against the external name pR/r_len; used by the binary search below.
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
    const mz_zip_array *pCentral_dir_array,
    const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
    mz_uint r_len) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break;
    pL++;
    pR++;
  }
  return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}

// Binary search over the filename-sorted offset index; returns the matching
// file index or -1.
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
                                                   const char *pFilename) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  const mz_uint filename_len = (mz_uint)strlen(pFilename);
  int l = 0, h = size - 1;
  while (l <= h) {
    int m = (l + h) >> 1, file_index = pIndices[m],
        comp = mz_zip_reader_filename_compare(pCentral_dir,
                                              pCentral_dir_offsets, file_index,
                                              pFilename, filename_len);
    if (!comp)
      return file_index;
    else if (comp < 0)
      l = m + 1;
    else
      h = m - 1;
  }
  return -1;
}

// Finds an entry by name (and optional comment). Uses the sorted index when
// a plain case-insensitive, path-sensitive lookup is possible; otherwise
// falls back to a linear scan honoring the flags.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags) {
  mz_uint file_index;
  size_t name_len, comment_len;
  if ((!pZip) || (!pZip->m_pState) || (!pName) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return -1;
  if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) ==
       0) &&
      (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
    return mz_zip_reader_locate_file_binary_search(pZip, pName);
  name_len = strlen(pName);
  if (name_len > 0xFFFF) return -1;
  comment_len = pComment ?
                    strlen(pComment) : 0;
  if (comment_len > 0xFFFF) return -1;
  // Linear scan over every central directory record.
  for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
    const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
        &pZip->m_pState->m_central_dir, mz_uint8,
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                             file_index));
    mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    const char *pFilename =
        (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    if (filename_len < name_len) continue;
    if (comment_len) {
      mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
              file_comment_len =
                  MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
      const char *pFile_comment = pFilename + filename_len + file_extra_len;
      if ((file_comment_len != comment_len) ||
          (!mz_zip_reader_string_equal(pComment, pFile_comment,
                                       file_comment_len, flags)))
        continue;
    }
    // With IGNORE_PATH, compare only against the final path component
    // (text after the last '/', '\\' or ':').
    if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
      int ofs = filename_len - 1;
      do {
        if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
            (pFilename[ofs] == ':'))
          break;
      } while (--ofs >= 0);
      ofs++;
      pFilename += ofs;
      filename_len -= ofs;
    }
    if ((filename_len == name_len) &&
        (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
      return file_index;
  }
  return -1;
}

// Extracts entry file_index into the caller-supplied pBuf (no heap output
// allocation). Supports stored and deflate entries; verifies the CRC-32 of
// the decompressed data. pUser_read_buf optionally supplies the compressed
// read staging buffer for file-backed archives.
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size) {
  int status = TINFL_STATUS_DONE;
  mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0,
                                                       read_buf_size,
                                                       read_buf_ofs = 0,
                                                       read_buf_avail;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf;
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  tinfl_decompressor inflator;

  if ((buf_size) && (!pBuf)) return MZ_FALSE;

  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE;

  // Empty file, or a directory (but not always a directory - I've seen odd zips
  // with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size) return MZ_TRUE;

  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index)) return MZ_TRUE;

  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32)) return MZ_FALSE;

  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;

  // Ensure supplied output buffer is large enough.
  needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
                                                      : file_stat.m_uncomp_size;
  if (buf_size < needed_size) return MZ_FALSE;

  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;

  // Skip past the local header's filename and extra field to the file data.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;

  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                      (size_t)needed_size) != needed_size)
      return MZ_FALSE;
    return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
           (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                     (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
  }

  // Decompress the file either directly from memory or from a file input
  // buffer.
  tinfl_init(&inflator);

  if (pZip->m_pState->m_pMem) {
    // Read directly from the archive in memory.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else if (pUser_read_buf) {
    // Use a user provided read buffer.
    if (!user_read_buf_size) return MZ_FALSE;
    pRead_buf = (mz_uint8 *)pUser_read_buf;
    read_buf_size = user_read_buf_size;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  } else {
    // Temporarily allocate a read buffer.
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
#ifdef _MSC_VER
    if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
        (read_buf_size > 0x7FFFFFFF))
#else
    if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
      return MZ_FALSE;
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }

  do {
    size_t in_buf_size,
        out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
    if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
      // Refill the staging buffer from the archive.
      read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
      if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                        (size_t)read_buf_avail) != read_buf_avail) {
        status = TINFL_STATUS_FAILED;
        break;
      }
      cur_file_ofs += read_buf_avail;
      comp_remaining -= read_buf_avail;
      read_buf_ofs = 0;
    }
    in_buf_size = (size_t)read_buf_avail;
    status = tinfl_decompress(
        &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
        (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
        TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
            (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
    read_buf_avail -= in_buf_size;
    read_buf_ofs += in_buf_size;
    out_buf_ofs += out_buf_size;
  } while (status == TINFL_STATUS_NEEDS_MORE_INPUT);

  if (status == TINFL_STATUS_DONE) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                  (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }

  if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);

  return status == TINFL_STATUS_DONE;
}

// Name-based wrapper for extract_to_mem_no_alloc().
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0) return MZ_FALSE;
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf,
                                               buf_size, flags, pUser_read_buf,
                                               user_read_buf_size);
}

// Extracts by index into a caller buffer (internal staging buffer allocated
// as needed).
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags) {
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf,
                                               buf_size, flags, NULL, 0);
}

// Extracts by name into a caller buffer.
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags) {
  return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
                                                    buf_size, flags, NULL, 0);
}

// Extracts entry file_index into a newly allocated heap buffer (caller frees
// via the archive's m_pFree); *pSize receives the extracted size.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags) {
  mz_uint64 comp_size, uncomp_size, alloc_size;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  void *pBuf;

  if (pSize) *pSize = 0;
  if (!p) return NULL;

  comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);

  alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ?
comp_size : uncomp_size; #ifdef _MSC_VER if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #else if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF)) #endif return NULL; if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size))) return NULL; if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return NULL; } if (pSize) *pSize = (size_t)alloc_size; return pBuf; } void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags); if (file_index < 0) { if (pSize) *pSize = 0; return MZ_FALSE; } return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags); } mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags) { int status = TINFL_STATUS_DONE; mz_uint file_crc32 = MZ_CRC32_INIT; mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, out_buf_ofs = 0, cur_file_ofs; mz_zip_archive_file_stat file_stat; void *pRead_buf = NULL; void *pWrite_buf = NULL; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat)) return MZ_FALSE; // Empty file, or a directory (but not always a directory - I've seen odd zips // with directories that have compressed data which inflates to 0 bytes) if (!file_stat.m_comp_size) return MZ_TRUE; // Entry is a subdirectory (I've seen old zips with dir entries which have // compressed deflate data which inflates to 0 bytes, but these entries claim // to uncompress to 512 bytes in the headers). // I'm torn how to handle this case - should it fail instead? 
  if (mz_zip_reader_is_file_a_directory(pZip, file_index))
    return MZ_TRUE;

  // Encryption and patch files are not supported.
  // (general-purpose bit 0 = encrypted, bit 5 = patch data)
  if (file_stat.m_bit_flag & (1 | 32))
    return MZ_FALSE;

  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;

  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;

  // Skip past the local header plus its variable-length filename/extra
  // fields to reach the entry's compressed data.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;

  // Decompress the file either directly from memory or from a file input
  // buffer.
  if (pZip->m_pState->m_pMem) {
    // Archive is fully in memory: read the compressed data in place.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else {
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }

  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pState->m_pMem) {
      // In-memory archive: hand the whole block to the callback in one call.
#ifdef _MSC_VER
      if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#else
      if (((sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#endif
        return MZ_FALSE;
      if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                    (size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
        status = TINFL_STATUS_FAILED;
      else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
        file_crc32 =
            (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
                                (size_t)file_stat.m_comp_size);
      cur_file_ofs += file_stat.m_comp_size;
      out_buf_ofs += file_stat.m_comp_size;
      comp_remaining = 0;
    } else {
      // File-backed archive: stream it out chunk by chunk.
      while (comp_remaining) {
        read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                          (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
          file_crc32 = (mz_uint32)mz_crc32(
              file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
        if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                      (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        cur_file_ofs += read_buf_avail;
        out_buf_ofs += read_buf_avail;
        comp_remaining -= read_buf_avail;
      }
    }
  } else {
    // Deflate path: inflate through a sliding-dictionary-sized write buffer,
    // flushing each filled window region to the callback.
    tinfl_decompressor inflator;
    tinfl_init(&inflator);

    if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                             TINFL_LZ_DICT_SIZE)))
      status = TINFL_STATUS_FAILED;
    else {
      do {
        // Current write position wraps within the dictionary-sized buffer.
        mz_uint8 *pWrite_buf_cur =
            (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        size_t in_buf_size,
            out_buf_size =
                TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
          // Refill the input buffer from the archive file.
          read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
          if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                            (size_t)read_buf_avail) != read_buf_avail) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          cur_file_ofs += read_buf_avail;
          comp_remaining -= read_buf_avail;
          read_buf_ofs = 0;
        }
        in_buf_size = (size_t)read_buf_avail;
        status = tinfl_decompress(
            &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs,
            &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur,
            &out_buf_size, comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
        read_buf_avail -= in_buf_size;
        read_buf_ofs += in_buf_size;
        if (out_buf_size) {
          if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
              out_buf_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          file_crc32 =
              (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
          // Guard against streams that inflate past the declared size.
          if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
        }
      } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
               (status == TINFL_STATUS_HAS_MORE_OUTPUT));
    }
  }

  if ((status == TINFL_STATUS_DONE) &&
      (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (file_crc32 != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }

  // pRead_buf was only heap-allocated for file-backed archives.
  if (!pZip->m_pState->m_pMem)
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  if (pWrite_buf)
    pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);

  return status == TINFL_STATUS_DONE;
}

// Extract an entry (by filename), streaming the data through pCallback.
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback,
                                           pOpaque, flags);
}

#ifndef MINIZ_NO_STDIO
// mz_file_write_func adapter that appends to a stdio stream; the offset is
// ignored because extraction writes sequentially.
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
                                         const void *pBuf, size_t n) {
  (void)ofs;
  return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}

// Extract an entry (by index) to a newly created disk file.
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags) {
  mz_bool status;
  mz_zip_archive_file_stat file_stat;
  MZ_FILE *pFile;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  pFile = MZ_FOPEN(pDst_filename,
                   "wb");
  if (!pFile)
    return MZ_FALSE;
  status = mz_zip_reader_extract_to_callback(
      pZip, file_index, mz_zip_file_write_callback, pFile, flags);
  if (MZ_FCLOSE(pFile) == EOF)
    return MZ_FALSE;
#ifndef MINIZ_NO_TIME
  // Preserve the archived modification time on the extracted file.
  if (status)
    mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif
  return status;
}
#endif  // #ifndef MINIZ_NO_STDIO

// Tear down a reader: free the central directory arrays, close any backing
// stdio file, release the internal state, and mark the archive invalid.
mz_bool mz_zip_reader_end(mz_zip_archive *pZip) {
  if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  if (pZip->m_pState) {
    mz_zip_internal_state *pState = pZip->m_pState;
    pZip->m_pState = NULL;
    mz_zip_array_clear(pZip, &pState->m_central_dir);
    mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
    mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);
#ifndef MINIZ_NO_STDIO
    if (pState->m_pFile) {
      MZ_FCLOSE(pState->m_pFile);
      pState->m_pFile = NULL;
    }
#endif  // #ifndef MINIZ_NO_STDIO
    pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
  }
  pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// Extract an entry (by archive-internal filename) to a disk file.
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip,
                                           const char *pArchive_filename,
                                           const char *pDst_filename,
                                           mz_uint flags) {
  int file_index =
      mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}
#endif

// ------------------- .ZIP archive writing

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

// Little-endian serializers used when emitting ZIP headers.
static void mz_write_le16(mz_uint8 *p, mz_uint16 v) {
  p[0] = (mz_uint8)v;
  p[1] = (mz_uint8)(v >> 8);
}
static void mz_write_le32(mz_uint8 *p, mz_uint32 v) {
  p[0] = (mz_uint8)v;
  p[1] = (mz_uint8)(v >> 8);
  p[2] = (mz_uint8)(v >> 16);
  p[3] = (mz_uint8)(v >> 24);
}
#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))

// Initialize pZip for writing through its m_pWrite callback. existing_size
// is the number of bytes already present before the archive data (the
// archive starts at that offset). Installs default allocators if unset.
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) {
  if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;

  if (pZip->m_file_offset_alignment) {
    // Ensure user specified file offset alignment is a power of 2.
    if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
      return MZ_FALSE;
  }

  if (!pZip->m_pAlloc)
    pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree)
    pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc)
    pZip->m_pRealloc = def_realloc_func;

  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_archive_size = existing_size;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;

  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}

// m_pWrite implementation that appends into a growable heap block
// (pState->m_pMem), doubling capacity as needed.
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
#ifdef _MSC_VER
  if ((!n) ||
      ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
  if ((!n) ||
      ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
    return 0;
  if (new_size > pState->m_mem_capacity) {
    void *pNew_block;
    // Geometric growth keeps amortized reallocation cost low.
    size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
    while (new_capacity < new_size)
      new_capacity *= 2;
    if (NULL == (pNew_block = pZip->m_pRealloc(
                     pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
      return 0;
    pState->m_pMem = pNew_block;
    pState->m_mem_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
  pState->m_mem_size = (size_t)new_size;
  return n;
}

// Initialize a writer that builds the archive in a heap-allocated block,
// optionally reserving space at the start and pre-sizing the allocation.
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size) {
  pZip->m_pWrite = mz_zip_heap_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
                                             size_to_reserve_at_beginning))) {
    if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
      mz_zip_writer_end(pZip);
      return MZ_FALSE;
    }
    pZip->m_pState->m_mem_capacity = initial_allocation_size;
  }
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// m_pWrite implementation backed by a stdio file; seeks only when the
// requested offset differs from the current position.
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if (((mz_int64)file_ofs < 0) ||
      (((cur_ofs != (mz_int64)file_ofs)) &&
       (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    return 0;
  return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}

// Initialize a writer targeting a new disk file; optionally writes
// size_to_reserve_at_beginning zero bytes before the archive data.
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning) {
  MZ_FILE *pFile;
  pZip->m_pWrite = mz_zip_file_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
    mz_zip_writer_end(pZip);
    return MZ_FALSE;
  }
  pZip->m_pState->m_pFile = pFile;
  if (size_to_reserve_at_beginning) {
    // Fill the reserved region with zeros, 4 KB at a time.
    mz_uint64 cur_ofs = 0;
    char buf[4096];
    MZ_CLEAR_OBJ(buf);
    do {
      size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
      if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
        mz_zip_writer_end(pZip);
        return MZ_FALSE;
      }
      cur_ofs += n;
      size_to_reserve_at_beginning -= n;
    } while (size_to_reserve_at_beginning);
  }
  return MZ_TRUE;
}
#endif  // #ifndef MINIZ_NO_STDIO

// Convert an archive opened for reading into one that can be appended to.
// New entries are written starting at the old central directory's offset.
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the support max
  // size
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;

  pState = pZip->m_pState;

  if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    // NOTE(review): bare expression statement referencing pFilename —
    // presumably to silence an unused-parameter warning; (void)pFilename
    // would state the intent more clearly.
    pFilename;
    return MZ_FALSE;
#else
    // Archive is being read from stdio - try to reopen as writable.
    if (pZip->m_pIO_opaque != pZip)
      return MZ_FALSE;
    if (!pFilename)
      return MZ_FALSE;
    pZip->m_pWrite = mz_zip_file_write_func;
    if (NULL ==
        (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
      // The mz_zip_archive is now in a bogus state because pState->m_pFile is
      // NULL, so just close it.
      mz_zip_reader_end(pZip);
      return MZ_FALSE;
    }
#endif  // #ifdef MINIZ_NO_STDIO
  } else if (pState->m_pMem) {
    // Archive lives in a memory block. Assume it's from the heap that we can
    // resize using the realloc callback.
    if (pZip->m_pIO_opaque != pZip)
      return MZ_FALSE;
    pState->m_mem_capacity = pState->m_mem_size;
    pZip->m_pWrite = mz_zip_heap_write_func;
  }
  // Archive is being read via a user provided read function - make sure the
  // user has specified a write function too.
  else if (!pZip->m_pWrite)
    return MZ_FALSE;

  // Start writing new files at the archive's current central directory
  // location.
  pZip->m_archive_size = pZip->m_central_directory_file_ofs;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_central_directory_file_ofs = 0;
  return MZ_TRUE;
}

// Add a memory buffer as a new entry (no comment, default time/attrs).
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags) {
  return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
                                  level_and_flags, 0, 0);
}

// State threaded through tdefl's put-buf callback while compressing an
// entry straight into the archive.
typedef struct {
  mz_zip_archive *m_pZip;
  mz_uint64 m_cur_archive_file_ofs;  // next write offset in the archive
  mz_uint64 m_comp_size;             // compressed bytes emitted so far
} mz_zip_writer_add_state;

// tdefl output callback: append a compressed chunk to the archive via the
// writer's m_pWrite, tracking offset and compressed size.
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
                                                  void *pUser) {
  mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
  if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
                                    pState->m_cur_archive_file_ofs, pBuf,
                                    len) != len)
    return MZ_FALSE;
  pState->m_cur_archive_file_ofs += len;
  pState->m_comp_size += len;
  return MZ_TRUE;
}

// Serialize a ZIP local directory header into pDst
// (MZ_ZIP_LOCAL_DIR_HEADER_SIZE bytes).
static mz_bool mz_zip_writer_create_local_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
  // "version needed to extract": 2.0 for deflate, 0 for stored.
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ?
                                                                 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
  return MZ_TRUE;
}

// Serialize a ZIP central directory file header into pDst
// (MZ_ZIP_CENTRAL_DIR_HEADER_SIZE bytes).
static mz_bool mz_zip_writer_create_central_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
    mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
    mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
    mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
  // "version needed to extract": 2.0 for deflate, 0 for stored.
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs);
  return MZ_TRUE;
}

// Append a new entry's central directory record (header + filename + extra
// + comment) and its offset to the writer's in-memory central directory.
static mz_bool mz_zip_writer_add_to_central_dir(
    mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
    const void *pExtra, mz_uint16 extra_size, const void *pComment,
    mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs,
    mz_uint32 ext_attributes) {
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
  // Remember the size so the array can be rolled back on partial failure.
  size_t orig_central_dir_size = pState->m_central_dir.m_size;
  mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];

  // No zip64 support yet
  if ((local_header_ofs > 0xFFFFFFFF) ||
      (((mz_uint64)pState->m_central_dir.m_size +
        MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size +
        comment_size) > 0xFFFFFFFF))
    return MZ_FALSE;

  if (!mz_zip_writer_create_central_dir_header(
          pZip, central_dir_header, filename_size, extra_size, comment_size,
          uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time,
          dos_date, local_header_ofs, ext_attributes))
    return MZ_FALSE;

  if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir,
                               central_dir_header,
                               MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename, filename_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra, extra_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment, comment_size)) || (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &central_dir_ofs, 1))) { // Try to push the central directory array back into its original state. mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } return MZ_TRUE; } static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) { // Basic ZIP archive filename validity checks: Valid filenames cannot start // with a forward slash, cannot contain a drive letter, and cannot use // DOS-style backward slashes. if (*pArchive_name == '/') return MZ_FALSE; while (*pArchive_name) { if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE; pArchive_name++; } return MZ_TRUE; } static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment( mz_zip_archive *pZip) { mz_uint32 n; if (!pZip->m_file_offset_alignment) return 0; n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1)); return (pZip->m_file_offset_alignment - n) & (pZip->m_file_offset_alignment - 1); } static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip, mz_uint64 cur_file_ofs, mz_uint32 n) { char buf[4096]; memset(buf, 0, MZ_MIN(sizeof(buf), n)); while (n) { mz_uint32 s = MZ_MIN(sizeof(buf), n); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s) return MZ_FALSE; cur_file_ofs += s; n -= s; } return MZ_TRUE; } mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32) { mz_uint16 method = 0, dos_time = 0, dos_date = 0; mz_uint level, ext_attributes = 0, num_alignment_padding_bytes; mz_uint64 
      local_dir_header_ofs = pZip->m_archive_size,
      cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  tdefl_compressor *pComp = NULL;
  mz_bool store_data_uncompressed;
  mz_zip_internal_state *pState;

  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;
  // Store (no deflate) when level is 0 or the caller supplies
  // already-compressed data.
  store_data_uncompressed =
      ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));

  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) ||
      (!pArchive_name) || ((comment_size) && (!pComment)) ||
      (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;

  pState = pZip->m_pState;

  // uncomp_size is only meaningful with pre-compressed input.
  if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
    return MZ_FALSE;
  // No zip64 support yet
  if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF))
    return MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return MZ_FALSE;

#ifndef MINIZ_NO_TIME
  {
    // Stamp the entry with the current wall-clock time in DOS format.
    time_t cur_time;
    time(&cur_time);
    mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date);
  }
#endif  // #ifndef MINIZ_NO_TIME

  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF)
    return MZ_FALSE;

  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);

  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;

  if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) {
    // Set DOS Subdirectory attribute bit.
    ext_attributes |= 0x10;
    // Subdirectories cannot contain data.
    if ((buf_size) || (uncomp_size))
      return MZ_FALSE;
  }

  // Try to do any allocations before writing to the archive, so if an
  // allocation fails the file remains unmodified. (A good idea if we're doing
  // an in-place modification.)
  if ((!mz_zip_array_ensure_room(
          pZip, &pState->m_central_dir,
          MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) ||
      (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
    return MZ_FALSE;

  if ((!store_data_uncompressed) && (buf_size)) {
    if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
      return MZ_FALSE;
  }

  // Reserve space for alignment padding plus the local header; the header
  // itself is written after the data, once comp_size/crc are known.
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }

  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);

  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;

  if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) {
    uncomp_crc32 =
        (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
    uncomp_size = buf_size;
    if (uncomp_size <= 3) {
      // Tiny payloads can't benefit from deflate; store them.
      level = 0;
      store_data_uncompressed = MZ_TRUE;
    }
  }

  if (store_data_uncompressed) {
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf,
                       buf_size) != buf_size) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    cur_archive_file_ofs += buf_size;
    comp_size = buf_size;
    // Caller-supplied pre-compressed data is deflate by convention here.
    if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
      method = MZ_DEFLATED;
  } else if (buf_size) {
    // Deflate the buffer straight into the archive via the put-buf callback.
    mz_zip_writer_add_state state;
    state.m_pZip = pZip;
    state.m_cur_archive_file_ofs = cur_archive_file_ofs;
    state.m_comp_size = 0;
    if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                    tdefl_create_comp_flags_from_zip_params(
                        level, -15, MZ_DEFAULT_STRATEGY)) !=
         TDEFL_STATUS_OKAY) ||
        (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) !=
         TDEFL_STATUS_DONE)) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
      return MZ_FALSE;
    }
    comp_size = state.m_comp_size;
    cur_archive_file_ofs = state.m_cur_archive_file_ofs;
    method = MZ_DEFLATED;
  }

  pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
  pComp = NULL;

  // no zip64 support yet
  if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF))
    return MZ_FALSE;

  // Now that comp_size/crc are known, back-fill the local header.
  if (!mz_zip_writer_create_local_dir_header(
          pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size,
          comp_size, uncomp_crc32, method, 0, dos_time, dos_date))
    return MZ_FALSE;

  if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs,
                     local_dir_header,
                     sizeof(local_dir_header)) != sizeof(local_dir_header))
    return MZ_FALSE;

  if (!mz_zip_writer_add_to_central_dir(
          pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment,
          comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0,
          dos_time, dos_date, local_dir_header_ofs, ext_attributes))
    return MZ_FALSE;

  pZip->m_total_files++;
  pZip->m_archive_size = cur_archive_file_ofs;

  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
// Add a disk file as a new entry, streaming and (optionally) deflating it;
// preserves the source file's modification time in the entry.
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name,
                               const char *pSrc_filename, const void *pComment,
                               mz_uint16 comment_size,
                               mz_uint level_and_flags) {
  mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
  mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
  mz_uint64 local_dir_header_ofs = pZip->m_archive_size,
            cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0,
            comp_size = 0;
  size_t archive_name_size;
  mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
  MZ_FILE *pSrc_file = NULL;

  if ((int)level_and_flags < 0)
    level_and_flags = MZ_DEFAULT_LEVEL;
  level = level_and_flags & 0xF;

  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) ||
      ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
    return MZ_FALSE;
  // Pre-compressed input makes no sense when reading from a source file.
  if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
    return
        MZ_FALSE;
  if (!mz_zip_writer_validate_archive_name(pArchive_name))
    return MZ_FALSE;

  archive_name_size = strlen(pArchive_name);
  if (archive_name_size > 0xFFFF)
    return MZ_FALSE;

  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);

  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        comment_size + archive_name_size) > 0xFFFFFFFF))
    return MZ_FALSE;

  // Use the source file's mtime for the entry's DOS timestamp.
  if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date))
    return MZ_FALSE;

  pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
  if (!pSrc_file)
    return MZ_FALSE;
  // Determine the source size by seeking to the end.
  MZ_FSEEK64(pSrc_file, 0, SEEK_END);
  uncomp_size = MZ_FTELL64(pSrc_file);
  MZ_FSEEK64(pSrc_file, 0, SEEK_SET);

  if (uncomp_size > 0xFFFFFFFF) {
    // No zip64 support yet
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  if (uncomp_size <= 3)
    level = 0;  // too small to compress usefully; store it

  // Reserve space for padding + local header; header is back-filled later.
  if (!mz_zip_writer_write_zeros(
          pZip, cur_archive_file_ofs,
          num_alignment_padding_bytes + sizeof(local_dir_header))) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  local_dir_header_ofs += num_alignment_padding_bytes;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }
  cur_archive_file_ofs +=
      num_alignment_padding_bytes + sizeof(local_dir_header);

  MZ_CLEAR_OBJ(local_dir_header);
  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name,
                     archive_name_size) != archive_name_size) {
    MZ_FCLOSE(pSrc_file);
    return MZ_FALSE;
  }
  cur_archive_file_ofs += archive_name_size;

  if (uncomp_size) {
    mz_uint64 uncomp_remaining = uncomp_size;
    void *pRead_buf =
        pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
    if (!pRead_buf) {
      MZ_FCLOSE(pSrc_file);
      return MZ_FALSE;
    }

    if (!level) {
      // Store: copy the file through in chunks, CRC'ing as we go.
      while (uncomp_remaining) {
        mz_uint n =
            (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
        if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) ||
            (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs,
                            pRead_buf, n) != n)) {
          pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
          MZ_FCLOSE(pSrc_file);
          return MZ_FALSE;
        }
        uncomp_crc32 =
            (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
        uncomp_remaining -= n;
        cur_archive_file_ofs += n;
      }
      comp_size = uncomp_size;
    } else {
      // Deflate: feed chunks through tdefl into the archive.
      mz_bool result = MZ_FALSE;
      mz_zip_writer_add_state state;
      tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(
          pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
      if (!pComp) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }

      state.m_pZip = pZip;
      state.m_cur_archive_file_ofs = cur_archive_file_ofs;
      state.m_comp_size = 0;

      if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state,
                     tdefl_create_comp_flags_from_zip_params(
                         level, -15, MZ_DEFAULT_STRATEGY)) !=
          TDEFL_STATUS_OKAY) {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
        MZ_FCLOSE(pSrc_file);
        return MZ_FALSE;
      }

      for (;;) {
        size_t in_buf_size = (mz_uint32)MZ_MIN(
            uncomp_remaining, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
        tdefl_status status;

        if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
          break;

        uncomp_crc32 = (mz_uint32)mz_crc32(
            uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
        uncomp_remaining -= in_buf_size;

        // Only FINISH on the last chunk so the stream is terminated once.
        status = tdefl_compress_buffer(pComp, pRead_buf, in_buf_size,
                                       uncomp_remaining ?
      /* Continuation of mz_zip_writer_add_from_zip_reader(): the signature
         and declarations precede this chunk. */
      (NULL ==
       (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index)))
    return MZ_FALSE;
  pState = pZip->m_pState;

  num_alignment_padding_bytes =
      mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);

  // no zip64 support yet
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + num_alignment_padding_bytes +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) >
       0xFFFFFFFF))
    return MZ_FALSE;

  cur_src_file_ofs =
      MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  cur_dst_file_ofs = pZip->m_archive_size;

  // Copy the source entry's local header verbatim.
  if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
                           pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;

  if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs,
                                 num_alignment_padding_bytes))
    return MZ_FALSE;
  cur_dst_file_ofs += num_alignment_padding_bytes;
  local_dir_header_ofs = cur_dst_file_ofs;
  if (pZip->m_file_offset_alignment) {
    MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) ==
              0);
  }

  if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header,
                     MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;

  // Total bytes to copy: filename + extra fields + the compressed data.
  n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  comp_bytes_remaining =
      n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);

  // Buffer must also be large enough for a possible data descriptor below.
  if (NULL ==
      (pBuf = pZip->m_pAlloc(
           pZip->m_pAlloc_opaque, 1,
           (size_t)MZ_MAX(sizeof(mz_uint32) * 4,
                          MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
                                 comp_bytes_remaining)))))
    return MZ_FALSE;

  // Raw copy of the entry's data, chunk by chunk (no recompression).
  while (comp_bytes_remaining) {
    n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE,
                        comp_bytes_remaining);
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
                             pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_src_file_ofs += n;

    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }
    cur_dst_file_ofs += n;
    comp_bytes_remaining -= n;
  }

  bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
  if (bit_flags & 8) {
    // Copy data descriptor
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs,
                             pBuf, sizeof(mz_uint32) * 4) !=
        sizeof(mz_uint32) * 4) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }

    // The descriptor's leading 0x08074b50 signature is optional; copy 4
    // dwords if present, otherwise 3.
    n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) {
      pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
      return MZ_FALSE;
    }

    cur_src_file_ofs += n;
    cur_dst_file_ofs += n;
  }
  pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);

  // no zip64 support yet
  if (cur_dst_file_ofs > 0xFFFFFFFF)
    return MZ_FALSE;

  // Clone the source central directory record, fixing up only the local
  // header offset for the new archive.
  orig_central_dir_size = pState->m_central_dir.m_size;

  memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS,
                local_dir_header_ofs);
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header,
                              MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
    return MZ_FALSE;

  n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
      MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  if (!mz_zip_array_push_back(
          pZip, &pState->m_central_dir,
          pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) {
    // Roll back the partially appended record.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }

  if (pState->m_central_dir.m_size > 0xFFFFFFFF)
    return MZ_FALSE;
  n = (mz_uint32)orig_central_dir_size;
  if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) {
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }

  pZip->m_total_files++;
  pZip->m_archive_size = cur_dst_file_ofs;

  return MZ_TRUE;
}

// Write the central directory followed by the end-of-central-directory
// record, flush any stdio backing file, and mark the archive finalized.
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) {
  mz_zip_internal_state *pState;
  mz_uint64 central_dir_ofs, central_dir_size;
  mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE];

  if ((!pZip) || (!pZip->m_pState) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
    return MZ_FALSE;

  pState = pZip->m_pState;

  // no zip64 support yet
  if ((pZip->m_total_files > 0xFFFF) ||
      ((pZip->m_archive_size + pState->m_central_dir.m_size +
        MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;

  central_dir_ofs = 0;
  central_dir_size = 0;
  if (pZip->m_total_files) {
    // Write central directory
    central_dir_ofs = pZip->m_archive_size;
    central_dir_size = pState->m_central_dir.m_size;
    pZip->m_central_directory_file_ofs = central_dir_ofs;
    if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs,
                       pState->m_central_dir.m_p,
                       (size_t)central_dir_size) != central_dir_size)
      return MZ_FALSE;
    pZip->m_archive_size += central_dir_size;
  }

  // Write end of central directory record
  MZ_CLEAR_OBJ(hdr);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS,
                MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS,
                pZip->m_total_files);
  MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size);
  MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs);

  if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr,
                     sizeof(hdr)) != sizeof(hdr))
    return MZ_FALSE;
#ifndef MINIZ_NO_STDIO
  if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF))
    return MZ_FALSE;
#endif  // #ifndef MINIZ_NO_STDIO

  pZip->m_archive_size += sizeof(hdr);

  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
  return MZ_TRUE;
}

// Finalize a heap-backed archive and hand ownership of the memory block
// (and its size) to the caller. (Continues past this chunk.)
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf,
                                            size_t *pSize) {
  if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize))
    return MZ_FALSE;
  if (pZip->m_pWrite != mz_zip_heap_write_func)
    return MZ_FALSE;
  if
(!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *pBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) return MZ_FALSE; pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; MZ_CLEAR_OBJ(zip_archive); if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { // Create a new archive. 
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) return MZ_FALSE; created_new_archive = MZ_TRUE; } else { // Append to an existing archive. if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return MZ_FALSE; if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) { mz_zip_reader_end(&zip_archive); return MZ_FALSE; } } status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0); // Always finalize, even if adding failed for some reason, so we have a valid // central directory. (This may not always succeed, but we can try.) if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE; if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE; if ((!status) && (created_new_archive)) { // It's a new archive and something went wrong, so just delete it. int ignoredStatus = MZ_DELETE_FILE(pZip_filename); (void)ignoredStatus; } return status; } void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags) { int file_index; mz_zip_archive zip_archive; void *p = NULL; if (pSize) *pSize = 0; if ((!pZip_filename) || (!pArchive_name)) return NULL; MZ_CLEAR_OBJ(zip_archive); if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return NULL; if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags)) >= 0) p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags); mz_zip_reader_end(&zip_archive); return p; } #endif // #ifndef MINIZ_NO_STDIO #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef __cplusplus } #endif #endif // MINIZ_HEADER_FILE_ONLY /* This is free and unencumbered software released into the public domain. 
Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. For more information, please refer to <http://unlicense.org/> */ // ---------------------- end of miniz ---------------------------------------- #ifdef __clang__ #pragma clang diagnostic pop #endif #ifdef _MSC_VER #pragma warning(pop) #endif } #else // Reuse MINIZ_LITTE_ENDIAN macro #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. 
#define MINIZ_LITTLE_ENDIAN 1 #endif #endif #endif // TINYEXR_USE_MINIZ // static bool IsBigEndian(void) { // union { // unsigned int i; // char c[4]; // } bint = {0x01020304}; // // return bint.c[0] == 1; //} static const int kEXRVersionSize = 8; static void swap2(unsigned short *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[1]; dst[1] = src[0]; #endif } static void swap4(unsigned int *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } static void swap8(tinyexr::tinyexr_uint64 *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else tinyexr::tinyexr_uint64 tmp = (*val); unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; #endif } // https://gist.github.com/rygorous/2156668 // Reuse MINIZ_LITTLE_ENDIAN flag from miniz. 
// Bit-level view of an IEEE-754 single-precision float; used by the
// half<->float converters below (from https://gist.github.com/rygorous/2156668).
union FP32 {
  unsigned int u;
  float f;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif

// Bit-level view of an IEEE-754 binary16 ("half") value.
union FP16 {
  unsigned short u;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// Converts binary16 -> binary32, handling Inf/NaN and denormals.
static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00
                                          << 13;  // exponent mask after shift
  FP32 o;

  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust

  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize
  }

  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}

// Converts binary32 -> binary16 with round-to-nearest, saturating overflow
// to infinity and flushing deep underflow to signed zero.
static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};

  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else                                      // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;                    // Round, might overflow to inf, this is OK
    }
  }

  o.s.Sign = f.s.Sign;
  return o;
}

// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y 0
// #define IMF_DECREASING_Y 1
// #define IMF_RAMDOM_Y 2
//
// #define IMF_NO_COMPRESSION 0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION 2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION 7

#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif

// Reads a NUL-terminated string starting at ptr, scanning at most len bytes.
// On success stores the string in *s and returns the pointer just past the
// terminator; returns NULL (and clears *s) when no terminator is found.
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read untile NULL(\0).
  const char *p = ptr;
  const char *q = ptr;
  while ((size_t(q - ptr) < len) && (*q) != 0) {
    q++;
  }

  if (size_t(q - ptr) >= len) {
    (*s) = std::string();
    return NULL;
  }

  (*s) = std::string(p, q);

  return q + 1;  // skip '\0'
}

// Parses one EXR header attribute ("name\0type\0<uint32 len><data>") from
// marker, reading at most size bytes. Fills name/type/data and the total
// bytes consumed (*marker_size). Returns false on truncated, oversized, or
// zero-length payloads.
static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data, size_t *marker_size,
                          const char *marker, size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);

  marker += name_len + 1;
  size -= name_len + 1;

  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    return false;
  }
  *type = std::string(marker, type_len);

  marker += type_len + 1;
  size -= type_len + 1;

  if (size < sizeof(uint32_t)) {
    return false;
  }

  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

  if (data_len == 0) {
    return false;
  }

  marker += sizeof(uint32_t);
  size -= sizeof(uint32_t);

  if (size < data_len) {
    return false;
  }

  data->resize(static_cast<size_t>(data_len));
  memcpy(&data->at(0), marker, static_cast<size_t>(data_len));

  *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  return true;
}

// Serializes one attribute in EXR layout: name\0 type\0 int(len) data.
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  out->insert(out->end(), name, name + strlen(name) + 1);
  out->insert(out->end(), type, type + strlen(type) + 1);

  int outLen = len;
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen));
  out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
              reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
  out->insert(out->end(), data, data + len);
}

// One entry of the EXR "chlist" attribute.
typedef struct {
  std::string name;  // less than 255 bytes long
  int pixel_type;
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} ChannelInfo;

// In-memory form of a parsed EXR header; clear() resets every field to the
// zero/empty state before (re)parsing.
typedef struct HeaderInfo {
  std::vector<tinyexr::ChannelInfo> channels;
  std::vector<EXRAttribute> attributes;

  int data_window[4];
  int line_order;
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;
  float pixel_aspect_ratio;

  int chunk_count;

  // Tiled format
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  unsigned int header_len;

  int compression_type;

  void clear() {
    channels.clear();
    attributes.clear();

    data_window[0] = 0;
    data_window[1] = 0;
    data_window[2] = 0;
    data_window[3] = 0;
    line_order = 0;
    display_window[0] = 0;
    display_window[1] = 0;
    display_window[2] = 0;
    display_window[3] = 0;
    screen_window_center[0] = 0.0f;
    screen_window_center[1] = 0.0f;
    screen_window_width = 0.0f;
    pixel_aspect_ratio = 0.0f;

    chunk_count = 0;

    // Tiled format
    tile_size_x = 0;
    tile_size_y = 0;
    tile_level_mode = 0;
    tile_rounding_mode = 0;

    header_len = 0;
    compression_type = 0;
  }
} HeaderInfo;

// Parses the EXR "chlist" attribute payload into ChannelInfo records; the
// list is terminated by a single 0 byte.
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
                            const std::vector<unsigned char> &data) {
  const char *p = reinterpret_cast<const char *>(&data.at(0));

  for (;;) {
    if ((*p) == 0) {
      break;
    }
    ChannelInfo info;

    tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
                             (p - reinterpret_cast<const char *>(data.data()));
    if (data_len < 0) {
      return false;
    }

    p = ReadString(&info.name, p, size_t(data_len));
    if ((p == NULL) && (info.name.empty())) {
      // Buffer overrun. Issue #51.
      return false;
    }
    // NOTE(review): if ReadString returns NULL while info.name is non-empty,
    // the `&&` lets execution fall through to the memcpy below with p == NULL
    // — looks like a latent overrun/crash path; verify against upstream.

    memcpy(&info.pixel_type, p, sizeof(int));
    p += 4;
    info.p_linear = static_cast<unsigned char>(p[0]);  // uchar
    p += 1 + 3;                                        // reserved: uchar[3]
    memcpy(&info.x_sampling, p, sizeof(int));          // int
    p += 4;
    memcpy(&info.y_sampling, p, sizeof(int));  // int
    p += 4;

    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling));

    channels.push_back(info);
  }

  return true;
}

// Serializes ChannelInfo records into EXR "chlist" layout, closing the list
// with a trailing 0 byte. (Body continues on the following chunk lines.)
static void WriteChannelInfo(std::vector<unsigned char> &data,
                             const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;

  // Calculate total size.
  // --- continuation of WriteChannelInfo() (signature on the previous lines) ---
  for (size_t c = 0; c < channels.size(); c++) {
    sz += strlen(channels[c].name.c_str()) + 1;  // +1 for \0
    sz += 16;                                    // 4 * int
  }
  data.resize(sz + 1);

  unsigned char *p = &data.at(0);

  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
    p += strlen(channels[c].name.c_str());
    (*p) = '\0';
    p++;

    int pixel_type = channels[c].pixel_type;
    int x_sampling = channels[c].x_sampling;
    int y_sampling = channels[c].y_sampling;
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));

    memcpy(p, &pixel_type, sizeof(int));
    p += sizeof(int);

    (*p) = channels[c].p_linear;
    p += 4;  // 1 byte p_linear + 3 reserved bytes

    memcpy(p, &x_sampling, sizeof(int));
    p += sizeof(int);

    memcpy(p, &y_sampling, sizeof(int));
    p += sizeof(int);
  }

  (*p) = '\0';  // terminating 0 byte closes the channel list
}

// Deflate-compresses one EXR pixel block. First applies OpenEXR's
// preprocessing (split bytes into two interleaved halves, then delta-encode),
// then deflates; falls back to a raw copy when compression would not shrink
// the data (Issue 40), which DecompressZip detects by size equality.
static void CompressZip(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //

  miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
  int ret = miniz::mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == miniz::MZ_OK);
  (void)ret;

  compressedSize = outSize;
#else
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
                     src_size);
  assert(ret == Z_OK);
  (void)ret;

  compressedSize = outSize;
#endif

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

// Inverse of CompressZip: inflate, undo the delta predictor, then
// de-interleave the two halves back into original byte order.
// *uncompressed_size is in/out (expected size in, actual size out).
static bool DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  if ((*uncompressed_size) == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  std::vector<unsigned char> tmpBuf(*uncompressed_size);

#if TINYEXR_USE_MINIZ
  int ret =
      miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (miniz::MZ_OK != ret) {
    return false;
  }
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

// RLE code from OpenEXR --------------------------------------

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#endif

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204)  // nonstandard extension used : non-constant
                                 // aggregate initializer (also supported by GNU
                                 // C and C99, so no big deal)
#pragma warning(disable : 4244)  // 'initializing': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning( \
    disable : 4267)  // 'argument': conversion from '__int64' to 'int',
                     // possible loss of data
#pragma warning(disable : 4996)  // 'strdup': The POSIX name for this item is
                                 // deprecated. Instead, use the ISO C and C++
                                 // conformant name: _strdup.
#endif

// OpenEXR RLE: runs shorter than MIN_RUN_LENGTH are stored literally; run
// and literal lengths are both capped at MAX_RUN_LENGTH.
const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;

//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//

// Run-length encodes `in` into `out` (negative count => literal bytes
// follow; non-negative count => repeated byte) and returns the encoded size.
static int rleCompress(int inLength, const char in[], signed char out[]) {
  const char *inEnd = in + inLength;
  const char *runStart = in;
  const char *runEnd = in + 1;
  signed char *outWrite = out;

  while (runStart < inEnd) {
    while (runEnd < inEnd && *runStart == *runEnd &&
           runEnd - runStart - 1 < MAX_RUN_LENGTH) {
      ++runEnd;
    }

    if (runEnd - runStart >= MIN_RUN_LENGTH) {
      //
      // Compressable run
      //

      *outWrite++ = static_cast<char>(runEnd - runStart) - 1;
      *outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
      runStart = runEnd;
    } else {
      //
      // Uncompressable run
      //

      while (runEnd < inEnd &&
             ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
              (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
             runEnd - runStart < MAX_RUN_LENGTH) {
        ++runEnd;
      }

      *outWrite++ = static_cast<char>(runStart - runEnd);

      while (runStart < runEnd) {
        *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
      }
    }

    ++runEnd;
  }

  return static_cast<int>(outWrite - out);
}

//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the oncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//

static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;

  while (inLength > 0) {
    if (*in < 0) {
      // Literal run: -(count) bytes follow verbatim.
      int count = -(static_cast<int>(*in++));
      inLength -= count + 1;

      if (0 > (maxLength -= count)) return 0;

      // NOTE(review): `count` comes from untrusted input; only the output
      // bound (maxLength) is checked before this memcpy, so `in` can be read
      // past its end when inLength goes negative — confirm against upstream
      // hardening before relying on this with hostile files.
      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      // Repeated run: next byte repeated count+1 times.
      int count = *in++;
      inLength -= 2;

      if (0 > (maxLength -= count + 1)) return 0;

      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;

      in++;
    }
  }

  return static_cast<int>(out - outStart);
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// End of RLE code from OpenEXR -----------------------------------

// RLE-compresses one EXR pixel block: same byte-reorder + delta predictor
// preprocessing as CompressZip, then OpenEXR run-length coding; falls back
// to a raw copy when the encoding would grow the data (Issue 40).
static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // outSize will be (srcSiz * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);

  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

// Inverse of CompressRle: RLE-decode, undo the predictor, de-interleave.
// NOTE(review): a corrupt stream is only caught by assert(), i.e. not at all
// in NDEBUG builds — confirm whether callers validate sizes beforehand.
static void DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return;
  }

  std::vector<unsigned char> tmpBuf(uncompressed_size);

  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  assert(ret == static_cast<int>(uncompressed_size));
  (void)ret;

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressed_size;

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }
}

#if TINYEXR_USE_PIZ

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"

#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

#endif

//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//

// Per-channel bookkeeping for the PIZ codec: [start, end) window into the
// working buffer plus channel dimensions/sampling.
struct PIZChannelData {
  unsigned short *start;
  unsigned short *end;
  int nx;
  int ny;
  int ys;
  int size;
};

//-----------------------------------------------------------------------------
//
//	16-bit Haar Wavelet encoding and decoding
//
//	The source code in this file is derived from the encoding
//	and decoding routines written by Christian Rouet for his
//	PIZ image file format.
//
//-----------------------------------------------------------------------------

//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//

inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  short as = static_cast<short>(a);
  short bs = static_cast<short>(b);

  short ms = (as + bs) >> 1;
  short ds = as - bs;

  l = static_cast<unsigned short>(ms);
  h = static_cast<unsigned short>(ds);
}

inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  short ls = static_cast<short>(l);
  short hs = static_cast<short>(h);

  int hi = hs;
  int ai = ls + (hi & 1) + (hi >> 1);

  short as = static_cast<short>(ai);
  short bs = static_cast<short>(ai - hi);

  a = static_cast<unsigned short>(as);
  b = static_cast<unsigned short>(bs);
}

//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//

const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;

inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  int ao = (a + A_OFFSET) & MOD_MASK;
  int m = ((ao + b) >> 1);
  int d = ao - b;

  if (d < 0) m = (m + M_OFFSET) & MOD_MASK;

  d &= MOD_MASK;

  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(d);
}

inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  int m = l;
  int d = h;
  int bb = (m - (d >> 1)) & MOD_MASK;
  int aa = (d + bb - A_OFFSET) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>(aa);
}

//
// 2D Wavelet encoding:
//

static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  // Values below 1<<14 use the better-compressing non-modulo basis.
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 <<  level
  int p2 = 2;  // == 1 << (level+1)

  //
  // Hierachical loop on smaller dimension n
  //

  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet encoding
        //

        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }

      //
      // Encode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Encode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p = p2;
    p2 <<= 1;
  }
}

//
// 2D Wavelet decoding:
//

static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;

  //
  // Search max level
  //

  while (p <= n) p <<= 1;

  p >>= 1;
  p2 = p;
  p >>= 1;

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet decoding
        //

        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }

      //
      // Decode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Decode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p2 = p;
    p >>= 1;
  }
}

//-----------------------------------------------------------------------------
//
//	16-bit Huffman compression and decompression.
//
//	The source code in this file is derived from the 8-bit
//	Huffman compression and decompression routines written
//	by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------

// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16;  // literal (value) bit length
const int HUF_DECBITS = 14;  // decoding bit size (>= 8)

const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1;  // encoding table size
const int HUF_DECSIZE = 1 << HUF_DECBITS;        // decoding table size
const int HUF_DECMASK = HUF_DECSIZE - 1;

// One entry of the decoding hash table: either a directly decodable
// "short" code (len/lit filled in, p == NULL) or, for codes longer than
// HUF_DECBITS, a list `p` of `lit` candidate symbol indices.
struct HufDec {  // short code long code
  //-------------------------------
  int len : 8;   // code length 0
  int lit : 24;  // lit p size
  int *p;        // 0 lits
};

// A packed code word keeps the bit length in the low 6 bits and the code
// value in the remaining high bits.
inline long long hufLength(long long code) { return code & 63; }

inline long long hufCode(long long code) { return code >> 6; }

// Append the low `nBits` of `bits` to the bit accumulator c/lc and flush
// complete bytes to `out`.
inline void outputBits(int nBits, long long bits, long long &c, int &lc,
                       char *&out) {
  c <<= nBits;
  lc += nBits;

  c |= bits;

  while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8)));
}

// Read `nBits` from the input stream `in`, refilling the bit accumulator
// c/lc byte by byte as needed.
inline long long getBits(int nBits, long long &c, int &lc, const char *&in) {
  while (lc < nBits) {
    c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++));
    lc += 8;
  }

  lc -= nBits;
  return (c >> lc) & ((1 << nBits) - 1);
}

//
// ENCODING TABLE BUILDING & (UN)PACKING
//

//
// Build a "canonical" Huffman code table:
//	- for each (uncompressed) symbol, hcode contains the length
//	  of the corresponding code (in the compressed data)
//	- canonical codes are computed and stored in hcode
//	- the rules for constructing canonical codes are as follows:
//	  * shorter codes (if filled with zeroes to the right)
//	    have a numerically higher value than longer codes
//	  * for codes with the same length, numerical values
//	    increase with numerical symbol values
//	- because the canonical code table can be constructed from
//	  symbol lengths alone, the code table can be transmitted
//	  without sending the actual code values
//	- see http://www.compressconsult.com/huffman/
//

static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) {
  long long n[59];

  //
  // For each i from 0 through 58, count the
  // number of different codes of length i, and
  // store the count in n[i].
  //

  for (int i = 0; i <= 58; ++i) n[i] = 0;

  for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1;

  //
  // For each i from 58 through 1, compute the
  // numerically lowest code with length i, and
  // store that code in n[i].
  //

  long long c = 0;

  for (int i = 58; i > 0; --i) {
    long long nc = ((c + n[i]) >> 1);
    n[i] = c;
    c = nc;
  }

  //
  // hcode[i] contains the length, l, of the
  // code for symbol i.  Assign the next available
  // code of length l to the symbol and store both
  // l and the code in hcode[i].
  //

  for (int i = 0; i < HUF_ENCSIZE; ++i) {
    int l = static_cast<int>(hcode[i]);

    if (l > 0) hcode[i] = l | (n[l]++ << 6);
  }
}

//
// Compute Huffman codes (based on frq input) and store them in frq:
//	- code structure is : [63:lsb - 6:msb] | [5-0: bit length];
//	- max code length is 58 bits;
//	- codes outside the range [im-iM] have a null length (unused values);
//	- original frequencies are destroyed;
//	- encoding tables are used by hufEncode() and hufBuildDecTable();
//

// Min-heap ordering on frequencies (elements are pointers into frq).
struct FHeapCompare {
  bool operator()(long long *a, long long *b) { return *a > *b; }
};

static void hufBuildEncTable(
    long long *frq,  // io: input frequencies [HUF_ENCSIZE], output table
    int *im,         //  o: min frq index
    int *iM)         //  o: max frq index
{
  //
  // This function assumes that when it is called, array frq
  // indicates the frequency of all possible symbols in the data
  // that are to be Huffman-encoded.  (frq[i] contains the number
  // of occurrences of symbol i in the data.)
  //
  // The loop below does three things:
  //
  // 1) Finds the minimum and maximum indices that point
  //    to non-zero entries in frq:
  //
  //    frq[im] != 0, and frq[i] == 0 for all i < im
  //    frq[iM] != 0, and frq[i] == 0 for all i > iM
  //
  // 2) Fills array fHeap with pointers to all non-zero
  //    entries in frq.
  //
  // 3) Initializes array hlink such that hlink[i] == i
  //    for all array entries.
  //

  // NOTE(review): hlink, fHeap and scode below are large stack arrays
  // (on the order of 1 MB combined with HUF_ENCBITS == 16) — confirm the
  // target platform's stack budget accommodates this.
  int hlink[HUF_ENCSIZE];
  long long *fHeap[HUF_ENCSIZE];

  *im = 0;

  while (!frq[*im]) (*im)++;

  int nf = 0;

  for (int i = *im; i < HUF_ENCSIZE; i++) {
    hlink[i] = i;

    if (frq[i]) {
      fHeap[nf] = &frq[i];
      nf++;
      *iM = i;
    }
  }

  //
  // Add a pseudo-symbol, with a frequency count of 1, to frq;
  // adjust the fHeap and hlink array accordingly.  Function
  // hufEncode() uses the pseudo-symbol for run-length encoding.
  //

  (*iM)++;
  frq[*iM] = 1;
  fHeap[nf] = &frq[*iM];
  nf++;

  //
  // Build an array, scode, such that scode[i] contains the number
  // of bits assigned to symbol i.  Conceptually this is done by
  // constructing a tree whose leaves are the symbols with non-zero
  // frequency:
  //
  //     Make a heap that contains all symbols with a non-zero frequency,
  //     with the least frequent symbol on top.
  //
  //     Repeat until only one symbol is left on the heap:
  //
  //         Take the two least frequent symbols off the top of the heap.
  //         Create a new node that has first two nodes as children, and
  //         whose frequency is the sum of the frequencies of the first
  //         two nodes.  Put the new node back into the heap.
  //
  // The last node left on the heap is the root of the tree.  For each
  // leaf node, the distance between the root and the leaf is the length
  // of the code for the corresponding symbol.
  //
  // The loop below doesn't actually build the tree; instead we compute
  // the distances of the leaves from the root on the fly.  When a new
  // node is added to the heap, then that node's descendants are linked
  // into a single linear list that starts at the new node, and the code
  // lengths of the descendants (that is, their distance from the root
  // of the tree) are incremented by one.
  //

  std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

  long long scode[HUF_ENCSIZE];
  memset(scode, 0, sizeof(long long) * HUF_ENCSIZE);

  while (nf > 1) {
    //
    // Find the indices, mm and m, of the two smallest non-zero frq
    // values in fHeap, add the smallest frq to the second-smallest
    // frq, and remove the smallest frq value from fHeap.
    //

    int mm = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());
    --nf;

    int m = fHeap[0] - frq;
    std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    frq[m] += frq[mm];
    std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare());

    //
    // The entries in scode are linked into lists with the
    // entries in hlink serving as "next" pointers and with
    // the end of a list marked by hlink[j] == j.
    //
    // Traverse the lists that start at scode[m] and scode[mm].
    // For each element visited, increment the length of the
    // corresponding code by one bit. (If we visit scode[j]
    // during the traversal, then the code for symbol j becomes
    // one bit longer.)
    //
    // Merge the lists that start at scode[m] and scode[mm]
    // into a single list that starts at scode[m].
    //

    //
    // Add a bit to all codes in the first list.
    //

    for (int j = m;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) {
        //
        // Merge the two lists.
        //

        hlink[j] = mm;
        break;
      }
    }

    //
    // Add a bit to all codes in the second list
    //

    for (int j = mm;; j = hlink[j]) {
      scode[j]++;

      assert(scode[j] <= 58);

      if (hlink[j] == j) break;
    }
  }

  //
  // Build a canonical Huffman code table, replacing the code
  // lengths in scode with (code, code length) pairs.  Copy the
  // code table from scode into frq.
  //

  hufCanonicalCodeTable(scode);
  memcpy(frq, scode, sizeof(long long) * HUF_ENCSIZE);
}

//
// Pack an encoding table:
//	- only code lengths, not actual codes, are stored
//	- runs of zeroes are compressed as follows:
//
//	  unpacked		packed
//	  --------------------------------
//	  1 zero		0	(6 bits)
//	  2 zeroes		59
//	  3 zeroes		60
//	  4 zeroes		61
//	  5 zeroes		62
//	  n zeroes (6 or more)	63 n-6	(6 + 8 bits)
//

const int SHORT_ZEROCODE_RUN = 59;
const int LONG_ZEROCODE_RUN = 63;
const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN;
const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN;

static void hufPackEncTable(
    const long long *hcode,  // i : encoding table [HUF_ENCSIZE]
    int im,                  // i : min hcode index
    int iM,                  // i : max hcode index
    char **pcode)            //  o: ptr to packed table (updated)
{
  char *p = *pcode;
  long long c = 0;
  int lc = 0;

  for (; im <= iM; im++) {
    int l = hufLength(hcode[im]);

    if (l == 0) {
      // Collapse a run of zero-length (unused) codes into a run code.
      int zerun = 1;

      while ((im < iM) && (zerun < LONGEST_LONG_RUN)) {
        if (hufLength(hcode[im + 1]) > 0) break;
        im++;
        zerun++;
      }

      if (zerun >= 2) {
        if (zerun >= SHORTEST_LONG_RUN) {
          outputBits(6, LONG_ZEROCODE_RUN, c, lc, p);
          outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p);
        } else {
          outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p);
        }
        continue;
      }
    }

    outputBits(6, l, c, lc, p);
  }

  if (lc > 0) *p++ = (unsigned char)(c << (8 - lc));

  *pcode = p;
}

//
// Unpack an encoding table packed by hufPackEncTable():
//

static bool hufUnpackEncTable(
    const char **pcode,  // io: ptr to packed table (updated)
    int ni,              // i : input size (in bytes)
    int im,              // i : min hcode index
    int iM,              // i : max hcode index
    long long *hcode)    //  o: encoding table [HUF_ENCSIZE]
{
  memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE);

  const char *p = *pcode;
  long long c = 0;
  int lc = 0;

  for (; im <= iM; im++) {
    // Bounds check: do not read past the packed table.
    if (p - *pcode > ni) {
      return false;
    }

    long long l = hcode[im] = getBits(6, c, lc, p);  // code length

    if (l == (long long)LONG_ZEROCODE_RUN) {
      if (p - *pcode > ni) {
        return false;
      }

      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      int zerun = l - SHORT_ZEROCODE_RUN + 2;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;
    }
  }

  *pcode = const_cast<char *>(p);

  hufCanonicalCodeTable(hcode);

  return true;
}

//
// DECODING TABLE BUILDING
//

//
// Clear a newly allocated decoding table so that it contains only zeroes.
//

static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
                                              //     decoding table [HUF_DECSIZE]
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    hdecod[i].len = 0;
    hdecod[i].lit = 0;
    hdecod[i].p = NULL;
  }
  // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}

//
// Build a decoding hash table based on the encoding table hcode:
//	- short codes (<= HUF_DECBITS) are resolved with a single table access;
//	- long code entry allocations are not optimized, because long codes are
//	  unfrequent;
//	- decoding tables are used by hufDecode();
//

static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  //  o: (allocated by caller)
                                              //     decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
        //

        // invalidTableEntry();
        return false;
      }

      pl->lit++;

      if (pl->p) {
        // Grow the candidate list by one entry
        // (linear copy; long codes are rare).
        int *p = pl->p;
        pl->p = new int[pl->lit];

        for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i];

        delete[] p;
      } else {
        pl->p = new int[1];
      }

      pl->p[pl->lit - 1] = im;
    } else if (l) {
      //
      // Short code: init all primary entries
      //

      HufDec *pl = hdecod + (c << (HUF_DECBITS - l));

      for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) {
        if (pl->len || pl->p) {
          //
          // Error: a short code or a long code has
          // already been stored in table entry *pl.
          //

          // invalidTableEntry();
          return false;
        }

        pl->len = l;
        pl->lit = im;
      }
    }
  }

  return true;
}

//
// Free the long code entries of a decoding table built by hufBuildDecTable()
//

static void hufFreeDecTable(HufDec *hdecod)  // io: Decoding table
{
  for (int i = 0; i < HUF_DECSIZE; i++) {
    if (hdecod[i].p) {
      delete[] hdecod[i].p;
      hdecod[i].p = 0;
    }
  }
}

//
// ENCODING
//

inline void outputCode(long long code, long long &c, int &lc, char *&out) {
  outputBits(hufLength(code), hufCode(code), c, lc, out);
}

inline void sendCode(long long sCode, int runCount, long long runCode,
                     long long &c, int &lc, char *&out) {
  //
  // Output a run of runCount instances of the symbol sCode.
  // Output the symbols explicitly, or if that is shorter, output
  // the sCode symbol once followed by a runCode symbol and runCount
  // expressed as an 8-bit number.
  //

  if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) {
    outputCode(sCode, c, lc, out);
    outputCode(runCode, c, lc, out);
    outputBits(8, runCount, c, lc, out);
  } else {
    // runCount counts *extra* repetitions, so runCount + 1 symbols are sent.
    while (runCount-- >= 0) outputCode(sCode, c, lc, out);
  }
}

//
// Encode (compress) ni values based on the Huffman encoding table hcode:
//

static int hufEncode  // return: output size (in bits)
    (const long long *hcode,    // i : encoding table
     const unsigned short *in,  // i : uncompressed input buffer
     const int ni,              // i : input buffer size (in bytes)
     int rlc,                   // i : rl code
     char *out)                 //  o: compressed output buffer
{
  char *outStart = out;
  long long c = 0;  // bits not yet written to out
  int lc = 0;       // number of valid bits in c (LSB)

  int s = in[0];
  int cs = 0;

  //
  // Loop on input values
  //

  for (int i = 1; i < ni; i++) {
    //
    // Count same values or send code
    //

    if (s == in[i] && cs < 255) {
      cs++;
    } else {
      sendCode(hcode[s], cs, hcode[rlc], c, lc, out);
      cs = 0;
    }

    s = in[i];
  }

  //
  // Send remaining code
  //

  sendCode(hcode[s], cs, hcode[rlc], c, lc, out);

  if (lc) *out = (c << (8 - lc)) & 0xff;

  return (out - outStart) * 8 + lc;
}

//
// DECODING
//

//
// In order to force the compiler to inline them,
// getChar() and getCode() are implemented as macros
// instead of "inline" functions.
//
// #define getChar(c, lc, in) \ { \ c = (c << 8) | *(unsigned char *)(in++); \ lc += 8; \ } #define getCode(po, rlc, c, lc, in, out, oe) \ { \ if (po == rlc) { \ if (lc < 8) getChar(c, lc, in); \ \ lc -= 8; \ \ unsigned char cs = (c >> lc); \ \ if (out + cs > oe) return false; \ \ unsigned short s = out[-1]; \ \ while (cs-- > 0) *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return false; \ } \ } // // Decode (uncompress) ni bits based on encoding & decoding tables: // static bool hufDecode(const long long *hcode, // i : encoding table const HufDec *hdecod, // i : decoding table const char *in, // i : compressed input buffer int ni, // i : input size (in bits) int rlc, // i : run-length code int no, // i : expected output size (in bytes) unsigned short *out) // o: uncompressed output buffer { long long c = 0; int lc = 0; unsigned short *outb = out; unsigned short *oe = out + no; const char *ie = in + (ni + 7) / 8; // input byte size // // Loop on input bytes // while (in < ie) { getChar(c, lc, in); // // Access decoding table // while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { // // Get short code // lc -= pl.len; getCode(pl.lit, rlc, c, lc, in, out, oe); } else { if (!pl.p) { return false; } // invalidCode(); // wrong code // // Search long code // int j; for (j = 0; j < pl.lit; j++) { int l = hufLength(hcode[pl.p[j]]); while (lc < l && in < ie) // get more bits getChar(c, lc, in); if (lc >= l) { if (hufCode(hcode[pl.p[j]]) == ((c >> (lc - l)) & (((long long)(1) << l) - 1))) { // // Found : get long code // lc -= l; getCode(pl.p[j], rlc, c, lc, in, out, oe); break; } } } if (j == pl.lit) { return false; // invalidCode(); // Not found } } } } // // Get remaining (short) codes // int i = (8 - ni) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; getCode(pl.lit, rlc, c, lc, in, out, oe); } else { return 
false; // invalidCode(); // wrong (long) code } } if (out - outb != no) { return false; } // notEnoughData (); return true; } static void countFrequencies(long long freq[HUF_ENCSIZE], const unsigned short data[/*n*/], int n) { for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0; for (int i = 0; i < n; ++i) ++freq[data[i]]; } static void writeUInt(char buf[4], unsigned int i) { unsigned char *b = (unsigned char *)buf; b[0] = i; b[1] = i >> 8; b[2] = i >> 16; b[3] = i >> 24; } static unsigned int readUInt(const char buf[4]) { const unsigned char *b = (const unsigned char *)buf; return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) | ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000); } // // EXTERNAL INTERFACE // static int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) { if (nRaw == 0) return 0; long long freq[HUF_ENCSIZE]; countFrequencies(freq, raw, nRaw); int im = 0; int iM = 0; hufBuildEncTable(freq, &im, &iM); char *tableStart = compressed + 20; char *tableEnd = tableStart; hufPackEncTable(freq, im, iM, &tableEnd); int tableLength = tableEnd - tableStart; char *dataStart = tableEnd; int nBits = hufEncode(freq, raw, nRaw, iM, dataStart); int data_length = (nBits + 7) / 8; writeUInt(compressed, im); writeUInt(compressed + 4, iM); writeUInt(compressed + 8, tableLength); writeUInt(compressed + 12, nBits); writeUInt(compressed + 16, 0); // room for future extensions return dataStart + data_length - compressed; } static bool hufUncompress(const char compressed[], int nCompressed, unsigned short raw[], int nRaw) { if (nCompressed == 0) { if (nRaw != 0) return false; return false; } int im = readUInt(compressed); int iM = readUInt(compressed + 4); // int tableLength = readUInt (compressed + 8); int nBits = readUInt(compressed + 12); if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false; const char *ptr = compressed + 20; // // Fast decoder needs at least 2x64-bits of compressed data, and // needs to be run-able on 
this platform. Otherwise, fall back // to the original decoder // // if (FastHufDecoder::enabled() && nBits > 128) //{ // FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM); // fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw); //} // else { std::vector<long long> freq(HUF_ENCSIZE); std::vector<HufDec> hdec(HUF_DECSIZE); hufClearDecTable(&hdec.at(0)); hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM, &freq.at(0)); { if (nBits > 8 * (nCompressed - (ptr - compressed))) { return false; } hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0)); hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, nRaw, raw); } // catch (...) //{ // hufFreeDecTable (hdec); // throw; //} hufFreeDecTable(&hdec.at(0)); } return true; } // // Functions to compress the range of values in the pixel data // const int USHORT_RANGE = (1 << 16); const int BITMAP_SIZE = (USHORT_RANGE >> 3); static void bitmapFromData(const unsigned short data[/*nData*/], int nData, unsigned char bitmap[BITMAP_SIZE], unsigned short &minNonZero, unsigned short &maxNonZero) { for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0; for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7)); bitmap[0] &= ~1; // zero is not explicitly stored in // the bitmap; we assume that the // data always contain zeroes minNonZero = BITMAP_SIZE - 1; maxNonZero = 0; for (int i = 0; i < BITMAP_SIZE; ++i) { if (bitmap[i]) { if (minNonZero > i) minNonZero = i; if (maxNonZero < i) maxNonZero = i; } } } static unsigned short forwardLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[i] = k++; else lut[i] = 0; } return k - 1; // maximum value stored in lut[], } // i.e. 
number of ones in bitmap minus 1 static unsigned short reverseLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; } int n = k - 1; while (k < USHORT_RANGE) lut[k++] = 0; return n; // maximum k where lut[k] is non-zero, } // i.e. number of ones in bitmap minus 1 static void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ #ifdef _MSC_VER #pragma warning(pop) #endif static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int data_width, int num_lines) { unsigned char bitmap[BITMAP_SIZE]; unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif // Assume `inSize` is multiple of 2 or 4. 
std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t c = 0; c < channelData.size(); c++) { PIZChannelData &cd = channelData[c]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = data_width; cd.ny = num_lines; // cd.ys = c.channel().ySampling; size_t pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < num_lines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap, minNonZero, maxNonZero); unsigned short lut[USHORT_RANGE]; unsigned short maxValue = forwardLutFromBitmap(bitmap, lut); applyLut(lut, &tmpBuffer.at(0), static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff 
data. Initialize length header with zero, // then later fill it by `length`. char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); (*outSize) = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if ((*outSize) >= inSize) { (*outSize) = static_cast<unsigned int>(inSize); memcpy(outPtr, inPtr, inSize); } return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSize, size_t inLen, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { if (inLen == tmpBufSize) { // Data is not compressed(Issue 40). memcpy(outPtr, inPtr, inLen); return true; } unsigned char bitmap[BITMAP_SIZE]; unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. 
} assert(0); return false; #endif memset(bitmap, 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } unsigned short lut[USHORT_RANGE]; memset(lut, 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap, lut); // // Huffman decoding // int length; length = *(reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); std::vector<unsigned short> tmpBuffer(tmpBufSize); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut, &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = 
channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; int precision; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; tolerance = 0.0f; } }; bool FindZFPCompressionParam(ZFPCompressionParam *param, const EXRAttribute *attributes, int num_attributes) { bool foundType = false; for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) && (attributes[i].size == 1)) { param->type = static_cast<int>(attributes[i].value[0]); foundType = true; } } if (!foundType) { return false; } if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) && (attributes[i].size == 8)) { param->rate = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) && (attributes[i].size == 4)) { param->rate = *(reinterpret_cast<int *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) && (attributes[i].size == 8)) { param->tolerance = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } } else { assert(0); } return false; } // Assume pixel format is FLOAT for all channels. 
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines, int num_channels, const unsigned char *src, unsigned long src_size, const ZFPCompressionParam &param) { size_t uncompressed_size = dst_width * dst_num_lines * num_channels; if (uncompressed_size == src_size) { // Data is not compressed(Issue 40). memcpy(dst, src, src_size); } zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((dst_width % 4) == 0); assert((dst_num_lines % 4) == 0); if ((dst_width & 3U) || (dst_num_lines & 3U)) { return false; } field = zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)), zfp_type_float, dst_width, dst_num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimention */ 2, /* write random access */ 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); std::vector<unsigned char> buf(buf_size); memcpy(&buf.at(0), src, src_size); bitstream *stream = stream_open(&buf.at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_stream_rewind(zfp); size_t image_size = dst_width * dst_num_lines; for (int c = 0; c < num_channels; c++) { // decompress 4x4 pixel block. for (int y = 0; y < dst_num_lines; y += 4) { for (int x = 0; x < dst_width; x += 4) { float fblock[16]; zfp_decode_block_float_2(zfp, fblock); for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { dst[c * image_size + ((y + j) * dst_width + (x + i))] = fblock[j * 4 + i]; } } } } } zfp_field_free(field); zfp_stream_close(zfp); stream_close(stream); return true; } // Assume pixel format is FLOAT for all channels. 
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize, const float *inPtr, int width, int num_lines, int num_channels, const ZFPCompressionParam &param) { zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((width % 4) == 0); assert((num_lines % 4) == 0); if ((width & 3U) || (num_lines & 3U)) { return false; } // create input array. field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)), zfp_type_float, width, num_lines * num_channels); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision, zfp_type_float); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); outBuf->resize(buf_size); bitstream *stream = stream_open(&outBuf->at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_field_free(field); size_t image_size = width * num_lines; for (int c = 0; c < num_channels; c++) { // compress 4x4 pixel block. 
for (int y = 0; y < num_lines; y += 4) { for (int x = 0; x < width; x += 4) { float fblock[16]; for (int j = 0; j < 4; j++) { for (int i = 0; i < 4; i++) { fblock[j * 4 + i] = inPtr[c * image_size + ((y + j) * width + (x + i))]; } } zfp_encode_block_float_2(zfp, fblock); } } } zfp_stream_flush(zfp); (*outSize) = zfp_stream_compressed_size(zfp); zfp_stream_close(zfp); return true; } #endif // // ----------------------------------------------------------------- // static bool DecodePixelData(/* out */ unsigned char **out_images, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int width, int height, int x_stride, int y, int line_no, int num_lines, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ #if TINYEXR_USE_PIZ // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>( static_cast<size_t>(width * num_lines) * pixel_data_size)); size_t tmpBufLen = outBuf.size(); bool ret = tinyexr::DecompressPiz( reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen, data_len, static_cast<int>(num_channels), channels, width, num_lines); assert(ret); (void)ret; // For PIZ_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... 
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val = line_ptr[u]; tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + 
static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>(&outBuf.at( v * pixel_data_size * static_cast<size_t>(x_stride) + channel_offset_list[c] * static_cast<size_t>(x_stride))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else assert(0 && "PIZ is enabled in this build"); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS || compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... 
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val = line_ptr[u]; tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += 
(static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); tinyexr::DecompressRle(reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len)); // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... 
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val = line_ptr[u]; tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += 
(static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; if (!FindZFPCompressionParam(&zfp_compression_param, attributes, num_attributes)) { assert(0); return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... 
for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + c * static_cast<size_t>(width) * sizeof(unsigned short)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += y * x_stride; } else { outLine += (height - 1 - y) * x_stride; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += y * x_stride; } 
else { outLine += (height - 1 - y) * x_stride; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; hf.u = line_ptr[u]; tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); tinyexr::FP32 f32 = half_to_float(hf); outLine[u] = f32.f; } } else { assert(0); return false; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { const float *line_ptr = reinterpret_cast<const float *>( data_ptr + c * static_cast<size_t>(width) * sizeof(float)); float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += y * x_stride; } else { outLine += (height - 1 - y) * x_stride; } for (int u = 0; u < width; u++) { float val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>( data_ptr + c * static_cast<size_t>(width) * sizeof(unsigned int)); unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]); if (line_order == 0) { outLine += y * x_stride; } else { outLine += (height - 1 - y) * x_stride; } for (int u = 0; u < width; u++) { unsigned int val = line_ptr[u]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } } } return true; } static void DecodeTiledPixelData( unsigned char **out_images, int *width, int *height, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int data_width, int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x, int tile_size_y, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { assert(tile_offset_x * tile_size_x < data_width); assert(tile_offset_y * tile_size_y < data_height); // Compute actual image size in a tile. 
if ((tile_offset_x + 1) * tile_size_x >= data_width) { (*width) = data_width - (tile_offset_x * tile_size_x); } else { (*width) = tile_size_x; } if ((tile_offset_y + 1) * tile_size_y >= data_height) { (*height) = data_height - (tile_offset_y * tile_size_y); } else { (*height) = tile_size_y; } // Image size = tile size. DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len, compression_type, line_order, (*width), tile_size_y, /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0, (*height), pixel_data_size, num_attributes, attributes, num_channels, channels, channel_offset_list); } static void ComputeChannelLayout(std::vector<size_t> *channel_offset_list, int *pixel_data_size, size_t *channel_offset, int num_channels, const EXRChannelInfo *channels) { channel_offset_list->resize(static_cast<size_t>(num_channels)); (*pixel_data_size) = 0; (*channel_offset) = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { (*channel_offset_list)[c] = (*channel_offset); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { (*pixel_data_size) += sizeof(unsigned short); (*channel_offset) += sizeof(unsigned short); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { (*pixel_data_size) += sizeof(float); (*channel_offset) += sizeof(float); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { (*pixel_data_size) += sizeof(unsigned int); (*channel_offset) += sizeof(unsigned int); } else { assert(0); } } } static unsigned char **AllocateImage(int num_channels, const EXRChannelInfo *channels, const int *requested_pixel_types, int data_width, int data_height) { unsigned char **images = reinterpret_cast<unsigned char **>(static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(num_channels)))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { size_t data_len = static_cast<size_t>(data_width) * static_cast<size_t>(data_height); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { // pixel_data_size += 
sizeof(unsigned short); // channel_offset += sizeof(unsigned short); // Alloc internal image for half type. if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned short *>( malloc(sizeof(unsigned short) * data_len))); } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); // channel_offset += sizeof(float); images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { // pixel_data_size += sizeof(unsigned int); // channel_offset += sizeof(unsigned int); images[c] = reinterpret_cast<unsigned char *>( static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len))); } else { assert(0); } } return images; } static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, const EXRVersion *version, std::string *err, const unsigned char *buf, size_t size) { const char *marker = reinterpret_cast<const char *>(&buf[0]); if (empty_header) { (*empty_header) = false; } if (version->multipart) { if (size > 0 && marker[0] == '\0') { // End of header list. 
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; info->data_window[0] = 0; info->data_window[1] = 0; info->data_window[2] = 0; info->data_window[3] = 0; info->line_order = 0; // @fixme info->display_window[0] = 0; info->display_window[1] = 0; info->display_window[2] = 0; info->display_window[3] = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (version->tiled && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = 
levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!ReadChannelInfo(info->channels, data)) { if (err) { (*err) = "Failed to parse channel info."; } return TINYEXR_ERROR_INVALID_DATA; } if (info->channels.size() < 1) { if (err) { (*err) = "# of channels is zero."; } return TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { if (data.size() >= 16) { memcpy(&info->data_window[0], &data.at(0), sizeof(int)); memcpy(&info->data_window[1], &data.at(4), sizeof(int)); memcpy(&info->data_window[2], &data.at(8), sizeof(int)); memcpy(&info->data_window[3], &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3])); 
has_data_window = true; } } else if (attr_name.compare("displayWindow") == 0) { if (data.size() >= 16) { memcpy(&info->display_window[0], &data.at(0), sizeof(int)); memcpy(&info->display_window[1], &data.at(4), sizeof(int)); memcpy(&info->display_window[2], &data.at(8), sizeof(int)); memcpy(&info->display_window[3], &data.at(12), sizeof(int)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[1])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[2])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[3])); has_display_window = true; } } else if (attr_name.compare("lineOrder") == 0) { if (data.size() >= 1) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } } else if (attr_name.compare("pixelAspectRatio") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio)); has_pixel_aspect_ratio = true; } } else if (attr_name.compare("screenWindowCenter") == 0) { if (data.size() >= 8) { memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[1])); has_screen_window_center = true; } } else if (attr_name.compare("screenWindowWidth") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_width)); has_screen_window_width = true; } } else if (attr_name.compare("chunkCount") == 0) { if (data.size() >= sizeof(int)) { memcpy(&info->chunk_count, &data.at(0), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count)); } } 
else { // Custom attribute(up to TINYEXR_MAX_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_ATTRIBUTES) { EXRAttribute attrib; #ifdef _MSC_VER strncpy_s(attrib.name, attr_name.c_str(), 255); strncpy_s(attrib.type, attr_type.c_str(), 255); #else strncpy(attrib.name, attr_name.c_str(), 255); strncpy(attrib.type, attr_type.c_str(), 255); #endif attrib.name[255] = '\0'; attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." << std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." << std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header or invalid." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." << std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." << std::endl; } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. 
// Converts the C++ parsing result (HeaderInfo) into the public C EXRHeader.
// Scalar attributes are copied by value; the channel table is deep-copied into
// three malloc'd arrays (`channels`, `pixel_types`, `requested_pixel_types`)
// which the caller releases via FreeEXRHeader. Custom attribute values are
// pointer-copied only -- ownership of the attribute payload stays with `info`.
// NOTE(review): malloc results are not checked; on OOM this dereferences NULL.
static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) {
  exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio;
  exr_header->screen_window_center[0] = info.screen_window_center[0];
  exr_header->screen_window_center[1] = info.screen_window_center[1];
  exr_header->screen_window_width = info.screen_window_width;
  exr_header->chunk_count = info.chunk_count;
  exr_header->display_window[0] = info.display_window[0];
  exr_header->display_window[1] = info.display_window[1];
  exr_header->display_window[2] = info.display_window[2];
  exr_header->display_window[3] = info.display_window[3];
  exr_header->data_window[0] = info.data_window[0];
  exr_header->data_window[1] = info.data_window[1];
  exr_header->data_window[2] = info.data_window[2];
  exr_header->data_window[3] = info.data_window[3];
  exr_header->line_order = info.line_order;
  exr_header->compression_type = info.compression_type;
  exr_header->tile_size_x = info.tile_size_x;
  exr_header->tile_size_y = info.tile_size_y;
  exr_header->tile_level_mode = info.tile_level_mode;
  exr_header->tile_rounding_mode = info.tile_rounding_mode;

  // Deep-copy channel descriptions.
  exr_header->num_channels = static_cast<int>(info.channels.size());
  exr_header->channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
#ifdef _MSC_VER
    strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#else
    strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255);
#endif
    // manually add '\0' for safety (strncpy does not guarantee termination).
    exr_header->channels[c].name[255] = '\0';

    exr_header->channels[c].pixel_type = info.channels[c].pixel_type;
    exr_header->channels[c].p_linear = info.channels[c].p_linear;
    exr_header->channels[c].x_sampling = info.channels[c].x_sampling;
    exr_header->channels[c].y_sampling = info.channels[c].y_sampling;
  }

  exr_header->pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->pixel_types[c] = info.channels[c].pixel_type;
  }

  // Initially fill with values of `pixel_types`; callers may override to
  // request on-the-fly conversion (e.g. HALF -> FLOAT) before decoding.
  exr_header->requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) {
    exr_header->requested_pixel_types[c] = info.channels[c].pixel_type;
  }

  // Custom attributes live in a fixed-size array inside EXRHeader; the parser
  // already capped the count at TINYEXR_MAX_ATTRIBUTES.
  assert(info.attributes.size() < TINYEXR_MAX_ATTRIBUTES);
  exr_header->num_custom_attributes = static_cast<int>(info.attributes.size());

  for (size_t i = 0; i < info.attributes.size(); i++) {
    memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256);
    memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256);
    exr_header->custom_attributes[i].size = info.attributes[i].size;
    // Just copy pointer -- value buffer is shared with `info`.
    exr_header->custom_attributes[i].value = info.attributes[i].value;
  }

  exr_header->header_len = info.header_len;
}

// Decodes all chunks (scanline blocks or tiles) of an EXR payload into
// `exr_image`, using the chunk offset table in `offsets`. `head` points at the
// start of the whole EXR memory image and `size` is its total byte length
// (used for bounds checks). Returns TINYEXR_SUCCESS or an error code.
static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header,
                       const std::vector<tinyexr::tinyexr_uint64> &offsets,
                       const unsigned char *head, const size_t size) {
  int num_channels = exr_header->num_channels;

  // Scanlines per compressed block is fixed per compression scheme
  // (EXR spec: NONE/RLE/ZIPS = 1, ZIP = 16, PIZ = 32).
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1;
  int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1;
  size_t num_blocks = offsets.size();

  std::vector<size_t> channel_offset_list;
  int pixel_data_size = 0;
  size_t channel_offset = 0;
  tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size,
                                &channel_offset, num_channels,
                                exr_header->channels);

  bool invalid_data = false;  // TODO(LTE): Use atomic lock for MT safety.

  if (exr_header->tiled) {
    size_t num_tiles = offsets.size();  // = # of blocks

    exr_image->tiles = static_cast<EXRTile *>(
        calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));

    for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
      // Allocate memory for each tile.
      exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
          num_channels, exr_header->channels, exr_header->requested_pixel_types,
          exr_header->tile_size_x, exr_header->tile_size_y);

      // Tile chunk layout:
      // 16 byte: tile coordinates
      // 4 byte : data size
      // ~      : data(uncompressed or compressed)
      if (offsets[tile_idx] + sizeof(int) * 5 > size) {
        return TINYEXR_ERROR_INVALID_DATA;
      }
      size_t data_size = size - (size_t(offsets[tile_idx]) + sizeof(int) * 5);
      const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(
          head + size_t(offsets[tile_idx]));

      int tile_coordinates[4];
      memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[0]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[1]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[2]));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&tile_coordinates[3]));

      // @todo{ LoD } -- only mip/rip level (0, 0) is supported.
      if (tile_coordinates[2] != 0) {
        return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
      }
      if (tile_coordinates[3] != 0) {
        return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
      }

      int data_len;
      memcpy(&data_len, data_ptr + 16,
             sizeof(int));  // 16 = sizeof(tile_coordinates)
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));
      if (data_len < 4 || size_t(data_len) > data_size) {
        return TINYEXR_ERROR_INVALID_DATA;
      }

      // Move to data addr: 20 = 16 + 4;
      data_ptr += 20;

      tinyexr::DecodeTiledPixelData(
          exr_image->tiles[tile_idx].images,
          &(exr_image->tiles[tile_idx].width),
          &(exr_image->tiles[tile_idx].height),
          exr_header->requested_pixel_types, data_ptr,
          static_cast<size_t>(data_len), exr_header->compression_type,
          exr_header->line_order, data_width, data_height, tile_coordinates[0],
          tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y,
          static_cast<size_t>(pixel_data_size),
          static_cast<size_t>(exr_header->num_custom_attributes),
          exr_header->custom_attributes,
          static_cast<size_t>(exr_header->num_channels), exr_header->channels,
          channel_offset_list);

      exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
      exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
      exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
      exr_image->tiles[tile_idx].level_y = tile_coordinates[3];

      exr_image->num_tiles = static_cast<int>(num_tiles);
    }
  } else {  // scanline format
    exr_image->images = tinyexr::AllocateImage(
        num_channels, exr_header->channels, exr_header->requested_pixel_types,
        data_width, data_height);

#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (int y = 0; y < static_cast<int>(num_blocks); y++) {
      size_t y_idx = static_cast<size_t>(y);
      if (offsets[y_idx] + sizeof(int) * 2 > size) {
        invalid_data = true;
      } else {
        // Scanline chunk layout:
        // 4 byte: scan line
        // 4 byte: data size
        // ~     : pixel data(uncompressed or compressed)
        size_t data_size = size - (size_t(offsets[y_idx]) + sizeof(int) * 2);
        const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(
            head + size_t(offsets[y_idx]));

        int line_no;
        memcpy(&line_no, data_ptr, sizeof(int));
        int data_len;
        memcpy(&data_len, data_ptr + 4, sizeof(int));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
        tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

        if (size_t(data_len) > data_size) {
          invalid_data = true;
        } else {
          // The last block may cover fewer than num_scanline_blocks lines.
          int end_line_no = (std::min)(line_no + num_scanline_blocks,
                                       (exr_header->data_window[3] + 1));

          int num_lines = end_line_no - line_no;
          // assert(num_lines > 0);
          if (num_lines <= 0) {
            invalid_data = true;
          } else {
            // Move to data addr: 8 = 4 + 4;
            data_ptr += 8;

            // Adjust line_no with data_window.bmin.y
            line_no -= exr_header->data_window[1];

            if (line_no < 0) {
              invalid_data = true;
            } else {
              if (!tinyexr::DecodePixelData(
                      exr_image->images, exr_header->requested_pixel_types,
                      data_ptr, static_cast<size_t>(data_len),
                      exr_header->compression_type, exr_header->line_order,
                      data_width, data_height, data_width, y, line_no,
                      num_lines, static_cast<size_t>(pixel_data_size),
                      static_cast<size_t>(exr_header->num_custom_attributes),
                      exr_header->custom_attributes,
                      static_cast<size_t>(exr_header->num_channels),
                      exr_header->channels, channel_offset_list)) {
                invalid_data = true;
              }
            }
          }
        }
      }
    }  // omp parallel
  }

  if (invalid_data) {
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Overwrite `pixel_type` with `requested_pixel_type`.
  {
    for (int c = 0; c < exr_header->num_channels; c++) {
      exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
    }
  }

  {
    exr_image->num_channels = num_channels;

    exr_image->width = data_width;
    exr_image->height = data_height;
  }

  return TINYEXR_SUCCESS;
}

// Rebuilds a scanline offset table by walking the chunk headers sequentially
// (used when the stored table contains zero/invalid entries). `marker` must
// point at the first chunk. Returns false if any chunk would run past `size`.
static bool ReconstructLineOffsets(
    std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
    const unsigned char *head, const unsigned char *marker, const size_t size) {
  assert(head < marker);
  assert(offsets->size() == n);

  for (size_t i = 0; i < n; i++) {
    size_t offset = static_cast<size_t>(marker - head);
    // Offset should not exceed whole EXR file/data size.
    if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
      return false;
    }

    int y;
    unsigned int data_len;

    memcpy(&y, marker, sizeof(int));
    memcpy(&data_len, marker + 4, sizeof(unsigned int));

    if (data_len >= size) {
      return false;
    }

    tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

    (*offsets)[i] = offset;

    marker += data_len + 8;  // 8 = 4 bytes(y) + 4 bytes(data_len)
  }

  return true;
}

// Top-level decoder: validates the data window, computes (or reads) the chunk
// count, loads the chunk offset table that follows the header at `marker`,
// repairs the table if it contains invalid entries, then decodes all chunks.
// `head` is the start of the whole EXR memory image; `size` its byte length.
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
                          const unsigned char *head,
                          const unsigned char *marker, const size_t size,
                          const char **err) {
  if (exr_image == NULL || exr_header == NULL || head == NULL ||
      marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  // Guard against INT_MAX overflow before the `+ 1` below (Issue 63).
  int data_width = exr_header->data_window[2] - exr_header->data_window[0];
  if (data_width >= std::numeric_limits<int>::max()) {
    // Issue 63
    if (err) {
      (*err) = "Invalid data window value.";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  data_width++;

  int data_height = exr_header->data_window[3] - exr_header->data_window[1];
  if (data_height >= std::numeric_limits<int>::max()) {
    if (err) {
      (*err) = "Invalid data height value.";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }
  data_height++;

  if ((data_width < 0) || (data_height < 0)) {
    if (err) {
      (*err) = "Invalid data window value.";
    }
    return TINYEXR_ERROR_INVALID_DATA;
  }

  // Read offset tables.
  size_t num_blocks = 0;

  if (exr_header->chunk_count > 0) {
    // Use `chunkCount` attribute.
    num_blocks = static_cast<size_t>(exr_header->chunk_count);
  } else if (exr_header->tiled) {
    // @todo { LoD } -- assumes a single-level tile grid.
    size_t num_x_tiles = static_cast<size_t>(data_width) /
                         static_cast<size_t>(exr_header->tile_size_x);
    if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) <
        static_cast<size_t>(data_width)) {
      num_x_tiles++;
    }
    size_t num_y_tiles = static_cast<size_t>(data_height) /
                         static_cast<size_t>(exr_header->tile_size_y);
    if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) <
        static_cast<size_t>(data_height)) {
      num_y_tiles++;
    }

    num_blocks = num_x_tiles * num_y_tiles;
  } else {
    num_blocks = static_cast<size_t>(data_height) /
                 static_cast<size_t>(num_scanline_blocks);

    if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
        static_cast<size_t>(data_height)) {
      num_blocks++;
    }
  }

  std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks);

  for (size_t y = 0; y < num_blocks; y++) {
    tinyexr::tinyexr_uint64 offset;
    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
    tinyexr::swap8(&offset);
    if (offset >= size) {
      if (err) {
        (*err) = "Invalid offset value.";
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
    offsets[y] = offset;
  }

  // If line offsets are invalid, we try to reconstruct it.
  // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
  for (size_t y = 0; y < num_blocks; y++) {
    if (offsets[y] <= 0) {
      // TODO(syoyo) Report as warning?
      // if (err) {
      //   std::stringstream ss;
      //   ss << "Incomplete lineOffsets." << std::endl;
      //   (*err) += ss.str();
      // }
      bool ret =
          ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
      if (ret) {
        // OK
        break;
      } else {
        if (err) {
          (*err) = "Cannot reconstruct lineOffset table.";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }
  }

  return DecodeChunk(exr_image, exr_header, offsets, head, size);
}

}  // namespace tinyexr

// Convenience loader: reads `filename`, decodes it (HALF channels promoted to
// FLOAT), and returns an interleaved RGBA float buffer in `*out_rgba` which
// the caller frees with free(). Missing A fills alpha with 1.0.
int LoadEXR(float **out_rgba, int *width, int *height, const char *filename,
            const char **err) {
  if (out_rgba == NULL) {
    if (err) {
      (*err) = "Invalid argument.\n";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRVersion exr_version;
  EXRImage exr_image;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);
  InitEXRImage(&exr_image);

  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      return ret;
    }

    if (exr_version.multipart || exr_version.non_image) {
      if (err) {
        (*err) = "Loading multipart or DeepImage is not supported yet.\n";
      }
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
    }
  }

  {
    int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      return ret;
    }
  }

  // Read HALF channel as FLOAT.
  for (int i = 0; i < exr_header.num_channels; i++) {
    if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) {
      exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT;
    }
  }

  {
    int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err);
    if (ret != TINYEXR_SUCCESS) {
      return ret;
    }
  }

  // RGBA channel index lookup by name.
  int idxR = -1;
  int idxG = -1;
  int idxB = -1;
  int idxA = -1;
  for (int c = 0; c < exr_header.num_channels; c++) {
    if (strcmp(exr_header.channels[c].name, "R") == 0) {
      idxR = c;
    } else if (strcmp(exr_header.channels[c].name, "G") == 0) {
      idxG = c;
    } else if (strcmp(exr_header.channels[c].name, "B") == 0) {
      idxB = c;
    } else if (strcmp(exr_header.channels[c].name, "A") == 0) {
      idxA = c;
    }
  }

  if ((idxA == 0) && (idxR == -1) && (idxG == -1) && (idxB == -1)) {
    // Alpha channel only.
if (exr_header.tiled) { // todo.implement this } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } else { // Assume RGB(A) if (idxR == -1) { if (err) { (*err) = "R channel not found\n"; } // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { if (err) { (*err) = "G channel not found\n"; } // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { if (err) { (*err) = "B channel not found\n"; } // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_header == NULL) { if (err) { (*err) = "Invalid argument.\n"; } // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; tinyexr::HeaderInfo info; info.clear(); std::string err_str; int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err && !err_str.empty()) { #ifdef _WIN32 (*err) = _strdup(err_str.c_str()); // May leak #else (*err) = strdup(err_str.c_str()); // May leak #endif } } ConvertHeader(exr_header, info); // transfoer 
`tiled` from version. exr_header->tiled = version->tiled; return ret; } int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err) { if (out_rgba == NULL || memory == NULL) { if (err) { (*err) = "Invalid argument.\n"; } return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); int ret = ParseEXRVersionFromMemory(&exr_version, memory, size); if (ret != TINYEXR_SUCCESS) { return ret; } ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // Read HALF channel as FLOAT. for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } InitEXRImage(&exr_image); ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } if (idxR == -1) { if (err) { (*err) = "R channel not found\n"; } // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { if (err) { (*err) = "G channel not found\n"; } // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { if (err) { (*err) = "B channel not found\n"; } // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); for (int i = 0; i < exr_image.width * exr_image.height; 
i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize, err); } int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *memory, const size_t size, const char **err) { if (exr_image == NULL || memory == NULL || (size < tinyexr::kEXRVersionSize)) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->header_len == 0) { if (err) { (*err) = "EXRHeader is not initialized."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } const unsigned char *head = memory; const unsigned char *marker = reinterpret_cast<const unsigned char *>( memory + exr_header->header_len + 8); // +8 for magic number + version header. 
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size, err); } size_t SaveEXRImageToMemory(const EXRImage *exr_image, const EXRHeader *exr_header, unsigned char **memory_out, const char **err) { if (exr_image == NULL || memory_out == NULL || exr_header->compression_type < 0) { if (err) { (*err) = "Invalid argument."; } return 0; // @fixme } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { if (err) { (*err) = "PIZ compression is not supported in this build."; } return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { if (err) { (*err) = "ZFP compression is not supported in this build."; } return 0; } #endif #if TINYEXR_USE_ZFP for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) { if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) { if (err) { (*err) = "Pixel type must be FLOAT for ZFP compression."; } return 0; } } #endif std::vector<unsigned char> memory; // Header { const char header[] = {0x76, 0x2f, 0x31, 0x01}; memory.insert(memory.end(), header, header + 4); } // Version, scanline. { char marker[] = {2, 0, 0, 0}; /* @todo if (exr_header->tiled) { marker[1] |= 0x2; } if (exr_header->long_name) { marker[1] |= 0x4; } if (exr_header->non_image) { marker[1] |= 0x8; } if (exr_header->multipart) { marker[1] |= 0x10; } */ memory.insert(memory.end(), marker, marker + 4); } int num_scanlines = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanlines = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanlines = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanlines = 16; } // Write attributes. 
std::vector<tinyexr::ChannelInfo> channels; { std::vector<unsigned char> data; for (int c = 0; c < exr_header->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_header->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_header->channels[c].name); channels.push_back(info); } tinyexr::WriteChannelInfo(data, channels); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_header->compression_type; tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp)); tinyexr::WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char *>(&comp), 1); } { int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3])); tinyexr::WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); tinyexr::WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme { read line_order from EXRHeader } tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { float aspectRatio = 1.0f; tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio)); tinyexr::WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float)); } { float center[2] = {0.0f, 0.0f}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1])); tinyexr::WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char *>(center), 2 * 
sizeof(float)); } { float w = static_cast<float>(exr_image->width); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char *>(&w), sizeof(float)); } // Custom attributes if (exr_header->num_custom_attributes > 0) { for (int i = 0; i < exr_header->num_custom_attributes; i++) { tinyexr::WriteAttributeToMemory( &memory, exr_header->custom_attributes[i].name, exr_header->custom_attributes[i].type, reinterpret_cast<const unsigned char *>( exr_header->custom_attributes[i].value), exr_header->custom_attributes[i].size); } } { // end of header unsigned char e = 0; memory.push_back(e); } int num_blocks = exr_image->height / num_scanlines; if (num_blocks * num_scanlines < exr_image->height) { num_blocks++; } std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks)); size_t headerSize = memory.size(); tinyexr::tinyexr_uint64 offset = headerSize + static_cast<size_t>(num_blocks) * sizeof( tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable) std::vector<unsigned char> data; std::vector<std::vector<unsigned char> > data_list( static_cast<size_t>(num_blocks)); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; size_t channel_offset = 0; for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += sizeof(unsigned short); channel_offset += sizeof(unsigned short); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam 
zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes); if (!ret) { // Use predefined compression parameter. zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } } #endif // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` #ifdef _OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_blocks; i++) { size_t ii = static_cast<size_t>(i); int start_y = num_scanlines * i; int endY = (std::min)(num_scanlines * (i + 1), exr_image->height); int h = endY - start_y; std::vector<unsigned char> buf( static_cast<size_t>(exr_image->width * h * pixel_data_size)); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { tinyexr::FP16 h16; h16.u = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f)); // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = f32.f; } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { unsigned short val = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap2(&val); // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + 
channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = val; } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = h16.u; } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { float val = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = val; } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < h; y++) { for (int x = 0; x < exr_image->width; x++) { unsigned int val = reinterpret_cast<unsigned int **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(&val); // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); line_ptr[x] = val; } } } } if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : 
pixel data(uncompressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(buf.size()); memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), buf.begin(), buf.begin() + data_len); } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound( static_cast<unsigned long>(buf.size()))); #else std::vector<unsigned char> block( compressBound(static_cast<uLong>(buf.size()))); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. 
std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 1024 + static_cast<unsigned int>( 1.2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. 
} std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), &outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, exr_image->width, h); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), exr_image->width, h, exr_header->num_channels, zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else { assert(0); } } // omp parallel for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { data.insert(data.end(), data_list[i].begin(), data_list[i].end()); offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size(); } { memory.insert( memory.end(), 
reinterpret_cast<unsigned char *>(&offsets.at(0)), reinterpret_cast<unsigned char *>(&offsets.at(0)) + sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(num_blocks)); } { memory.insert(memory.end(), data.begin(), data.end()); } assert(memory.size() > 0); (*memory_out) = static_cast<unsigned char *>(malloc(memory.size())); memcpy((*memory_out), &memory.at(0), memory.size()); return memory.size(); // OK } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL || filename == NULL || exr_header->compression_type < 0) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { if (err) { (*err) = "PIZ compression is not supported in this build."; } return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { if (err) { (*err) = "ZFP compression is not supported in this build."; } return 0; } #endif #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "wb"); #else FILE *fp = fopen(filename, "wb"); #endif if (!fp) { if (err) { (*err) = "Cannot write a file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if ((mem_size > 0) && mem) { fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); return TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if (deep_image == NULL) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _MSC_VER FILE *fp = NULL; errno_t errcode = fopen_s(&fp, filename, "rb"); if ((0 != errcode) || (!fp)) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } #else FILE *fp = fopen(filename, "rb"); if (!fp) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } #endif 
  // --- LoadDeepEXR body: slurp file, validate magic/version, parse header
  //     attributes, read scanline-block offset table, allocate output. ---
  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (filesize == 0) {
    fclose(fp);
    if (err) {
      (*err) = "File size is zero.";
    }
    return TINYEXR_ERROR_INVALID_FILE;
  }

  std::vector<char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    (void)ret;
  }
  fclose(fp);

  const char *head = &buf[0];
  const char *marker = &buf[0];

  // Header check: EXR magic number 0x76 0x2f 0x31 0x01.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};

    if (memcmp(marker, header, 4) != 0) {
      if (err) {
        (*err) = "Invalid magic number.";
      }
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  // Version, scanline.
  {
    // ver 2.0, scanline, deep bit on(0x800)
    // must be [2, 0, 0, 0]
    // (byte 1 == 8 means only the non-image/"deep" flag bit is set)
    if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
      if (err) {
        (*err) = "Unsupported version or scanline.";
      }
      return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
    }

    marker += 4;
  }

  // Data-window bounds; -1 sentinels so the asserts below catch a header
  // that never supplied a dataWindow attribute.
  int dx = -1;
  int dy = -1;
  int dw = -1;
  int dh = -1;
  int num_scanline_blocks = 1;  // 16 for ZIP compression.
  int compression_type = -1;
  int num_channels = -1;
  std::vector<tinyexr::ChannelInfo> channels;

  // Read attributes until the terminating '\0' byte.
  size_t size = filesize - tinyexr::kEXRVersionSize;
  for (;;) {
    if (0 == size) {
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      marker++;
      size--;
      break;
    }

    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;

    if (attr_name.compare("compression") == 0) {
      compression_type = data[0];
      if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
        if (err) {
          (*err) = "Unsupported compression type.";
        }
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
        num_scanline_blocks = 16;
      }
    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int

      if (!tinyexr::ReadChannelInfo(channels, data)) {
        if (err) {
          (*err) = "Failed to parse channel info.";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      num_channels = static_cast<int>(channels.size());

      if (num_channels < 1) {
        if (err) {
          (*err) = "Invalid channels format.";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }
    } else if (attr_name.compare("dataWindow") == 0) {
      // Little-endian ints in the file; swap4 is a no-op on LE hosts.
      memcpy(&dx, &data.at(0), sizeof(int));
      memcpy(&dy, &data.at(4), sizeof(int));
      memcpy(&dw, &data.at(8), sizeof(int));
      memcpy(&dh, &data.at(12), sizeof(int));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh));
    } else if (attr_name.compare("displayWindow") == 0) {
      // Parsed but discarded — only dataWindow drives the output size.
      int x;
      int y;
      int w;
      int h;
      memcpy(&x, &data.at(0), sizeof(int));
      memcpy(&y, &data.at(4), sizeof(int));
      memcpy(&w, &data.at(8), sizeof(int));
      memcpy(&h, &data.at(12), sizeof(int));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&x));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&h));
    }
  }

  assert(dx >= 0);
  assert(dy >= 0);
  assert(dw >= 0);
  assert(dh >= 0);
  assert(num_channels >= 1);

  int data_width = dw - dx + 1;
  int data_height = dh - dy + 1;

  // NOTE(review): this vector is allocated but never read or written in the
  // rest of this function — looks like dead code left from the non-deep path.
  std::vector<float> image(
      static_cast<size_t>(data_width * data_height * 4));  // 4 = RGBA

  // Read offset tables: one file offset per scanline block.
  int num_blocks = data_height / num_scanline_blocks;
  if (num_blocks * num_scanline_blocks < data_height) {
    num_blocks++;
  }

  std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));

  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    tinyexr::tinyexr_int64 offset;
    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
    tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
    marker += sizeof(tinyexr::tinyexr_int64);  // = 8
    offsets[y] = offset;
  }

#if TINYEXR_USE_PIZ
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
    // OK
  } else {
    if (err) {
      (*err) = "Unsupported format.";
    }
    return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  }

  // Allocate image[channel][scanline]; the per-scanline sample arrays are
  // allocated later, block by block, once the sample count is known.
  deep_image->image = static_cast<float ***>(
      malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
  for (int c = 0; c < num_channels; c++) {
    deep_image->image[c] = static_cast<float **>(
        malloc(sizeof(float *) * static_cast<size_t>(data_height)));
    // NOTE(review): this loop body is intentionally empty — row pointers
    // are filled in the decode loop below; the loop itself is a no-op.
    for (int y = 0; y < data_height; y++) {
    }
  }

  deep_image->offset_table = static_cast<int **>(
      malloc(sizeof(int *) * static_cast<size_t>(data_height)));
  for (int y = 0; y < data_height; y++) {
    deep_image->offset_table[y] = static_cast<int *>(
        malloc(sizeof(int) * static_cast<size_t>(data_width)));
  }

  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    const unsigned char *data_ptr =
        reinterpret_cast<const unsigned char *>(head + offsets[y]);

    // Deep scanline block layout:
    // int: y coordinate
    // int64: packed size of pixel offset table
    // int64: packed size of sample data
    // int64: unpacked size of sample data
    // compressed pixel offset table
    // compressed sample data

    int line_no;
    tinyexr::tinyexr_int64 packedOffsetTableSize;
    tinyexr::tinyexr_int64 packedSampleDataSize;
    tinyexr::tinyexr_int64 unpackedSampleDataSize;
    memcpy(&line_no, data_ptr, sizeof(int));
    memcpy(&packedOffsetTableSize, data_ptr + 4,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&packedSampleDataSize, data_ptr + 12,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&unpackedSampleDataSize, data_ptr + 20,
           sizeof(tinyexr::tinyexr_int64));

    tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));

    std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));

    // decode pixel offset table.
{ unsigned long dstLen = static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int)); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize))) { return false; } assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data. { unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); if (dstLen) { if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<unsigned long>(packedSampleDataSize))) { return false; } assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * 
                   static_cast<size_t>(samples_per_line)));

        // Convert each channel's raw samples to float in-place order.
        if (channels[c].pixel_type == 0) {  // UINT
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            unsigned int ui = *reinterpret_cast<unsigned int *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(int)));
            deep_image->image[c][y][x] = static_cast<float>(ui);  // @fixme
          }
          data_offset +=
              sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
        } else if (channels[c].pixel_type == 1) {  // half
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            tinyexr::FP16 f16;
            f16.u = *reinterpret_cast<unsigned short *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(short)));
            tinyexr::FP32 f32 = half_to_float(f16);
            deep_image->image[c][y][x] = f32.f;
          }
          data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
        } else {  // float
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            float f = *reinterpret_cast<float *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(float)));
            deep_image->image[c][y][x] = f;
          }
          data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
        }
      }
    }
  }  // y

  deep_image->width = data_width;
  deep_image->height = data_height;

  // Caller owns these strdup'd names (freed by the caller, not here).
  deep_image->channel_names = static_cast<const char **>(
      malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
    deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
    deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
  }
  deep_image->num_channels = num_channels;

  return TINYEXR_SUCCESS;
}

// Resets an EXRImage to an empty state. Safe to call with NULL.
void InitEXRImage(EXRImage *exr_image) {
  if (exr_image == NULL) {
    return;
  }

  exr_image->width = 0;
  exr_image->height = 0;
  exr_image->num_channels = 0;

  exr_image->images = NULL;
  exr_image->tiles = NULL;
  exr_image->num_tiles = 0;
}

// Zero-initializes an EXRHeader. Safe to call with NULL.
void InitEXRHeader(EXRHeader *exr_header) {
  if (exr_header == NULL) {
    return;
  }

  memset(exr_header, 0, sizeof(EXRHeader));
}

// Frees the heap members of an EXRHeader (channels, pixel type arrays,
// custom attribute values). Does not free the header struct itself.
int FreeEXRHeader(EXRHeader *exr_header) {
  if (exr_header == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (exr_header->channels) {
    free(exr_header->channels);
  }

  if (exr_header->pixel_types) {
    free(exr_header->pixel_types);
  }

  if (exr_header->requested_pixel_types) {
    free(exr_header->requested_pixel_types);
  }

  // NOTE(review): only each attribute's `value` is freed here — whether the
  // custom_attributes array itself needs freeing depends on how it was
  // allocated in ConvertHeader; confirm against that code.
  for (int i = 0; i < exr_header->num_custom_attributes; i++) {
    if (exr_header->custom_attributes[i].value) {
      free(exr_header->custom_attributes[i].value);
    }
  }

  return TINYEXR_SUCCESS;
}

// Frees per-channel image buffers and tile storage of an EXRImage.
// Does not free the EXRImage struct itself.
int FreeEXRImage(EXRImage *exr_image) {
  if (exr_image == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  for (int i = 0; i < exr_image->num_channels; i++) {
    if (exr_image->images && exr_image->images[i]) {
      free(exr_image->images[i]);
    }
  }

  if (exr_image->images) {
    free(exr_image->images);
  }

  if (exr_image->tiles) {
    for (int tid = 0; tid < exr_image->num_tiles; tid++) {
      for (int i = 0; i < exr_image->num_channels; i++) {
        if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
          free(exr_image->tiles[tid].images[i]);
        }
      }
      if (exr_image->tiles[tid].images) {
        free(exr_image->tiles[tid].images);
      }
    }
    free(exr_image->tiles);
  }

  return TINYEXR_SUCCESS;
}

// Reads `filename` fully into memory and parses its EXR header.
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
                           const char *filename, const char **err) {
  if (exr_header == NULL || exr_version == NULL || filename == NULL) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    if (err) {
      (*err) = "Cannot read file.";
    }
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);

    if (ret != filesize) {
      if (err) {
        (*err) = "fread error.";
      }
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return ParseEXRHeaderFromMemory(exr_header,
exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err) { #ifdef _WIN32 (*err) = _strdup(err_str.c_str()); // may leak #else (*err) = strdup(err_str.c_str()); // may leak #endif } return ret; } if (empty_header) { marker += 1; // skip '\0' break; } // `chunkCount` must exist in the header. if (info.chunk_count == 0) { if (err) { (*err) = "`chunkCount' attribute is not found in the header."; } return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. marker += info.header_len; size -= info.header_len; } // allocate memory for EXRHeader and create array of EXRHeader pointers. (*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); ConvertHeader(exr_header, infos[i]); // transfoer `tiled` from version. 
exr_header->tiled = exr_version->tiled; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { if (err) { (*err) = "fread error."; } return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header. 
  {
    // must be 2
    if (marker[0] != 2) {
      return TINYEXR_ERROR_INVALID_EXR_VERSION;
    }

    // NOTE(review): redundant — `version` was already NULL-checked at the
    // top of this function; this branch is unreachable.
    if (version == NULL) {
      return TINYEXR_SUCCESS;  // May OK
    }

    version->version = 2;

    if (marker[1] & 0x2) {  // 9th bit
      version->tiled = true;
    }
    if (marker[1] & 0x4) {  // 10th bit
      version->long_name = true;
    }
    if (marker[1] & 0x8) {  // 11th bit
      version->non_image = true;  // (deep image)
    }
    if (marker[1] & 0x10) {  // 12th bit
      version->multipart = true;
    }
  }

  return TINYEXR_SUCCESS;
}

// Reads only the first kEXRVersionSize bytes of `filename` and parses the
// EXR version flags from them.
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
  if (filename == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t file_size;
  // Compute size
  fseek(fp, 0, SEEK_END);
  file_size = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (file_size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_FILE;
  }

  unsigned char buf[tinyexr::kEXRVersionSize];
  size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
  fclose(fp);

  if (ret != tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_FILE;
  }

  return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}

// Decodes every part of a multipart EXR already loaded in memory.
// `exr_images` must point to an array of `num_parts` EXRImage structs;
// `exr_headers` must come from ParseEXRMultipartHeaderFromMemory/File.
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
                                    const EXRHeader **exr_headers,
                                    unsigned int num_parts,
                                    const unsigned char *memory,
                                    const size_t size, const char **err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
    if (err) {
      (*err) = "Invalid argument.";
    }
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // compute total header size.
  size_t total_header_size = 0;
  for (unsigned int i = 0; i < num_parts; i++) {
    if (exr_headers[i]->header_len == 0) {
      if (err) {
        (*err) = "EXRHeader is not initialized.";
      }
      return TINYEXR_ERROR_INVALID_ARGUMENT;
    }

    total_header_size += exr_headers[i]->header_len;
  }

  const char *marker = reinterpret_cast<const char *>(
      memory + total_header_size + 4 + 4);  // +8 for magic number and version header.

  marker += 1;  // Skip empty header.

  // NOTE 1:
  // In multipart image, There is 'part number' before chunk data.
  // 4 byte : part number
  // 4+    : chunk
  //
  // NOTE 2:
  // EXR spec says 'part number' is 'unsigned long' but actually this is
  // 'unsigned int(4 bytes)' in OpenEXR implementation...
  // http://www.openexr.com/openexrfilelayout.pdf

  // Load chunk offset table.
  std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list;
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    std::vector<tinyexr::tinyexr_uint64> offset_table(
        static_cast<size_t>(exr_headers[i]->chunk_count));

    for (size_t c = 0; c < offset_table.size(); c++) {
      tinyexr::tinyexr_uint64 offset;
      memcpy(&offset, marker, 8);
      tinyexr::swap8(&offset);

      if (offset >= size) {
        if (err) {
          (*err) = "Invalid offset size.";
        }
        return TINYEXR_ERROR_INVALID_DATA;
      }

      offset_table[c] = offset + 4;  // +4 to skip 'part number'
      marker += 8;
    }

    chunk_offset_table_list.push_back(offset_table);
  }

  // Decode image.
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    std::vector<tinyexr::tinyexr_uint64> &offset_table =
        chunk_offset_table_list[i];

    // First check 'part number' is identical to 'i'
    for (size_t c = 0; c < offset_table.size(); c++) {
      const unsigned char *part_number_addr =
          memory + offset_table[c] - 4;  // -4 to move to 'part number' field.
unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { assert(0); return TINYEXR_ERROR_INVALID_DATA; } } int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table, memory, size); if (ret != TINYEXR_SUCCESS) { return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { if (err) { (*err) = "Invalid argument."; } return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { if (err) { (*err) = "Cannot read file."; } return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), filesize, err); } int SaveEXR(const float *data, int width, int height, int components, const int save_as_fp16, const char *outfilename) { if ((components == 1) || components == 3 || components == 4) { // OK } else { return TINYEXR_ERROR_INVALID_ARGUMENT; } // Assume at least 16x16 pixels. 
if (width < 16) return TINYEXR_ERROR_INVALID_ARGUMENT; if (height < 16) return TINYEXR_ERROR_INVALID_ARGUMENT; EXRHeader header; InitEXRHeader(&header); EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; if (components == 1) { images[0].resize(static_cast<size_t>(width * height)); memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); } else { images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } } float *image_ptr[4] = {0, 0, 0, 0}; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else if (components == 3) { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } else if (components == 1) { image_ptr[0] = &(images[0].at(0)); // A } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); // Must be (A)BGR order, since most of EXR viewers expect this channel order. 
if (components == 4) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); strncpy_s(header.channels[1].name, "B", 255); strncpy_s(header.channels[2].name, "G", 255); strncpy_s(header.channels[3].name, "R", 255); #else strncpy(header.channels[0].name, "A", 255); strncpy(header.channels[1].name, "B", 255); strncpy(header.channels[2].name, "G", 255); strncpy(header.channels[3].name, "R", 255); #endif header.channels[0].name[strlen("A")] = '\0'; header.channels[1].name[strlen("B")] = '\0'; header.channels[2].name[strlen("G")] = '\0'; header.channels[3].name[strlen("R")] = '\0'; } else if (components == 3) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "B", 255); strncpy_s(header.channels[1].name, "G", 255); strncpy_s(header.channels[2].name, "R", 255); #else strncpy(header.channels[0].name, "B", 255); strncpy(header.channels[1].name, "G", 255); strncpy(header.channels[2].name, "R", 255); #endif header.channels[0].name[strlen("B")] = '\0'; header.channels[1].name[strlen("G")] = '\0'; header.channels[2].name[strlen("R")] = '\0'; } else { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); #else strncpy(header.channels[0].name, "A", 255); #endif header.channels[0].name[strlen("A")] = '\0'; } header.pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); header.requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image if (save_as_fp16 > 0) { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format } else { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e. 
// no precision reduction) } } const char *err; int ret = SaveEXRImageToFile(&image, &header, outfilename, &err); if (ret != TINYEXR_SUCCESS) { return ret; } free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); return ret; } #ifdef __clang__ // zero-as-null-ppinter-constant #pragma clang diagnostic pop #endif #endif // TINYEXR_IMPLEMENTATION_DEIFNED #endif // TINYEXR_IMPLEMENTATION
convolution_winograd_transform_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Input transform for the conv3x3s1 "winograd64" kernel, pack4 layout, MSA
// intrinsics.  Each 8x8 input tile is run through the itm matrix below,
// first along rows into tmp[][][], then along columns, and the 64 transform
// coefficients are scattered into bottom_blob_tm with stride tiles*4 per
// coefficient.
static void conv3x3s1_winograd64_transform_input_pack4_msa(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    // each tile covers 6 output pixels plus a 2-pixel halo for the 3x3 kernel
    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;

    // const float itm[8][8] = {
    //     {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
    //
    //     {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
    //     {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
    //
    //     {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
    //     {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
    //
    //     {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
    //     {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
    //
    //     {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
    // };

    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25

    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // row-transformed tile, [transform row][tile row][pack4 lane]
        float tmp[8][8][4];

        v4f32 _v5_25 = __msa_fill_w_f32(5.25f);
        v4f32 _vm4_25 = __msa_fill_w_f32(-4.25f);
        v4f32 _vm1_25 = __msa_fill_w_f32(-1.25f);
        v4f32 _v0_25 = __msa_fill_w_f32(0.25f);
        v4f32 _vm2_5 = __msa_fill_w_f32(-2.5f);
        v4f32 _v0_5 = __msa_fill_w_f32(0.5f);
        v4f32 _v2 = __msa_fill_w_f32(2.f);
        v4f32 _v4 = __msa_fill_w_f32(4.f);

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 6) + (j * 6) * 4;

                // pass 1: transform each of the 8 tile rows into tmp
                for (int m = 0; m < 8; m++)
                {
                    v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
                    v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
                    v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                    v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                    v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
                    v4f32 _r05 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);
                    v4f32 _r06 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0);
                    v4f32 _r07 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0);

                    v4f32 _tmp0m = __msa_fmadd_w(__msa_fsub_w(_r00, _r06), _v5_25, __msa_fsub_w(_r04, _r02));
                    v4f32 _tmp7m = __msa_fmadd_w(__msa_fsub_w(_r07, _r01), _v5_25, __msa_fsub_w(_r03, _r05));

                    __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0);
                    __msa_st_w((v4i32)_tmp7m, tmp[7][m], 0);

                    v4f32 _tmp12a = __msa_fmadd_w(__msa_fadd_w(_r02, _r06), _vm4_25, _r04);
                    v4f32 _tmp12b = __msa_fmadd_w(__msa_fadd_w(_r01, _r05), _vm4_25, _r03);

                    v4f32 _tmp1m = __msa_fadd_w(_tmp12a, _tmp12b);
                    v4f32 _tmp2m = __msa_fsub_w(_tmp12a, _tmp12b);

                    __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0);
                    __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0);

                    v4f32 _tmp34a = __msa_fmadd_w(__msa_fmadd_w(_r06, _v0_25, _r02), _vm1_25, _r04);
                    v4f32 _tmp34b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_r01, _v0_5), _vm2_5, _r03), _v2, _r05);

                    v4f32 _tmp3m = __msa_fadd_w(_tmp34a, _tmp34b);
                    v4f32 _tmp4m = __msa_fsub_w(_tmp34a, _tmp34b);

                    __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0);
                    __msa_st_w((v4i32)_tmp4m, tmp[4][m], 0);

                    v4f32 _tmp56a = __msa_fmadd_w(_r06, _v4, __msa_fmadd_w(_r02, _vm1_25, _r04));
                    v4f32 _tmp56b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_r01, _v2), _vm2_5, _r03), _v0_5, _r05);

                    v4f32 _tmp5m = __msa_fadd_w(_tmp56a, _tmp56b);
                    v4f32 _tmp6m = __msa_fsub_w(_tmp56a, _tmp56b);

                    __msa_st_w((v4i32)_tmp5m, tmp[5][m], 0);
                    __msa_st_w((v4i32)_tmp6m, tmp[6][m], 0);

                    r0 += w * 4;
                }

                // pass 2: transform columns of tmp and scatter into img0_tm
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2;
                float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
                float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4;
                float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5;
                float* r0_tm_6 = r0_tm_0 + tiles * 4 * 6;
                float* r0_tm_7 = r0_tm_0 + tiles * 4 * 7;

                for (int m = 0; m < 8; m++)
                {
                    v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0);
                    v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0);
                    v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0);
                    v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0);
                    v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0);
                    v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0);
                    v4f32 _tmp06 = (v4f32)__msa_ld_w(tmp[m][6], 0);
                    v4f32 _tmp07 = (v4f32)__msa_ld_w(tmp[m][7], 0);

                    v4f32 _r0tm0 = __msa_fmadd_w(__msa_fsub_w(_tmp00, _tmp06), _v5_25, __msa_fsub_w(_tmp04, _tmp02));
                    v4f32 _r0tm7 = __msa_fmadd_w(__msa_fsub_w(_tmp07, _tmp01), _v5_25, __msa_fsub_w(_tmp03, _tmp05));

                    v4f32 _tmp12a = __msa_fmadd_w(__msa_fadd_w(_tmp02, _tmp06), _vm4_25, _tmp04);
                    v4f32 _tmp12b = __msa_fmadd_w(__msa_fadd_w(_tmp01, _tmp05), _vm4_25, _tmp03);

                    v4f32 _r0tm1 = __msa_fadd_w(_tmp12a, _tmp12b);
                    v4f32 _r0tm2 = __msa_fsub_w(_tmp12a, _tmp12b);

                    v4f32 _tmp34a = __msa_fmadd_w(__msa_fmadd_w(_tmp06, _v0_25, _tmp02), _vm1_25, _tmp04);
                    v4f32 _tmp34b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_tmp01, _v0_5), _vm2_5, _tmp03), _v2, _tmp05);

                    v4f32 _r0tm3 = __msa_fadd_w(_tmp34a, _tmp34b);
                    v4f32 _r0tm4 = __msa_fsub_w(_tmp34a, _tmp34b);

                    v4f32 _tmp56a = __msa_fmadd_w(_tmp06, _v4, __msa_fmadd_w(_tmp02, _vm1_25, _tmp04));
                    v4f32 _tmp56b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_tmp01, _v2), _vm2_5, _tmp03), _v0_5, _tmp05);

                    v4f32 _r0tm5 = __msa_fadd_w(_tmp56a, _tmp56b);
                    v4f32 _r0tm6 = __msa_fsub_w(_tmp56a, _tmp56b);

                    __msa_st_w((v4i32)_r0tm0, r0_tm_0, 0);
                    __msa_st_w((v4i32)_r0tm1, r0_tm_1, 0);
                    __msa_st_w((v4i32)_r0tm2, r0_tm_2, 0);
                    __msa_st_w((v4i32)_r0tm3, r0_tm_3, 0);
                    __msa_st_w((v4i32)_r0tm4, r0_tm_4, 0);
                    __msa_st_w((v4i32)_r0tm5, r0_tm_5, 0);
                    __msa_st_w((v4i32)_r0tm6, r0_tm_6, 0);
                    __msa_st_w((v4i32)_r0tm7, r0_tm_7, 0);

                    r0_tm_0 += tiles * 4 * 8;
                    r0_tm_1 += tiles * 4 * 8;
                    r0_tm_2 += tiles * 4 * 8;
                    r0_tm_3 += tiles * 4 * 8;
                    r0_tm_4 += tiles * 4 * 8;
                    r0_tm_5 += tiles * 4 * 8;
                    r0_tm_6 += tiles * 4 * 8;
                    r0_tm_7 += tiles * 4 * 8;
                }
            }
        }
    }
}

// Output transform for the conv3x3s1 "winograd64" kernel, pack4 layout, MSA
// intrinsics.  Gathers the 64 transform coefficients of each tile from
// top_blob_tm (stride tiles*4 per coefficient), applies the otm matrix in
// two passes, adds the per-channel bias, and writes the 6x6 output tile.
static void conv3x3s1_winograd64_transform_output_pack4_msa(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[6][8] = {
    //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
    //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
    //     {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
    // };

    // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
    // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // bias is optional; substitute zeros when absent
        v4f32 _bias0 = biasptr ? (v4f32)__msa_ld_w(biasptr + p * 4, 0) : (v4f32)__msa_fill_w(0);

        float tmp[6][8][4];

        v4f32 _v32 = __msa_fill_w_f32(32.f);
        v4f32 _v16 = __msa_fill_w_f32(16.f);
        v4f32 _v8 = __msa_fill_w_f32(8.f);
        v4f32 _v4 = __msa_fill_w_f32(4.f);
        v4f32 _v2 = __msa_fill_w_f32(2.f);

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2;
                const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3;
                const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4;
                const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5;
                const float* output0_tm_6 = output0_tm_0 + tiles * 4 * 6;
                const float* output0_tm_7 = output0_tm_0 + tiles * 4 * 7;

                float* output0 = out0.row<float>(i * 6) + (j * 6) * 4;

                // pass 1: column transform (8 coefficients -> 6 rows in tmp)
                for (int m = 0; m < 8; m++)
                {
                    v4f32 _out0tm0 = (v4f32)__msa_ld_w(output0_tm_0, 0);
                    v4f32 _out0tm1 = (v4f32)__msa_ld_w(output0_tm_1, 0);
                    v4f32 _out0tm2 = (v4f32)__msa_ld_w(output0_tm_2, 0);
                    v4f32 _out0tm3 = (v4f32)__msa_ld_w(output0_tm_3, 0);
                    v4f32 _out0tm4 = (v4f32)__msa_ld_w(output0_tm_4, 0);
                    v4f32 _out0tm5 = (v4f32)__msa_ld_w(output0_tm_5, 0);
                    v4f32 _out0tm6 = (v4f32)__msa_ld_w(output0_tm_6, 0);
                    v4f32 _out0tm7 = (v4f32)__msa_ld_w(output0_tm_7, 0);

                    v4f32 _tmp024a = __msa_fadd_w(_out0tm1, _out0tm2);
                    v4f32 _tmp135a = __msa_fsub_w(_out0tm1, _out0tm2);
                    v4f32 _tmp024b = __msa_fadd_w(_out0tm3, _out0tm4);
                    v4f32 _tmp135b = __msa_fsub_w(_out0tm3, _out0tm4);
                    v4f32 _tmp024c = __msa_fadd_w(_out0tm5, _out0tm6);
                    v4f32 _tmp135c = __msa_fsub_w(_out0tm5, _out0tm6);

                    v4f32 _tmp0m = __msa_fadd_w(__msa_fadd_w(_out0tm0, _tmp024a), __msa_fmadd_w(_tmp024b, _v32, _tmp024c));
                    v4f32 _tmp2m = __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v4, _tmp024b), _v8, _tmp024c);
                    v4f32 _tmp4m = __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v16, _tmp024b), _v2, _tmp024c);

                    __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0);
                    __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0);
                    __msa_st_w((v4i32)_tmp4m, tmp[4][m], 0);

                    v4f32 _tmp1m = __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v2, _tmp135b), _v16, _tmp135c);
                    v4f32 _tmp3m = __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v8, _tmp135b), _v4, _tmp135c);
                    v4f32 _tmp5m = __msa_fadd_w(__msa_fadd_w(_out0tm7, _tmp135a), __msa_fmadd_w(_tmp135c, _v32, _tmp135b));

                    __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0);
                    __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0);
                    __msa_st_w((v4i32)_tmp5m, tmp[5][m], 0);

                    output0_tm_0 += tiles * 4 * 8;
                    output0_tm_1 += tiles * 4 * 8;
                    output0_tm_2 += tiles * 4 * 8;
                    output0_tm_3 += tiles * 4 * 8;
                    output0_tm_4 += tiles * 4 * 8;
                    output0_tm_5 += tiles * 4 * 8;
                    output0_tm_6 += tiles * 4 * 8;
                    output0_tm_7 += tiles * 4 * 8;
                }

                // pass 2: row transform plus bias, write 6x6 output tile
                for (int m = 0; m < 6; m++)
                {
                    v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0);
                    v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0);
                    v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0);
                    v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0);
                    v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0);
                    v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0);
                    v4f32 _tmp06 = (v4f32)__msa_ld_w(tmp[m][6], 0);
                    v4f32 _tmp07 = (v4f32)__msa_ld_w(tmp[m][7], 0);

                    v4f32 _tmp024a = __msa_fadd_w(_tmp01, _tmp02);
                    v4f32 _tmp135a = __msa_fsub_w(_tmp01, _tmp02);
                    v4f32 _tmp024b = __msa_fadd_w(_tmp03, _tmp04);
                    v4f32 _tmp135b = __msa_fsub_w(_tmp03, _tmp04);
                    v4f32 _tmp024c = __msa_fadd_w(_tmp05, _tmp06);
                    v4f32 _tmp135c = __msa_fsub_w(_tmp05, _tmp06);

                    v4f32 _out00 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp00, _tmp024a), __msa_fmadd_w(_tmp024b, _v32, _tmp024c)));
                    v4f32 _out02 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v4, _tmp024b), _v8, _tmp024c));
                    v4f32 _out04 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v16, _tmp024b), _v2, _tmp024c));

                    __msa_st_w((v4i32)_out00, output0, 0);
                    __msa_st_w((v4i32)_out02, output0 + 4 * 2, 0);
                    __msa_st_w((v4i32)_out04, output0 + 4 * 4, 0);

                    v4f32 _out01 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v2, _tmp135b), _v16, _tmp135c));
                    v4f32 _out03 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v8, _tmp135b), _v4, _tmp135c));
                    v4f32 _out05 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp07, _tmp135a), __msa_fmadd_w(_tmp135c, _v32, _tmp135b)));

                    __msa_st_w((v4i32)_out01, output0 + 4, 0);
                    __msa_st_w((v4i32)_out03, output0 + 4 * 3, 0);
                    __msa_st_w((v4i32)_out05, output0 + 4 * 5, 0);

                    output0 += outw * 4;
                }
            }
        }
    }
}

// Input transform for the conv3x3s1 "winograd42" kernel, pack4 layout, MSA
// intrinsics.  Same two-pass structure as the winograd64 variant above, but
// with 6x6 input tiles (4 outputs + 2 halo) and the smaller itm matrix below.
static void conv3x3s1_winograd42_transform_input_pack4_msa(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;

    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
    // };

    // 0 = 4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 = 4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 = 2 * (r01 - r03) + r04 - r02
    // 5 = 4 * r01 - 5 * r03 + r05

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // row-transformed tile, [transform row][tile row][pack4 lane]
        float tmp[6][6][4];

        v4f32 _vm5 = __msa_fill_w_f32(-5.f);
        v4f32 _vm4 = __msa_fill_w_f32(-4.f);
        v4f32 _v4 = __msa_fill_w_f32(4.f);
        v4f32 _vm2 = __msa_fill_w_f32(-2.f);
        v4f32 _v2 = __msa_fill_w_f32(2.f);

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 4) + (j * 4) * 4;

                // pass 1: transform each of the 6 tile rows into tmp
                for (int m = 0; m < 6; m++)
                {
                    v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
                    v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
                    v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                    v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                    v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
                    v4f32 _r05 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0);

                    v4f32 _tmp0m = __msa_fmadd_w(__msa_fmadd_w(_r04, _v4, _r00), _vm5, _r02);
                    v4f32 _tmp1m = __msa_fmadd_w(__msa_fadd_w(_r04, _r03), _vm4, __msa_fadd_w(_r01, _r02));
                    v4f32 _tmp2m = __msa_fmadd_w(__msa_fsub_w(_r04, _r03), _v4, __msa_fsub_w(_r01, _r02));
                    v4f32 _tmp3m = __msa_fmadd_w(__msa_fsub_w(_r04, _r02), _vm2, __msa_fsub_w(_r01, _r03));
                    v4f32 _tmp4m = __msa_fmadd_w(__msa_fsub_w(_r04, _r02), _v2, __msa_fsub_w(_r01, _r03));
                    v4f32 _tmp5m = __msa_fmadd_w(__msa_fmadd_w(_r05, _v4, _r01), _vm5, _r03);

                    __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0);
                    __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0);
                    __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0);
                    __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0);
                    __msa_st_w((v4i32)_tmp4m, tmp[4][m], 0);
                    __msa_st_w((v4i32)_tmp5m, tmp[5][m], 0);

                    r0 += w * 4;
                }

                // pass 2: transform columns of tmp and scatter into img0_tm
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2;
                float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3;
                float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4;
                float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5;

                for (int m = 0; m < 6; m++)
                {
                    v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0);
                    v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0);
                    v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0);
                    v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0);
                    v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0);
                    v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0);

                    v4f32 _r0tm0 = __msa_fmadd_w(__msa_fmadd_w(_tmp04, _v4, _tmp00), _vm5, _tmp02);
                    v4f32 _r0tm1 = __msa_fmadd_w(__msa_fadd_w(_tmp04, _tmp03), _vm4, __msa_fadd_w(_tmp01, _tmp02));
                    v4f32 _r0tm2 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp03), _v4, __msa_fsub_w(_tmp01, _tmp02));
                    v4f32 _r0tm3 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp02), _vm2, __msa_fsub_w(_tmp01, _tmp03));
                    v4f32 _r0tm4 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp02), _v2, __msa_fsub_w(_tmp01, _tmp03));
                    v4f32 _r0tm5 = __msa_fmadd_w(__msa_fmadd_w(_tmp05, _v4, _tmp01), _vm5, _tmp03);

                    __msa_st_w((v4i32)_r0tm0, r0_tm_0, 0);
                    __msa_st_w((v4i32)_r0tm1, r0_tm_1, 0);
                    __msa_st_w((v4i32)_r0tm2, r0_tm_2, 0);
                    __msa_st_w((v4i32)_r0tm3, r0_tm_3, 0);
                    __msa_st_w((v4i32)_r0tm4, r0_tm_4, 0);
                    __msa_st_w((v4i32)_r0tm5, r0_tm_5, 0);

                    r0_tm_0 += tiles * 4 * 6;
                    r0_tm_1 += tiles * 4 * 6;
                    r0_tm_2 += tiles * 4 * 6;
                    r0_tm_3 += tiles * 4 * 6;
                    r0_tm_4 += tiles * 4 * 6;
                    r0_tm_5 += tiles * 4 * 6;
                }
            }
        }
    }
}

// Output transform for the conv3x3s1 "winograd42" kernel, pack4 layout, MSA
// intrinsics.  Gathers 36 transform coefficients per tile, applies the otm
// matrix in two passes, adds the per-channel bias, and writes the 4x4 tile.
static void conv3x3s1_winograd42_transform_output_pack4_msa(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[4][6] = {
    //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };

    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 = (r01 - r02) + (r03 - r04) * 2
    // 2 = (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // bias is optional; substitute zeros when absent
        v4f32 _bias0 = biasptr ? (v4f32)__msa_ld_w(biasptr + p * 4, 0) : (v4f32)__msa_fill_w(0);

        float tmp[4][6][4];

        v4f32 _v2 = __msa_fill_w_f32(2.f);
        v4f32 _v4 = __msa_fill_w_f32(4.f);
        v4f32 _v8 = __msa_fill_w_f32(8.f);

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2;
                const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3;
                const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4;
                const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5;

                float* output0 = out0.row<float>(i * 4) + (j * 4) * 4;

                // pass 1: column transform (6 coefficients -> 4 rows in tmp)
                for (int m = 0; m < 6; m++)
                {
                    v4f32 _out0tm0 = (v4f32)__msa_ld_w(output0_tm_0, 0);
                    v4f32 _out0tm1 = (v4f32)__msa_ld_w(output0_tm_1, 0);
                    v4f32 _out0tm2 = (v4f32)__msa_ld_w(output0_tm_2, 0);
                    v4f32 _out0tm3 = (v4f32)__msa_ld_w(output0_tm_3, 0);
                    v4f32 _out0tm4 = (v4f32)__msa_ld_w(output0_tm_4, 0);
                    v4f32 _out0tm5 = (v4f32)__msa_ld_w(output0_tm_5, 0);

                    v4f32 _tmp02a = __msa_fadd_w(_out0tm1, _out0tm2);
                    v4f32 _tmp13a = __msa_fsub_w(_out0tm1, _out0tm2);
                    v4f32 _tmp02b = __msa_fadd_w(_out0tm3, _out0tm4);
                    v4f32 _tmp13b = __msa_fsub_w(_out0tm3, _out0tm4);

                    v4f32 _tmp0m = __msa_fadd_w(__msa_fadd_w(_out0tm0, _tmp02a), _tmp02b);
                    v4f32 _tmp1m = __msa_fmadd_w(_tmp13a, _v2, _tmp13b);
                    v4f32 _tmp2m = __msa_fmadd_w(_tmp02a, _v4, _tmp02b);
                    v4f32 _tmp3m = __msa_fmadd_w(__msa_fadd_w(_out0tm5, _tmp13a), _v8, _tmp13b);

                    __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0);
                    __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0);
                    __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0);
                    __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0);

                    output0_tm_0 += tiles * 4 * 6;
                    output0_tm_1 += tiles * 4 * 6;
                    output0_tm_2 += tiles * 4 * 6;
                    output0_tm_3 += tiles * 4 * 6;
                    output0_tm_4 += tiles * 4 * 6;
                    output0_tm_5 += tiles * 4 * 6;
                }

                // pass 2: row transform plus bias, write 4x4 output tile
                for (int m = 0; m < 4; m++)
                {
                    v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0);
                    v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0);
                    v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0);
                    v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0);
                    v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0);
                    v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0);

                    v4f32 _tmp02a = __msa_fadd_w(_tmp01, _tmp02);
                    v4f32 _tmp13a = __msa_fsub_w(_tmp01, _tmp02);
                    v4f32 _tmp02b = __msa_fadd_w(_tmp03, _tmp04);
                    v4f32 _tmp13b = __msa_fsub_w(_tmp03, _tmp04);

                    v4f32 _out00 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp00, _tmp02a), _tmp02b));
                    v4f32 _out01 = __msa_fadd_w(_bias0, __msa_fmadd_w(_tmp13a, _v2, _tmp13b));
                    v4f32 _out02 = __msa_fadd_w(_bias0, __msa_fmadd_w(_tmp02a, _v4, _tmp02b));
                    v4f32 _out03 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fadd_w(_tmp05, _tmp13a), _v8, _tmp13b));

                    __msa_st_w((v4i32)_out00, output0, 0);
                    __msa_st_w((v4i32)_out01, output0 + 4, 0);
                    __msa_st_w((v4i32)_out02, output0 + 4 * 2, 0);
                    __msa_st_w((v4i32)_out03, output0 + 4 * 3, 0);

                    output0 += outw * 4;
                }
            }
        }
    }
}
apply.h
/****************************************************************************** * ** Copyright (c) 2016, Intel Corporation ** * ** All rights reserved. ** * ** ** * ** Redistribution and use in source and binary forms, with or without ** * ** modification, are permitted provided that the following conditions ** * ** are met: ** * ** 1. Redistributions of source code must retain the above copyright ** * ** notice, this list of conditions and the following disclaimer. ** * ** 2. Redistributions in binary form must reproduce the above copyright ** * ** notice, this list of conditions and the following disclaimer in the ** * ** documentation and/or other materials provided with the distribution. ** * ** 3. Neither the name of the copyright holder nor the names of its ** * ** contributors may be used to endorse or promote products derived ** * ** from this software without specific prior written permission. ** * ** ** * ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** * ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** * ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** * ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** * ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** * ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** * ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** * ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** * ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** * ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** * ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ******************************************************************************/ /* Michael Anderson (Intel Corp.) 
* * ******************************************************************************/ #ifndef SRC_SINGLENODE_APPLY_H_ #define SRC_SINGLENODE_APPLY_H_ template <typename Ta, typename Tb> void apply_dense_segment(Ta* v1, int * bitvector, int * nnz, int num_ints, Tb* v2, int * bitvector2, void (*add_fp)(const Ta&, Tb*, void*), void* vsp) { #pragma omp parallel for for(int i = 0 ; i < num_ints ; i++) { bitvector2[i] = bitvector2[i] | bitvector[i]; } int tmp_nnz = 0; #pragma omp parallel for reduction(+:tmp_nnz) for(int ii = 0 ; ii < num_ints ; ii++) { int cnt = _popcnt32(bitvector[ii]); if(cnt == 0) continue; //if(_popcnt32(bitvector[ii]) == 0) continue; tmp_nnz += cnt; for(int i = ii*32 ; i < (ii+1)*32 ; i++) { if(get_bitvector(i, bitvector)) { Ta tmp = v1[i]; add_fp(tmp, &(v2[i]), vsp); } } } *nnz = tmp_nnz; } template <typename Ta, typename Tb> void apply_segment(const DenseSegment<Ta> * s_in, DenseSegment<Tb> * s_out, void (*add_fp)(const Ta&, Tb*, void*), void* vsp) { s_out->alloc(); s_out->initialize(); apply_dense_segment(s_in->properties->value, s_in->properties->bit_vector, &(s_out->properties->nnz), s_in->num_ints, s_out->properties->value, s_out->properties->bit_vector, add_fp, vsp); } #endif // SRC_SINGLENODE_APPLY_H_
GB_binop__minus_int16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__minus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_01__minus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__minus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_03__minus_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__minus_int16)
// A*D function (colscale):         GB (_AxD__minus_int16)
// D*A function (rowscale):         GB (_DxB__minus_int16)
// C+=B function (dense accum):     GB (_Cdense_accumB__minus_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__minus_int16)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__minus_int16)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__minus_int16)
// C=scalar+B                       GB (_bind1st__minus_int16)
// C=scalar+B'                      GB (_bind1st_tran__minus_int16)
// C=A+scalar                       GB (_bind2nd__minus_int16)
// C=A'+scalar                      GB (_bind2nd_tran__minus_int16)

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij - bij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x - y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINUS || GxB_NO_INT16 || GxB_NO_MINUS_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB (_Cdense_ewise3_accum__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__minus_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the braced block above always returns
    // first; harmless artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__minus_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__minus_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__minus_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__minus_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__minus_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int16_t bij = Bx [p] ;
        Cx [p] = (x - bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__minus_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = Ax [p] ;
        Cx [p] = (aij - y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    int16_t aij = Ax [pA] ;         \
    Cx [pC] = (x - aij) ;           \
}

GrB_Info GB (_bind1st_tran__minus_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (aij - y) ; \ } GrB_Info GB (_bind2nd_tran__minus_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* ===== mtSpGEMM.h — file boundary in this concatenated source ===== */
#ifndef _mtSpGEMM_h
#define _mtSpGEMM_h

#include "CombBLAS.h"

namespace combblas {

/*
 Multithreaded prefix sum
 Inputs:
    in: an input array
    size: the length of the input array "in"
    nthreads: number of threads used to compute the prefix sum
 Output:
    return an array of size "size+1"
    the memory of the output array is allocated internally (caller frees with delete [])
 Example:
    in = [2, 1, 3, 5]
    out = [0, 2, 3, 6, 11]
 */
template <typename T>
T* prefixsum(T* in, int size, int nthreads)
{
    std::vector<T> tsum(nthreads+1);        // per-thread partial sums; tsum[t+1] = sum of thread t's chunk
    tsum[0] = 0;
    T* out = new T[size+1];
    out[0] = 0;
    T* psum = &out[1];
#ifdef THREADED
#pragma omp parallel
#endif
    {
        int ithread = 0;
#ifdef THREADED
        ithread = omp_get_thread_num();
#endif
        T sum = 0;
        // pass 1: local (per-chunk) inclusive scan
#ifdef THREADED
#pragma omp for schedule(static)
#endif
        for (int i=0; i<size; i++)
        {
            sum += in[i];
            psum[i] = sum;
        }
        tsum[ithread+1] = sum;
#ifdef THREADED
#pragma omp barrier
#endif
        // offset for this thread = sum of all lower-ranked threads' chunks
        T offset = 0;
        for(int i=0; i<(ithread+1); i++)
        {
            offset += tsum[i];
        }
        // pass 2: add the offset (same static schedule => same chunk as pass 1)
#ifdef THREADED
#pragma omp for schedule(static)
#endif
        for (int i=0; i<size; i++)
        {
            psum[i] += offset;
        }
    }
    return out;
}

// multithreaded HeapSpGEMM: C = A*B on DCSC matrices, one k-way-merge heap per
// output column.  Returns a new SpTuples; if clearA/clearB are set the inputs
// are deleted.
template <typename SR, typename NTO, typename IT, typename NT1, typename NT2>
SpTuples<IT, NTO> * LocalSpGEMM
(const SpDCCols<IT, NT1> & A,
 const SpDCCols<IT, NT2> & B,
 bool clearA, bool clearB)
{
    IT mdim = A.getnrow();
    IT ndim = B.getncol();
    IT nnzA = A.getnnz();
    if(A.isZero() || B.isZero())
    {
        return new SpTuples<IT, NTO>(0, mdim, ndim);
    }
    Dcsc<IT,NT1>* Adcsc = A.GetDCSC();
    Dcsc<IT,NT2>* Bdcsc = B.GetDCSC();
    IT nA = A.getncol();
    float cf = static_cast<float>(nA+1) / static_cast<float>(Adcsc->nzc);
    IT csize = static_cast<IT>(ceil(cf));   // chunk size
    IT * aux;
    Adcsc->ConstructAux(nA, aux);

    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif

    // symbolic phase: per-column output nnz, then prefix-sum into colptrC
    IT* colnnzC = estimateNNZ(A, B, aux,false); // don't free aux
    IT* colptrC = prefixsum<IT>(colnnzC, Bdcsc->nzc, numThreads);
    delete [] colnnzC;
    IT nnzc = colptrC[Bdcsc->nzc];
    // raw storage; tuples are constructed in place by the loop below
    std::tuple<IT,IT,NTO> * tuplesC = static_cast<std::tuple<IT,IT,NTO> *> (::operator new (sizeof(std::tuple<IT,IT,NTO>[nnzc])));

    // thread private space for heap and colinds
    std::vector<std::vector< std::pair<IT,IT>>> colindsVec(numThreads);
    std::vector<std::vector<HeapEntry<IT,NT1>>> globalheapVec(numThreads);
    for(int i=0; i<numThreads; i++) //initial allocation per thread, may be an overestimate, but does not require more memory than inputs
    {
        colindsVec[i].resize(nnzA/numThreads);
        globalheapVec[i].resize(nnzA/numThreads);
    }

    size_t Bnzc = (size_t) Bdcsc->nzc;
#ifdef THREADED
#pragma omp parallel for
#endif
    for(size_t i=0; i < Bnzc; ++i)
    {
        size_t nnzcolB = Bdcsc->cp[i+1] - Bdcsc->cp[i]; //nnz in the current column of B
        int myThread = 0;
#ifdef THREADED
        myThread = omp_get_thread_num();
#endif
        if(colindsVec[myThread].size() < nnzcolB) //resize thread private vectors if needed
        {
            colindsVec[myThread].resize(nnzcolB);
            globalheapVec[myThread].resize(nnzcolB);
        }

        // colinds.first vector keeps indices to A.cp, i.e. it dereferences "colnums" vector (above),
        // colinds.second vector keeps the end indices (i.e. it gives the index to the last valid element of A.cp)
        Adcsc->FillColInds(Bdcsc->ir + Bdcsc->cp[i], nnzcolB, colindsVec[myThread], aux, csize);
        std::pair<IT,IT> * colinds = colindsVec[myThread].data();
        HeapEntry<IT,NT1> * wset = globalheapVec[myThread].data();
        IT hsize = 0;

        for(size_t j = 0; j < nnzcolB; ++j)     // create the initial heap
        {
            if(colinds[j].first != colinds[j].second)   // current != end
            {
                wset[hsize++] = HeapEntry< IT,NT1 > (Adcsc->ir[colinds[j].first], j, Adcsc->numx[colinds[j].first]);
            }
        }
        std::make_heap(wset, wset+hsize);

        IT curptr = colptrC[i];
        while(hsize > 0)
        {
            std::pop_heap(wset, wset + hsize);  // result is stored in wset[hsize-1]
            IT locb = wset[hsize-1].runr;       // relative location of the nonzero in B's current column
            NTO mrhs = SR::multiply(wset[hsize-1].num, Bdcsc->numx[Bdcsc->cp[i]+locb]);
            if (!SR::returnedSAID())
            {
                // merge with previous output entry if same row (heap pops rows in order)
                if( (curptr > colptrC[i]) && std::get<0>(tuplesC[curptr-1]) == wset[hsize-1].key)
                {
                    std::get<2>(tuplesC[curptr-1]) = SR::add(std::get<2>(tuplesC[curptr-1]), mrhs);
                }
                else
                {
                    tuplesC[curptr++]= std::make_tuple(wset[hsize-1].key, Bdcsc->jc[i], mrhs) ;
                }
            }
            if( (++(colinds[locb].first)) != colinds[locb].second)  // current != end
            {
                // runr stays the same !
                wset[hsize-1].key = Adcsc->ir[colinds[locb].first];
                wset[hsize-1].num = Adcsc->numx[colinds[locb].first];
                std::push_heap(wset, wset+hsize);
            }
            else
            {
                --hsize;
            }
        }
    }

    if(clearA)
        delete const_cast<SpDCCols<IT, NT1> *>(&A);
    if(clearB)
        delete const_cast<SpDCCols<IT, NT2> *>(&B);
    delete [] colptrC;
    delete [] aux;

    // SpTuples takes ownership of tuplesC
    SpTuples<IT, NTO>* spTuplesC = new SpTuples<IT, NTO> (nnzc, mdim, ndim, tuplesC, true, true);
    return spTuplesC;
}

// comparator: order pairs by first member (row index)
template <typename IT, typename NT>
bool sort_less(const std::pair<IT, NT> &left, const std::pair<IT, NT> &right)
{
    return left.first < right.first;
}

// Hybrid approach of multithreaded HeapSpGEMM and HashSpGEMM: per output
// column, uses the heap kernel when the flop/nnz compression ratio is < 2,
// otherwise a linear-probing hash kernel.  aux may be passed in to reuse A's
// auxiliary index (otherwise it is built and freed here).
template <typename SR, typename NTO, typename IT, typename NT1, typename NT2>
SpTuples<IT, NTO> * LocalHybridSpGEMM
(const SpDCCols<IT, NT1> & A,
 const SpDCCols<IT, NT2> & B,
 bool clearA, bool clearB, IT * aux = nullptr)
{
    IT mdim = A.getnrow();
    IT ndim = B.getncol();
    IT nnzA = A.getnnz();
    if(A.isZero() || B.isZero())
    {
        return new SpTuples<IT, NTO>(0, mdim, ndim);
    }
    Dcsc<IT,NT1>* Adcsc = A.GetDCSC();
    Dcsc<IT,NT2>* Bdcsc = B.GetDCSC();
    IT nA = A.getncol();
    float cf = static_cast<float>(nA+1) / static_cast<float>(Adcsc->nzc);
    IT csize = static_cast<IT>(ceil(cf));   // chunk size
    //IT * aux;
    bool deleteAux = false;
    if(aux==nullptr)
    {
        deleteAux = true;
        Adcsc->ConstructAux(nA, aux);
    }

    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif
    // std::cout << "numThreads: " << numThreads << std::endl;

    // symbolic phase: per-column flops and output nnz, prefix-summed
    IT* flopC = estimateFLOP(A, B, aux);
    //IT* flopptr = prefixsum<IT>(flopC, Bdcsc->nzc, numThreads);
    //IT flop = flopptr[Bdcsc->nzc];
    // std::cout << "FLOP of A * B is " << flop << std::endl;
    IT* colnnzC = estimateNNZ_Hash(A, B, flopC, aux);
    IT* flopptr = prefixsum<IT>(flopC, Bdcsc->nzc, numThreads);
    IT flop = flopptr[Bdcsc->nzc];
    IT* colptrC = prefixsum<IT>(colnnzC, Bdcsc->nzc, numThreads);
    delete [] colnnzC;
    delete [] flopC;
    IT nnzc = colptrC[Bdcsc->nzc];
    //double compression_ratio = (double)flop / nnzc;
    // std::cout << "NNZ of A * B is " << nnzc << std::endl;
    // std::cout << "Compression ratio is " << compression_ratio << std::endl;

    std::tuple<IT,IT,NTO> * tuplesC = static_cast<std::tuple<IT,IT,NTO> *> (::operator new (sizeof(std::tuple<IT,IT,NTO>[nnzc])));
    //std::tuple<IT,IT,NTO> * tuplesC = new std::tuple<IT,IT,NTO>[nnzc];

    // thread private space for heap and colinds
    std::vector<std::vector< std::pair<IT,IT>>> colindsVec(numThreads);
    //std::vector<std::vector< std::pair<IT,NTO>>> globalHashVecAll(numThreads);
    std::vector<std::vector< std::pair<uint32_t,NTO>>> globalHashVecAll(numThreads); // uint32_t because integerSort accepts only uint32_t
    std::vector<std::vector< HeapEntry<IT,NT1>>> globalHeapVecAll(numThreads);
    /* for(int i=0; i<numThreads; i++) //initial allocation per thread, may be an overestimate, but does not require more memory than inputs
    {
        colindsVec[i].resize(nnzA/numThreads);
    }*/

    // IT hashSelected = 0;
#ifdef THREADED
#pragma omp parallel for
#endif
    for(size_t i=0; i < Bdcsc->nzc; ++i)
    {
        size_t nnzcolB = Bdcsc->cp[i+1] - Bdcsc->cp[i]; //nnz in the current column of B
        int myThread = 0;
#ifdef THREADED
        myThread = omp_get_thread_num();
#endif
        if(colindsVec[myThread].size() < nnzcolB) //resize thread private vectors if needed
        {
            colindsVec[myThread].resize(nnzcolB);
        }

        // colinds.first vector keeps indices to A.cp, i.e. it dereferences "colnums" vector (above),
        // colinds.second vector keeps the end indices (i.e. it gives the index to the last valid element of A.cp)
        Adcsc->FillColInds(Bdcsc->ir + Bdcsc->cp[i], nnzcolB, colindsVec[myThread], aux, csize);
        std::pair<IT,IT> * colinds = colindsVec[myThread].data();

        // per-column compression ratio decides heap vs hash kernel
        double cr = static_cast<double>(flopptr[i+1] - flopptr[i]) / (colptrC[i+1] - colptrC[i]);
        if (cr < 2.0) // Heap Algorithm
        {
            if(globalHeapVecAll[myThread].size() < nnzcolB)
                globalHeapVecAll[myThread].resize(nnzcolB);
            //std::vector<HeapEntry<IT,NT1>> globalheapVec(nnzcolB);
            //HeapEntry<IT, NT1> * wset = globalheapVec.data();
            HeapEntry<IT, NT1> * wset = globalHeapVecAll[myThread].data();
            IT hsize = 0;

            for(size_t j = 0; j < nnzcolB; ++j) // create the initial heap
            {
                if(colinds[j].first != colinds[j].second)   // current != end
                {
                    wset[hsize++] = HeapEntry< IT,NT1 > (Adcsc->ir[colinds[j].first], j, Adcsc->numx[colinds[j].first]);
                }
            }
            std::make_heap(wset, wset+hsize);

            IT curptr = colptrC[i];
            while(hsize > 0)
            {
                std::pop_heap(wset, wset + hsize);  // result is stored in wset[hsize-1]
                IT locb = wset[hsize-1].runr;       // relative location of the nonzero in B's current column
                NTO mrhs = SR::multiply(wset[hsize-1].num, Bdcsc->numx[Bdcsc->cp[i]+locb]);
                if (!SR::returnedSAID())
                {
                    if( (curptr > colptrC[i]) && std::get<0>(tuplesC[curptr-1]) == wset[hsize-1].key)
                    {
                        std::get<2>(tuplesC[curptr-1]) = SR::add(std::get<2>(tuplesC[curptr-1]), mrhs);
                    }
                    else
                    {
                        tuplesC[curptr++]= std::make_tuple(wset[hsize-1].key, Bdcsc->jc[i], mrhs) ;
                    }
                }
                if( (++(colinds[locb].first)) != colinds[locb].second)  // current != end
                {
                    // runr stays the same !
                    wset[hsize-1].key = Adcsc->ir[colinds[locb].first];
                    wset[hsize-1].num = Adcsc->numx[colinds[locb].first];
                    std::push_heap(wset, wset+hsize);
                }
                else
                {
                    --hsize;
                }
            }
        } // Finish Heap
        else // Hash Algorithm
        {
            // #pragma omp atomic
            // hashSelected++;
            const IT minHashTableSize = 16;
            const IT hashScale = 107;
            size_t nnzcolC = colptrC[i+1] - colptrC[i]; //nnz in the current column of C (=Output)
            size_t ht_size = minHashTableSize;
            while(ht_size < nnzcolC) //ht_size is set as 2^n
            {
                ht_size <<= 1;
            }
            if(globalHashVecAll[myThread].size() < ht_size)
                globalHashVecAll[myThread].resize(ht_size);
            //std::vector<HeapEntry<IT,NT1>> globalheapVec(nnzcolB);
            //HeapEntry<IT, NT1> * wset = globalheapVec.data();
            //HeapEntry<IT, NT1> * wset = globalheapVecAll[myThread].data();
            //std::vector< std::pair<IT,NTO>> globalHashVec(ht_size);
            //std::pair<IT,NTO>* globalHashVec = globalHashVecAll[myThread].data();
            std::pair<uint32_t,NTO>* globalHashVec = globalHashVecAll[myThread].data();

            // Initialize hash tables
            // NOTE(review): .first is uint32_t, so -1 wraps to 0xFFFFFFFF as
            // the "empty" sentinel; a genuine row index of 0xFFFFFFFF would
            // collide with it — confirm row indices stay below that value.
            for(size_t j=0; j < ht_size; ++j)
            {
                globalHashVec[j].first = -1;
            }

            // Multiply and add on Hash table
            for (size_t j=0; j < nnzcolB; ++j)
            {
                IT t_bcol = Bdcsc->ir[Bdcsc->cp[i] + j];
                NT2 t_bval = Bdcsc->numx[Bdcsc->cp[i] + j];
                for (IT k = colinds[j].first; k < colinds[j].second; ++k)
                {
                    NTO mrhs = SR::multiply(Adcsc->numx[k], t_bval);
                    IT key = Adcsc->ir[k];
                    IT hash = (key*hashScale) & (ht_size-1);
                    while (1) //hash probing
                    {
                        if (globalHashVec[hash].first == key) //key is found in hash table
                        {
                            globalHashVec[hash].second = SR::add(mrhs, globalHashVec[hash].second);
                            break;
                        }
                        else if (globalHashVec[hash].first == -1) //key is not registered yet
                        {
                            globalHashVec[hash].first = key;
                            globalHashVec[hash].second = mrhs;
                            break;
                        }
                        else //key is not found
                        {
                            hash = (hash+1) & (ht_size-1);
                        }
                    }
                }
            }

            // gather non-zero elements from hash table, and then sort them by row indices
            size_t index = 0;
            for (size_t j=0; j < ht_size; ++j)
            {
                if (globalHashVec[j].first != -1)
                {
                    globalHashVec[index++] = globalHashVec[j];
                }
            }
            //std::sort(globalHashVec.begin(), globalHashVec.begin() + index, sort_less<IT, NTO>);
            //std::sort(globalHashVecAll[myThread].begin(), globalHashVecAll[myThread].begin() + index, sort_less<IT, NTO>);
            integerSort<NTO>(globalHashVecAll[myThread].data(), index);

            IT curptr = colptrC[i];
            for (size_t j=0; j < index; ++j)
            {
                tuplesC[curptr++]= std::make_tuple(globalHashVec[j].first, Bdcsc->jc[i], globalHashVec[j].second);
            }
        }
    }

    if(clearA)
        delete const_cast<SpDCCols<IT, NT1> *>(&A);
    if(clearB)
        delete const_cast<SpDCCols<IT, NT2> *>(&B);
    delete [] colptrC;
    delete [] flopptr;
    if(deleteAux)
        delete [] aux;

    SpTuples<IT, NTO>* spTuplesC = new SpTuples<IT, NTO> (nnzc, mdim, ndim, tuplesC, true, true);
    // std::cout << "localspgemminfo," << flop << "," << nnzc << "," << compression_ratio << "," << t1-t0 << std::endl;
    // std::cout << hashSelected << ", " << Bdcsc->nzc << ", " << (float)hashSelected / Bdcsc->nzc << std::endl;
    return spTuplesC;
}

// Hash table based local SpGEMM: every output column uses the linear-probing
// hash kernel.  If sort==false the output tuples within a column keep hash
// order (SpTuples is constructed with isSorted=false in that case).
template <typename SR, typename NTO, typename IT, typename NT1, typename NT2>
SpTuples<IT, NTO> * LocalSpGEMMHash
(const SpDCCols<IT, NT1> & A,
 const SpDCCols<IT, NT2> & B,
 bool clearA, bool clearB, bool sort=true)
{
    double t0=MPI_Wtime();
    IT mdim = A.getnrow();
    IT ndim = B.getncol();
    IT nnzA = A.getnnz();
    if(A.isZero() || B.isZero())
    {
        return new SpTuples<IT, NTO>(0, mdim, ndim);
    }
    Dcsc<IT,NT1>* Adcsc = A.GetDCSC();
    Dcsc<IT,NT2>* Bdcsc = B.GetDCSC();
    IT nA = A.getncol();
    float cf = static_cast<float>(nA+1) / static_cast<float>(Adcsc->nzc);
    IT csize = static_cast<IT>(ceil(cf));   // chunk size
    IT * aux;
    Adcsc->ConstructAux(nA, aux);

    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif
    // std::cout << "numThreads: " << numThreads << std::endl;

    IT* flopC = estimateFLOP(A, B);
    IT* flopptr = prefixsum<IT>(flopC, Bdcsc->nzc, numThreads);
    IT flop = flopptr[Bdcsc->nzc];
    // std::cout << "FLOP of A * B is " << flop << std::endl;
    IT* colnnzC = estimateNNZ_Hash(A, B, flopC);
    IT* colptrC = prefixsum<IT>(colnnzC, Bdcsc->nzc, numThreads);
    delete [] colnnzC;
    delete [] flopC;
    IT nnzc = colptrC[Bdcsc->nzc];
    double compression_ratio = (double)flop / nnzc;
    // std::cout << "NNZ of A * B is " << nnzc << std::endl;
    // std::cout << "Compression ratio is " << compression_ratio << std::endl;

    // std::tuple<IT,IT,NTO> * tuplesC = static_cast<std::tuple<IT,IT,NTO> *> (::operator new (sizeof(std::tuple<IT,IT,NTO>[nnzc])));
    std::tuple<IT,IT,NTO> * tuplesC = new std::tuple<IT,IT,NTO>[nnzc];

    // thread private space for heap and colinds
    std::vector<std::vector< std::pair<IT,IT>>> colindsVec(numThreads);
    for(int i=0; i<numThreads; i++) //initial allocation per thread, may be an overestimate, but does not require more memory than inputs
    {
        colindsVec[i].resize(nnzA/numThreads);
    }

    // IT hashSelected = 0;
#ifdef THREADED
#pragma omp parallel for
#endif
    for(size_t i=0; i < Bdcsc->nzc; ++i)
    {
        size_t nnzcolB = Bdcsc->cp[i+1] - Bdcsc->cp[i]; //nnz in the current column of B
        int myThread = 0;
#ifdef THREADED
        myThread = omp_get_thread_num();
#endif
        if(colindsVec[myThread].size() < nnzcolB) //resize thread private vectors if needed
        {
            colindsVec[myThread].resize(nnzcolB);
        }

        // colinds.first vector keeps indices to A.cp, i.e. it dereferences "colnums" vector (above),
        // colinds.second vector keeps the end indices (i.e. it gives the index to the last valid element of A.cp)
        Adcsc->FillColInds(Bdcsc->ir + Bdcsc->cp[i], nnzcolB, colindsVec[myThread], aux, csize);
        std::pair<IT,IT> * colinds = colindsVec[myThread].data();

        // #pragma omp atomic
        // hashSelected++;
        const IT minHashTableSize = 16;
        const IT hashScale = 107;
        size_t nnzcolC = colptrC[i+1] - colptrC[i]; //nnz in the current column of C (=Output)
        size_t ht_size = minHashTableSize;
        while(ht_size < nnzcolC) //ht_size is set as 2^n
        {
            ht_size <<= 1;
        }
        //std::vector< std::pair<IT,NTO>> globalHashVec(ht_size);
        std::vector< std::pair<uint32_t,NTO>> globalHashVec(ht_size); //uint32_t because integerSort accepts only uint32_t

        // Initialize hash tables
        // NOTE(review): -1 wraps to 0xFFFFFFFF in the uint32_t key; see
        // the same sentinel caveat as in LocalHybridSpGEMM.
        for(size_t j=0; j < ht_size; ++j)
        {
            globalHashVec[j].first = -1;
        }

        // Multiply and add on Hash table
        for (size_t j=0; j < nnzcolB; ++j)
        {
            IT t_bcol = Bdcsc->ir[Bdcsc->cp[i] + j];
            NT2 t_bval = Bdcsc->numx[Bdcsc->cp[i] + j];
            for (IT k = colinds[j].first; k < colinds[j].second; ++k)
            {
                NTO mrhs = SR::multiply(Adcsc->numx[k], t_bval);
                IT key = Adcsc->ir[k];
                IT hash = (key*hashScale) & (ht_size-1);
                while (1) //hash probing
                {
                    if (globalHashVec[hash].first == key) //key is found in hash table
                    {
                        globalHashVec[hash].second = SR::add(mrhs, globalHashVec[hash].second);
                        break;
                    }
                    else if (globalHashVec[hash].first == -1) //key is not registered yet
                    {
                        globalHashVec[hash].first = key;
                        globalHashVec[hash].second = mrhs;
                        break;
                    }
                    else //key is not found
                    {
                        hash = (hash+1) & (ht_size-1);
                    }
                }
            }
        }

        if(sort)
        {
            // gather non-zero elements from hash table, and then sort them by row indices
            size_t index = 0;
            for (size_t j=0; j < ht_size; ++j)
            {
                if (globalHashVec[j].first != -1)
                {
                    globalHashVec[index++] = globalHashVec[j];
                }
            }
            //std::sort(globalHashVec.begin(), globalHashVec.begin() + index, sort_less<IT, NTO>);
            integerSort<NTO>(globalHashVec.data(), index);

            IT curptr = colptrC[i];
            for (size_t j=0; j < index; ++j)
            {
                tuplesC[curptr++]= std::make_tuple(globalHashVec[j].first, Bdcsc->jc[i], globalHashVec[j].second);
            }
        }
        else
        {
            // emit in hash-table order (unsorted within the column)
            IT curptr = colptrC[i];
            for (size_t j=0; j < ht_size; ++j)
            {
                if (globalHashVec[j].first != -1)
                {
                    tuplesC[curptr++]= std::make_tuple(globalHashVec[j].first, Bdcsc->jc[i], globalHashVec[j].second);
                }
            }
        }
    }

    if(clearA)
        delete const_cast<SpDCCols<IT, NT1> *>(&A);
    if(clearB)
        delete const_cast<SpDCCols<IT, NT2> *>(&B);
    delete [] colptrC;
    delete [] flopptr;
    delete [] aux;

    SpTuples<IT, NTO>* spTuplesC = new SpTuples<IT, NTO> (nnzc, mdim, ndim, tuplesC, true, false);
    double t1=MPI_Wtime();
    // std::cout << "localspgemminfo," << flop << "," << nnzc << "," << compression_ratio << "," << t1-t0 << std::endl;
    // std::cout << hashSelected << ", " << Bdcsc->nzc << ", " << (float)hashSelected / Bdcsc->nzc << std::endl;
    return spTuplesC;
}

/*
 * Estimates total flops necessary to multiply A and B
 * Then returns the number
 * */
template <typename SR, typename IT, typename NT1, typename NT2>
IT EstimateLocalFLOP
(const SpDCCols<IT, NT1> & A,
 const SpDCCols<IT, NT2> & B,
 bool clearA, bool clearB)
{
    Dcsc<IT,NT1>* Adcsc = A.GetDCSC();
    Dcsc<IT,NT2>* Bdcsc = B.GetDCSC();
    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif
    IT* flopC = estimateFLOP(A, B);
    IT* flopptr = prefixsum<IT>(flopC, Bdcsc->nzc, numThreads);
    IT flop = flopptr[Bdcsc->nzc];
    delete [] flopC;
    if(clearA)
        delete const_cast<SpDCCols<IT, NT1> *>(&A);
    if(clearB)
        delete const_cast<SpDCCols<IT, NT2> *>(&B);
    delete [] flopptr;
    return flop;
}

// estimate space for result of SpGEMM: exact per-column nnz of C=A*B via a
// row-merging min-heap.  Returns a new [Bdcsc->nzc] array (caller frees);
// aux is built here when not supplied, and freed only when freeaux is set.
template <typename IT, typename NT1, typename NT2>
IT* estimateNNZ(const SpDCCols<IT, NT1> & A,const SpDCCols<IT, NT2> & B, IT * aux = nullptr, bool freeaux = true)
{
    IT nnzA = A.getnnz();
    if(A.isZero() || B.isZero())
    {
        return NULL;
    }
    Dcsc<IT,NT1>* Adcsc = A.GetDCSC();
    Dcsc<IT,NT2>* Bdcsc = B.GetDCSC();

    float cf = static_cast<float>(A.getncol()+1) / static_cast<float>(Adcsc->nzc);
    IT csize = static_cast<IT>(ceil(cf));   // chunk size
    if(aux == nullptr)
    {
        Adcsc->ConstructAux(A.getncol(), aux);
    }

    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif

    IT* colnnzC = new IT[Bdcsc->nzc];   // nnz in every nonempty column of C
#ifdef THREADED
#pragma omp parallel for
#endif
    for(IT i=0; i< Bdcsc->nzc; ++i)
    {
        colnnzC[i] = 0;
    }

    // thread private space for heap and colinds
    std::vector<std::vector< std::pair<IT,IT>>> colindsVec(numThreads);
    std::vector<std::vector<std::pair<IT,IT>>> globalheapVec(numThreads);
    for(int i=0; i<numThreads; i++) //initial allocation per thread, may be an overestimate, but does not require more memory than inputs
    {
        colindsVec[i].resize(nnzA/numThreads);
        globalheapVec[i].resize(nnzA/numThreads);
    }

#ifdef THREADED
#pragma omp parallel for
#endif
    for(int i=0; i < Bdcsc->nzc; ++i)
    {
        size_t nnzcolB = Bdcsc->cp[i+1] - Bdcsc->cp[i]; //nnz in the current column of B
        int myThread = 0;
#ifdef THREADED
        myThread = omp_get_thread_num();
#endif
        if(colindsVec[myThread].size() < nnzcolB) //resize thread private vectors if needed
        {
            colindsVec[myThread].resize(nnzcolB);
            globalheapVec[myThread].resize(nnzcolB);
        }

        // colinds.first vector keeps indices to A.cp, i.e. it dereferences "colnums" vector (above),
        // colinds.second vector keeps the end indices (i.e. it gives the index to the last valid element of A.cp)
        Adcsc->FillColInds(Bdcsc->ir + Bdcsc->cp[i], nnzcolB, colindsVec[myThread], aux, csize);
        std::pair<IT,IT> * colinds = colindsVec[myThread].data();
        std::pair<IT,IT> * curheap = globalheapVec[myThread].data();
        IT hsize = 0;

        // create the initial heap (min-heap on row index)
        for(IT j = 0; (unsigned)j < nnzcolB; ++j)
        {
            if(colinds[j].first != colinds[j].second)
            {
                curheap[hsize++] = std::make_pair(Adcsc->ir[colinds[j].first], j);
            }
        }
        std::make_heap(curheap, curheap+hsize, std::greater<std::pair<IT,IT>>());

        IT prevRow=-1;  // previously popped row from heap
        while(hsize > 0)
        {
            std::pop_heap(curheap, curheap + hsize, std::greater<std::pair<IT,IT>>());  // result is stored in curheap[hsize-1]
            IT locb = curheap[hsize-1].second;

            // count each distinct row once (heap yields rows in nondecreasing order)
            if( curheap[hsize-1].first != prevRow)
            {
                prevRow = curheap[hsize-1].first;
                colnnzC[i] ++;
            }
            if( (++(colinds[locb].first)) != colinds[locb].second)  // current != end
            {
                curheap[hsize-1].first = Adcsc->ir[colinds[locb].first];
                std::push_heap(curheap, curheap+hsize, std::greater<std::pair<IT,IT>>());
            }
            else
            {
                --hsize;
            }
        }
    }

    if (freeaux)
        delete [] aux;
    return colnnzC;
}

// estimate space for result of SpGEMM with Hash: exact per-column nnz of
// C=A*B by inserting row keys into a per-thread hash set sized from flopC.
// Returns a new [Bdcsc->nzc] array (caller frees).
template <typename IT, typename NT1, typename NT2>
IT* estimateNNZ_Hash(const SpDCCols<IT, NT1> & A,const SpDCCols<IT, NT2> & B, IT *flopC, IT * aux=nullptr)
{
    IT nnzA = A.getnnz();
    if(A.isZero() || B.isZero())
    {
        return NULL;
    }
    Dcsc<IT,NT1>* Adcsc = A.GetDCSC();
    Dcsc<IT,NT2>* Bdcsc = B.GetDCSC();

    float cf = static_cast<float>(A.getncol()+1) / static_cast<float>(Adcsc->nzc);
    IT csize = static_cast<IT>(ceil(cf));   // chunk size
    bool deleteAux = false;
    if(aux==nullptr)
    {
        deleteAux = true;
        Adcsc->ConstructAux(A.getncol(), aux);
    }

    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif

    IT* colnnzC = new IT[Bdcsc->nzc];   // nnz in every nonempty column of C
    /*
#ifdef THREADED
#pragma omp parallel for
#endif
    for(IT i=0; i< Bdcsc->nzc; ++i)
    {
        colnnzC[i] = 0;
    }
    */

    // thread private space for heap and colinds
    std::vector<std::vector< std::pair<IT,IT>>> colindsVec(numThreads);
    std::vector<std::vector< IT>> globalHashVecAll(numThreads);
    /* for(int i=0; i<numThreads; i++) //initial allocation per thread, may be an overestimate, but does not require more memory than inputs
    {
        colindsVec[i].resize(nnzA/numThreads);
    }*/

#ifdef THREADED
#pragma omp parallel for
#endif
    for(int i=0; i < Bdcsc->nzc; ++i)
    {
        colnnzC[i] = 0;
        size_t nnzcolB = Bdcsc->cp[i+1] - Bdcsc->cp[i]; //nnz in the current column of B
        int myThread = 0;
#ifdef THREADED
        myThread = omp_get_thread_num();
#endif
        if(colindsVec[myThread].size() < nnzcolB) //resize thread private vectors if needed
        {
            colindsVec[myThread].resize(nnzcolB);
        }

        // colinds.first vector keeps indices to A.cp, i.e. it dereferences "colnums" vector (above),
        // colinds.second vector keeps the end indices (i.e. it gives the index to the last valid element of A.cp)
        Adcsc->FillColInds(Bdcsc->ir + Bdcsc->cp[i], nnzcolB, colindsVec[myThread], aux, csize);
        std::pair<IT,IT> * colinds = colindsVec[myThread].data();

        // Hash
        const IT minHashTableSize = 16;
        const IT hashScale = 107;

        // Initialize hash tables; flopC[i] upper-bounds the distinct keys
        IT ht_size = minHashTableSize;
        while(ht_size < flopC[i]) //ht_size is set as 2^n
        {
            ht_size <<= 1;
        }
        if(globalHashVecAll[myThread].size() < ht_size) //resize thread private vectors if needed
        {
            globalHashVecAll[myThread].resize(ht_size);
        }
        IT* globalHashVec = globalHashVecAll[myThread].data();

        for(IT j=0; (unsigned)j < ht_size; ++j)
        {
            globalHashVec[j] = -1;  // empty slot sentinel
        }
        for (IT j=0; (unsigned)j < nnzcolB; ++j)
        {
            IT t_bcol = Bdcsc->ir[Bdcsc->cp[i] + j];
            for (IT k = colinds[j].first; (unsigned)k < colinds[j].second; ++k)
            {
                IT key = Adcsc->ir[k];
                IT hash = (key*hashScale) & (ht_size-1);
                while (1) //hash probing
                {
                    if (globalHashVec[hash] == key) //key is found in hash table
                    {
                        break;
                    }
                    else if (globalHashVec[hash] == -1) //key is not registered yet
                    {
                        globalHashVec[hash] = key;
                        colnnzC[i] ++;
                        break;
                    }
                    else //key is not found
                    {
                        hash = (hash+1) & (ht_size-1);
                    }
                }
            }
        }
    }

    if(deleteAux)
        delete [] aux;
    return colnnzC;
}

// sampling-based nnz estimation (within SUMMA): estimates nnz(A*B) with the
// minimum-of-exponentials sketch, nrounds samples per row.
// NOTE(review): the first sampling loop nests "omp parallel for" inside an
// enclosing "omp parallel" region and every thread writes samples_init with
// its own generator — this looks like a data race / redundant fill; confirm
// intent against upstream.  nthds is computed but never used, and
// omp_get_num_threads() is called even when THREADED is not defined.
template <typename IT, typename NT1, typename NT2>
int64_t estimateNNZ_sampling( const SpDCCols<IT, NT1> &A, const SpDCCols<IT, NT2> &B, int nrounds = 5 )
{
    IT nnzA = A.getnnz();
    if (A.isZero() || B.isZero())
        return 0;
    Dcsc<IT,NT1> *Adcsc = A.GetDCSC();
    Dcsc<IT,NT2> *Bdcsc = B.GetDCSC();

    float lambda = 1.0f;    // rate of the exponential distribution
    float usedmem = 0.0f;
    IT m = A.getnrow();
    IT p = A.getncol();
    float *samples_init, *samples_mid, *samples_final;
    float *colest;

    // samples
    samples_init = (float *) malloc(m * nrounds * sizeof(*samples_init));
    samples_mid = (float *) malloc(p * nrounds * sizeof(*samples_mid));

    int nthds = 1;
#ifdef THREADED
#pragma omp parallel
#endif
    {
        nthds = omp_get_num_threads();
    }

#ifdef THREADED
#pragma omp parallel
#endif
    {
        std::default_random_engine gen;
        std::exponential_distribution<float> exp_dist(lambda);
#ifdef THREADED
#pragma omp parallel for
#endif
        for (IT i = 0; i < m * nrounds; ++i)
            samples_init[i] = exp_dist(gen);
    }

#ifdef THREADED
#pragma omp parallel for
#endif
    for (IT i = 0; i < p * nrounds; ++i)
        samples_mid[i] = std::numeric_limits<float>::max();

    // propagate per-row minima through A's columns
#ifdef THREADED
#pragma omp parallel for
#endif
    for (IT i = 0; i < Adcsc->nzc; ++i)
    {
        IT col = Adcsc->jc[i];
        IT beg_mid = col * nrounds;
        for (IT j = Adcsc->cp[i]; j < Adcsc->cp[i + 1]; ++j)
        {
            IT row = Adcsc->ir[j];
            IT beg_init = row * nrounds;
            for (int k = 0; k < nrounds; ++k)
            {
                if (samples_init[beg_init + k] < samples_mid[beg_mid + k])
                    samples_mid[beg_mid + k] = samples_init[beg_init + k];
            }
        }
    }
    free(samples_init);

    samples_final = (float *) malloc(B.getnzc() * nrounds * sizeof(*samples_final));
    colest = (float *) malloc(B.getnzc() * sizeof(*colest));
    float nnzest = 0.0f;

    // propagate minima through B's columns and form the per-column estimator
#ifdef THREADED
#pragma omp parallel for reduction (+:nnzest)
#endif
    for (IT i = 0; i < Bdcsc->nzc; ++i)
    {
        int tid = 0;
#ifdef THREADED
        tid = omp_get_thread_num();
#endif
        IT beg_final = i * nrounds;
        for (IT k = beg_final; k < beg_final + nrounds; ++k)
            samples_final[k] = std::numeric_limits<float>::max();
        for (IT j = Bdcsc->cp[i]; j < Bdcsc->cp[i + 1]; ++j)
        {
            IT row = Bdcsc->ir[j];
            IT beg_mid = row * nrounds;
            for (int k = 0; k < nrounds; ++k)
            {
                if (samples_mid[beg_mid + k] < samples_final[beg_final + k])
                    samples_final[beg_final + k] = samples_mid[beg_mid + k];
            }
        }
        colest[i] = 0.0f;
        for (IT k = beg_final; k < beg_final + nrounds; ++k)
            colest[i] += samples_final[k];
        // unbiased estimator (nrounds-1)/sum of minima
        colest[i] = static_cast<float>(nrounds - 1) / colest[i];
        nnzest += colest[i];
    }

    free(samples_mid);
    free(samples_final);
    free(colest);
    return static_cast<int64_t>(nnzest);
}

// estimate the number of floating point operations of SpGEMM: per column i of
// C, sum of lengths of the A columns selected by B's column i.  Returns a new
// [Bdcsc->nzc] array (caller frees).
template <typename IT, typename NT1, typename NT2>
IT* estimateFLOP(const SpDCCols<IT, NT1> & A,const SpDCCols<IT, NT2> & B, IT * aux = nullptr)
{
    IT nnzA = A.getnnz();
    if(A.isZero() || B.isZero())
    {
        return NULL;
    }
    Dcsc<IT,NT1>* Adcsc = A.GetDCSC();
    Dcsc<IT,NT2>* Bdcsc = B.GetDCSC();

    float cf = static_cast<float>(A.getncol()+1) / static_cast<float>(Adcsc->nzc);
    IT csize = static_cast<IT>(ceil(cf));   // chunk size
    //IT * aux;
    bool deleteAux = false;
    if(aux==nullptr)
    {
        deleteAux = true;
        Adcsc->ConstructAux(A.getncol(), aux);
    }

    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif

    IT* colflopC = new IT[Bdcsc->nzc];  // flop in every nonempty column of C
#ifdef THREADED
#pragma omp parallel for
#endif
    for(IT i=0; i< Bdcsc->nzc; ++i)
    {
        colflopC[i] = 0;
    }

    // thread private space for heap and colinds
    std::vector<std::vector< std::pair<IT,IT>>> colindsVec(numThreads);
    /* for(int i=0; i<numThreads; i++) //initial allocation per thread, may be an overestimate, but does not require more memory than inputs
    {
        colindsVec[i].resize(nnzA/numThreads);
    }*/

#ifdef THREADED
#pragma omp parallel for
#endif
    for(int i=0; i < Bdcsc->nzc; ++i)
    {
        size_t nnzcolB = Bdcsc->cp[i+1] - Bdcsc->cp[i]; //nnz in the current column of B
        int myThread = 0;
#ifdef THREADED
        myThread = omp_get_thread_num();
#endif
        if(colindsVec[myThread].size() < nnzcolB) //resize thread private vectors if needed
        {
            colindsVec[myThread].resize(nnzcolB);
        }

        // colinds.first vector keeps indices to A.cp, i.e. it dereferences "colnums" vector (above),
        // colinds.second vector keeps the end indices (i.e. it gives the index to the last valid element of A.cp)
        Adcsc->FillColInds(Bdcsc->ir + Bdcsc->cp[i], nnzcolB, colindsVec[myThread], aux, csize);
        for (IT j = 0; (unsigned)j < nnzcolB; ++j)
        {
            colflopC[i] += colindsVec[myThread][j].second - colindsVec[myThread][j].first;
        }
    }

    if(deleteAux)
        delete [] aux;
    return colflopC;
}

////////////////////////////////////////////////////////////////////////////////
//////////////////////////// CSC-based local SpGEMM ////////////////////////////
////////////////////////////////////////////////////////////////////////////////

// CSC analogue of the DCSC LocalHybridSpGEMM above: per-column heap/hash
// choice driven by the flop/nnz compression ratio.
// NOTE(review): this function is truncated at the end of this chunk — its
// tail (hash-table gather and cleanup) continues beyond the visible source.
template <typename SR, typename NTO, typename IT, typename NT1, typename NT2>
SpTuples <IT, NTO> * LocalHybridSpGEMM
(const SpCCols<IT, NT1> &A,
 const SpCCols<IT, NT2> &B,
 bool clearA, bool clearB )
{
    double t0 = MPI_Wtime();
    IT mdim = A.getnrow();
    IT ndim = B.getncol();
    IT nnzA = A.getnnz();
    if(A.isZero() || B.isZero())
        return new SpTuples<IT, NTO>(0, mdim, ndim);
    Csc<IT, NT1> *Acsc = A.GetCSC();
    Csc<IT, NT2> *Bcsc = B.GetCSC();

    int numThreads = 1;
#ifdef THREADED
#pragma omp parallel
    {
        numThreads = omp_get_num_threads();
    }
#endif

    // symbolic phase: per-column flops and output nnz, prefix-summed
    IT *flopC = estimateFLOP(A, B);
    IT *flopptr = prefixsum<IT>(flopC, Bcsc->n, numThreads);
    IT flop = flopptr[Bcsc->n];
    IT *colnnzC = estimateNNZ_Hash(A, B, flopC);
    IT *colptrC = prefixsum<IT>(colnnzC, Bcsc->n, numThreads);
    delete [] colnnzC;
    delete [] flopC;
    IT nnzc = colptrC[Bcsc->n];
    double compression_ratio = (double)flop / nnzc;

    std::tuple<IT, IT, NTO> *tuplesC = static_cast<std::tuple<IT, IT, NTO> *> (::operator new (sizeof(std::tuple<IT, IT, NTO>[nnzc])));

#ifdef THREADED
#pragma omp parallel for
#endif
    for (size_t i = 0; i < Bcsc->n; ++i)
    {
        size_t nnzcolB = Bcsc->jc[i + 1] - Bcsc->jc[i];
        double cr = static_cast<double> (flopptr[i+1] - flopptr[i]) / (colptrC[i+1] - colptrC[i]);
        if (cr < 2.0) // Heap Algorithm
        {
            // cnt tracks the current cursor into each selected A column
            std::vector<IT> cnt(nnzcolB);
            std::vector<HeapEntry<IT, NT1>> globalheapVec(nnzcolB);
            HeapEntry<IT, NT1> *wset = globalheapVec.data();
            IT hsize = 0;
            for (IT j = Bcsc->jc[i]; j < Bcsc->jc[i + 1]; ++j)
            {
                IT ca = Bcsc->ir[j];
                cnt[j - Bcsc->jc[i]] = Acsc->jc[ca];
                if (Acsc->jc[ca] != Acsc->jc[ca + 1]) // col not empty
                    wset[hsize++] = HeapEntry<IT, NT1> (Acsc->ir[Acsc->jc[ca]], j, Acsc->num[Acsc->jc[ca]]);
            }
            std::make_heap(wset, wset + hsize);

            IT curptr = colptrC[i];
            while (hsize > 0)
            {
                std::pop_heap(wset, wset + hsize);
                IT locb = wset[hsize - 1].runr;
                NTO mrhs = SR::multiply(wset[hsize - 1].num, Bcsc->num[locb]);
                if (!SR::returnedSAID())
                {
                    if ((curptr > colptrC[i]) && std::get<0>(tuplesC[curptr - 1]) == wset[hsize - 1].key)
                        std::get<2>(tuplesC[curptr - 1]) = SR::add(std::get<2>(tuplesC[curptr - 1]), mrhs);
                    else
                        tuplesC[curptr++] = std::make_tuple(wset[hsize - 1].key, i, mrhs) ;
                }
                IT locb_offset = locb - Bcsc->jc[i];
                IT ca = Bcsc->ir[locb];
                ++(cnt[locb_offset]);
                if (cnt[locb_offset] != Acsc->jc[ca + 1])
                {
                    wset[hsize - 1].key = Acsc->ir[cnt[locb_offset]];
                    wset[hsize - 1].num = Acsc->num[cnt[locb_offset]];
                    std::push_heap(wset, wset + hsize);
                }
                else
                    --hsize;
            }
        } // Finish heap
        else // Hash Algorithm
        {
            // Set up hash table
            const IT minHashTableSize = 16;
            const IT hashScale = 107;
            size_t nnzcolC = colptrC[i+1] - colptrC[i];
            size_t ht_size = minHashTableSize;
            while (ht_size < nnzcolC)
                ht_size <<= 1;
            //std::vector< std::pair<IT, NTO>> T(ht_size);
            std::vector< std::pair<uint32_t, NTO>> T(ht_size); // uint32_t because integerSort accepts only uint32_t
            for (size_t j = 0; j < ht_size; ++j)
                T[j].first = std::numeric_limits<IT>::max();    // empty slot sentinel

            // multiplication
            for (IT j = Bcsc->jc[i]; j < Bcsc->jc[i + 1]; ++j)
            {
                IT t_bcol = Bcsc->ir[j];
                NT2 t_bval = Bcsc->num[j];
                for (IT k = Acsc->jc[t_bcol]; k < Acsc->jc[t_bcol + 1]; ++k)
                {
                    NTO mrhs = SR::multiply(Acsc->num[k], t_bval);
                    IT key = Acsc->ir[k];
                    IT hv = (key * hashScale) & (ht_size - 1);
                repeat:
                    if (T[hv].first == key)
                        T[hv].second = SR::add(mrhs, T[hv].second);
                    else if (T[hv].first == std::numeric_limits<IT>::max())
                    {
                        T[hv].first = key;
                        T[hv].second = mrhs;
                    }
                    else
                    {
                        hv = (hv + 1) & (ht_size - 1);
goto repeat; } } } size_t index = 0; for (size_t j = 0; j < ht_size; ++j) { if (T[j].first != std::numeric_limits<IT>::max()) T[index++] = T[j]; } //std::sort(T.begin(), T.begin() + index, sort_less<IT, NTO>); integerSort<NTO>(T.data(), index); IT curptr = colptrC[i]; for (size_t j = 0; j < index; ++j) tuplesC[curptr++] = std::make_tuple(T[j].first, i, T[j].second); } } if (clearA) delete const_cast<SpCCols<IT, NT1> *>(&A); if (clearB) delete const_cast<SpCCols<IT, NT2> *>(&B); delete [] colptrC; delete [] flopptr; SpTuples<IT, NTO> *spTuplesC = new SpTuples<IT, NTO> (nnzc, mdim, ndim, tuplesC, true, true); double t1 = MPI_Wtime(); return spTuplesC; } template <typename IT, typename NT1, typename NT2> IT * estimateFLOP (const SpCCols<IT, NT1> &A, const SpCCols<IT, NT2> &B ) { IT nnzA = A.getnnz(); if (A.isZero() || B.isZero()) return NULL; Csc<IT, NT1> *Acsc = A.GetCSC(); Csc<IT, NT2> *Bcsc = B.GetCSC(); int numThreads = 1; #ifdef THREADED #pragma omp parallel { numThreads = omp_get_num_threads(); } #endif IT *colflopC = new IT[Bcsc->n]; #ifdef THREADED #pragma omp parallel for #endif for (IT i = 0; i < Bcsc->n; ++i) colflopC[i] = 0; #ifdef THREADED #pragma omp parallel for #endif for (IT i = 0; i < Bcsc->n; ++i) { for (IT j = Bcsc->jc[i]; j < Bcsc->jc[i+1]; ++j) colflopC[i] += Acsc->jc[Bcsc->ir[j]+1] - Acsc->jc[Bcsc->ir[j]]; } return colflopC; } template <typename IT, typename NT1, typename NT2> IT * estimateNNZ_Hash (const SpCCols<IT, NT1> &A, const SpCCols<IT, NT2> &B, const IT *flopC ) { IT nnzA = A.getnnz(); if (A.isZero() || B.isZero()) return NULL; Csc<IT, NT1> *Acsc = A.GetCSC(); Csc<IT, NT2> *Bcsc = B.GetCSC(); int numThreads = 1; #ifdef THREADED #pragma omp parallel { numThreads = omp_get_num_threads(); } #endif IT *colnnzC = new IT[Bcsc->n]; #ifdef THREADED #pragma omp parallel for #endif for (IT i = 0; i < Bcsc->n; ++i) colnnzC[i] = 0; #ifdef THREADED #pragma omp parallel for #endif for (IT i = 0; i < Bcsc->n; ++i) { // init hash table const IT 
minHashTableSize = 16; const IT hashScale = 107; IT ht_size = minHashTableSize; while (ht_size < flopC[i]) // make size of hash table a power of 2 ht_size <<= 1; // IT can be unsigned std::vector<IT> T(ht_size); for (IT j = 0; (unsigned)j < ht_size; ++j) T[j] = std::numeric_limits<IT>::max(); for (IT j = Bcsc->jc[i]; j < Bcsc->jc[i + 1]; ++j) { for (IT k = Acsc->jc[Bcsc->ir[j]]; k < Acsc->jc[Bcsc->ir[j]+1]; ++k) { IT key = Acsc->ir[k]; IT hv = (key * hashScale) & (ht_size - 1); while (1) { if (T[hv] == key) break; else if (T[hv] == std::numeric_limits<IT>::max()) { T[hv] = key; ++(colnnzC[i]); break; } else hv = (hv + 1) & (ht_size - 1); } } } } return colnnzC; } } #endif
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 16; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-2,4),ceild(8*t2-Nz-3,16));t3<=min(floord(4*Nt+Ny-9,16),floord(4*t1+Ny-1,16));t3++) { for (t4=max(max(ceild(t1-126,128),ceild(8*t2-Nz-499,512)),ceild(16*t3-Ny-499,512));t4<=min(min(floord(4*Nt+Nx-9,512),floord(4*t1+Nx-1,512)),floord(16*t3+Nx+3,512));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),4*t3+2),128*t4+126);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) { lbv=max(512*t4,4*t5+4); ubv=min(512*t4+511,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ 
(-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
box_coder_op.h
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <string> #include <vector> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" namespace paddle { namespace operators { enum class BoxCodeType { kEncodeCenterSize = 0, kDecodeCenterSize = 1 }; inline BoxCodeType GetBoxCodeType(const std::string& type) { if (type == "encode_center_size") { return BoxCodeType::kEncodeCenterSize; } else if (type == "decode_center_size") { return BoxCodeType::kDecodeCenterSize; } PADDLE_THROW("Not support type %s.", type); } template <typename DeviceContext, typename T> class BoxCoderKernel : public framework::OpKernel<T> { public: void EncodeCenterSize(const framework::Tensor* target_box, const framework::Tensor* prior_box, const framework::Tensor* prior_box_var, const bool normalized, const std::vector<float> variance, T* output) const { int64_t row = target_box->dims()[0]; int64_t col = prior_box->dims()[0]; int64_t len = prior_box->dims()[1]; auto* target_box_data = target_box->data<T>(); auto* prior_box_data = prior_box->data<T>(); const T* prior_box_var_data = nullptr; if (prior_box_var) prior_box_var_data = prior_box_var->data<T>(); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(2) #endif for (int64_t i = 0; i < row; ++i) { for (int64_t j = 0; j < col; ++j) { T prior_box_width = prior_box_data[j * len + 2] - prior_box_data[j * len] + (normalized == false); T 
prior_box_height = prior_box_data[j * len + 3] - prior_box_data[j * len + 1] + (normalized == false); T prior_box_center_x = prior_box_data[j * len] + prior_box_width / 2; T prior_box_center_y = prior_box_data[j * len + 1] + prior_box_height / 2; T target_box_center_x = (target_box_data[i * len + 2] + target_box_data[i * len]) / 2; T target_box_center_y = (target_box_data[i * len + 3] + target_box_data[i * len + 1]) / 2; T target_box_width = target_box_data[i * len + 2] - target_box_data[i * len] + (normalized == false); T target_box_height = target_box_data[i * len + 3] - target_box_data[i * len + 1] + (normalized == false); size_t offset = i * col * len + j * len; output[offset] = (target_box_center_x - prior_box_center_x) / prior_box_width; output[offset + 1] = (target_box_center_y - prior_box_center_y) / prior_box_height; output[offset + 2] = std::log(std::fabs(target_box_width / prior_box_width)); output[offset + 3] = std::log(std::fabs(target_box_height / prior_box_height)); if (prior_box_var) { int prior_var_offset = j * len; output[offset] /= prior_box_var_data[prior_var_offset]; output[offset + 1] /= prior_box_var_data[prior_var_offset + 1]; output[offset + 2] /= prior_box_var_data[prior_var_offset + 2]; output[offset + 3] /= prior_box_var_data[prior_var_offset + 3]; } else if (!(variance.empty())) { for (int k = 0; k < 4; ++k) { output[offset + k] /= static_cast<T>(variance[k]); } } } } } template <int axis, int var_size> void DecodeCenterSize(const framework::Tensor* target_box, const framework::Tensor* prior_box, const framework::Tensor* prior_box_var, const bool normalized, std::vector<float> variance, T* output) const { int64_t row = target_box->dims()[0]; int64_t col = target_box->dims()[1]; int64_t len = target_box->dims()[2]; auto* target_box_data = target_box->data<T>(); auto* prior_box_data = prior_box->data<T>(); const T* prior_box_var_data = nullptr; if (var_size == 2) prior_box_var_data = prior_box_var->data<T>(); int prior_box_offset = 0; T 
var_data[4] = {1., 1., 1., 1.}; T* var_ptr = var_data; #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(2) #endif for (int64_t i = 0; i < row; ++i) { for (int64_t j = 0; j < col; ++j) { size_t offset = i * col * len + j * len; prior_box_offset = axis == 0 ? j * len : i * len; T prior_box_width = prior_box_data[prior_box_offset + 2] - prior_box_data[prior_box_offset] + (normalized == false); T prior_box_height = prior_box_data[prior_box_offset + 3] - prior_box_data[prior_box_offset + 1] + (normalized == false); T prior_box_center_x = prior_box_data[prior_box_offset] + prior_box_width / 2; T prior_box_center_y = prior_box_data[prior_box_offset + 1] + prior_box_height / 2; T target_box_center_x = 0, target_box_center_y = 0; T target_box_width = 0, target_box_height = 0; int prior_var_offset = axis == 0 ? j * len : i * len; if (var_size == 2) { std::memcpy(var_ptr, prior_box_var_data + prior_var_offset, 4 * sizeof(T)); } else if (var_size == 1) { var_ptr = reinterpret_cast<T*>(variance.data()); } T box_var_x = *var_ptr; T box_var_y = *(var_ptr + 1); T box_var_w = *(var_ptr + 2); T box_var_h = *(var_ptr + 3); target_box_center_x = box_var_x * target_box_data[offset] * prior_box_width + prior_box_center_x; target_box_center_y = box_var_y * target_box_data[offset + 1] * prior_box_height + prior_box_center_y; target_box_width = std::exp(box_var_w * target_box_data[offset + 2]) * prior_box_width; target_box_height = std::exp(box_var_h * target_box_data[offset + 3]) * prior_box_height; output[offset] = target_box_center_x - target_box_width / 2; output[offset + 1] = target_box_center_y - target_box_height / 2; output[offset + 2] = target_box_center_x + target_box_width / 2 - (normalized == false); output[offset + 3] = target_box_center_y + target_box_height / 2 - (normalized == false); } } } void Compute(const framework::ExecutionContext& context) const override { auto* prior_box = context.Input<framework::Tensor>("PriorBox"); auto* prior_box_var = 
context.Input<framework::Tensor>("PriorBoxVar"); auto* target_box = context.Input<framework::LoDTensor>("TargetBox"); auto* output_box = context.Output<framework::Tensor>("OutputBox"); std::vector<float> variance = context.Attr<std::vector<float>>("variance"); const int axis = context.Attr<int>("axis"); if (target_box->lod().size()) { PADDLE_ENFORCE_EQ(target_box->lod().size(), 1UL, "Only support 1 level of LoD."); } if (prior_box_var) { PADDLE_ENFORCE(variance.empty(), "Input 'PriorBoxVar' and attribute 'variance' should not" "be used at the same time."); } if (!(variance.empty())) { PADDLE_ENFORCE(static_cast<int>(variance.size()) == 4, "Size of attribute 'variance' should be 4"); } auto code_type = GetBoxCodeType(context.Attr<std::string>("code_type")); bool normalized = context.Attr<bool>("box_normalized"); auto row = target_box->dims()[0]; auto col = prior_box->dims()[0]; if (code_type == BoxCodeType::kDecodeCenterSize) { col = target_box->dims()[1]; } auto len = prior_box->dims()[1]; output_box->mutable_data<T>({row, col, len}, context.GetPlace()); T* output = output_box->data<T>(); if (code_type == BoxCodeType::kEncodeCenterSize) { EncodeCenterSize(target_box, prior_box, prior_box_var, normalized, variance, output); } else if (code_type == BoxCodeType::kDecodeCenterSize) { if (prior_box_var) { if (axis == 0) { DecodeCenterSize<0, 2>(target_box, prior_box, prior_box_var, normalized, variance, output); } else { DecodeCenterSize<1, 2>(target_box, prior_box, prior_box_var, normalized, variance, output); } } else if (!(variance.empty())) { if (axis == 0) { DecodeCenterSize<0, 1>(target_box, prior_box, prior_box_var, normalized, variance, output); } else { DecodeCenterSize<1, 1>(target_box, prior_box, prior_box_var, normalized, variance, output); } } else { if (axis == 0) { DecodeCenterSize<0, 0>(target_box, prior_box, prior_box_var, normalized, variance, output); } else { DecodeCenterSize<1, 0>(target_box, prior_box, prior_box_var, normalized, variance, output); 
} } } } }; } // namespace operators } // namespace paddle
core_dtslqt.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztslqt.c, normal z -> d, Fri Sep 28 17:38:24 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

#include <omp.h>

#undef REAL
#define REAL

/***************************************************************************//**
 *
 * @ingroup core_tslqt
 *
 *  Computes an LQ factorization of a rectangular matrix
 *  formed by coupling side-by-side a complex m-by-m
 *  lower triangular tile A1 and a complex m-by-n tile A2:
 *
 *    | A1 A2 | = L * Q
 *
 *  The tile Q is represented as a product of elementary reflectors
 *
 *    Q = H(k)^T . . . H(2)^T H(1)^T, where k = min(m,n).
 *
 *  Each H(i) has the form
 *
 *    H(i) = I - tau * v * v^T
 *
 *  where tau is a complex scalar, and v is a complex vector with
 *  v(1:i-1) = 0 and v(i) = 1; v(i+1:n)^T is stored on exit in
 *  A2(i,1:n), and tau in tau(i).
 *
 *******************************************************************************
 *
 * @param[in] m
 *         The number of rows of the tile A1 and A2. m >= 0.
 *         The number of columns of the tile A1.
 *
 * @param[in] n
 *         The number of columns of the tile A2. n >= 0.
 *
 * @param[in] ib
 *         The inner-blocking size.  ib >= 0.
 *
 * @param[in,out] A1
 *         On entry, the m-by-m tile A1.
 *         On exit, the elements on and below the diagonal of the array
 *         contain the m-by-m lower trapezoidal tile L;
 *         the elements above the diagonal are not referenced.
 *
 * @param[in] lda1
 *         The leading dimension of the array A1. lda1 >= max(1,m).
 *
 * @param[in,out] A2
 *         On entry, the m-by-n tile A2.
 *         On exit, all the elements with the array tau, represent
 *         the orthogonal tile Q as a product of elementary reflectors
 *         (see Further Details).
 *
 * @param[in] lda2
 *         The leading dimension of the tile A2. lda2 >= max(1,m).
 *
 * @param[out] T
 *         The ib-by-m triangular factor T of the block reflector.
 *         T is upper triangular by block (economic storage);
 *         The rest of the array is not referenced.
 *
 * @param[in] ldt
 *         The leading dimension of the array T. ldt >= ib.
 *
 * @param tau
 *         Auxiliary workspace array of length m.
 *
 * @param work
 *         Auxiliary workspace array of length ib*m.
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 ******************************************************************************/
__attribute__((weak))
int plasma_core_dtslqt(int m, int n, int ib,
                       double *A1, int lda1,
                       double *A2, int lda2,
                       double *T,  int ldt,
                       double *tau, double *work)
{
    // Check input arguments.
    if (m < 0) {
        plasma_coreblas_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_coreblas_error("illegal value of n");
        return -2;
    }
    if (ib < 0) {
        plasma_coreblas_error("illegal value of ib");
        return -3;
    }
    if (A1 == NULL) {
        plasma_coreblas_error("NULL A1");
        return -4;
    }
    if (lda1 < imax(1, m) && m > 0) {
        plasma_coreblas_error("illegal value of lda1");
        return -5;
    }
    if (A2 == NULL) {
        plasma_coreblas_error("NULL A2");
        return -6;
    }
    if (lda2 < imax(1, m) && m > 0) {
        plasma_coreblas_error("illegal value of lda2");
        return -7;
    }
    if (T == NULL) {
        plasma_coreblas_error("NULL T");
        return -8;
    }
    if (ldt < imax(1, ib) && ib > 0) {
        plasma_coreblas_error("illegal value of ldt");
        return -9;
    }
    if (tau == NULL) {
        plasma_coreblas_error("NULL tau");
        return -10;
    }
    if (work == NULL) {
        plasma_coreblas_error("NULL work");
        return -11;
    }

    // quick return
    if (m == 0 || n == 0 || ib == 0)
        return PlasmaSuccess;

    static double zone  = 1.0;
    static double zzero = 0.0;

    // Blocked loop over panels of ib rows; within each panel, build the
    // reflectors one row at a time and accumulate the triangular factor T.
    for (int ii = 0; ii < m; ii += ib) {
        int sb = imin(m-ii, ib);
        for (int i = 0; i < sb; i++) {
            // Generate elementary reflector H(ii*ib+i) to annihilate
            // A(ii*ib+i,ii*ib+i:n).
#ifdef COMPLEX
            LAPACKE_dlacgv_work(n, &A2[ii+i], lda2);
            LAPACKE_dlacgv_work(1, &A1[lda1*(ii+i)+ii+i], lda1);
#endif
            LAPACKE_dlarfg_work(n+1, &A1[lda1*(ii+i)+ii+i], &A2[ii+i], lda2,
                                &tau[ii+i]);

            double alpha = -(tau[ii+i]);
            if (ii+i+1 < m) {
                // Apply H(ii+i-1) to A(ii+i:ii+ib-1, ii+i-1:n) from the right.
                // work holds the intermediate vector w = A1_col + A2 * v.
                cblas_dcopy(sb-i-1,
                            &A1[lda1*(ii+i)+(ii+i+1)], 1,
                            work, 1);

                cblas_dgemv(CblasColMajor, (CBLAS_TRANSPOSE)PlasmaNoTrans,
                            sb-i-1, n,
                            (zone), &A2[ii+i+1], lda2,
                                    &A2[ii+i],   lda2,
                            (zone), work,        1);

                cblas_daxpy(sb-i-1, (alpha),
                            work, 1,
                            &A1[lda1*(ii+i)+ii+i+1], 1);

                // Rank-1 update of the trailing part of A2.
                cblas_dger(CblasColMajor,
                           sb-i-1, n,
                           (alpha), work, 1,
                           &A2[ii+i], lda2,
                           &A2[ii+i+1], lda2);
            }
            // Calculate T: new column is -tau * T * (A2 panel)^T * v,
            // then made triangular by the trmv below.
            cblas_dgemv(CblasColMajor, (CBLAS_TRANSPOSE)PlasmaNoTrans,
                        i, n,
                        (alpha), &A2[ii], lda2,
                                 &A2[ii+i], lda2,
                        (zzero), &T[ldt*(ii+i)], 1);
#ifdef COMPLEX
            LAPACKE_dlacgv_work(n, &A2[ii+i], lda2);
            LAPACKE_dlacgv_work(1, &A1[lda1*(ii+i)+ii+i], lda1);
#endif
            cblas_dtrmv(CblasColMajor,
                        (CBLAS_UPLO)PlasmaUpper,
                        (CBLAS_TRANSPOSE)PlasmaNoTrans,
                        (CBLAS_DIAG)PlasmaNonUnit,
                        i,
                        &T[ldt*ii], ldt,
                        &T[ldt*(ii+i)], 1);

            T[ldt*(ii+i)+i] = tau[ii+i];
        }
        // Apply the block reflector of this panel to the rows below it.
        if (m > ii+sb) {
            plasma_core_dtsmlq(PlasmaRight, PlasmaTrans,
                               m-(ii+sb), sb, m-(ii+sb), n, ib, ib,
                               &A1[lda1*ii+ii+sb], lda1,
                               &A2[ii+sb], lda2,
                               &A2[ii], lda2,
                               &T[ldt*ii], ldt,
                               work, lda1);
        }
    }

    return PlasmaSuccess;
}

/******************************************************************************/
// OpenMP task wrapper: runs plasma_core_dtslqt as a task with data
// dependences on A1, A2, and T, taking per-thread workspace from `work`.
void plasma_core_omp_dtslqt(int m, int n, int ib,
                            double *A1, int lda1,
                            double *A2, int lda2,
                            double *T,  int ldt,
                            plasma_workspace_t work,
                            plasma_sequence_t *sequence,
                            plasma_request_t *request)
{
    #pragma omp task depend(inout:A1[0:lda1*m]) \
                     depend(inout:A2[0:lda2*n]) \
                     depend(out:T[0:ib*m]) // T should be mxib, but is stored
                                           // as ibxm
    {
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces: first m entries are tau, the rest is
            // the ib*m scratch buffer.
            int tid = omp_get_thread_num();
            double *tau = ((double*)work.spaces[tid]);

            // Call the kernel.
            int info = plasma_core_dtslqt(m, n, ib,
                                          A1, lda1,
                                          A2, lda2,
                                          T,  ldt,
                                          tau,
                                          tau+m);

            if (info != PlasmaSuccess) {
                plasma_error("core_dtslqt() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
targ_static.c
#include <stdio.h>
#include "assert.h"
#include <unistd.h>

#define NZ 10
#define NA 9

/* colstat is placed in the device data environment via "declare target";
 * a device copy of the array exists for the whole program lifetime. */
#pragma omp declare target
static int colstat[NZ];
#pragma omp end declare target

int main(){
  /* Host-only write; intentionally never transferred to the device. */
  colstat[0]=-1;

  /* map(alloc:...) requests no data transfer; the target region updates
   * the existing declare-target copy of colstat. */
  #pragma omp target map(alloc:colstat[0:NZ])
  {
    colstat[1] = 1111;
  }

  /* Zero-length array section: attaches to the already-present device
   * copy without mapping any data. */
  #pragma omp target map(alloc:colstat[:0])
  {
    colstat[2] = 2222;
  }

  /* With real offload, the device writes are presumably not yet visible
   * here -- TODO confirm expected BEFORE values on an offloading compiler
   * (on host fallback both prints show the same values). */
  fprintf(stderr, "BEFORE colstat[0..2] %d %d %d \n", colstat[0], colstat[1], colstat[2]);

  /* Copy the device values of colstat back to the host copy. */
  #pragma omp target update from(colstat)

  fprintf(stderr, "AFTER colstat[0..2] %d %d %d \n", colstat[0], colstat[1], colstat[2]);

  /* Pass iff both target regions' writes are visible after the update. */
  if (colstat[1] == 1111 && colstat[2] == 2222)
    printf("Success\n");
  else
    printf("Fail!\n");

  return (colstat[1] == 1111 && colstat[2] == 2222) ? 0 : 1 ;
}
GB_AxB_rowscale_meta.c
//------------------------------------------------------------------------------ // GB_AxB_rowscale_meta: C=D*B where D is a square diagonal matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // All entries in C=D*B are computed fully in parallel. { // Bx is unused if the operator is FIRST or PAIR #include "GB_unused.h" //-------------------------------------------------------------------------- // get C, D, and B //-------------------------------------------------------------------------- const GB_ATYPE *GB_RESTRICT Dx = (GB_ATYPE *) (D_is_pattern ? NULL : D->x) ; const GB_BTYPE *GB_RESTRICT Bx = (GB_BTYPE *) (B_is_pattern ? NULL : B->x) ; const int64_t *GB_RESTRICT Bi = B->i ; int64_t bnz = GB_NNZ (B) ; //-------------------------------------------------------------------------- // C=D*B //-------------------------------------------------------------------------- int ntasks = (nthreads == 1) ? 1 : (32 * nthreads) ; ntasks = GB_IMIN (bnz, ntasks) ; int tid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (tid = 0 ; tid < ntasks ; tid++) { int64_t pstart, pend ; GB_PARTITION (pstart, pend, bnz, tid, ntasks) ; GB_PRAGMA_SIMD_VECTORIZE for (int64_t p = pstart ; p < pend ; p++) { int64_t i = Bi [p] ; // get row index of B(i,j) GB_GETA (dii, Dx, i) ; // dii = D(i,i) GB_GETB (bij, Bx, p) ; // bij = B(i,j) GB_BINOP (GB_CX (p), dii, bij) ; // C(i,j) = dii*bij } } }
linear_tree_learner.h
/*!
 * Copyright (c) 2020 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_TREELEARNER_LINEAR_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_LINEAR_TREE_LEARNER_H_

#include <string>
#include <cmath>
#include <cstdio>
#include <memory>
#include <random>
#include <vector>

#include "serial_tree_learner.h"

namespace LightGBM {

/*!
 * \brief Tree learner that fits a linear model in each leaf instead of a
 *        constant, on top of the standard serial tree-growing algorithm.
 */
class LinearTreeLearner: public SerialTreeLearner {
 public:
  explicit LinearTreeLearner(const Config* config) : SerialTreeLearner(config) {}

  void Init(const Dataset* train_data, bool is_constant_hessian) override;

  /*! \brief Allocate the per-leaf coefficient workspaces (XTHX_/XTg_ etc.) */
  void InitLinear(const Dataset* train_data, const int max_leaves) override;

  Tree* Train(const score_t* gradients, const score_t *hessians, bool is_first_tree) override;

  /*! \brief Create array mapping dataset to leaf index, used for linear trees */
  void GetLeafMap(Tree* tree) const;

  /*!
   * \brief Fit the per-leaf linear models of `tree`.
   * \tparam HAS_NAN compile-time switch selecting the NaN-aware code path.
   */
  template<bool HAS_NAN>
  void CalculateLinear(Tree* tree, bool is_refit, const score_t* gradients, const score_t* hessians, bool is_first_tree) const;

  Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;

  Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred, const score_t* gradients,
                          const score_t* hessians) const override;

  /*!
   * \brief Add this tree's predictions to out_score, dispatching to the
   *        NaN-aware inner loop only when a split feature may contain NaN.
   */
  void AddPredictionToScore(const Tree* tree, double* out_score) const override {
    CHECK_LE(tree->num_leaves(), data_partition_->num_leaves());
    bool has_nan = false;
    if (any_nan_) {
      for (int i = 0; i < tree->num_leaves() - 1 ; ++i) {
        // use split_feature because split_feature_inner doesn't work when refitting existing tree
        if (contains_nan_[train_data_->InnerFeatureIndex(tree->split_feature(i))]) {
          has_nan = true;
          break;
        }
      }
    }
    if (has_nan) {
      AddPredictionToScoreInner<true>(tree, out_score);
    } else {
      AddPredictionToScoreInner<false>(tree, out_score);
    }
  }

  /*!
   * \brief Evaluate each row's leaf linear model and accumulate into out_score.
   *
   * With HAS_NAN, a row whose features include a NaN falls back to the leaf's
   * constant output instead of the linear model.
   */
  template<bool HAS_NAN>
  void AddPredictionToScoreInner(const Tree* tree, double* out_score) const {
    int num_leaves = tree->num_leaves();
    // Cache per-leaf model data up front so the per-row loop below is
    // branch-light and parallelizable.
    std::vector<double> leaf_const(num_leaves);
    std::vector<std::vector<double>> leaf_coeff(num_leaves);
    std::vector<std::vector<const float*>> feat_ptr(num_leaves);
    std::vector<double> leaf_output(num_leaves);
    std::vector<int> leaf_num_features(num_leaves);
    for (int leaf_num = 0; leaf_num < num_leaves; ++leaf_num) {
      leaf_const[leaf_num] = tree->LeafConst(leaf_num);
      leaf_coeff[leaf_num] = tree->LeafCoeffs(leaf_num);
      leaf_output[leaf_num] = tree->LeafOutput(leaf_num);
      for (int feat : tree->LeafFeaturesInner(leaf_num)) {
        feat_ptr[leaf_num].push_back(train_data_->raw_index(feat));
      }
      leaf_num_features[leaf_num] = feat_ptr[leaf_num].size();
    }
    OMP_INIT_EX();
#pragma omp parallel for schedule(static) if (num_data_ > 1024)
    for (int i = 0; i < num_data_; ++i) {
      OMP_LOOP_EX_BEGIN();
      int leaf_num = leaf_map_[i];
      if (leaf_num < 0) {
        // row not assigned to any leaf; skip
        continue;
      }
      double output = leaf_const[leaf_num];
      int num_feat = leaf_num_features[leaf_num];
      if (HAS_NAN) {
        bool nan_found = false;
        for (int feat_ind = 0; feat_ind < num_feat; ++feat_ind) {
          float val = feat_ptr[leaf_num][feat_ind][i];
          if (std::isnan(val)) {
            nan_found = true;
            break;
          }
          output += val * leaf_coeff[leaf_num][feat_ind];
        }
        if (nan_found) {
          // NaN feature: fall back to the leaf's constant output.
          out_score[i] += leaf_output[leaf_num];
        } else {
          out_score[i] += output;
        }
      } else {
        for (int feat_ind = 0; feat_ind < num_feat; ++feat_ind) {
          output += feat_ptr[leaf_num][feat_ind][i] * leaf_coeff[leaf_num][feat_ind];
        }
        out_score[i] += output;
      }
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
  }

 protected:
  /*! \brief whether numerical features contain any nan values */
  std::vector<int8_t> contains_nan_;
  /*! whether any numerical feature contains a nan value */
  bool any_nan_;
  /*! \brief map dataset to leaves */
  mutable std::vector<int> leaf_map_;
  /*! \brief temporary storage for calculating linear model coefficients */
  mutable std::vector<std::vector<float>> XTHX_;
  mutable std::vector<std::vector<float>> XTg_;
  mutable std::vector<std::vector<std::vector<float>>> XTHX_by_thread_;
  mutable std::vector<std::vector<std::vector<float>>> XTg_by_thread_;
};

}  // namespace LightGBM
#endif  // LIGHTGBM_TREELEARNER_LINEAR_TREE_LEARNER_H_
file_io.c
#include "file_io.h"

/*
 * Recompress all annotations of a BFT by spilling through disk:
 * the tree is written out, annotations are extracted into a Judy array,
 * sorted/recompressed, and a new BFT file with the compressed annotation
 * set replaces the old one.  The BFT in memory is updated in place.
 */
void compress_annotations_disk(BFT_Root* bft, char* filename_bft){

    ASSERT_NULL_PTR(bft, "compress_annotations_disk()\n")
    ASSERT_NULL_PTR(filename_bft, "compress_annotations_disk()\n")

    Pvoid_t comp_annots = (PWord_t)NULL;  // Judy array of extracted annotations
    Word_t Rc_word;

    memory_Used* mem;

    bool is_compressed = (bft->compressed == 1 ? true : false);

    int len_longest_annot;

    int lvl_bft = (bft->k / NB_CHAR_SUF_PREF) - 1;

    // Temporary file name: "<filename_bft>_annots"
    char* filename_bft_tmp = (char*) malloc((strlen(filename_bft) + 50) * sizeof(char));
    ASSERT_NULL_PTR(filename_bft_tmp, "compress_annotations_disk()\n")

    strcpy(filename_bft_tmp, filename_bft);
    strcpy(&filename_bft_tmp[strlen(filename_bft)], "_annots");

    // Upper bound on annotation length, needed to size iteration buffers.
    mem = printMemoryUsedFromNode(&(bft->node), lvl_bft, bft->k, bft->info_per_lvl);
    len_longest_annot = (int) MAX(mem->size_biggest_annot+1, getMaxSize_annotation_array_elem(bft->comp_set_colors));
    free(mem);

    // Always write the tree UNcompressed; restore the flag afterwards.
    if (is_compressed){
        bft->compressed = 0;
        write_BFT_Root(bft, filename_bft, false);
        bft->compressed = 1;
    }
    else write_BFT_Root(bft, filename_bft, false);

    load_annotation_from_Node(&(bft->node), lvl_bft, bft->k, len_longest_annot, bft->info_per_lvl,
                              &comp_annots, bft->comp_set_colors, bft->ann_inf); //Load annot in a compressed way

    // The in-memory tree and old compressed color sets are no longer needed.
    freeNode(&bft->node, lvl_bft, bft->info_per_lvl);
    free_annotation_array_elem(&(bft->comp_set_colors), &(bft->length_comp_set_colors));

    sort_annotations3(&comp_annots, len_longest_annot);

    write_partial_comp_set_colors(filename_bft_tmp, &comp_annots, len_longest_annot); //Write new annots compressed

    read_BFT_replace_comp_annots_bis(bft, filename_bft, filename_bft_tmp, &comp_annots,
                                     len_longest_annot, is_compressed);

    if (remove(filename_bft)) printf("Warning: Could not remove temporary file.\n");

    if (is_compressed) bft->compressed = 1;

    #if defined (_WORDx86)
        // 32-bit builds: Judy values are heap pointers that must be freed
        // individually before the array itself is released.
        Word_t * PValue;

        uint8_t* it_index = (uint8_t*) calloc((len_longest_annot + CEIL(len_longest_annot, SIZE_BITS_UINT_8T - 1) + 4),
                                              sizeof(uint8_t));
        ASSERT_NULL_PTR(it_index, "compressKmers_from_KmerFiles()\n");

        JSLF(PValue, comp_annots, it_index);

        while (PValue != NULL){
            free(*PValue);
            JSLN(PValue, comp_annots, it_index);
        }

        free(it_index);
    #endif

    JSLFA(Rc_word, comp_annots);

    free_annotation_array_elem(&(bft->comp_set_colors), &(bft->length_comp_set_colors));

    // Reload the freshly written compressed annotation set into memory.
    read_annotation_array_elem(filename_bft_tmp, &(bft->comp_set_colors), &(bft->length_comp_set_colors));

    if (remove(filename_bft_tmp)) printf("Warning: Could not remove temporary file.\n");

    free(filename_bft_tmp);

    return;
}

/* ---------------------------------------------------------------------------------------------------------------
*  insert_Genomes_from_KmerFiles(root, filenames, binary_files, size_kmer, ptr_ptr_annot_sorted)
*  ---------------------------------------------------------------------------------------------------------------
*  Insert k-mers from k-mer files into a BFT
*  ---------------------------------------------------------------------------------------------------------------
*  root: ptr to the root of a BFT
*  filenames: array of filenames. The files contains the k-mers to insert.
*  binary_files: Indicate if the files contains k-mers (ASCII) or compressed k-mers (2 bits per nuc.)
*  size_kmer: length k of k-mers in files
*  ---------------------------------------------------------------------------------------------------------------
*/
void insert_Genomes_from_KmerFiles(BFT_Root* root, int nb_files, char** filenames, int binary_files, char* filename_bft){

    ASSERT_NULL_PTR(root,"insert_Genomes_from_KmerFiles()")
    ASSERT_NULL_PTR(filenames,"insert_Genomes_from_KmerFiles()")

    struct timeval tval_before, tval_after, tval_last, tval_result;
    gettimeofday(&tval_before, NULL);
    tval_last = tval_before;

    FILE* file;

    uint8_t* array_kmers = (uint8_t*) calloc(SIZE_BUFFER, sizeof(uint8_t));  // packed k-mer buffer
    ASSERT_NULL_PTR(array_kmers,"insert_Genomes_from_KmerFiles()")

    char* line = (char*) calloc(100, sizeof(char));
    ASSERT_NULL_PTR(line,"insert_Genomes_from_KmerFiles()")

    char* str_tmp;

    int i = 0, j = 0, k = 0;  // j = k-mers buffered, k = byte offset in array_kmers

    int size_id_genome = 0;
    int nb_genomes_before = root->nb_genomes;

    int nb_bytes_kmer = CEIL(root->k*2, SIZE_BITS_UINT_8T);  // bytes per 2-bit-packed k-mer
    int nb_kmer_in_buf = SIZE_BUFFER/nb_bytes_kmer;

    size_t return_fread;

    uint64_t kmers_read;

    for (i = 0; i < nb_files; i++){ //For each file in input

        k = 0;
        j = 0;
        kmers_read = 0;

        str_tmp = basename(filenames[i]);
        add_genomes_BFT_Root(1, &str_tmp, root);

        size_id_genome = get_nb_bytes_power2_annot(root->nb_genomes-1);

        file = fopen(filenames[i], "r");
        ASSERT_NULL_PTR(file,"insert_Genomes_from_KmerFiles()")

        printf("\nFile %d: %s\n\n", root->nb_genomes-1, filenames[i]);

        if (binary_files){

            // Binary format: two ASCII header lines (k, then k-mer count),
            // followed by raw 2-bit-packed k-mers.
            if (fgets(line, 100, file) != NULL) k = atoi(line);
            else ERROR("Cannot read header of the file")

            if (fgets(line, 100, file) != NULL) printf("%d %d-mers in the file\n\n", atoi(line), k);
            else ERROR("Cannot read header of the file")

            while ((!ferror(file)) && (!feof(file))){

                return_fread = fread(array_kmers, (size_t)nb_bytes_kmer, (size_t)nb_kmer_in_buf, file);

                insertKmers(root, array_kmers, return_fread, root->nb_genomes-1, size_id_genome);

                memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t));

                // Progress line whenever a PRINT_EVERY_X_KMERS boundary is crossed.
                if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+return_fread)%PRINT_EVERY_X_KMERS)){
                    printf("%" PRIu64 " kmers read\n", kmers_read+return_fread);
                }

                kmers_read += return_fread;
            }
        }
        else {

            // ASCII format: one k-mer per line, packed into array_kmers and
            // flushed to the tree once nb_kmer_in_buf k-mers are buffered.
            while (fgets(line, 100, file) != NULL){

                if (parseKmerCount(line, root->k, array_kmers, k) == 1){

                    k += nb_bytes_kmer;
                    j++;

                    if (j == nb_kmer_in_buf){

                        insertKmers(root, array_kmers, nb_kmer_in_buf, root->nb_genomes-1, size_id_genome);

                        j = 0;
                        k = 0;
                        memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t));

                        if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+nb_kmer_in_buf)%PRINT_EVERY_X_KMERS)){
                            printf("%" PRIu64 " kmers read\n", kmers_read+nb_kmer_in_buf);
                        }

                        kmers_read += nb_kmer_in_buf;
                    }
                }
            }

            // Flush the partially filled buffer.
            insertKmers(root, array_kmers, j, root->nb_genomes-1, size_id_genome);

            kmers_read += j;

            memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t));
        }

        fclose(file);

        //if (root->treshold_compression && (root->nb_genomes - 1 > 5) && ((root->nb_genomes - 1) % root->treshold_compression == 0))
            //compress_annotations_disk(root, filename_bft);

        // Recompress annotations once, after the last input file of this batch.
        if (root->treshold_compression && (root->nb_genomes - 1 > 5)
            && (root->nb_genomes == nb_genomes_before + nb_files))
            compress_annotations_disk(root, filename_bft);

        gettimeofday(&tval_after, NULL);

        time_spent(&tval_last, &tval_after, &tval_result);
        printf("\nElapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec);

        time_spent(&tval_before, &tval_after, &tval_result);
        printf("Total elapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec);

        printf("Peak of memory: %llu mb\n", ((unsigned long long int)getPeakRSS())/1024);
        printf("Current memory: %llu mb\n", ((unsigned long long int)getCurrentRSS())/1024);

        tval_last = tval_after;
    }

    free(line);
    free(array_kmers);

    return;
}

/* ---------------------------------------------------------------------------------------------------------------
*  insert_Genomes_from_FASTxFiles(root, filenames, size_kmer, ptr_ptr_annot_sorted)
*
---------------------------------------------------------------------------------------------------------------
*  Insert k-mers from FASTx files into a BFT
*  ---------------------------------------------------------------------------------------------------------------
*  root: ptr to the root of a BFT
*  filenames: array of FASTx filenames
*  size_kmer: length k of k-mers to extract from the FASTx files
*  ---------------------------------------------------------------------------------------------------------------
*/
void insert_Genomes_from_FASTxFiles(BFT_Root* root, int nb_files, char** filenames){

    ASSERT_NULL_PTR(root,"insert_Genomes_from_FASTxFiles()")
    ASSERT_NULL_PTR(filenames,"insert_Genomes_from_FASTxFiles()")

    /* NOTE(review): the entire original body of this function (timing,
       kseq-based FASTx parsing, k-mer extraction/insertion and periodic
       annotation compression) is commented out in the source; only the
       two argument checks above execute, so this function is currently a
       no-op.  The dead draft has been dropped here for readability —
       recover it from version control if it is ever needed again. */

    return;
}

/* NOTE(review): dead, commented-out draft of
   queryBFT_kmerPresences_from_KmerFiles, superseded by the active
   implementation that follows it.  The draft text continues:
fwrite(root->filenames[i], sizeof(char), strlen(root->filenames[i])-1, file_output); fwrite(&comma, sizeof(char), 1, file_output); } fwrite(root->filenames[i], sizeof(char), strlen(root->filenames[i]), file_output); if (binary_file){ if (fgets(line, 100, file_query) == NULL) ERROR("Cannot read header of the queries file") if (fgets(line, 100, file_query) == NULL) ERROR("Cannot read header of the queries file") while ((!ferror(file_query)) && (!feof(file_query))){ return_fread = fread(array_kmers, (size_t)nb_bytes_kmer, (size_t)nb_kmer_in_buf, file_query); for (k=0; k<(int)return_fread; k++){ res = isKmerPresent(&(root->node), root, lvl_root, &(array_kmers[k*nb_bytes_kmer]), root->k); if (res->link_child != NULL){ if (res->posFilter2 != 0){ get_annot((UC*)res->container, &annot, &annot_ext, &annot_cplx, &size_annot, &size_annot_cplx, res->posFilter2, res->posFilter3, res->pos_sub_bucket); } else{ get_annot(&(((UC*)((CC*)res->container)->children)[res->bucket]), &annot, &annot_ext, &annot_cplx, &size_annot, &size_annot_cplx, res->posFilter2, res->posFilter3, res->pos_sub_bucket); } if (size_annot != 0){ memcpy(annot_res, annot, size_annot * sizeof(uint8_t)); size_annot_res = size_annot; } if ((annot_ext != NULL) && (annot_ext[0] != 0)){ memcpy(&(annot_res[size_annot]), annot_ext, sizeof(uint8_t)); size_annot_res++; } if (size_annot_cplx != 0){ memcpy(annot_res, annot_cplx, size_annot_cplx * sizeof(uint8_t)); size_annot_res = size_annot_cplx; } printAnnotation_CSV(file_output, annot_res, size_annot_res, NULL, 0, root->nb_genomes-1, root->comp_set_colors); nb_kmers_present++; } else { annot_res[0] = 0; printAnnotation_CSV(file_output, annot_res, 1, NULL, 0, root->nb_genomes-1, root->comp_set_colors); } free(res); } //if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+return_fread)%PRINT_EVERY_X_KMERS)) // printf("%" PRIu64 " kmers read\n", kmers_read+return_fread); kmers_read += return_fread; memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t)); } } else{ while 
(fgets(line, 100, file_query) != NULL){ if (parseKmerCount(line, root->k, array_kmers, k) == 1){ k += nb_bytes_kmer; j++; if (j == nb_kmer_in_buf){ for (i=0; i<nb_kmer_in_buf; i++){ res = isKmerPresent(&(root->node), root, lvl_root, &(array_kmers[i*nb_bytes_kmer]), root->k); if (res->link_child != NULL){ if (res->posFilter2 != 0){ get_annot((UC*)res->container, &annot, &annot_ext, &annot_cplx, &size_annot, &size_annot_cplx, res->posFilter2, res->posFilter3, res->pos_sub_bucket); } else{ get_annot(&(((UC*)((CC*)res->container)->children)[res->bucket]), &annot, &annot_ext, &annot_cplx, &size_annot, &size_annot_cplx, res->posFilter2, res->posFilter3, res->pos_sub_bucket); } if (size_annot != 0){ memcpy(annot_res, annot, size_annot * sizeof(uint8_t)); size_annot_res = size_annot; } if ((annot_ext != NULL) && (annot_ext[0] != 0)){ memcpy(&(annot_res[size_annot]), annot_ext, sizeof(uint8_t)); size_annot_res++; } if (size_annot_cplx != 0){ memcpy(annot_res, annot_cplx, size_annot_cplx * sizeof(uint8_t)); size_annot_res = size_annot_cplx; } printAnnotation_CSV(file_output, annot_res, size_annot_res, NULL, 0, root->nb_genomes-1, root->comp_set_colors); nb_kmers_present++; } else { annot_res[0] = 0; printAnnotation_CSV(file_output, annot_res, 1, NULL, 0, root->nb_genomes-1, root->comp_set_colors); } free(res); } j = 0; k = 0; memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t)); //if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+nb_kmer_in_buf)%PRINT_EVERY_X_KMERS)) // printf("%" PRIu64 " kmers read\n", kmers_read+nb_kmer_in_buf); kmers_read += nb_kmer_in_buf; } } } for (i=0; i<j; i++){ res = isKmerPresent(&(root->node), root, lvl_root, &(array_kmers[i*nb_bytes_kmer]), root->k); if (res->link_child != NULL){ if (res->posFilter2 != 0){ get_annot((UC*)res->container, &annot, &annot_ext, &annot_cplx, &size_annot, &size_annot_cplx, res->posFilter2, res->posFilter3, res->pos_sub_bucket); } else{ get_annot(&(((UC*)((CC*)res->container)->children)[res->bucket]), &annot, 
   (end of commented-out draft) */

/*
 * Query the BFT for every k-mer of `query_filename` and write a CSV matrix
 * to `output_filename`: one header row of genome file names, then one row
 * per queried k-mer with '1'/'0' per genome for presence/absence.
 * Returns the number of queried k-mers found in the tree.
 */
int queryBFT_kmerPresences_from_KmerFiles(BFT_Root* root, char* query_filename, int binary_file, char* output_filename){

    ASSERT_NULL_PTR(root,"queryBFT_kmerPresences_from_KmerFiles()")
    ASSERT_NULL_PTR(query_filename,"queryBFT_kmerPresences_from_KmerFiles()")

    struct timeval tval_before, tval_after, tval_result;
    gettimeofday(&tval_before, NULL);

    const char nl = '\n';
    const char eol = '\0';
    const char csv_sep = ',';

    const char not_present = '0';
    const char present = '1';

    int j = 0, k = 0;  // j = k-mers buffered, k = byte offset in array_kmers

    int res_parseKmerCount;
    int nb_kmers_present = 0;

    int nb_bytes_kmer = CEIL(root->k * 2, SIZE_BITS_UINT_8T);
    int nb_kmer_in_buf = SIZE_BUFFER/nb_bytes_kmer;

    int lvl_root = (root->k / NB_CHAR_SUF_PREF) - 1;

    ssize_t res_get_line;

    size_t return_fread;
    size_t size_buffer_queries = SIZE_BUFFER;

    // NOTE(review): unsigned iterators compared against int root->nb_genomes
    // below — assumes nb_genomes >= 1; verify against callers.
    uint32_t i, it_annot, it_csv_line_res;
    uint32_t* ids_present;

    uint64_t kmers_read = 0;

    BFT_kmer* bft_kmer;
    BFT_annotation* bft_annot;

    bool* is_iupac;

    char* buffer_queries = (char*) calloc(size_buffer_queries, sizeof(char));
    ASSERT_NULL_PTR(buffer_queries,"query_sequences_outputCSV()\n");

    // One CSV row: 2 chars per genome ("<0|1>," ... last one followed by '\n').
    char* csv_line_res = (char*) calloc(root->nb_genomes * 2, sizeof(char));
    ASSERT_NULL_PTR(csv_line_res,"query_sequences_outputCSV()\n");

    uint8_t* array_kmers = (uint8_t*) calloc(SIZE_BUFFER, sizeof(uint8_t));
    ASSERT_NULL_PTR(array_kmers,"queryBFT_kmerPresences_from_KmerFiles()")

    FILE* file_query = fopen(query_filename, "r");
    ASSERT_NULL_PTR(file_query,"queryBFT_kmerPresences_from_KmerFiles()")

    FILE* file_output = fopen(output_filename, "w");
    ASSERT_NULL_PTR(file_output,"queryBFT_kmerPresences_from_KmerFiles()")

    bft_kmer = create_empty_kmer();

    printf("\nQuerying BFT for k-mers in %s\n\n", query_filename);

    // CSV header row: genome file names separated by commas; the separators
    // of csv_line_res are filled in once here and reused for every data row.
    for (i = 0; i < root->nb_genomes - 1; i++){
        fwrite(root->filenames[i], sizeof(char), strlen(root->filenames[i]), file_output);
        fwrite(&csv_sep, sizeof(char), 1, file_output);
        csv_line_res[i * 2 + 1] = csv_sep;
    }

    csv_line_res[root->nb_genomes * 2 - 1] = nl;

    fwrite(root->filenames[i], sizeof(char), strlen(root->filenames[i]), file_output);

    if (fwrite(&nl, sizeof(char), 1, file_output) != 1)
        ERROR("query_sequences_outputCSV(): could not write output to CSV file.\n")

    if (binary_file){

        // Skip the two header lines of the binary k-mer file.
        if (fgets(buffer_queries, 100, file_query) == NULL) ERROR("Cannot read header of the queries file")
        if (fgets(buffer_queries, 100, file_query) == NULL) ERROR("Cannot read header of the queries file")

        while ((!ferror(file_query)) && (!feof(file_query))){

            return_fread = fread(array_kmers, (size_t)nb_bytes_kmer, (size_t)nb_kmer_in_buf, file_query);

            for (k = 0; k < (int)return_fread; k++){

                bft_kmer->res = isKmerPresent(&(root->node), root, lvl_root, &(array_kmers[k * nb_bytes_kmer]), root->k);

                it_csv_line_res = 0;

                if (is_kmer_in_cdbg(bft_kmer)){

                    nb_kmers_present++;

                    bft_annot = get_annotation(bft_kmer);
                    ids_present = get_list_id_genomes(bft_annot, root);
                    free_BFT_annotation(bft_annot);

                    // ids_present[0] is the count; ids_present[1..] are sorted
                    // genome ids.  Expand them to a '0'/'1' row.
                    for (it_annot = 1; it_annot <= ids_present[0]; it_annot++){

                        for (i = 0; i < ids_present[it_annot] - (it_annot == 1 ? 0 : ids_present[it_annot - 1] + 1); i++, it_csv_line_res += 2)
                            csv_line_res[it_csv_line_res] = not_present;

                        csv_line_res[it_csv_line_res] = present;
                        it_csv_line_res += 2;
                    }

                    for (it_annot = ids_present[ids_present[0]] + 1; it_annot < root->nb_genomes; it_annot++, it_csv_line_res += 2)
                        csv_line_res[it_csv_line_res] = not_present;

                    free(ids_present);
                }
                else{
                    // k-mer absent from the graph: all-zero row.
                    for (it_annot = 0; it_annot < root->nb_genomes; it_annot++, it_csv_line_res += 2)
                        csv_line_res[it_csv_line_res] = not_present;
                }

                if (fwrite(csv_line_res, sizeof(char), root->nb_genomes * 2, file_output) != root->nb_genomes * 2)
                    ERROR("query_sequences_outputCSV(): could not write output to CSV file.\n")

                free(bft_kmer->res);
            }

            kmers_read += return_fread;

            memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t));
        }
    }
    else{

        // ASCII queries, one k-mer per line; k-mers with IUPAC/unparseable
        // characters are flagged in is_iupac[] and reported as absent.
        is_iupac = (bool*) malloc(nb_kmer_in_buf * sizeof(bool));
        ASSERT_NULL_PTR(is_iupac, "queryBFT_kmerPresences_from_KmerFiles()")

        res_get_line = getline(&buffer_queries, &size_buffer_queries, file_query);

        // Loop until EOF AND the buffer has been flushed (j == 0).
        while ((res_get_line != -1) || j){

            if (res_get_line != -1){
                buffer_queries[strcspn(buffer_queries, "\r\n")] = '\0';
                kmers_read++;
                res_parseKmerCount = parseKmerCount(buffer_queries, root->k, array_kmers, k);
            }
            else res_parseKmerCount = 1;  // EOF: force a final flush below

            if (res_parseKmerCount == 1){

                if (res_get_line != -1){
                    is_iupac[j] = false;
                    k += nb_bytes_kmer;
                    j++;
                }

                if ((res_get_line == -1) || (j == nb_kmer_in_buf)){

                    for (k = 0; k < j; k++){

                        if (!is_iupac[k]){

                            bft_kmer->res = isKmerPresent(&(root->node), root, lvl_root, &array_kmers[k * nb_bytes_kmer], root->k);

                            it_csv_line_res = 0;

                            if (is_kmer_in_cdbg(bft_kmer)){

                                nb_kmers_present++;

                                bft_annot = get_annotation(bft_kmer);
                                ids_present = get_list_id_genomes(bft_annot, root);
                                free_BFT_annotation(bft_annot);

                                for (it_annot = 1; it_annot <= ids_present[0]; it_annot++){

                                    for (i = 0; i < ids_present[it_annot] - (it_annot == 1 ? 0 : ids_present[it_annot - 1] + 1); i++, it_csv_line_res += 2)
                                        csv_line_res[it_csv_line_res] = not_present;

                                    csv_line_res[it_csv_line_res] = present;
                                    it_csv_line_res += 2;
                                }

                                for (it_annot = ids_present[ids_present[0]] + 1; it_annot < root->nb_genomes; it_annot++, it_csv_line_res += 2)
                                    csv_line_res[it_csv_line_res] = not_present;

                                free(ids_present);
                            }
                            else{
                                for (it_annot = 0; it_annot < root->nb_genomes; it_annot++, it_csv_line_res += 2)
                                    csv_line_res[it_csv_line_res] = not_present;
                            }

                            free(bft_kmer->res);
                        }
                        else {
                            // IUPAC/unparsed k-mer: emit an all-zero row.
                            it_csv_line_res = 0;

                            for (it_annot = 0; it_annot < root->nb_genomes; it_annot++, it_csv_line_res += 2)
                                csv_line_res[it_csv_line_res] = not_present;
                        }

                        if (fwrite(csv_line_res, sizeof(char), root->nb_genomes * 2, file_output) != root->nb_genomes * 2)
                            ERROR("query_sequences_outputCSV(): could not write output to CSV file.\n")
                    }

                    j = 0;
                    k = 0;
                    memset(array_kmers, 0, SIZE_BUFFER * sizeof(uint8_t));
                }
            }
            else is_iupac[j] = true;

            //if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+nb_kmer_in_buf)%PRINT_EVERY_X_KMERS))
                // printf("%" PRIu64 " kmers read\n", kmers_read+nb_kmer_in_buf);

            res_get_line = getline(&buffer_queries, &size_buffer_queries, file_query);
        }

        free(is_iupac);
    }

    // Replace the trailing newline of the last row with a '\0' terminator.
    fseek(file_output, 0 - ((long int) sizeof(char)), SEEK_CUR);

    if (fwrite(&eol, sizeof(char), 1, file_output) != 1)
        ERROR("query_sequences_outputCSV(): could not write output to CSV file.\n")

    fclose(file_query);
    fclose(file_output);

    free(bft_kmer);
    free(array_kmers);
    free(buffer_queries);
    free(csv_line_res);

    gettimeofday(&tval_after, NULL);
    time_spent(&tval_before, &tval_after, &tval_result);
    printf("\nElapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec);

    printf("Peak of memory: %llu mb\n", ((unsigned long long int)getPeakRSS())/1024);
    printf("Current memory: %llu mb\n", ((unsigned long long int)getCurrentRSS())/1024);

    return nb_kmers_present;
}

/*
 * Count the branching k-mers (more than one right or left neighbor in the
 * colored de Bruijn graph) among the k-mers of `query_filename`.
 */
int queryBFT_kmerBranching_from_KmerFiles(BFT_Root* root, char* query_filename, int binary_file){

    ASSERT_NULL_PTR(root,"queryBFT_kmerBranching_from_KmerFiles()")
    ASSERT_NULL_PTR(query_filename,"queryBFT_kmerBranching_from_KmerFiles()")

    struct timeval tval_before, tval_after, tval_result;
    gettimeofday(&tval_before, NULL);

    int i = 0;
    int j = 0;  // k-mers buffered
    int k = 0;  // byte offset in array_kmers

    int count_branching_node = 0;

    int lvl_root = (root->k / NB_CHAR_SUF_PREF) - 1;

    int nb_bytes_kmer = CEIL(root->k*2, SIZE_BITS_UINT_8T);
    int nb_kmer_in_buf = SIZE_BUFFER/nb_bytes_kmer;

    uint64_t kmers_read = 0;

    FILE* file;

    size_t return_fread;

    uint8_t* array_kmers = (uint8_t*) calloc(SIZE_BUFFER, sizeof(uint8_t));
    ASSERT_NULL_PTR(array_kmers,"queryBFT_kmerBranching_from_KmerFiles()")

    char* line = (char*) calloc(100, sizeof(char));
    ASSERT_NULL_PTR(line,"queryBFT_kmerBranching_from_KmerFiles()")

    // Skip-node index speeds up neighbor lookups during branching tests.
    root->skip_sp = build_skip_nodes(&(root->node));

    file = fopen(query_filename, "r");
    ASSERT_NULL_PTR(file,"queryBFT_kmerBranching_from_KmerFiles()")

    printf("\nQuerying BFT for branching k-mers in %s\n\n", query_filename);

    if (binary_file){

        if (fgets(line, 100, file) == NULL) ERROR("Cannot read header of the file")
        if (fgets(line, 100, file) == NULL) ERROR("Cannot read header of the file")

        while ((!ferror(file)) && (!feof(file))){

            return_fread = fread(array_kmers, (size_t)nb_bytes_kmer, (size_t)nb_kmer_in_buf, file);

            for (k=0; k<(int)return_fread; k++){

                if (isBranchingRight(&(root->node), root, lvl_root, &(array_kmers[k*nb_bytes_kmer]), root->k) > 1){
                    count_branching_node++;
                }
                else if (isBranchingLeft(&(root->node), root, lvl_root, &(array_kmers[k*nb_bytes_kmer]), root->k) > 1){
                    count_branching_node++;
                }
            }

            if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+return_fread)%PRINT_EVERY_X_KMERS))
                printf("%" PRIu64 " kmers read\n", kmers_read+return_fread);

            kmers_read += return_fread;

            memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t));
        }
    }
    else{
        while (fgets(line, 100, file) !=
NULL){ if (parseKmerCount(line, root->k, array_kmers, k) == 1){ k += nb_bytes_kmer; j++; if (j == nb_kmer_in_buf){ for (i=0; i<nb_kmer_in_buf; i++){ if (isBranchingRight(&(root->node), root, lvl_root, &(array_kmers[i*nb_bytes_kmer]), root->k) > 1){ count_branching_node++; } else if (isBranchingLeft(&(root->node), root, lvl_root, &(array_kmers[i*nb_bytes_kmer]), root->k) > 1){ count_branching_node++; } } j = 0; k = 0; memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t)); if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+nb_kmer_in_buf)%PRINT_EVERY_X_KMERS)) printf("%" PRIu64 " kmers read\n", kmers_read+nb_kmer_in_buf); kmers_read += nb_kmer_in_buf; } } } for (i=0; i<j; i++){ if (isBranchingRight(&(root->node), root, lvl_root, &(array_kmers[i*nb_bytes_kmer]), root->k) > 1){ count_branching_node++; } else if (isBranchingLeft(&(root->node), root, lvl_root, &(array_kmers[i*nb_bytes_kmer]), root->k) > 1){ count_branching_node++; } } memset(array_kmers, 0, SIZE_BUFFER*sizeof(uint8_t)); } fclose(file); free(array_kmers); free(line); if (root->skip_sp != NULL) free_skip_nodes(&(root->node), root->skip_sp); gettimeofday(&tval_after, NULL); time_spent(&tval_before, &tval_after, &tval_result); printf("\nElapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); printf("Peak of memory: %llu mb\n", ((unsigned long long int)getPeakRSS())/1024); printf("Current memory: %llu mb\n", ((unsigned long long int)getCurrentRSS())/1024); return count_branching_node; } /*void par_insert_Genomes_from_KmerFiles(int nb_files, char** filenames, int binary_files, int size_kmer, int treshold_compression, char* prefix_output, int cut_lvl, int memory_limit){ ASSERT_NULL_PTR(filenames,"insert_Genomes_from_KmerFiles()") struct timeval tval_before, tval_after, tval_result; Pvoid_t* PJArray; Word_t Rc_word; annotation_array_elem* comp_set_colors_tmp; BFT_Root** root; FILE** file; uint8_t** array_kmers; char** line; char** output_filename; char** output_filename2; char* 
str_tmp; char* output_filename3; int* nb_bfts_on_disk; int steps = 2; int i, j, k; int lvl_root; int nb_bytes_kmer; int nb_kmer_in_buf; int it_thread, thread_id; int len_longest_annot; int len_output_filename; int len_output_filename2; int nb_merging; int nb_threads; int size_id_genome; int it_bft_thread; int length_comp_set_colors_tmp; size_t return_fread; uint64_t kmers_read; //if (memory_limit > 0) omp_set_num_threads(1); //else memory_limit = INT_MAX; omp_set_num_threads(1); #pragma omp parallel \ shared(line, array_kmers, file, tval_before, tval_after, tval_result, PJArray, nb_threads, nb_bfts_on_disk, \ lvl_root, nb_bytes_kmer, nb_kmer_in_buf, root, output_filename, output_filename2, len_output_filename,) \ private(i, j, k, thread_id, Rc_word, comp_set_colors_tmp, str_tmp, size_id_genome, return_fread, it_thread, \ length_comp_set_colors_tmp, len_longest_annot, kmers_read, len_output_filename2) { #pragma omp single { gettimeofday(&tval_before, NULL); nb_threads = omp_get_num_threads(); length_comp_set_colors_tmp = 0; size_id_genome = 0; comp_set_colors_tmp = NULL; len_output_filename = strlen(prefix_output); nb_bfts_on_disk = (int*) calloc(nb_threads, sizeof(int)); ASSERT_NULL_PTR(nb_bfts_on_disk, "par_insert_Genomes_from_KmerFiles()\n") output_filename = (char**) malloc(nb_threads * sizeof(char*)); ASSERT_NULL_PTR(output_filename, "par_insert_Genomes_from_KmerFiles()\n") output_filename2 = (char**) malloc(nb_threads * sizeof(char*)); ASSERT_NULL_PTR(output_filename2, "par_insert_Genomes_from_KmerFiles()\n") output_filename3 = (char*) malloc((len_output_filename + 30) * sizeof(char)); ASSERT_NULL_PTR(output_filename3, "merging_BFT()\n"); strcpy(output_filename3, prefix_output); strcpy(&output_filename3[strlen(output_filename3)], "_tmp"); line = (char**) malloc(nb_threads * sizeof(char*)); ASSERT_NULL_PTR(line, "par_insert_Genomes_from_KmerFiles()\n") array_kmers = (uint8_t**) malloc(nb_threads * sizeof(uint8_t*)); ASSERT_NULL_PTR(array_kmers, 
"par_insert_Genomes_from_KmerFiles()\n") file = (FILE**) malloc(nb_threads * sizeof(FILE*)); ASSERT_NULL_PTR(file, "par_insert_Genomes_from_KmerFiles()\n") PJArray = (PWord_t*) malloc(nb_threads * sizeof(PWord_t)); ASSERT_NULL_PTR(PJArray, "par_insert_Genomes_from_KmerFiles()\n") root = (BFT_Root**) malloc(nb_threads * sizeof(BFT_Root*)); ASSERT_NULL_PTR(root, "par_insert_Genomes_from_KmerFiles()\n") for (it_thread = 0; it_thread < nb_threads; it_thread++){ line[it_thread] = (char*) calloc(100, sizeof(char)); ASSERT_NULL_PTR(line[it_thread], "par_insert_Genomes_from_KmerFiles()\n") array_kmers[it_thread] = (uint8_t*) calloc(SIZE_BUFFER, sizeof(uint8_t)); ASSERT_NULL_PTR(array_kmers[it_thread], "par_insert_Genomes_from_KmerFiles()\n") output_filename[it_thread] = (char*) malloc((len_output_filename + 30) * sizeof(char)); ASSERT_NULL_PTR(output_filename[it_thread], "merging_BFT()\n"); output_filename2[it_thread] = (char*) malloc((len_output_filename + 30) * sizeof(char)); ASSERT_NULL_PTR(output_filename2[it_thread], "merging_BFT()\n"); strcpy(output_filename[it_thread], prefix_output); strcpy(output_filename2[it_thread], prefix_output); PJArray[it_thread] = (PWord_t)NULL; root[it_thread] = createBFT_Root(size_kmer, treshold_compression, 0); } lvl_root = (root[0]->k / NB_CHAR_SUF_PREF) - 1; nb_bytes_kmer = CEIL(root[0]->k * 2, SIZE_BITS_UINT_8T); nb_kmer_in_buf = SIZE_BUFFER/nb_bytes_kmer; } #pragma omp for for (i = 0; i < nb_files; i++){ //For each file in input thread_id = omp_get_thread_num(); kmers_read = 0; k = 0; j = 0; str_tmp = basename(filenames[i]); add_genomes_BFT_Root(1, &str_tmp, root[thread_id]); size_id_genome = get_nb_bytes_power2_annot(root[thread_id]->nb_genomes-1); file[thread_id] = fopen(filenames[i], "r"); ASSERT_NULL_PTR(file[thread_id],"insert_Genomes_from_KmerFiles()") printf("Processing file %s\n", filenames[i]); if (binary_files){ if (fgets(line[thread_id], 100, file[thread_id]) != NULL) k = atoi(line[thread_id]); else ERROR("Cannot read 
header of the file") if (fgets(line[thread_id], 100, file[thread_id]) == NULL) ERROR("Cannot read header of the file") while ((return_fread = fread(array_kmers[thread_id], nb_bytes_kmer, nb_kmer_in_buf, file[thread_id])) == nb_kmer_in_buf) { insertKmers(root[thread_id], array_kmers[thread_id], return_fread, root[thread_id]->nb_genomes-1, size_id_genome); memset(array_kmers[thread_id], 0, SIZE_BUFFER * sizeof(uint8_t)); if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+return_fread)%PRINT_EVERY_X_KMERS)){ // printf("%" PRIu64 " kmers read\n", kmers_read+return_fread); if (((unsigned long long int)getCurrentRSS())/1024 >= memory_limit){ sprintf(&(output_filename[thread_id][len_output_filename]), "%d", thread_id); len_output_filename2 = strlen(output_filename[thread_id]); output_filename[thread_id][len_output_filename2] = '_'; sprintf(&(output_filename[thread_id][len_output_filename2+1]), "%d", nb_bfts_on_disk[thread_id]); nb_bfts_on_disk[thread_id]++; write_BFT_Root_sparse(root[thread_id], output_filename[thread_id], false); freeBFT_Root(root[thread_id]); root[thread_id] = createBFT_Root(size_kmer, treshold_compression, 0); str_tmp = basename(filenames[i]); add_genomes_BFT_Root(1, &str_tmp, root[thread_id]); } } kmers_read += return_fread; return_fread = 0; } insertKmers(root[thread_id], array_kmers[thread_id], return_fread, root[thread_id]->nb_genomes-1, size_id_genome); } else { while (fgets(line[thread_id], 100, file[thread_id]) != NULL){ if (parseKmerCount(line[thread_id], root[thread_id]->k, array_kmers[thread_id], k) == 1){ k += nb_bytes_kmer; j++; if (j == nb_kmer_in_buf){ insertKmers(root[thread_id], array_kmers[thread_id], nb_kmer_in_buf, root[thread_id]->nb_genomes-1, size_id_genome); j = 0; k = 0; memset(array_kmers[thread_id], 0, SIZE_BUFFER * sizeof(uint8_t)); if ((kmers_read%PRINT_EVERY_X_KMERS) > ((kmers_read+nb_kmer_in_buf)%PRINT_EVERY_X_KMERS)){ //printf("%" PRIu64 " kmers read\n", kmers_read+nb_kmer_in_buf); if (((unsigned long long 
int)getCurrentRSS())/1024 >= memory_limit){ sprintf(&(output_filename[thread_id][len_output_filename]), "%d", thread_id); len_output_filename2 = strlen(output_filename[thread_id]); output_filename[thread_id][len_output_filename2] = '_'; sprintf(&(output_filename[thread_id][len_output_filename2+1]), "%d", nb_bfts_on_disk[thread_id]); nb_bfts_on_disk[thread_id]++; write_BFT_Root_sparse(root[thread_id], output_filename[thread_id], false); freeBFT_Root(root[thread_id]); root[thread_id] = createBFT_Root(size_kmer, treshold_compression, 0); str_tmp = basename(filenames[i]); add_genomes_BFT_Root(1, &str_tmp, root[thread_id]); } } kmers_read += nb_kmer_in_buf; } } } insertKmers(root[thread_id], array_kmers[thread_id], j, root[thread_id]->nb_genomes-1, size_id_genome); kmers_read += j; memset(array_kmers[thread_id], 0, SIZE_BUFFER * sizeof(uint8_t)); } fclose(file[thread_id]); if (root[thread_id]->treshold_compression != 0){ if ((root[thread_id]->nb_genomes-1 > 5) && ((root[thread_id]->nb_genomes-1)%root[thread_id]->treshold_compression == 0)){ memory_Used* mem = printMemoryUsedFromNode(&(root[thread_id]->node), lvl_root, root[thread_id]->k, root[thread_id]->info_per_lvl); len_longest_annot = MAX(mem->size_biggest_annot+1, getMaxSize_annotation_array_elem(root[thread_id]->comp_set_colors)); free(mem); load_annotation_from_Node(&(root[thread_id]->node), lvl_root, root[thread_id]->k, len_longest_annot, root[thread_id]->info_per_lvl, &(PJArray[thread_id]), root[thread_id]->comp_set_colors, root[thread_id]->ann_inf); comp_set_colors_tmp = root[thread_id]->comp_set_colors; length_comp_set_colors_tmp = root[thread_id]->length_comp_set_colors; root[thread_id]->comp_set_colors = sort_annotations(&(PJArray[thread_id]), &(root[thread_id]->length_comp_set_colors), len_longest_annot); if (root[thread_id]->comp_set_colors != NULL){ compress_annotation_from_Node(&(root[thread_id]->node), lvl_root, root[thread_id]->k, root[thread_id]->info_per_lvl, &(PJArray[thread_id]), 
comp_set_colors_tmp, root[thread_id]->ann_inf); free_annotation_array_elem(&comp_set_colors_tmp, &length_comp_set_colors_tmp); } #if defined (_WORDx86) Word_t * PValue; uint8_t* it_index = (uint8_t*) calloc((len_longest_annot + CEIL(len_longest_annot, SIZE_BITS_UINT_8T) + 4), sizeof(uint8_t)); ASSERT_NULL_PTR(it_index, "sort_annotations()"); JSLF(PValue, PJArray[thread_id], it_index); while (PValue != NULL){ free(*PValue); JSLN(PValue, PJArray[thread_id], it_index); } free(it_index); #endif JSLFA(Rc_word, PJArray[thread_id]); } } gettimeofday(&tval_after, NULL); time_spent(&tval_before, &tval_after, &tval_result); printf("Total elapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); printf("Peak of memory: %llu mb\n", ((unsigned long long int)getPeakRSS())/1024); printf("Current memory: %llu mb\n", ((unsigned long long int)getCurrentRSS())/1024); } #pragma omp for for (it_thread = 0; it_thread < nb_threads; it_thread++){ sprintf(&(output_filename[it_thread][len_output_filename]), "%d", it_thread); len_output_filename2 = strlen(output_filename[it_thread]); output_filename[it_thread][len_output_filename2] = '_'; sprintf(&(output_filename[it_thread][len_output_filename2+1]), "%d", nb_bfts_on_disk[it_thread]); nb_bfts_on_disk[it_thread]++; write_BFT_Root_sparse(root[it_thread], output_filename[it_thread], false); free(line[it_thread]); free(array_kmers[it_thread]); freeBFT_Root(root[it_thread]); } } free(PJArray); free(file); free(array_kmers); free(line); free(root); omp_set_nested(false); omp_set_num_threads(1); if (omp_get_num_threads() == 1){ strcpy(&output_filename[0][len_output_filename], "0_0"); strcpy(&output_filename2[0][len_output_filename], "0_0_pkd"); read_cut_BFT_Root(output_filename[0], output_filename2[0], cut_lvl, true); strcpy(&output_filename[0][len_output_filename], "0_0_pkd"); for (it_thread = 0; it_thread < nb_threads; it_thread++){ sprintf(&(output_filename2[it_thread][len_output_filename]), "%d", it_thread); 
len_output_filename2 = strlen(output_filename2[it_thread]); output_filename2[it_thread][len_output_filename2] = '_'; for (it_bft_thread = 0; it_bft_thread < nb_bfts_on_disk[it_thread]; it_bft_thread++){ if (it_thread || it_bft_thread){ sprintf(&(output_filename2[it_thread][len_output_filename2+1]), "%d", it_bft_thread); read_cut_BFT_Root(output_filename2[it_thread], output_filename3, cut_lvl, true); printf("%s - %s\n", output_filename[0], output_filename2[it_thread]); merging_BFT(output_filename[0], output_filename3, output_filename[0], cut_lvl, true); } } gettimeofday(&tval_after, NULL); time_spent(&tval_before, &tval_after, &tval_result); printf("Total elapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); printf("Peak of memory: %llu mb\n", ((unsigned long long int)getPeakRSS())/1024); printf("Current memory: %llu mb\n", ((unsigned long long int)getCurrentRSS())/1024); } free(nb_bfts_on_disk); } else{ #pragma omp parallel \ shared(output_filename, output_filename2, len_output_filename, nb_bfts_on_disk, tval_before, tval_after, tval_result) \ private(it_thread, it_bft_thread, nb_merging, steps, len_output_filename2) { #pragma omp for for (it_thread = 0; it_thread < nb_threads; it_thread++){ steps = 2; nb_merging = nb_bfts_on_disk[it_thread] - 1; sprintf(&(output_filename[it_thread][len_output_filename]), "%d", it_thread); sprintf(&(output_filename2[it_thread][len_output_filename]), "%d", it_thread); len_output_filename2 = strlen(output_filename[it_thread]); output_filename[it_thread][len_output_filename2] = '_'; output_filename2[it_thread][len_output_filename2] = '_'; while (nb_merging > 0){ for (it_bft_thread = 0; it_bft_thread + steps/2 < nb_bfts_on_disk[it_thread]; it_bft_thread += steps){ sprintf(&(output_filename[it_thread][len_output_filename2+1]), "%d", it_bft_thread); sprintf(&(output_filename2[it_thread][len_output_filename2+1]), "%d", it_bft_thread + steps/2); printf("%s - %s\n", output_filename[it_thread], 
output_filename2[it_thread]); merging_BFT(output_filename[it_thread], output_filename2[it_thread], output_filename[it_thread], cut_lvl, false); nb_merging--; } steps *= 2; } gettimeofday(&tval_after, NULL); time_spent(&tval_before, &tval_after, &tval_result); printf("Total elapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); printf("Peak of memory: %llu mb\n", ((unsigned long long int)getPeakRSS())/1024); printf("Current memory: %llu mb\n", ((unsigned long long int)getCurrentRSS())/1024); } } free(nb_bfts_on_disk); steps = 2; nb_merging = nb_threads - 1; while (nb_merging > 0){ #pragma omp parallel shared(nb_merging, steps, output_filename, output_filename2) private(it_thread, len_output_filename2) { #pragma omp for for (it_thread = 0; it_thread < nb_threads; it_thread += steps){ if (it_thread + steps/2 < nb_threads){ sprintf(&(output_filename[it_thread][len_output_filename]), "%d", it_thread); sprintf(&(output_filename2[it_thread][len_output_filename]), "%d", it_thread + steps/2); len_output_filename2 = strlen(output_filename[it_thread]); strcpy(&output_filename[it_thread][len_output_filename2], "_0"); strcpy(&output_filename2[it_thread][len_output_filename2], "_0"); merging_BFT(output_filename[it_thread], output_filename2[it_thread], output_filename[it_thread], cut_lvl, false); nb_merging--; } } } steps *= 2; gettimeofday(&tval_after, NULL); time_spent(&tval_before, &tval_after, &tval_result); printf("Total elapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); printf("Peak of memory: %llu mb\n", ((unsigned long long int)getPeakRSS())/1024); printf("Current memory: %llu mb\n", ((unsigned long long int)getCurrentRSS())/1024); } } return; }*/ void query_sequences_outputCSV(BFT_Root* root, char* query_filename, char* output_filename, double threshold, bool canonical_search){ ASSERT_NULL_PTR(root, "query_sequences_outputCSV()\n") ASSERT_NULL_PTR(query_filename, 
"query_sequences_outputCSV()\n") ASSERT_NULL_PTR(output_filename, "query_sequences_outputCSV()\n"); struct timeval tval_before, tval_after, tval_result; gettimeofday(&tval_before, NULL); if (threshold <= 0) ERROR("query_sequences_outputCSV(): the threshold must be superior to 0.\n"); if (threshold > 1) ERROR("query_sequences_outputCSV(): the threshold must be inferior or equal to 1.\n"); const char nl = '\n'; const char eol = '\0'; const char csv_sep = ','; const char not_present = '0'; const char present = '1'; uint64_t nb_queries = 0; size_t size_buffer_queries = SIZE_BUFFER; uint32_t i, it_annot, it_csv_line_res; uint32_t* ids_present; char* buffer_queries = (char*) calloc(size_buffer_queries, sizeof(char)); ASSERT_NULL_PTR(buffer_queries,"query_sequences_outputCSV()\n"); char* csv_line_res = (char*) calloc(root->nb_genomes * 2, sizeof(char)); ASSERT_NULL_PTR(csv_line_res,"query_sequences_outputCSV()\n"); FILE* file_query = fopen(query_filename, "r"); ASSERT_NULL_PTR(file_query,"query_sequences_outputCSV()\n") FILE* file_output = fopen(output_filename, "w"); ASSERT_NULL_PTR(file_output,"query_sequences_outputCSV()\n") //prepare_shuffling_dictionary(); for (i = 0; i < root->nb_genomes - 1; i++){ fwrite(root->filenames[i], sizeof(char), strlen(root->filenames[i]), file_output); fwrite(&csv_sep, sizeof(char), 1, file_output); csv_line_res[i * 2 + 1] = csv_sep; } csv_line_res[root->nb_genomes * 2 - 1] = nl; fwrite(root->filenames[i], sizeof(char), strlen(root->filenames[i]), file_output); if (fwrite(&nl, sizeof(char), 1, file_output) != 1) ERROR("query_sequences_outputCSV(): could not write output to CSV file.\n"); while (getline(&buffer_queries, &size_buffer_queries, file_query) != -1){ buffer_queries[strcspn(buffer_queries, "\r\n")] = '\0'; it_csv_line_res = 0; ids_present = query_sequence(root, buffer_queries, threshold, canonical_search); if ((ids_present != NULL) && ids_present[0]){ for (it_annot = 1; it_annot <= ids_present[0]; it_annot++){ for (i = 0; i < 
ids_present[it_annot] - (it_annot == 1 ? 0 : ids_present[it_annot - 1] + 1); i++, it_csv_line_res += 2) csv_line_res[it_csv_line_res] = not_present; csv_line_res[it_csv_line_res] = present; it_csv_line_res += 2; } for (it_annot = ids_present[ids_present[0]] + 1; it_annot < root->nb_genomes; it_annot++, it_csv_line_res += 2) csv_line_res[it_csv_line_res] = not_present; } else{ for (it_annot = 0; it_annot < root->nb_genomes; it_annot++, it_csv_line_res += 2) csv_line_res[it_csv_line_res] = not_present; } if (fwrite(csv_line_res, sizeof(char), root->nb_genomes * 2, file_output) != root->nb_genomes * 2) ERROR("query_sequences_outputCSV(): could not write output to CSV file.\n"); if (ids_present != NULL) free(ids_present); nb_queries++; } fseek(file_output, 0 - ((long int) sizeof(char)), SEEK_CUR); if (fwrite(&eol, sizeof(char), 1, file_output) != 1) ERROR("query_sequences_outputCSV(): could not write output to CSV file.\n"); free(csv_line_res); free(buffer_queries); fclose(file_query); fclose(file_output); gettimeofday(&tval_after, NULL); time_spent(&tval_before, &tval_after, &tval_result); printf("\nFile %s has been processed.\n", query_filename); printf("Elapsed time: %ld.%06ld s\n", (long int)tval_result.tv_sec, (long int)tval_result.tv_usec); printf("Peak of memory: %llu mb\n", ((unsigned long long int)getPeakRSS())/1024); printf("Current memory: %llu mb\n", ((unsigned long long int)getCurrentRSS())/1024); return; }
choleskies_cython.c
/* Generated by Cython 0.29.2 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_2" #define CYTHON_HEX_VERSION 0x001D02F0 #define CYTHON_FUTURE_DIVISION 0 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef 
CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define 
CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif 
#ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef 
CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if 
CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_USE_DICT_VERSIONS #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define 
__Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; // PyThread_create_key reports success always } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif // TSS (Thread Specific Storage) API #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #define PyObject_Unicode PyObject_Str #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef 
PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__GPy__util__choleskies_cython #define __PYX_HAVE_API__GPy__util__choleskies_cython /* Early includes */ #include <string.h> #include <stdio.h> #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "pythread.h" #include <stdlib.h> #include 
"pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) 
__Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; 
PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; /* Header.proto */ #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "GPy/util/choleskies_cython.pyx", "__init__.pxd", "stringsource", "type.pxd", }; /* NoFastGil.proto */ #define 
__Pyx_PyGILState_Ensure PyGILState_Ensure #define __Pyx_PyGILState_Release PyGILState_Release #define __Pyx_FastGIL_Remember() #define __Pyx_FastGIL_Forget() #define __Pyx_FastGilFuncInit() /* MemviewSliceStruct.proto */ struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj *memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; #define __Pyx_MemoryView_Len(m) (m.shape[0]) /* Atomics.proto */ #include <pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 #include <Windows.h> #undef __pyx_atomic_int_type #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #pragma message ("Using MSVC atomics") #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef __PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview)\ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ 
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else #define __pyx_add_acquisition_count(memview)\ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /* ForceInitThreads.proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":776 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":778 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":779 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":784 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":785 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* 
"../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":786 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":790 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":791 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":800 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* 
"../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":804 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":805 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":806 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":808 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":809 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":811 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":812 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* 
"../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":813 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* "scipy/linalg/cython_blas.pxd":15 * # The original libraries should be linked directly. * * ctypedef float s # <<<<<<<<<<<<<< * ctypedef double d * ctypedef float complex c */ typedef float __pyx_t_5scipy_6linalg_11cython_blas_s; /* "scipy/linalg/cython_blas.pxd":16 * * ctypedef float s * ctypedef double d # <<<<<<<<<<<<<< * ctypedef float complex c * ctypedef double complex z */ typedef double __pyx_t_5scipy_6linalg_11cython_blas_d; /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); /* Declarations.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":815 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* 
"../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":817 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":819 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":279 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* 
"View.MemoryView":961 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":961 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void 
(*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) 
Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define __Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, 
f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void 
__Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* None.proto */ static CYTHON_INLINE long __Pyx_div_long(long, long); /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define 
__Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* WriteUnraisableException.proto */ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback, int nogil); /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* DictGetItem.proto */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key); #define __Pyx_PyObject_Dict_GetItem(obj, name)\ (likely(PyDict_CheckExact(obj)) ?\ __Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name)) #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name) #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, 
tb) PyErr_SetExcInfo(type, value, tb) #endif /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* None.proto */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* 
GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; 
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject 
*__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* TypeImport.proto */ #ifndef __PYX_HAVE_RT_ImportType_proto #define __PYX_HAVE_RT_ImportType_proto enum __Pyx_ImportType_CheckSize { __Pyx_ImportType_CheckSize_Error = 0, __Pyx_ImportType_CheckSize_Warn = 1, __Pyx_ImportType_CheckSize_Ignore = 2 }; static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); #endif /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* TypeInfoCompare.proto */ static int 
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(PyObject *, int writable_flag); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp); static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj); /* RealImag.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(__cplusplus) && CYTHON_CCOMPLEX\ && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_float(a, b) ((a)==(b)) #define __Pyx_c_sum_float(a, b) ((a)+(b)) #define __Pyx_c_diff_float(a, b) ((a)-(b)) #define __Pyx_c_prod_float(a, b) ((a)*(b)) #define __Pyx_c_quot_float(a, b) ((a)/(b)) #define __Pyx_c_neg_float(a) (-(a)) #ifdef __cplusplus 
#define __Pyx_c_is_zero_float(z) ((z)==(float)0) #define __Pyx_c_conj_float(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_float(z) (::std::abs(z)) #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_float(z) ((z)==0) #define __Pyx_c_conj_float(z) (conjf(z)) #if 1 #define __Pyx_c_abs_float(z) (cabsf(z)) #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif /* Arithmetic.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq_double(a, b) ((a)==(b)) #define __Pyx_c_sum_double(a, b) ((a)+(b)) #define __Pyx_c_diff_double(a, b) ((a)-(b)) #define __Pyx_c_prod_double(a, b) ((a)*(b)) #define __Pyx_c_quot_double(a, b) ((a)/(b)) #define __Pyx_c_neg_double(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero_double(z) ((z)==(double)0) #define __Pyx_c_conj_double(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs_double(z) (::std::abs(z)) #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero_double(z) ((z)==0) #define __Pyx_c_conj_double(z) (conj(z)) #if 1 #define 
__Pyx_c_abs_double(z) (cabs(z)) #define __Pyx_c_pow_double(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_double(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *, int writable_flag); /* CheckBinaryVersion.proto 
*/ static int __Pyx_check_binary_version(void); /* FunctionImport.proto */ static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module 
declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'cpython.mem' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ static CYTHON_INLINE int __pyx_f_5numpy_import_array(void); /*proto*/ /* Module declarations from 'scipy.linalg.cython_blas' */ static __pyx_t_5scipy_6linalg_11cython_blas_d (*__pyx_f_5scipy_6linalg_11cython_blas_ddot)(int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *); /*proto*/ static void (*__pyx_f_5scipy_6linalg_11cython_blas_dscal)(int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *); /*proto*/ static void (*__pyx_f_5scipy_6linalg_11cython_blas_dsymv)(char *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *); /*proto*/ /* Module declarations from 'GPy.util.choleskies_cython' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static void 
__pyx_f_3GPy_4util_17choleskies_cython_chol_backprop(int, __Pyx_memviewslice, __Pyx_memviewslice); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t 
__pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "GPy.util.choleskies_cython" extern int __pyx_module_is_main_GPy__util__choleskies_cython; int __pyx_module_is_main_GPy__util__choleskies_cython = 0; /* Implementation of 'GPy.util.choleskies_cython' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_xrange; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static PyObject *__pyx_builtin_ImportError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static 
PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_D[] = "D"; static const char __pyx_k_L[] = "L"; static const char __pyx_k_M[] = "M"; static const char __pyx_k_N[] = "N"; static const char __pyx_k_O[] = "O"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_d[] = "d"; static const char __pyx_k_i[] = "i"; static const char __pyx_k_j[] = "j"; static const char __pyx_k_k[] = "k"; static const char __pyx_k_m[] = "m"; static const char __pyx_k_dL[] = "dL"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_mm[] = "mm"; static const char __pyx_k_np[] = "np"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_ret[] = "ret"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_flat[] = "flat"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_tril[] = "tril"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_count[] = "count"; static const char __pyx_k_dL_dK[] = "dL_dK"; static const char __pyx_k_empty[] = "empty"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_zeros[] = "zeros"; static const char __pyx_k_L_cont[] = 
"L_cont"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_xrange[] = "xrange"; static const char __pyx_k_asarray[] = "asarray"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_ImportError[] = "ImportError"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_RuntimeError[] = "RuntimeError"; static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_flat_to_triang[] = "flat_to_triang"; static const char __pyx_k_triang_to_flat[] = "triang_to_flat"; static const char __pyx_k_View_MemoryView[] = 
"View.MemoryView"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_ascontiguousarray[] = "ascontiguousarray"; static const char __pyx_k_backprop_gradient[] = "backprop_gradient"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_backprop_gradient_par[] = "backprop_gradient_par"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_backprop_gradient_par_c[] = "backprop_gradient_par_c"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_GPy_util_choleskies_cython[] = "GPy.util.choleskies_cython"; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_GPy_util_choleskies_cython_pyx[] = "GPy/util/choleskies_cython.pyx"; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_numpy_core_multiarray_failed_to[] = 
"numpy.core.multiarray failed to import"; static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and 
strides."; static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_D; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_n_s_GPy_util_choleskies_cython; static PyObject *__pyx_kp_s_GPy_util_choleskies_cython_pyx; static PyObject *__pyx_n_s_ImportError; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_L; static PyObject *__pyx_n_s_L_cont; static PyObject *__pyx_n_s_M; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_s_N; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_asarray; static PyObject *__pyx_n_s_ascontiguousarray; static PyObject *__pyx_n_s_backprop_gradient; 
static PyObject *__pyx_n_s_backprop_gradient_par; static PyObject *__pyx_n_s_backprop_gradient_par_c; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_count; static PyObject *__pyx_n_s_d; static PyObject *__pyx_n_s_dL; static PyObject *__pyx_n_s_dL_dK; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_empty; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_flat; static PyObject *__pyx_n_s_flat_to_triang; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_j; static PyObject *__pyx_n_s_k; static PyObject *__pyx_n_s_m; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mm; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to; static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor; static PyObject *__pyx_n_s_obj; static 
PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_ret; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_triang_to_flat; static PyObject *__pyx_n_s_tril; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_xrange; static PyObject *__pyx_n_s_zeros; static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_flat_to_triang(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_flat, int __pyx_v_M); /* proto */ static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_2triang_to_flat(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_L); /* proto */ static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_4backprop_gradient(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L); /* proto */ static 
PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_6backprop_gradient_par(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L); /* proto */ static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_8backprop_gradient_par_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct 
__pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject 
*__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject 
*__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__22; static PyObject *__pyx_tuple__10; 
static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__16; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__20; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__26; static PyObject *__pyx_tuple__28; static PyObject *__pyx_tuple__30; static PyObject *__pyx_tuple__32; static PyObject *__pyx_tuple__34; static PyObject *__pyx_tuple__36; static PyObject *__pyx_tuple__37; static PyObject *__pyx_tuple__38; static PyObject *__pyx_tuple__39; static PyObject *__pyx_tuple__40; static PyObject *__pyx_tuple__41; static PyObject *__pyx_codeobj__27; static PyObject *__pyx_codeobj__29; static PyObject *__pyx_codeobj__31; static PyObject *__pyx_codeobj__33; static PyObject *__pyx_codeobj__35; static PyObject *__pyx_codeobj__42; /* Late includes */ /* "GPy/util/choleskies_cython.pyx":14 * np.import_array() * * def flat_to_triang(double[:, :] flat, int M): # <<<<<<<<<<<<<< * """take a matrix N x D and return a D X M x M array where * */ /* Python wrapper */ static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_1flat_to_triang(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static char __pyx_doc_3GPy_4util_17choleskies_cython_flat_to_triang[] = "take a matrix N x D and return a D X M x M array where\n\n N = M(M+1)/2\n\n the lower triangluar portion of the d'th slice of the result is filled by the d'th column of flat.\n "; static PyMethodDef __pyx_mdef_3GPy_4util_17choleskies_cython_1flat_to_triang = {"flat_to_triang", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_3GPy_4util_17choleskies_cython_1flat_to_triang, METH_VARARGS|METH_KEYWORDS, __pyx_doc_3GPy_4util_17choleskies_cython_flat_to_triang}; static 
/* ------------------------------------------------------------------------
 * NOTE(review): this is Cython-GENERATED C (from
 * GPy/util/choleskies_cython.pyx).  Do not hand-edit the logic here;
 * change the .pyx source and regenerate instead.  Comments below were
 * added/condensed for readability only -- all code tokens are unchanged.
 * ------------------------------------------------------------------------ */

/* Python entry point for flat_to_triang(flat, M): parses the two arguments
 * (positional or keyword), converts `flat` to a 2-D strided double
 * memoryview and `M` to a C int, then delegates to the implementation. */
PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_1flat_to_triang(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  __Pyx_memviewslice __pyx_v_flat = { 0, 0, { 0 }, { 0 }, { 0 } };
  int __pyx_v_M;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("flat_to_triang (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_flat,&__pyx_n_s_M,0};
    PyObject* values[2] = {0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      /* collect positional arguments first */
      switch (pos_args) {
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* fill the remaining slots from keyword arguments */
      switch (pos_args) {
        case 0:
        if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flat)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case 1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_M)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("flat_to_triang", 1, 2, 2, 1); __PYX_ERR(0, 14, __pyx_L3_error)
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "flat_to_triang") < 0)) __PYX_ERR(0, 14, __pyx_L3_error)
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
    }
    __pyx_v_flat = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_flat.memview)) __PYX_ERR(0, 14, __pyx_L3_error)
    __pyx_v_M = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_M == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 14, __pyx_L3_error)
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("flat_to_triang", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 14, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("GPy.util.choleskies_cython.flat_to_triang", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_3GPy_4util_17choleskies_cython_flat_to_triang(__pyx_self, __pyx_v_flat, __pyx_v_M);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of flat_to_triang (pyx:14): copies column d of the
 * (N x D) matrix `flat` into the lower triangle of slice d of a new
 * (D x M x M) zero array, where N = M*(M+1)/2.  The copy loops run with
 * the GIL released. */
static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_flat_to_triang(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_flat, int __pyx_v_M) {
  int __pyx_v_D;
  CYTHON_UNUSED int __pyx_v_N;
  int __pyx_v_count;
  __Pyx_memviewslice __pyx_v_ret = { 0, 0, { 0 }, { 0 }, { 0 } };
  int __pyx_v_d;
  int __pyx_v_m;
  int __pyx_v_mm;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  __Pyx_memviewslice __pyx_t_7 = { 0, 0, { 0 }, { 0 }, { 0 } };
  int __pyx_t_8;
  int __pyx_t_9;
  int __pyx_t_10;
  int __pyx_t_11;
  int __pyx_t_12;
  int __pyx_t_13;
  long __pyx_t_14;
  long __pyx_t_15;
  int __pyx_t_16;
  Py_ssize_t __pyx_t_17;
  Py_ssize_t __pyx_t_18;
  Py_ssize_t __pyx_t_19;
  Py_ssize_t __pyx_t_20;
  Py_ssize_t __pyx_t_21;
  __Pyx_RefNannySetupContext("flat_to_triang", 0);

  /* pyx:21  cdef int D = flat.shape[1] */
  __pyx_v_D = (__pyx_v_flat.shape[1]);

  /* pyx:22  cdef int N = flat.shape[0]  (unused; kept by the generator) */
  __pyx_v_N = (__pyx_v_flat.shape[0]);

  /* pyx:23  cdef int count = 0 */
  __pyx_v_count = 0;

  /* pyx:24  cdef double[:, :, ::1] ret = np.zeros((D, M, M)) */
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_D); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_M); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_M); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 24, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_6);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_4);
  PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_4);
  __Pyx_GIVEREF(__pyx_t_5);
  PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_t_5);
  __pyx_t_2 = 0;
  __pyx_t_4 = 0;
  __pyx_t_5 = 0;
  __pyx_t_5 = NULL;
  /* unbind a bound method so the call below can pass self explicitly */
  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
    __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_3);
    if (likely(__pyx_t_5)) {
      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_5);
      __Pyx_INCREF(function);
      __Pyx_DECREF_SET(__pyx_t_3, function);
    }
  }
  __pyx_t_1 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_5, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_6);
  __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
  __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_7.memview)) __PYX_ERR(0, 24, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_v_ret = __pyx_t_7;
  __pyx_t_7.memview = NULL;
  __pyx_t_7.data = NULL;

  /* pyx:26  with nogil: */
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      Py_UNBLOCK_THREADS
      __Pyx_FastGIL_Remember();
      #endif
      /*try:*/ {

        /* pyx:27  for d in range(D): */
        __pyx_t_8 = __pyx_v_D;
        __pyx_t_9 = __pyx_t_8;
        for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
          __pyx_v_d = __pyx_t_10;

          /* pyx:28  count = 0 */
          __pyx_v_count = 0;

          /* pyx:29  for m in range(M): */
          __pyx_t_11 = __pyx_v_M;
          __pyx_t_12 = __pyx_t_11;
          for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) {
            __pyx_v_m = __pyx_t_13;

            /* pyx:30  for mm in range(m+1): */
            __pyx_t_14 = (__pyx_v_m + 1);
            __pyx_t_15 = __pyx_t_14;
            for (__pyx_t_16 = 0; __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) {
              __pyx_v_mm = __pyx_t_16;

              /* pyx:31  ret[d, m, mm] = flat[count, d]
               * (negative indices are wrapped before the strided access) */
              __pyx_t_17 = __pyx_v_count;
              __pyx_t_18 = __pyx_v_d;
              if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_flat.shape[0];
              if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_v_flat.shape[1];
              __pyx_t_19 = __pyx_v_d;
              __pyx_t_20 = __pyx_v_m;
              __pyx_t_21 = __pyx_v_mm;
              if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_v_ret.shape[0];
              if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_v_ret.shape[1];
              if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_v_ret.shape[2];
              *((double *) ( /* dim=2 */ ((char *) (((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_ret.data + __pyx_t_19 * __pyx_v_ret.strides[0]) ) + __pyx_t_20 * __pyx_v_ret.strides[1]) )) + __pyx_t_21)) )) = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_flat.data + __pyx_t_17 * __pyx_v_flat.strides[0]) ) + __pyx_t_18 * __pyx_v_flat.strides[1]) )));

              /* pyx:32  count += 1 */
              __pyx_v_count = (__pyx_v_count + 1);
            }
          }
        }
      }

      /* pyx:26  end of nogil block -- reacquire the GIL */
      /*finally:*/ {
        /*normal exit:*/{
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L5;
        }
        __pyx_L5:;
      }
  }

  /* pyx:33  return ret -- wrap the memoryview slice as a Python object */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_ret, 3, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code (flat_to_triang implementation) */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __PYX_XDEC_MEMVIEW(&__pyx_t_7, 1);
  __Pyx_AddTraceback("GPy.util.choleskies_cython.flat_to_triang", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __PYX_XDEC_MEMVIEW(&__pyx_v_ret, 1);
  __PYX_XDEC_MEMVIEW(&__pyx_v_flat, 1);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* pyx:35  def triang_to_flat(L) -- Python wrapper.  METH_O, so there is a
 * single positional argument and no tuple/keyword parsing: the argument is
 * converted to a 3-D strided double memoryview and passed on. */
/* Python wrapper */
static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_3triang_to_flat(PyObject *__pyx_self, PyObject *__pyx_arg_L); /*proto*/
static PyMethodDef __pyx_mdef_3GPy_4util_17choleskies_cython_3triang_to_flat = {"triang_to_flat", (PyCFunction)__pyx_pw_3GPy_4util_17choleskies_cython_3triang_to_flat, METH_O, 0};
static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_3triang_to_flat(PyObject *__pyx_self, PyObject *__pyx_arg_L) {
  __Pyx_memviewslice __pyx_v_L = { 0, 0, { 0 }, { 0 }, { 0 } };
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("triang_to_flat (wrapper)", 0);
  assert(__pyx_arg_L);
  {
    __pyx_v_L = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(__pyx_arg_L, PyBUF_WRITABLE); if (unlikely(!__pyx_v_L.memview)) __PYX_ERR(0, 35, __pyx_L3_error)
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  __Pyx_AddTraceback("GPy.util.choleskies_cython.triang_to_flat", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_3GPy_4util_17choleskies_cython_2triang_to_flat(__pyx_self, __pyx_v_L);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of triang_to_flat (pyx:35): inverse of flat_to_triang.
 * Packs the lower triangle of each (M x M) slice of the (D x M x M) array
 * L into column d of a new (N x D) matrix, N = M*(M+1)/2, with the GIL
 * released during the copy loops. */
static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_2triang_to_flat(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_L) {
  int __pyx_v_D;
  int __pyx_v_M;
  int __pyx_v_N;
  int __pyx_v_count;
  __Pyx_memviewslice __pyx_v_flat = { 0, 0, { 0 }, { 0 }, { 0 } };
  int __pyx_v_d;
  int __pyx_v_m;
  int __pyx_v_mm;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
  int __pyx_t_7;
  int __pyx_t_8;
  int __pyx_t_9;
  int __pyx_t_10;
  int __pyx_t_11;
  int __pyx_t_12;
  long __pyx_t_13;
  long __pyx_t_14;
  int __pyx_t_15;
  Py_ssize_t __pyx_t_16;
  Py_ssize_t __pyx_t_17;
  Py_ssize_t __pyx_t_18;
  Py_ssize_t __pyx_t_19;
  Py_ssize_t __pyx_t_20;
  __Pyx_RefNannySetupContext("triang_to_flat", 0);

  /* pyx:36  cdef int D = L.shape[0] */
  __pyx_v_D = (__pyx_v_L.shape[0]);

  /* pyx:37  cdef int M = L.shape[1] */
  __pyx_v_M = (__pyx_v_L.shape[1]);

  /* pyx:38  cdef int N = M*(M+1)/2  (helper keeps Python floor-division
   * semantics for the C long operands) */
  __pyx_v_N = __Pyx_div_long((__pyx_v_M * (__pyx_v_M + 1)), 2);

  /* pyx:39  cdef int count = 0 */
  __pyx_v_count = 0;

  /* pyx:40  cdef double[:, ::1] flat = np.empty((N, D)) */
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 40, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_empty); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 40, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_N); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 40, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_D); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 40, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_4);
  __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 40, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_5);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_4);
  PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4);
  __pyx_t_2 = 0;
  __pyx_t_4 = 0;
  __pyx_t_4 = NULL;
  /* unbind a bound method before calling np.empty */
  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
    if (likely(__pyx_t_4)) {
      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_4);
      __Pyx_INCREF(function);
      __Pyx_DECREF_SET(__pyx_t_3, function);
    }
  }
  __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_5);
  __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 40, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 40, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_v_flat = __pyx_t_6;
  __pyx_t_6.memview = NULL;
  __pyx_t_6.data = NULL;

  /* pyx:42  with nogil: */
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      Py_UNBLOCK_THREADS
      __Pyx_FastGIL_Remember();
      #endif
      /*try:*/ {

        /* pyx:43  for d in range(D): */
        __pyx_t_7 = __pyx_v_D;
        __pyx_t_8 = __pyx_t_7;
        for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) {
          __pyx_v_d = __pyx_t_9;

          /* pyx:44  count = 0 */
          __pyx_v_count = 0;

          /* pyx:45  for m in range(M): */
          __pyx_t_10 = __pyx_v_M;
          __pyx_t_11 = __pyx_t_10;
          for (__pyx_t_12 = 0; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
            __pyx_v_m = __pyx_t_12;

            /* pyx:46  for mm in range(m+1): */
            __pyx_t_13 = (__pyx_v_m + 1);
            __pyx_t_14 = __pyx_t_13;
            for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) {
              __pyx_v_mm = __pyx_t_15;

              /* pyx:47  flat[count, d] = L[d, m, mm]
               * (negative indices are wrapped before the strided access) */
              __pyx_t_16 = __pyx_v_d;
              __pyx_t_17 = __pyx_v_m;
              __pyx_t_18 = __pyx_v_mm;
              if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_v_L.shape[0];
              if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_L.shape[1];
              if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_v_L.shape[2];
              __pyx_t_19 = __pyx_v_count;
              __pyx_t_20 = __pyx_v_d;
              if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_v_flat.shape[0];
              if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_v_flat.shape[1];
              *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_flat.data + __pyx_t_19 * __pyx_v_flat.strides[0]) )) + __pyx_t_20)) )) = (*((double *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_16 * __pyx_v_L.strides[0]) ) + __pyx_t_17 * __pyx_v_L.strides[1]) ) + __pyx_t_18 * __pyx_v_L.strides[2]) )));

              /* pyx:48  count += 1 */
              __pyx_v_count = (__pyx_v_count + 1);
            }
          }
        }
      }

      /* pyx:42  end of nogil block -- reacquire the GIL */
      /*finally:*/ {
        /*normal exit:*/{
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L5;
        }
        __pyx_L5:;
      }
  }

  /* pyx:49  return flat */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_flat, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 49, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
  __Pyx_AddTraceback("GPy.util.choleskies_cython.triang_to_flat", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __PYX_XDEC_MEMVIEW(&__pyx_v_L, 1);
  __PYX_XDEC_MEMVIEW(&__pyx_v_flat, 1);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* pyx:51  def backprop_gradient(dL, L) -- Python wrapper: parses the two
 * arguments (positional or keyword) and converts both to 2-D strided
 * double memoryviews before delegating to the implementation. */
/* Python wrapper */
static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_5backprop_gradient(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_3GPy_4util_17choleskies_cython_5backprop_gradient = {"backprop_gradient", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_3GPy_4util_17choleskies_cython_5backprop_gradient, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_5backprop_gradient(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  __Pyx_memviewslice __pyx_v_dL = { 0, 0, { 0 }, { 0 }, { 0 } };
  __Pyx_memviewslice __pyx_v_L = { 0, 0, { 0 }, { 0 }, { 0 } };
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("backprop_gradient (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dL,&__pyx_n_s_L,0};
    PyObject* values[2] = {0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      /* collect positional arguments first */
      switch (pos_args) {
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* fill the remaining slots from keyword arguments */
      switch (pos_args) {
        case 0:
        if (likely((values[0] =
__Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dL)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case 1:
        if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_L)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("backprop_gradient", 1, 2, 2, 1); __PYX_ERR(0, 51, __pyx_L3_error)
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_gradient") < 0)) __PYX_ERR(0, 51, __pyx_L3_error)
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
      values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
    }
    __pyx_v_dL = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_dL.memview)) __PYX_ERR(0, 51, __pyx_L3_error)
    __pyx_v_L = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_L.memview)) __PYX_ERR(0, 51, __pyx_L3_error)
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("backprop_gradient", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 51, __pyx_L3_error)
  __pyx_L3_error:;
  __Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_3GPy_4util_17choleskies_cython_4backprop_gradient(__pyx_self, __pyx_v_dL, __pyx_v_L);

  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* Implementation of backprop_gradient (pyx:51): reverse-mode (adjoint)
 * propagation of a gradient through a Cholesky factorization.  Starts from
 * dL_dK = np.tril(dL) and updates it in place with a backward recurrence
 * over k = N-1..0; the loops run with the GIL released.
 * NOTE(review): assumes L is lower-triangular and dL/L are square with the
 * same shape -- not checked here; presumably enforced by the Python-level
 * caller in GPy.util.choleskies (verify against the .pyx caller). */
static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_4backprop_gradient(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L) {
  __Pyx_memviewslice __pyx_v_dL_dK = { 0, 0, { 0 }, { 0 }, { 0 } };
  int __pyx_v_N;
  int __pyx_v_k;
  int __pyx_v_j;
  int __pyx_v_i;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } };
  int __pyx_t_6;
  int __pyx_t_7;
  int __pyx_t_8;
  int __pyx_t_9;
  int __pyx_t_10;
  int __pyx_t_11;
  int __pyx_t_12;
  Py_ssize_t __pyx_t_13;
  Py_ssize_t __pyx_t_14;
  Py_ssize_t __pyx_t_15;
  Py_ssize_t __pyx_t_16;
  Py_ssize_t __pyx_t_17;
  Py_ssize_t __pyx_t_18;
  Py_ssize_t __pyx_t_19;
  Py_ssize_t __pyx_t_20;
  Py_ssize_t __pyx_t_21;
  Py_ssize_t __pyx_t_22;
  Py_ssize_t __pyx_t_23;
  Py_ssize_t __pyx_t_24;
  Py_ssize_t __pyx_t_25;
  Py_ssize_t __pyx_t_26;
  Py_ssize_t __pyx_t_27;
  Py_ssize_t __pyx_t_28;
  Py_ssize_t __pyx_t_29;
  Py_ssize_t __pyx_t_30;
  Py_ssize_t __pyx_t_31;
  Py_ssize_t __pyx_t_32;
  Py_ssize_t __pyx_t_33;
  Py_ssize_t __pyx_t_34;
  Py_ssize_t __pyx_t_35;
  Py_ssize_t __pyx_t_36;
  Py_ssize_t __pyx_t_37;
  Py_ssize_t __pyx_t_38;
  __Pyx_RefNannySetupContext("backprop_gradient", 0);

  /* pyx:52  cdef double[:, ::1] dL_dK = np.tril(dL) */
  __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 52, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_tril); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 52, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dL, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 52, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_4 = NULL;
  /* unbind a bound method before calling np.tril */
  if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
    __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
    if (likely(__pyx_t_4)) {
      PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
      __Pyx_INCREF(__pyx_t_4);
      __Pyx_INCREF(function);
      __Pyx_DECREF_SET(__pyx_t_3, function);
    }
  }
  __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2);
  __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 52, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 52, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_v_dL_dK = __pyx_t_5;
  __pyx_t_5.memview = NULL;
  __pyx_t_5.data = NULL;

  /* pyx:53  cdef int N = L.shape[0] */
  __pyx_v_N = (__pyx_v_L.shape[0]);

  /* pyx:55  with nogil: */
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      Py_UNBLOCK_THREADS
      __Pyx_FastGIL_Remember();
      #endif
      /*try:*/ {

        /* pyx:56  for k in range(N - 1, -1, -1):  (backward over columns) */
        for (__pyx_t_6 = (__pyx_v_N - 1); __pyx_t_6 > -1; __pyx_t_6-=1) {
          __pyx_v_k = __pyx_t_6;

          /* pyx:57  for j in range(k + 1, N): */
          __pyx_t_7 = __pyx_v_N;
          __pyx_t_8 = __pyx_t_7;
          for (__pyx_t_9 = (__pyx_v_k + 1); __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) {
            __pyx_v_j = __pyx_t_9;

            /* pyx:58  for i in range(j, N): */
            __pyx_t_10 = __pyx_v_N;
            __pyx_t_11 = __pyx_t_10;
            for (__pyx_t_12 = __pyx_v_j; __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) {
              __pyx_v_i = __pyx_t_12;

              /* pyx:59  dL_dK[i, k] -= dL_dK[i, j] * L[j, k]
               * (negative indices are wrapped before each strided access) */
              __pyx_t_13 = __pyx_v_i;
              __pyx_t_14 = __pyx_v_j;
              if (__pyx_t_13 < 0) __pyx_t_13 += __pyx_v_dL_dK.shape[0];
              if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_v_dL_dK.shape[1];
              __pyx_t_15 = __pyx_v_j;
              __pyx_t_16 = __pyx_v_k;
              if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_v_L.shape[0];
              if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_v_L.shape[1];
              __pyx_t_17 = __pyx_v_i;
              __pyx_t_18 = __pyx_v_k;
              if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_dL_dK.shape[0];
              if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_v_dL_dK.shape[1];
              *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_17 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_18)) )) -= ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_13 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_14)) ))) * (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_15 * __pyx_v_L.strides[0]) ) + __pyx_t_16 * __pyx_v_L.strides[1]) ))));

              /* pyx:60  dL_dK[j, k] -= dL_dK[i, j] * L[i, k] */
              __pyx_t_19 = __pyx_v_i;
              __pyx_t_20 = __pyx_v_j;
              if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_v_dL_dK.shape[0];
              if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_v_dL_dK.shape[1];
              __pyx_t_21 = __pyx_v_i;
              __pyx_t_22 = __pyx_v_k;
              if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_v_L.shape[0];
              if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_v_L.shape[1];
              __pyx_t_23 = __pyx_v_j;
              __pyx_t_24 = __pyx_v_k;
              if (__pyx_t_23 < 0) __pyx_t_23 += __pyx_v_dL_dK.shape[0];
              if (__pyx_t_24 < 0) __pyx_t_24 += __pyx_v_dL_dK.shape[1];
              *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_23 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_24)) )) -= ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_19 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_20)) ))) * (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_21 * __pyx_v_L.strides[0]) ) + __pyx_t_22 * __pyx_v_L.strides[1]) ))));
            }
          }

          /* pyx:61  for j in range(k + 1, N): */
          __pyx_t_7 = __pyx_v_N;
          __pyx_t_8 = __pyx_t_7;
          for (__pyx_t_9 = (__pyx_v_k + 1); __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) {
            __pyx_v_j = __pyx_t_9;

            /* pyx:62  dL_dK[j, k] /= L[k, k] */
            __pyx_t_25 = __pyx_v_k;
            __pyx_t_26 = __pyx_v_k;
            if (__pyx_t_25 < 0) __pyx_t_25 += __pyx_v_L.shape[0];
            if (__pyx_t_26 < 0) __pyx_t_26 += __pyx_v_L.shape[1];
            __pyx_t_27 = __pyx_v_j;
            __pyx_t_28 = __pyx_v_k;
            if (__pyx_t_27 < 0) __pyx_t_27 += __pyx_v_dL_dK.shape[0];
            if (__pyx_t_28 < 0) __pyx_t_28 += __pyx_v_dL_dK.shape[1];
            *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_27 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_28)) )) /= (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_25 * __pyx_v_L.strides[0]) ) + __pyx_t_26 * __pyx_v_L.strides[1]) )));

            /* pyx:63  dL_dK[k, k] -= L[j, k] * dL_dK[j, k] */
            __pyx_t_29 = __pyx_v_j;
            __pyx_t_30 = __pyx_v_k;
            if (__pyx_t_29 < 0) __pyx_t_29 += __pyx_v_L.shape[0];
            if (__pyx_t_30 < 0) __pyx_t_30 += __pyx_v_L.shape[1];
            __pyx_t_31 = __pyx_v_j;
            __pyx_t_32 = __pyx_v_k;
            if (__pyx_t_31 < 0) __pyx_t_31 += __pyx_v_dL_dK.shape[0];
            if (__pyx_t_32 < 0) __pyx_t_32 += __pyx_v_dL_dK.shape[1];
            __pyx_t_33 = __pyx_v_k;
            __pyx_t_34 = __pyx_v_k;
            if (__pyx_t_33 < 0) __pyx_t_33 += __pyx_v_dL_dK.shape[0];
            if (__pyx_t_34 < 0) __pyx_t_34 += __pyx_v_dL_dK.shape[1];
            *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_33 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_34)) )) -= ((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_29 * __pyx_v_L.strides[0]) ) + __pyx_t_30 * __pyx_v_L.strides[1]) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_31 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_32)) ))));
          }

          /* pyx:64  dL_dK[k, k] /= (2. * L[k, k])
           * (statement continues on the following source line) */
          __pyx_t_35 = __pyx_v_k;
          __pyx_t_36 = __pyx_v_k;
          if (__pyx_t_35 < 0) __pyx_t_35 += __pyx_v_L.shape[0];
          if (__pyx_t_36 < 0) __pyx_t_36 += __pyx_v_L.shape[1];
          __pyx_t_37 = __pyx_v_k;
          __pyx_t_38 = __pyx_v_k;
          if (__pyx_t_37 < 0) __pyx_t_37 += __pyx_v_dL_dK.shape[0];
          if (__pyx_t_38 < 0) __pyx_t_38 += __pyx_v_dL_dK.shape[1];
          *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_37 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_38)) )) /= (2.
* (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_35 * __pyx_v_L.strides[0]) ) + __pyx_t_36 * __pyx_v_L.strides[1]) )))); } } /* "GPy/util/choleskies_cython.pyx":55 * cdef int N = L.shape[0] * cdef int k, j, i * with nogil: # <<<<<<<<<<<<<< * for k in range(N - 1, -1, -1): * for j in range(k + 1, N): */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "GPy/util/choleskies_cython.pyx":65 * dL_dK[k, k] -= L[j, k] * dL_dK[j, k] * dL_dK[k, k] /= (2. * L[k, k]) * return dL_dK # <<<<<<<<<<<<<< * * def backprop_gradient_par(double[:,:] dL, double[:,:] L): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_dL_dK, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 65, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "GPy/util/choleskies_cython.pyx":51 * return flat * * def backprop_gradient(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<< * cdef double[:, ::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __PYX_XDEC_MEMVIEW(&__pyx_t_5, 1); __Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_dL_dK, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_dL, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_L, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "GPy/util/choleskies_cython.pyx":67 * return dL_dK * * def backprop_gradient_par(double[:,:] dL, double[:,:] L): # <<<<<<<<<<<<<< * cdef double[:,::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] */ /* Python wrapper */ static PyObject 
*__pyx_pw_3GPy_4util_17choleskies_cython_7backprop_gradient_par(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_3GPy_4util_17choleskies_cython_7backprop_gradient_par = {"backprop_gradient_par", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_3GPy_4util_17choleskies_cython_7backprop_gradient_par, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_7backprop_gradient_par(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_dL = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_L = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_gradient_par (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dL,&__pyx_n_s_L,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dL)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_L)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_gradient_par", 1, 2, 2, 1); __PYX_ERR(0, 67, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_gradient_par") < 0)) __PYX_ERR(0, 67, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } 
__pyx_v_dL = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_dL.memview)) __PYX_ERR(0, 67, __pyx_L3_error) __pyx_v_L = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_L.memview)) __PYX_ERR(0, 67, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_gradient_par", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 67, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient_par", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_3GPy_4util_17choleskies_cython_6backprop_gradient_par(__pyx_self, __pyx_v_dL, __pyx_v_L); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_6backprop_gradient_par(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L) { __Pyx_memviewslice __pyx_v_dL_dK = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_N; int __pyx_v_k; int __pyx_v_j; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_6; long __pyx_t_7; int __pyx_t_8; long __pyx_t_9; long __pyx_t_10; long __pyx_t_11; long __pyx_t_12; int __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; Py_ssize_t __pyx_t_18; Py_ssize_t __pyx_t_19; int __pyx_t_20; int __pyx_t_21; Py_ssize_t __pyx_t_22; Py_ssize_t __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; Py_ssize_t __pyx_t_26; Py_ssize_t __pyx_t_27; Py_ssize_t __pyx_t_28; Py_ssize_t __pyx_t_29; Py_ssize_t __pyx_t_30; Py_ssize_t __pyx_t_31; Py_ssize_t __pyx_t_32; Py_ssize_t 
__pyx_t_33; Py_ssize_t __pyx_t_34; Py_ssize_t __pyx_t_35; Py_ssize_t __pyx_t_36; Py_ssize_t __pyx_t_37; Py_ssize_t __pyx_t_38; Py_ssize_t __pyx_t_39; Py_ssize_t __pyx_t_40; Py_ssize_t __pyx_t_41; __Pyx_RefNannySetupContext("backprop_gradient_par", 0); /* "GPy/util/choleskies_cython.pyx":68 * * def backprop_gradient_par(double[:,:] dL, double[:,:] L): * cdef double[:,::1] dL_dK = np.tril(dL) # <<<<<<<<<<<<<< * cdef int N = L.shape[0] * cdef int k, j, i */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 68, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_tril); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 68, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dL, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 68, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 68, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 68, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_dL_dK = __pyx_t_5; __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; /* "GPy/util/choleskies_cython.pyx":69 * def backprop_gradient_par(double[:,:] dL, double[:,:] L): * cdef double[:,::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] # <<<<<<<<<<<<<< * cdef int k, j, i * with nogil: */ __pyx_v_N = (__pyx_v_L.shape[0]); /* "GPy/util/choleskies_cython.pyx":71 * cdef int N = L.shape[0] * cdef int k, j, i * with nogil: # <<<<<<<<<<<<<< * for k in range(N - 1, -1, -1): * with parallel(): */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* "GPy/util/choleskies_cython.pyx":72 * cdef int k, j, i * with nogil: * for k in range(N - 1, -1, -1): # <<<<<<<<<<<<<< * with parallel(): * for i in prange(k + 1, N): */ for (__pyx_t_6 = (__pyx_v_N - 1); __pyx_t_6 > -1; __pyx_t_6-=1) { __pyx_v_k = __pyx_t_6; /* "GPy/util/choleskies_cython.pyx":73 * with nogil: * for k in range(N - 1, -1, -1): * with parallel(): # <<<<<<<<<<<<<< * for i in prange(k + 1, N): * for j in range(k+1, i+1): */ { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif #ifdef _OPENMP #pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15, __pyx_t_16, __pyx_t_17, __pyx_t_18, __pyx_t_19, __pyx_t_20, __pyx_t_21, __pyx_t_22, __pyx_t_23, __pyx_t_24, 
__pyx_t_25, __pyx_t_26, __pyx_t_27, __pyx_t_7, __pyx_t_8, __pyx_t_9) #endif /* _OPENMP */ { /* "GPy/util/choleskies_cython.pyx":74 * for k in range(N - 1, -1, -1): * with parallel(): * for i in prange(k + 1, N): # <<<<<<<<<<<<<< * for j in range(k+1, i+1): * dL_dK[i, k] -= dL_dK[i, j] * L[j, k] */ __pyx_t_7 = (__pyx_v_k + 1); __pyx_t_8 = __pyx_v_N; if (1 == 0) abort(); { __pyx_t_10 = (__pyx_t_8 - __pyx_t_7 + 1 - 1/abs(1)) / 1; if (__pyx_t_10 > 0) { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) #endif /* _OPENMP */ for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_10; __pyx_t_9++){ { __pyx_v_i = (int)(__pyx_t_7 + 1 * __pyx_t_9); /* Initialize private variables to invalid values */ __pyx_v_j = ((int)0xbad0bad0); /* "GPy/util/choleskies_cython.pyx":75 * with parallel(): * for i in prange(k + 1, N): * for j in range(k+1, i+1): # <<<<<<<<<<<<<< * dL_dK[i, k] -= dL_dK[i, j] * L[j, k] * for j in range(i, N): */ __pyx_t_11 = (__pyx_v_i + 1); __pyx_t_12 = __pyx_t_11; for (__pyx_t_13 = (__pyx_v_k + 1); __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { __pyx_v_j = __pyx_t_13; /* "GPy/util/choleskies_cython.pyx":76 * for i in prange(k + 1, N): * for j in range(k+1, i+1): * dL_dK[i, k] -= dL_dK[i, j] * L[j, k] # <<<<<<<<<<<<<< * for j in range(i, N): * dL_dK[i, k] -= dL_dK[j, i] * L[j, k] */ __pyx_t_14 = __pyx_v_i; __pyx_t_15 = __pyx_v_j; if (__pyx_t_14 < 0) __pyx_t_14 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_v_dL_dK.shape[1]; __pyx_t_16 = __pyx_v_j; __pyx_t_17 = __pyx_v_k; if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_v_L.shape[0]; if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_L.shape[1]; __pyx_t_18 = __pyx_v_i; __pyx_t_19 = __pyx_v_k; if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_v_dL_dK.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_18 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_19)) )) -= ((*((double *) ( /* dim=1 */ 
((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_14 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_15)) ))) * (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_16 * __pyx_v_L.strides[0]) ) + __pyx_t_17 * __pyx_v_L.strides[1]) )))); } /* "GPy/util/choleskies_cython.pyx":77 * for j in range(k+1, i+1): * dL_dK[i, k] -= dL_dK[i, j] * L[j, k] * for j in range(i, N): # <<<<<<<<<<<<<< * dL_dK[i, k] -= dL_dK[j, i] * L[j, k] * for j in range(k + 1, N): */ __pyx_t_13 = __pyx_v_N; __pyx_t_20 = __pyx_t_13; for (__pyx_t_21 = __pyx_v_i; __pyx_t_21 < __pyx_t_20; __pyx_t_21+=1) { __pyx_v_j = __pyx_t_21; /* "GPy/util/choleskies_cython.pyx":78 * dL_dK[i, k] -= dL_dK[i, j] * L[j, k] * for j in range(i, N): * dL_dK[i, k] -= dL_dK[j, i] * L[j, k] # <<<<<<<<<<<<<< * for j in range(k + 1, N): * dL_dK[j, k] /= L[k, k] */ __pyx_t_22 = __pyx_v_j; __pyx_t_23 = __pyx_v_i; if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_23 < 0) __pyx_t_23 += __pyx_v_dL_dK.shape[1]; __pyx_t_24 = __pyx_v_j; __pyx_t_25 = __pyx_v_k; if (__pyx_t_24 < 0) __pyx_t_24 += __pyx_v_L.shape[0]; if (__pyx_t_25 < 0) __pyx_t_25 += __pyx_v_L.shape[1]; __pyx_t_26 = __pyx_v_i; __pyx_t_27 = __pyx_v_k; if (__pyx_t_26 < 0) __pyx_t_26 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_27 < 0) __pyx_t_27 += __pyx_v_dL_dK.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_26 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_27)) )) -= ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_22 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_23)) ))) * (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_24 * __pyx_v_L.strides[0]) ) + __pyx_t_25 * __pyx_v_L.strides[1]) )))); } } } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define 
unlikely(x) __builtin_expect(!!(x), 0) #endif /* "GPy/util/choleskies_cython.pyx":79 * for j in range(i, N): * dL_dK[i, k] -= dL_dK[j, i] * L[j, k] * for j in range(k + 1, N): # <<<<<<<<<<<<<< * dL_dK[j, k] /= L[k, k] * dL_dK[k, k] -= L[j, k] * dL_dK[j, k] */ __pyx_t_8 = __pyx_v_N; __pyx_t_13 = __pyx_t_8; for (__pyx_t_20 = (__pyx_v_k + 1); __pyx_t_20 < __pyx_t_13; __pyx_t_20+=1) { __pyx_v_j = __pyx_t_20; /* "GPy/util/choleskies_cython.pyx":80 * dL_dK[i, k] -= dL_dK[j, i] * L[j, k] * for j in range(k + 1, N): * dL_dK[j, k] /= L[k, k] # <<<<<<<<<<<<<< * dL_dK[k, k] -= L[j, k] * dL_dK[j, k] * dL_dK[k, k] /= (2. * L[k, k]) */ __pyx_t_28 = __pyx_v_k; __pyx_t_29 = __pyx_v_k; if (__pyx_t_28 < 0) __pyx_t_28 += __pyx_v_L.shape[0]; if (__pyx_t_29 < 0) __pyx_t_29 += __pyx_v_L.shape[1]; __pyx_t_30 = __pyx_v_j; __pyx_t_31 = __pyx_v_k; if (__pyx_t_30 < 0) __pyx_t_30 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_31 < 0) __pyx_t_31 += __pyx_v_dL_dK.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_30 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_31)) )) /= (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_28 * __pyx_v_L.strides[0]) ) + __pyx_t_29 * __pyx_v_L.strides[1]) ))); /* "GPy/util/choleskies_cython.pyx":81 * for j in range(k + 1, N): * dL_dK[j, k] /= L[k, k] * dL_dK[k, k] -= L[j, k] * dL_dK[j, k] # <<<<<<<<<<<<<< * dL_dK[k, k] /= (2. 
* L[k, k]) * return dL_dK */ __pyx_t_32 = __pyx_v_j; __pyx_t_33 = __pyx_v_k; if (__pyx_t_32 < 0) __pyx_t_32 += __pyx_v_L.shape[0]; if (__pyx_t_33 < 0) __pyx_t_33 += __pyx_v_L.shape[1]; __pyx_t_34 = __pyx_v_j; __pyx_t_35 = __pyx_v_k; if (__pyx_t_34 < 0) __pyx_t_34 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_35 < 0) __pyx_t_35 += __pyx_v_dL_dK.shape[1]; __pyx_t_36 = __pyx_v_k; __pyx_t_37 = __pyx_v_k; if (__pyx_t_36 < 0) __pyx_t_36 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_37 < 0) __pyx_t_37 += __pyx_v_dL_dK.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_36 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_37)) )) -= ((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_32 * __pyx_v_L.strides[0]) ) + __pyx_t_33 * __pyx_v_L.strides[1]) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_34 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_35)) )))); } /* "GPy/util/choleskies_cython.pyx":82 * dL_dK[j, k] /= L[k, k] * dL_dK[k, k] -= L[j, k] * dL_dK[j, k] * dL_dK[k, k] /= (2. * L[k, k]) # <<<<<<<<<<<<<< * return dL_dK * */ __pyx_t_38 = __pyx_v_k; __pyx_t_39 = __pyx_v_k; if (__pyx_t_38 < 0) __pyx_t_38 += __pyx_v_L.shape[0]; if (__pyx_t_39 < 0) __pyx_t_39 += __pyx_v_L.shape[1]; __pyx_t_40 = __pyx_v_k; __pyx_t_41 = __pyx_v_k; if (__pyx_t_40 < 0) __pyx_t_40 += __pyx_v_dL_dK.shape[0]; if (__pyx_t_41 < 0) __pyx_t_41 += __pyx_v_dL_dK.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL_dK.data + __pyx_t_40 * __pyx_v_dL_dK.strides[0]) )) + __pyx_t_41)) )) /= (2. 
* (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_L.data + __pyx_t_38 * __pyx_v_L.strides[0]) ) + __pyx_t_39 * __pyx_v_L.strides[1]) )))); } } /* "GPy/util/choleskies_cython.pyx":71 * cdef int N = L.shape[0] * cdef int k, j, i * with nogil: # <<<<<<<<<<<<<< * for k in range(N - 1, -1, -1): * with parallel(): */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "GPy/util/choleskies_cython.pyx":83 * dL_dK[k, k] -= L[j, k] * dL_dK[j, k] * dL_dK[k, k] /= (2. * L[k, k]) * return dL_dK # <<<<<<<<<<<<<< * * cdef void chol_backprop(int N, double[:, ::1] dL, double[:, ::1] L) nogil: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_dL_dK, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 83, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "GPy/util/choleskies_cython.pyx":67 * return dL_dK * * def backprop_gradient_par(double[:,:] dL, double[:,:] L): # <<<<<<<<<<<<<< * cdef double[:,::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __PYX_XDEC_MEMVIEW(&__pyx_t_5, 1); __Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient_par", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_dL_dK, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_dL, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_L, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "GPy/util/choleskies_cython.pyx":85 * return dL_dK * * cdef void chol_backprop(int N, double[:, ::1] dL, double[:, ::1] L) nogil: # <<<<<<<<<<<<<< * cdef int i, k, n * */ static void __pyx_f_3GPy_4util_17choleskies_cython_chol_backprop(int __pyx_v_N, __Pyx_memviewslice __pyx_v_dL, 
__Pyx_memviewslice __pyx_v_L) { int __pyx_v_i; int __pyx_v_k; int __pyx_v_n; double __pyx_v_alpha; double __pyx_v_beta; int __pyx_v_incx; double __pyx_v_scale; Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; int __pyx_t_5; Py_ssize_t __pyx_t_6; Py_ssize_t __pyx_t_7; Py_ssize_t __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; long __pyx_t_12; long __pyx_t_13; int __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; Py_ssize_t __pyx_t_17; Py_ssize_t __pyx_t_18; Py_ssize_t __pyx_t_19; Py_ssize_t __pyx_t_20; Py_ssize_t __pyx_t_21; Py_ssize_t __pyx_t_22; double __pyx_t_23; Py_ssize_t __pyx_t_24; Py_ssize_t __pyx_t_25; Py_ssize_t __pyx_t_26; Py_ssize_t __pyx_t_27; Py_ssize_t __pyx_t_28; Py_ssize_t __pyx_t_29; Py_ssize_t __pyx_t_30; Py_ssize_t __pyx_t_31; Py_ssize_t __pyx_t_32; Py_ssize_t __pyx_t_33; Py_ssize_t __pyx_t_34; Py_ssize_t __pyx_t_35; /* "GPy/util/choleskies_cython.pyx":89 * * # DSYMV required constant arguments * cdef double alpha=-1, beta=1 # <<<<<<<<<<<<<< * cdef int incx=N * */ __pyx_v_alpha = -1.0; __pyx_v_beta = 1.0; /* "GPy/util/choleskies_cython.pyx":90 * # DSYMV required constant arguments * cdef double alpha=-1, beta=1 * cdef int incx=N # <<<<<<<<<<<<<< * * # DSCAL required arguments */ __pyx_v_incx = __pyx_v_N; /* "GPy/util/choleskies_cython.pyx":95 * cdef double scale * * dL[N - 1, N - 1] /= (2. * L[N - 1, N - 1]) # <<<<<<<<<<<<<< * for k in range(N-2, -1, -1): * n = N-k-1 */ __pyx_t_1 = (__pyx_v_N - 1); __pyx_t_2 = (__pyx_v_N - 1); if (__pyx_t_1 < 0) __pyx_t_1 += __pyx_v_L.shape[0]; if (__pyx_t_2 < 0) __pyx_t_2 += __pyx_v_L.shape[1]; __pyx_t_3 = (__pyx_v_N - 1); __pyx_t_4 = (__pyx_v_N - 1); if (__pyx_t_3 < 0) __pyx_t_3 += __pyx_v_dL.shape[0]; if (__pyx_t_4 < 0) __pyx_t_4 += __pyx_v_dL.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_3 * __pyx_v_dL.strides[0]) )) + __pyx_t_4)) )) /= (2. 
* (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_1 * __pyx_v_L.strides[0]) )) + __pyx_t_2)) )))); /* "GPy/util/choleskies_cython.pyx":96 * * dL[N - 1, N - 1] /= (2. * L[N - 1, N - 1]) * for k in range(N-2, -1, -1): # <<<<<<<<<<<<<< * n = N-k-1 * cblas.dsymv(uplo='u', n=&n, alpha=&alpha, a=&dL[k + 1, k + 1], lda=&N, x=&L[k + 1, k], incx=&incx, */ for (__pyx_t_5 = (__pyx_v_N - 2); __pyx_t_5 > -1; __pyx_t_5-=1) { __pyx_v_k = __pyx_t_5; /* "GPy/util/choleskies_cython.pyx":97 * dL[N - 1, N - 1] /= (2. * L[N - 1, N - 1]) * for k in range(N-2, -1, -1): * n = N-k-1 # <<<<<<<<<<<<<< * cblas.dsymv(uplo='u', n=&n, alpha=&alpha, a=&dL[k + 1, k + 1], lda=&N, x=&L[k + 1, k], incx=&incx, * beta=&beta, y=&dL[k + 1, k], incy=&N) */ __pyx_v_n = ((__pyx_v_N - __pyx_v_k) - 1); /* "GPy/util/choleskies_cython.pyx":98 * for k in range(N-2, -1, -1): * n = N-k-1 * cblas.dsymv(uplo='u', n=&n, alpha=&alpha, a=&dL[k + 1, k + 1], lda=&N, x=&L[k + 1, k], incx=&incx, # <<<<<<<<<<<<<< * beta=&beta, y=&dL[k + 1, k], incy=&N) * */ __pyx_t_6 = (__pyx_v_k + 1); __pyx_t_7 = (__pyx_v_k + 1); if (__pyx_t_6 < 0) __pyx_t_6 += __pyx_v_dL.shape[0]; if (__pyx_t_7 < 0) __pyx_t_7 += __pyx_v_dL.shape[1]; __pyx_t_8 = (__pyx_v_k + 1); __pyx_t_9 = __pyx_v_k; if (__pyx_t_8 < 0) __pyx_t_8 += __pyx_v_L.shape[0]; if (__pyx_t_9 < 0) __pyx_t_9 += __pyx_v_L.shape[1]; /* "GPy/util/choleskies_cython.pyx":99 * n = N-k-1 * cblas.dsymv(uplo='u', n=&n, alpha=&alpha, a=&dL[k + 1, k + 1], lda=&N, x=&L[k + 1, k], incx=&incx, * beta=&beta, y=&dL[k + 1, k], incy=&N) # <<<<<<<<<<<<<< * * for i in xrange(0, N - k - 1): */ __pyx_t_10 = (__pyx_v_k + 1); __pyx_t_11 = __pyx_v_k; if (__pyx_t_10 < 0) __pyx_t_10 += __pyx_v_dL.shape[0]; if (__pyx_t_11 < 0) __pyx_t_11 += __pyx_v_dL.shape[1]; /* "GPy/util/choleskies_cython.pyx":98 * for k in range(N-2, -1, -1): * n = N-k-1 * cblas.dsymv(uplo='u', n=&n, alpha=&alpha, a=&dL[k + 1, k + 1], lda=&N, x=&L[k + 1, k], incx=&incx, # <<<<<<<<<<<<<< * 
beta=&beta, y=&dL[k + 1, k], incy=&N) * */ __pyx_f_5scipy_6linalg_11cython_blas_dsymv(((char *)"u"), (&__pyx_v_n), (&__pyx_v_alpha), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_6 * __pyx_v_dL.strides[0]) )) + __pyx_t_7)) )))), (&__pyx_v_N), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_8 * __pyx_v_L.strides[0]) )) + __pyx_t_9)) )))), (&__pyx_v_incx), (&__pyx_v_beta), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_10 * __pyx_v_dL.strides[0]) )) + __pyx_t_11)) )))), (&__pyx_v_N)); /* "GPy/util/choleskies_cython.pyx":101 * beta=&beta, y=&dL[k + 1, k], incy=&N) * * for i in xrange(0, N - k - 1): # <<<<<<<<<<<<<< * dL[k + 1 + i, k] -= dL[k + i+ 1, k + i + 1] * L[k + 1 + i, k] * */ __pyx_t_12 = ((__pyx_v_N - __pyx_v_k) - 1); __pyx_t_13 = __pyx_t_12; for (__pyx_t_14 = 0; __pyx_t_14 < __pyx_t_13; __pyx_t_14+=1) { __pyx_v_i = __pyx_t_14; /* "GPy/util/choleskies_cython.pyx":102 * * for i in xrange(0, N - k - 1): * dL[k + 1 + i, k] -= dL[k + i+ 1, k + i + 1] * L[k + 1 + i, k] # <<<<<<<<<<<<<< * * scale = 1.0 / L[k, k] */ __pyx_t_15 = ((__pyx_v_k + __pyx_v_i) + 1); __pyx_t_16 = ((__pyx_v_k + __pyx_v_i) + 1); if (__pyx_t_15 < 0) __pyx_t_15 += __pyx_v_dL.shape[0]; if (__pyx_t_16 < 0) __pyx_t_16 += __pyx_v_dL.shape[1]; __pyx_t_17 = ((__pyx_v_k + 1) + __pyx_v_i); __pyx_t_18 = __pyx_v_k; if (__pyx_t_17 < 0) __pyx_t_17 += __pyx_v_L.shape[0]; if (__pyx_t_18 < 0) __pyx_t_18 += __pyx_v_L.shape[1]; __pyx_t_19 = ((__pyx_v_k + 1) + __pyx_v_i); __pyx_t_20 = __pyx_v_k; if (__pyx_t_19 < 0) __pyx_t_19 += __pyx_v_dL.shape[0]; if (__pyx_t_20 < 0) __pyx_t_20 += __pyx_v_dL.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_19 * __pyx_v_dL.strides[0]) )) + __pyx_t_20)) )) -= ((*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_15 * __pyx_v_dL.strides[0]) )) + 
__pyx_t_16)) ))) * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_17 * __pyx_v_L.strides[0]) )) + __pyx_t_18)) )))); } /* "GPy/util/choleskies_cython.pyx":104 * dL[k + 1 + i, k] -= dL[k + i+ 1, k + i + 1] * L[k + 1 + i, k] * * scale = 1.0 / L[k, k] # <<<<<<<<<<<<<< * cblas.dscal(&n, &scale , &dL[k + 1, k], &N) * # */ __pyx_t_21 = __pyx_v_k; __pyx_t_22 = __pyx_v_k; if (__pyx_t_21 < 0) __pyx_t_21 += __pyx_v_L.shape[0]; if (__pyx_t_22 < 0) __pyx_t_22 += __pyx_v_L.shape[1]; __pyx_t_23 = (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_21 * __pyx_v_L.strides[0]) )) + __pyx_t_22)) ))); if (unlikely(__pyx_t_23 == 0)) { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif PyErr_SetString(PyExc_ZeroDivisionError, "float division"); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif __PYX_ERR(0, 104, __pyx_L1_error) } __pyx_v_scale = (1.0 / __pyx_t_23); /* "GPy/util/choleskies_cython.pyx":105 * * scale = 1.0 / L[k, k] * cblas.dscal(&n, &scale , &dL[k + 1, k], &N) # <<<<<<<<<<<<<< * # * dL[k, k] -= cblas.ddot(&n, &dL[k + 1, k], &N, &L[k+1, k], &incx) */ __pyx_t_24 = (__pyx_v_k + 1); __pyx_t_25 = __pyx_v_k; if (__pyx_t_24 < 0) __pyx_t_24 += __pyx_v_dL.shape[0]; if (__pyx_t_25 < 0) __pyx_t_25 += __pyx_v_dL.shape[1]; __pyx_f_5scipy_6linalg_11cython_blas_dscal((&__pyx_v_n), (&__pyx_v_scale), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_24 * __pyx_v_dL.strides[0]) )) + __pyx_t_25)) )))), (&__pyx_v_N)); /* "GPy/util/choleskies_cython.pyx":107 * cblas.dscal(&n, &scale , &dL[k + 1, k], &N) * # * dL[k, k] -= cblas.ddot(&n, &dL[k + 1, k], &N, &L[k+1, k], &incx) # <<<<<<<<<<<<<< * dL[k, k] /= (2.0 * L[k, k]) * */ __pyx_t_26 = (__pyx_v_k + 1); __pyx_t_27 = __pyx_v_k; if (__pyx_t_26 < 0) __pyx_t_26 += __pyx_v_dL.shape[0]; if (__pyx_t_27 < 0) __pyx_t_27 += __pyx_v_dL.shape[1]; __pyx_t_28 = 
(__pyx_v_k + 1); __pyx_t_29 = __pyx_v_k; if (__pyx_t_28 < 0) __pyx_t_28 += __pyx_v_L.shape[0]; if (__pyx_t_29 < 0) __pyx_t_29 += __pyx_v_L.shape[1]; __pyx_t_30 = __pyx_v_k; __pyx_t_31 = __pyx_v_k; if (__pyx_t_30 < 0) __pyx_t_30 += __pyx_v_dL.shape[0]; if (__pyx_t_31 < 0) __pyx_t_31 += __pyx_v_dL.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_30 * __pyx_v_dL.strides[0]) )) + __pyx_t_31)) )) -= __pyx_f_5scipy_6linalg_11cython_blas_ddot((&__pyx_v_n), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_26 * __pyx_v_dL.strides[0]) )) + __pyx_t_27)) )))), (&__pyx_v_N), (&(*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_28 * __pyx_v_L.strides[0]) )) + __pyx_t_29)) )))), (&__pyx_v_incx)); /* "GPy/util/choleskies_cython.pyx":108 * # * dL[k, k] -= cblas.ddot(&n, &dL[k + 1, k], &N, &L[k+1, k], &incx) * dL[k, k] /= (2.0 * L[k, k]) # <<<<<<<<<<<<<< * * def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): */ __pyx_t_32 = __pyx_v_k; __pyx_t_33 = __pyx_v_k; if (__pyx_t_32 < 0) __pyx_t_32 += __pyx_v_L.shape[0]; if (__pyx_t_33 < 0) __pyx_t_33 += __pyx_v_L.shape[1]; __pyx_t_34 = __pyx_v_k; __pyx_t_35 = __pyx_v_k; if (__pyx_t_34 < 0) __pyx_t_34 += __pyx_v_dL.shape[0]; if (__pyx_t_35 < 0) __pyx_t_35 += __pyx_v_dL.shape[1]; *((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_dL.data + __pyx_t_34 * __pyx_v_dL.strides[0]) )) + __pyx_t_35)) )) /= (2.0 * (*((double *) ( /* dim=1 */ ((char *) (((double *) ( /* dim=0 */ (__pyx_v_L.data + __pyx_t_32 * __pyx_v_L.strides[0]) )) + __pyx_t_33)) )))); } /* "GPy/util/choleskies_cython.pyx":85 * return dL_dK * * cdef void chol_backprop(int N, double[:, ::1] dL, double[:, ::1] L) nogil: # <<<<<<<<<<<<<< * cdef int i, k, n * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __Pyx_WriteUnraisable("GPy.util.choleskies_cython.chol_backprop", __pyx_clineno, __pyx_lineno, 
__pyx_filename, 1, 1); __pyx_L0:; } /* "GPy/util/choleskies_cython.pyx":110 * dL[k, k] /= (2.0 * L[k, k]) * * def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<< * cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) */ /* Python wrapper */ static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_9backprop_gradient_par_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_3GPy_4util_17choleskies_cython_9backprop_gradient_par_c = {"backprop_gradient_par_c", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_3GPy_4util_17choleskies_cython_9backprop_gradient_par_c, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_3GPy_4util_17choleskies_cython_9backprop_gradient_par_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_dL = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_L = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("backprop_gradient_par_c (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dL,&__pyx_n_s_L,0}; PyObject* values[2] = {0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dL)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_L)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("backprop_gradient_par_c", 1, 2, 2, 1); __PYX_ERR(0, 110, __pyx_L3_error) } } 
if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "backprop_gradient_par_c") < 0)) __PYX_ERR(0, 110, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); } __pyx_v_dL = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_dL.memview)) __PYX_ERR(0, 110, __pyx_L3_error) __pyx_v_L = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_L.memview)) __PYX_ERR(0, 110, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("backprop_gradient_par_c", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 110, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient_par_c", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_3GPy_4util_17choleskies_cython_8backprop_gradient_par_c(__pyx_self, __pyx_v_dL, __pyx_v_L); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_3GPy_4util_17choleskies_cython_8backprop_gradient_par_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_dL, __Pyx_memviewslice __pyx_v_L) { __Pyx_memviewslice __pyx_v_dL_dK = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_L_cont = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_N; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_RefNannySetupContext("backprop_gradient_par_c", 0); /* "GPy/util/choleskies_cython.pyx":111 * * def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): * 
cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig # <<<<<<<<<<<<<< * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) * cdef int N = L.shape[0] */ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 111, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_tril); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 111, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dL, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 111, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 111, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 111, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_dL_dK = __pyx_t_5; __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; /* "GPy/util/choleskies_cython.pyx":112 * def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): * cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) # <<<<<<<<<<<<<< * cdef int N = L.shape[0] * with nogil: */ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_ascontiguousarray); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_L, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_1 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 112, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_5 = __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(__pyx_t_1, PyBUF_WRITABLE); if (unlikely(!__pyx_t_5.memview)) __PYX_ERR(0, 112, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_L_cont = __pyx_t_5; __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; /* "GPy/util/choleskies_cython.pyx":113 * cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) * cdef int N = L.shape[0] # <<<<<<<<<<<<<< * with nogil: * chol_backprop(N, dL_dK, L_cont) */ __pyx_v_N = (__pyx_v_L.shape[0]); /* "GPy/util/choleskies_cython.pyx":114 * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) * cdef int N = L.shape[0] * with nogil: # <<<<<<<<<<<<<< * chol_backprop(N, dL_dK, L_cont) * return np.asarray(dL_dK) */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { /* "GPy/util/choleskies_cython.pyx":115 * cdef int N = L.shape[0] * with nogil: * chol_backprop(N, dL_dK, L_cont) # <<<<<<<<<<<<<< * return np.asarray(dL_dK) */ __pyx_f_3GPy_4util_17choleskies_cython_chol_backprop(__pyx_v_N, __pyx_v_dL_dK, __pyx_v_L_cont); } /* "GPy/util/choleskies_cython.pyx":114 * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) * cdef int N = L.shape[0] * with nogil: # <<<<<<<<<<<<<< * chol_backprop(N, dL_dK, L_cont) * return np.asarray(dL_dK) */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "GPy/util/choleskies_cython.pyx":116 * with nogil: * chol_backprop(N, dL_dK, L_cont) * return np.asarray(dL_dK) # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); 
__Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 116, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_asarray); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 116, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dL_dK, 2, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 116, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 116, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "GPy/util/choleskies_cython.pyx":110 * dL[k, k] /= (2.0 * L[k, k]) * * def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<< * cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __PYX_XDEC_MEMVIEW(&__pyx_t_5, 1); __Pyx_AddTraceback("GPy.util.choleskies_cython.backprop_gradient_par_c", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_dL_dK, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_L_cont, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_dL, 1); 
__PYX_XDEC_MEMVIEW(&__pyx_v_L, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fulfill the PEP. */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; PyArray_Descr *__pyx_t_7; PyObject *__pyx_t_8 = NULL; char *__pyx_t_9; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* 
"../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265 * * cdef int i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266 * cdef int i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270 * ndim = PyArray_NDIM(self) * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_ARRAY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270 * 
ndim = PyArray_NDIM(self) * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ if (unlikely(__pyx_t_1)) { /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 272, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 272, __pyx_L1_error) /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270 * ndim = PyArray_NDIM(self) * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L7_bool_binop_done; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":275 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, 
NPY_ARRAY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_ARRAY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L7_bool_binop_done:; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ if (unlikely(__pyx_t_1)) { /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 276, __pyx_L1_error) /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":278 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* 
"../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":279 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 2) * ((size_t)__pyx_v_ndim)))); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":284 * # This is allocated as one block, strides first. 
* info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":285 * info.strides = <Py_ssize_t*>PyObject_Malloc(sizeof(Py_ssize_t) * 2 * <size_t>ndim) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_4 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":286 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":287 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. 
*/ goto __pyx_L9; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":289 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ /*else*/ { __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L9:; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291 * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":293 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":296 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr 
= <dtype>PyArray_DESCR(self) * cdef int offset */ __pyx_v_f = NULL; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":297 * cdef int t * cdef char* f = NULL * cdef dtype descr = <dtype>PyArray_DESCR(self) # <<<<<<<<<<<<<< * cdef int offset * */ __pyx_t_7 = PyArray_DESCR(__pyx_v_self); __pyx_t_3 = ((PyObject *)__pyx_t_7); __Pyx_INCREF(__pyx_t_3); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":300 * cdef int offset * * info.obj = self # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(descr): */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":302 * info.obj = self * * if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(PyDataType_HASFIELDS(__pyx_v_descr) != 0)) != 0); if (__pyx_t_1) { /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":303 * * if not PyDataType_HASFIELDS(descr): * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_4 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_4; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":304 * if not PyDataType_HASFIELDS(descr): * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); if (!__pyx_t_2) { goto 
__pyx_L15_next_or; } else { } __pyx_t_2 = (__pyx_v_little_endian != 0); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L14_bool_binop_done; } __pyx_L15_next_or:; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":305 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L14_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L14_bool_binop_done:; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":304 * if not PyDataType_HASFIELDS(descr): * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (unlikely(__pyx_t_1)) { /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":306 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 306, __pyx_L1_error) /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":304 * if not PyDataType_HASFIELDS(descr): * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * 
(descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":307 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ switch (__pyx_v_t) { case NPY_BYTE: __pyx_v_f = ((char *)"b"); break; case NPY_UBYTE: /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":308 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ __pyx_v_f = ((char *)"B"); break; case NPY_SHORT: /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":309 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ __pyx_v_f = ((char *)"h"); break; case NPY_USHORT: /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":310 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ __pyx_v_f = ((char *)"H"); break; case NPY_INT: /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":311 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ __pyx_v_f = ((char *)"i"); break; case NPY_UINT: /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":312 * elif t == NPY_USHORT: f = 
"H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ __pyx_v_f = ((char *)"I"); break; case NPY_LONG: /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":313 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ __pyx_v_f = ((char *)"l"); break; case NPY_ULONG: /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":314 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ __pyx_v_f = ((char *)"L"); break; case NPY_LONGLONG: /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":315 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ __pyx_v_f = ((char *)"q"); break; case NPY_ULONGLONG: /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":316 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ __pyx_v_f = ((char *)"Q"); break; case NPY_FLOAT: /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":317 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ __pyx_v_f = ((char *)"f"); break; case NPY_DOUBLE: /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":318 * elif 
t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ __pyx_v_f = ((char *)"d"); break; case NPY_LONGDOUBLE: /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":319 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ __pyx_v_f = ((char *)"g"); break; case NPY_CFLOAT: /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":320 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ __pyx_v_f = ((char *)"Zf"); break; case NPY_CDOUBLE: /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":321 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ __pyx_v_f = ((char *)"Zd"); break; case NPY_CLONGDOUBLE: /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":322 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ __pyx_v_f = ((char *)"Zg"); break; case NPY_OBJECT: /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":323 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_v_f = ((char *)"O"); break; default: /* 
"../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":325 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 325, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 325, __pyx_L1_error) break; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":326 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":327 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = <char*>PyObject_Malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":302 * info.obj = self * * if not PyDataType_HASFIELDS(descr): # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":329 * return * else: * info.format = <char*>PyObject_Malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native 
data types, manual alignment * offset = 0 */ /*else*/ { __pyx_v_info->format = ((char *)PyObject_Malloc(0xFF)); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":330 * else: * info.format = <char*>PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":331 * info.format = <char*>PyObject_Malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":332 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 332, __pyx_L1_error) __pyx_v_f = __pyx_t_9; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":335 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. 
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fulfill the PEP. */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":337 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":338 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if 
PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":339 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * PyObject_Free(info.strides) */ PyObject_Free(__pyx_v_info->format); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":338 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":340 * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * PyObject_Free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":341 * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * PyObject_Free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ PyObject_Free(__pyx_v_info->strides); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":340 * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * PyObject_Free(info.strides) * # info.shape was stored after info.strides in the same block */ } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":337 
* f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * PyObject_Free(info.format) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 822, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":824 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ 
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":825 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 825, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":824 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":827 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":828 * * cdef 
inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":827 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = 
PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<< * * cdef inline tuple PyDataType_SHAPE(dtype d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return <tuple>d.subarray.shape */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return <tuple>d.subarray.shape * else: */ __pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0); if (__pyx_t_1) { /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838 * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): * return <tuple>d.subarray.shape # <<<<<<<<<<<<<< * else: * return () */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); goto __pyx_L0; 
/* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837 * * cdef inline tuple PyDataType_SHAPE(dtype d): * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< * return <tuple>d.subarray.shape * else: */ } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840 * return <tuple>d.subarray.shape * else: * return () # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_empty_tuple); __pyx_r = __pyx_empty_tuple; goto __pyx_L0; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< * if PyDataType_HASSUBARRAY(d): * return <tuple>d.subarray.shape */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842 * return () * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; long __pyx_t_8; char *__pyx_t_9; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":847 * * cdef dtype child * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":848 * cdef dtype child * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":851 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); __PYX_ERR(1, 851, __pyx_L1_error) } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) 
__PYX_ERR(1, 851, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 851, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":852 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ if (unlikely(__pyx_v_descr->fields == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 852, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 852, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":853 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - <int>(new_offset - offset[0]) < 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 853, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); 
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 853, __pyx_L1_error) } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 853, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":855 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 855, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (unlikely(__pyx_t_6)) { /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":856 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 856, __pyx_L1_error) /* 
"../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":855 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":858 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); if (!__pyx_t_7) { goto __pyx_L8_next_or; } else { } __pyx_t_7 = (__pyx_v_little_endian != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_L8_next_or:; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":859 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L7_bool_binop_done:; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":858 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (unlikely(__pyx_t_6)) { /* 
"../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":860 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 860, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 860, __pyx_L1_error) /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":858 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":870 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 870, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 870, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 870, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":871 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad 
byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 0x78; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":872 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":873 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":875 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":877 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":878 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 878, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":879 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = 
(((__pyx_v_end - __pyx_v_f) < 5) != 0); if (unlikely(__pyx_t_6)) { /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":880 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 880, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 880, __pyx_L1_error) /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":879 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":883 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 883, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 883, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 883, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L15; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":884 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif 
t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 884, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 884, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 884, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L15; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":885 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 885, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 885, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 885, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x68; goto __pyx_L15; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":886 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 886, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); 
__Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 886, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 886, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L15; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":887 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 887, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 887, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 887, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x69; goto __pyx_L15; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":888 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 888, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 888, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 888, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { 
(__pyx_v_f[0]) = 73; goto __pyx_L15; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":889 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 889, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 889, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 889, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x6C; goto __pyx_L15; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":890 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 890, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 890, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 890, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L15; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":891 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * 
elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 891, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 891, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 891, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x71; goto __pyx_L15; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":892 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 892, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 892, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 892, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L15; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":893 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 893, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = 
PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 893, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 893, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x66; goto __pyx_L15; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":894 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 894, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 894, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 894, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x64; goto __pyx_L15; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":895 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 895, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 895, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = 
__Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 895, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x67; goto __pyx_L15; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":896 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 896, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 896, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 896, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x66; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":897 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 897, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 897, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 897, 
__pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x64; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":898 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 898, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 898, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 898, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x67; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":899 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 899, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 899, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 899, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (likely(__pyx_t_6)) { (__pyx_v_f[0]) = 
79; goto __pyx_L15; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":901 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ /*else*/ { __pyx_t_3 = __Pyx_PyUnicode_FormatSafe(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 901, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 901, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 901, __pyx_L1_error) } __pyx_L15:; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":902 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":877 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ goto __pyx_L13; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":906 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ /*else*/ { __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 906, __pyx_L1_error) __pyx_v_f = __pyx_t_9; } __pyx_L13:; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":851 * cdef tuple fields * * for childname in descr.names: 
# <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":907 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842 * return () * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * int _import_umath() except -1 * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * Py_INCREF(base) # important to do this before stealing the reference below! * PyArray_SetBaseObject(arr, base) */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("set_array_base", 0); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1023 * * cdef inline void set_array_base(ndarray arr, object base): * Py_INCREF(base) # important to do this before stealing the reference below! 
# <<<<<<<<<<<<<< * PyArray_SetBaseObject(arr, base) * */ Py_INCREF(__pyx_v_base); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1024 * cdef inline void set_array_base(ndarray arr, object base): * Py_INCREF(base) # important to do this before stealing the reference below! * PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ (void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base)); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1022 * int _import_umath() except -1 * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * Py_INCREF(base) # important to do this before stealing the reference below! * PyArray_SetBaseObject(arr, base) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1026 * PyArray_SetBaseObject(arr, base) * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * base = PyArray_BASE(arr) * if base is NULL: */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_v_base; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1027 * * cdef inline object get_array_base(ndarray arr): * base = PyArray_BASE(arr) # <<<<<<<<<<<<<< * if base is NULL: * return None */ __pyx_v_base = PyArray_BASE(__pyx_v_arr); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1028 * cdef inline object get_array_base(ndarray arr): * base = PyArray_BASE(arr) * if base is NULL: # <<<<<<<<<<<<<< * return None * return <object>base */ __pyx_t_1 = ((__pyx_v_base == NULL) != 0); if (__pyx_t_1) { /* 
"../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1029 * base = PyArray_BASE(arr) * if base is NULL: * return None # <<<<<<<<<<<<<< * return <object>base * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1028 * cdef inline object get_array_base(ndarray arr): * base = PyArray_BASE(arr) * if base is NULL: # <<<<<<<<<<<<<< * return None * return <object>base */ } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1030 * if base is NULL: * return None * return <object>base # <<<<<<<<<<<<<< * * # Versions of the import_* functions which are more suitable for */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_base)); __pyx_r = ((PyObject *)__pyx_v_base); goto __pyx_L0; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1026 * PyArray_SetBaseObject(arr, base) * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * base = PyArray_BASE(arr) * if base is NULL: */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1034 * # Versions of the import_* functions which are more suitable for * # Cython code. 
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<< * try: * _import_array() */ static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_array", 0); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1035 * # Cython code. * cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1036 * cdef inline int import_array() except -1: * try: * _import_array() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.multiarray failed to import") */ __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1036, __pyx_L3_error) /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1035 * # Cython code. 
* cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1037 * try: * _import_array() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.multiarray failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1037, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1038 * _import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1038, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1038, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1035 * # Cython code. 
* cdef inline int import_array() except -1: * try: # <<<<<<<<<<<<<< * _import_array() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1034 * # Versions of the import_* functions which are more suitable for * # Cython code. * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< * try: * _import_array() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1040 * raise ImportError("numpy.core.multiarray failed to import") * * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_umath", 0); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1041 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* 
"../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1042 * cdef inline int import_umath() except -1: * try: * _import_umath() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1042, __pyx_L3_error) /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1041 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1043 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") * */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1043, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1044 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1044, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1044, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* 
"../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1041 * * cdef inline int import_umath() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1040 * raise ImportError("numpy.core.multiarray failed to import") * * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1046 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("import_ufunc", 0); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1047 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); __Pyx_XGOTREF(__pyx_t_1); __Pyx_XGOTREF(__pyx_t_2); 
__Pyx_XGOTREF(__pyx_t_3); /*try:*/ { /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1048 * cdef inline int import_ufunc() except -1: * try: * _import_umath() # <<<<<<<<<<<<<< * except Exception: * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1048, __pyx_L3_error) /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1047 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ } __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L8_try_end; __pyx_L3_error:; /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1049 * try: * _import_umath() * except Exception: # <<<<<<<<<<<<<< * raise ImportError("numpy.core.umath failed to import") */ __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); if (__pyx_t_4) { __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 1049, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_6); __Pyx_GOTREF(__pyx_t_7); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1050 * _import_umath() * except Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< */ __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 1050, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_Raise(__pyx_t_8, 0, 0, 0); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __PYX_ERR(1, 1050, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* 
"../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1047 * * cdef inline int import_ufunc() except -1: * try: # <<<<<<<<<<<<<< * _import_umath() * except Exception: */ __Pyx_XGIVEREF(__pyx_t_1); __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); goto __pyx_L1_error; __pyx_L8_try_end:; } /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1046 * raise ImportError("numpy.core.umath failed to import") * * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< * try: * _import_umath() */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { 
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(2, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(2, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(2, 122, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = 
((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 122, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 123, __pyx_L3_error) } else { /* "View.MemoryView":123 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 122, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(2, 122, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(2, 122, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, 
Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":129 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(2, 129, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(2, 129, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":130 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(2, 133, __pyx_L1_error) /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* 
"View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(2, 136, __pyx_L1_error) /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":139 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_6) ? 
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":140 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(2, 140, __pyx_L1_error) __pyx_t_3 = __pyx_v_format; __Pyx_INCREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":141 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(2, 141, __pyx_L1_error) } __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(2, 141, __pyx_L1_error) __pyx_v_self->format = __pyx_t_7; /* "View.MemoryView":144 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 
__pyx_v_self->ndim) * 2))); /* "View.MemoryView":145 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(2, 148, __pyx_L1_error) /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ __pyx_t_8 = 0; __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(2, 151, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 151, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_9; __pyx_v_idx = __pyx_t_8; __pyx_t_8 = (__pyx_t_8 + 1); /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":153 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(2, 153, __pyx_L1_error) /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":154 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(2, 157, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":158 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":159 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(2, 160, __pyx_L1_error) if (likely(__pyx_t_4)) { /* "View.MemoryView":161 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":162 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":164 * self.mode = u'c' * else: * raise 
ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(2, 164, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":166 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":169 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":170 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 170, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); 
if (__pyx_t_4) { /* "View.MemoryView":174 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(2, 176, __pyx_L1_error) /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":179 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":180 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(2, 180, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && 
unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(2, 180, __pyx_L1_error) } __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); __pyx_t_9 = __pyx_t_1; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "View.MemoryView":181 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":182 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ 
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":186 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 187, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":188 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = 
(PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(2, 189, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, 
__pyx_tuple__12, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(2, 192, __pyx_L1_error) /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":193 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":194 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":195 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":196 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":197 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":198 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* 
"View.MemoryView":199 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":200 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":203 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":205 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":207 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); 
__pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, 
self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":216 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":218 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":219 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject 
*__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":223 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_memview", 0); /* 
"View.MemoryView":227 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":228 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); 
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":231 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * def __getattr__(self, attr): */ __pyx_r = (__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":234 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":237 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ 
__Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":240 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(2, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct 
__pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; 
} static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* 
"View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":249 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * 
allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":252 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(2, 252, __pyx_L1_error) /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":253 * result = array(shape, itemsize, 
format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":255 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; 
else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(2, 281, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 281, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":282 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject 
*__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":284 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_v_state = 0; PyObject *__pyx_v__dict = 0; int __pyx_v_use_setstate; PyObject *__pyx_r = NULL; 
__Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":5 * cdef object _dict * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":6 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":8 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":9 * if _dict is not None: * state += (_dict,) * 
use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":11 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":13 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); 
__Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
__Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":17 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(2, 17, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 17, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":300 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":304 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":307 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< 
* aligned_p += alignment - offset * */ } /* "View.MemoryView":309 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { 
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(2, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(2, 345, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 345, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 345, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 345, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; 
int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":346 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":347 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":349 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 349, __pyx_L1_error) /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject 
*)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":351 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":352 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":356 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":357 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is 
NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":361 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(2, 361, __pyx_L1_error) /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == 
b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":364 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":366 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":368 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":370 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * 
self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyThread_type_lock __pyx_t_6; PyThread_type_lock __pyx_t_7; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":374 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * * cdef int i */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ } /* "View.MemoryView":378 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): 
* if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":379 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":380 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":381 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":382 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":384 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * 
__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":383 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; /* "View.MemoryView":382 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":385 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":380 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":387 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* 
"View.MemoryView":378 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":389 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":391 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":393 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if 
(unlikely(!__pyx_t_4)) __PYX_ERR(2, 393, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(2, 393, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(2, 393, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 393, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(2, 393, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":394 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 394, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(2, 394, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":393 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in 
enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":396 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":389 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":399 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; 
__Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":400 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":401 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":400 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":403 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(2, 403, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 403, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(2, 403, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* 
"View.MemoryView":406 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(2, 406, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":407 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":406 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":409 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(2, 409, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":410 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 410, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":399 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); 
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":412 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":413 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ __pyx_t_1 = (__pyx_v_self->view.readonly != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":414 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise 
TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(2, 414, __pyx_L1_error) /* "View.MemoryView":413 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ } /* "View.MemoryView":416 * raise TypeError("Cannot assign to read-only memoryview") * * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(__pyx_t_2 != Py_None)) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(2, 416, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(2, 416, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":418 * have_slices, index = _unellipsify(index, 
self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 418, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":419 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 419, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_obj = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":420 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 420, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":421 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 421, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 421, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":420 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L5; } /* "View.MemoryView":423 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_4 = 
__Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(2, 423, __pyx_L1_error) __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L5:; /* "View.MemoryView":418 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L4; } /* "View.MemoryView":425 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4:; /* "View.MemoryView":412 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":427 * 
self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":428 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":429 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":430 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 430, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":431 * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) 
__PYX_ERR(2, 431, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":430 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 430, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 430, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":429 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":432 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(2, 432, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* 
"View.MemoryView":433 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":429 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":428 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":435 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":427 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":437 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef 
__Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":441 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(2, 441, __pyx_L1_error) /* "View.MemoryView":442 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(2, 442, __pyx_L1_error) /* "View.MemoryView":443 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 443, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 443, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 443, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = 
__Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 443, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":441 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 441, __pyx_L1_error) /* "View.MemoryView":437 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":445 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; char const *__pyx_t_5; 
PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":447 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":452 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); /* "View.MemoryView":454 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_1) { /* "View.MemoryView":455 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":456 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_1 = ((__pyx_v_tmp == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":457 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(2, 457, __pyx_L1_error) /* "View.MemoryView":456 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":458 * if tmp == NULL: * raise MemoryError * 
item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":454 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":460 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":462 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":463 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":464 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":463 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":466 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 466, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L8:; /* "View.MemoryView":470 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if 
(__pyx_t_1) { /* "View.MemoryView":471 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 471, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":470 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":472 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":475 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __pyx_t_3 = __pyx_lineno; __pyx_t_4 = 
__pyx_clineno; __pyx_t_5 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_9); __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); } __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":445 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":477 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":478 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char 
*)NULL))) __PYX_ERR(2, 478, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":479 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 479, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":477 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":481 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":484 * 
"""Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 484, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":487 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 487, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":488 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":489 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 489, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 489, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if 
CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 489, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 489, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 489, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 489, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":488 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":493 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* 
"View.MemoryView":494 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":493 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":495 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":490 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 490, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(2, 490, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); 
__Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":491 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 491, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(2, 491, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":488 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":481 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":497 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the 
user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":500 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 500, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":505 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":506 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); 
__Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 506, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(2, 506, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":505 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":508 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if 
(PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 508, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 508, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 508, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(2, 508, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":510 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); 
__PYX_ERR(2, 510, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":511 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":510 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":511 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":497 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":514 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, 
Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; char *__pyx_t_5; void *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":515 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = (__pyx_v_self->view.readonly != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":516 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_t_3 = 
__Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 516, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(2, 516, __pyx_L1_error) /* "View.MemoryView":515 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ } /* "View.MemoryView":518 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); if (__pyx_t_1) { /* "View.MemoryView":519 * * if flags & PyBUF_ND: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_4 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_4; /* "View.MemoryView":518 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L6; } /* "View.MemoryView":521 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L6:; /* "View.MemoryView":523 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":524 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_4 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_4; /* "View.MemoryView":523 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L7; } /* "View.MemoryView":526 * info.strides = 
self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L7:; /* "View.MemoryView":528 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":529 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_4 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_4; /* "View.MemoryView":528 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L8; } /* "View.MemoryView":531 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L8:; /* "View.MemoryView":533 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":534 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_5 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_5; /* "View.MemoryView":533 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L9; } /* "View.MemoryView":536 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L9:; /* "View.MemoryView":538 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_6 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_6; /* "View.MemoryView":539 * * info.buf 
= self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_7 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_7; /* "View.MemoryView":540 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = self.view.readonly */ __pyx_t_8 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_8; /* "View.MemoryView":541 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = self.view.readonly * info.obj = self */ __pyx_t_8 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_8; /* "View.MemoryView":542 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = self.view.readonly # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_t_1 = __pyx_v_self->view.readonly; __pyx_v_info->readonly = __pyx_t_1; /* "View.MemoryView":543 * info.len = self.view.len * info.readonly = self.view.readonly * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":514 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj 
= 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":549 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":550 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 550, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(2, 550, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":551 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = 
__pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(2, 551, __pyx_L1_error) /* "View.MemoryView":552 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":549 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":555 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":556 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":555 * * @property * def 
base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":559 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":560 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(2, 560, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 
= PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":559 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":563 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":564 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":566 * if 
self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 566, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(2, 566, __pyx_L1_error) /* "View.MemoryView":564 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":568 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 568, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 568, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(2, 568, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 568, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":563 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 
NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":571 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":572 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":573 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 573, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__19, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 573, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = 
__pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":572 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":575 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 575, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 575, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(2, 575, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 575, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":571 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":578 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject 
*__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":579 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":578 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":582 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":583 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":582 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":586 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":587 * @property * def nbytes(self): * return self.size * 
self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":586 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":590 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int 
__pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":591 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":592 * def size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":594 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 594, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":595 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 595, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":597 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":591 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":599 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); 
__Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":590 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":601 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":602 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":603 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":602 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":605 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): 
*/ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":601 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":607 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":608 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":609 * def __repr__(self): * return 
"<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 609, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "View.MemoryView":608 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 608, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":607 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj 
*)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":612 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":611 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; 
} /* "View.MemoryView":615 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":618 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":619 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 619, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":615 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; 
__Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":621 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":624 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":625 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 625, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":621 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":627 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":629 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":631 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice 
= slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":632 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(2, 632, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":637 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 637, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":627 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":639 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":641 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":643 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":644 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(2, 644, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":649 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 649, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; 
/* "View.MemoryView":639 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; 
__PYX_ERR(2, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__21, NULL); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(2, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":653 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":654 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 654, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 654, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) 
__PYX_ERR(2, 654, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 654, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":655 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":656 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":653 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":659 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return 
isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":660 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":659 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":662 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":667 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":668 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 668, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":667 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":670 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":672 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":673 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":674 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":675 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(2, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 675, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(2, 675, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(2, 675, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(2, 675, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 675, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":676 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == 
__pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":677 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":678 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(2, 678, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 678, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__22); __Pyx_GIVEREF(__pyx_slice__22); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__22); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 678, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":679 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":677 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":681 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__22); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 681, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":682 * else: 
* result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":676 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":684 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":685 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 685, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(2, 685, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(2, 685, __pyx_L1_error) /* "View.MemoryView":684 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":687 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # 
<<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":688 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 688, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":675 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":690 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(2, 690, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":691 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":692 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 692, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__22); __Pyx_GIVEREF(__pyx_slice__22); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__22); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 692, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":691 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":694 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(2, 694, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L0; /* "View.MemoryView":662 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ 
/* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":696 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":697 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":698 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":699 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__23, NULL); if 
(unlikely(!__pyx_t_5)) __PYX_ERR(2, 699, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(2, 699, __pyx_L1_error) /* "View.MemoryView":698 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":696 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":706 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int 
__pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":707 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":714 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); /* "View.MemoryView":718 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(2, 718, __pyx_L1_error) } } #endif /* "View.MemoryView":720 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":721 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(2, 721, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":722 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = 
&memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":720 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":724 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":725 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":731 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":732 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":737 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":738 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":742 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 742, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 742, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(2, 742, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 742, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(2, 742, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 742, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(2, 742, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":743 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":747 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ 
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 747, __pyx_L1_error) /* "View.MemoryView":744 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(2, 744, __pyx_L1_error) /* "View.MemoryView":743 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":750 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":751 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":752 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":753 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":754 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * 
start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":750 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":756 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 756, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 756, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 756, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":757 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 757, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 757, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 757, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":758 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not 
None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 758, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 758, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 758, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":760 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":761 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":762 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":764 
* have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(2, 764, __pyx_L1_error) /* "View.MemoryView":770 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":742 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":772 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":773 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":774 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(2, 774, __pyx_L1_error) } /* "View.MemoryView":775 * return 
memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(2, 775, __pyx_L1_error) } /* "View.MemoryView":773 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 773, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(2, 773, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":772 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":778 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":779 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 778, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":778 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) 
__PYX_ERR(2, 778, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":706 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":803 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":823 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":825 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":826 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start 
= (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":825 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":827 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":828 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 828, __pyx_L1_error) /* "View.MemoryView":827 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":823 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":831 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":833 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":834 * * if 
have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 834, __pyx_L1_error) /* "View.MemoryView":833 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":837 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":838 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":839 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":840 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":841 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":840 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":838 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":842 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":843 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* 
"View.MemoryView":844 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":843 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":846 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":842 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":837 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":848 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":849 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":848 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":851 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":853 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":854 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":855 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":856 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ 
__pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":857 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":856 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":854 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":858 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":859 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":858 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":853 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":861 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":862 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":861 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":864 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":866 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":867 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":866 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* 
"View.MemoryView":871 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":873 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":874 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":873 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":876 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":877 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":876 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":880 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":881 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":882 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":885 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = 
(((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":886 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":885 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":888 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":890 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":891 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":892 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":893 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":892 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":895 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, 
"All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":896 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 895, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":891 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":898 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":890 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":900 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":803 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":906 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t 
shape, stride, suboffset = -1 */
/* NOTE(review): machine-generated by Cython from "View.MemoryView":906
 * (cdef char *pybuffer_index(...) except NULL). Do not hand-edit the logic;
 * regenerate from the .pyx source instead. Comments below are review notes. */

/* pybuffer_index: return a pointer to element `index` along dimension `dim`
 * of the Py_buffer `view`, starting from base pointer `bufp`.
 * Negative indices are wrapped Python-style; out-of-range indices raise
 * IndexError (returns NULL on error, per `except NULL`). */
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
  Py_ssize_t __pyx_v_shape;
  Py_ssize_t __pyx_v_stride;
  Py_ssize_t __pyx_v_suboffset;
  Py_ssize_t __pyx_v_itemsize;
  char *__pyx_v_resultp;
  char *__pyx_r;
  __Pyx_RefNannyDeclarations
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  __Pyx_RefNannySetupContext("pybuffer_index", 0);
  /* suboffset = -1 means "no indirection" for this dimension */
  __pyx_v_suboffset = -1L;
  __pyx_t_1 = __pyx_v_view->itemsize;
  __pyx_v_itemsize = __pyx_t_1;
  /* 0-dim buffer: treat the whole buffer as one flat dimension */
  __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
  if (__pyx_t_2) {
    /* shape = view.len / itemsize — guarded division (Cython emits the
     * zero-divisor and signed-overflow checks explicitly) */
    if (unlikely(__pyx_v_itemsize == 0)) {
      PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
      __PYX_ERR(2, 913, __pyx_L1_error)
    } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
      PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
      __PYX_ERR(2, 913, __pyx_L1_error)
    }
    __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize);
    __pyx_v_stride = __pyx_v_itemsize;
    goto __pyx_L3;
  }
  /*else*/ {
    /* normal case: take shape/stride (and suboffset, if any) of `dim` */
    __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
    __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
    __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
    if (__pyx_t_2) {
      __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
    }
  }
  __pyx_L3:;
  /* wrap negative index; still negative after wrapping => out of bounds */
  __pyx_t_2 = ((__pyx_v_index < 0) != 0);
  if (__pyx_t_2) {
    __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
    __pyx_t_2 = ((__pyx_v_index < 0) != 0);
    if (unlikely(__pyx_t_2)) {
      /* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */
      __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 924, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 924, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_4);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 924, __pyx_L1_error)
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __PYX_ERR(2, 924, __pyx_L1_error)
    }
  }
  /* upper-bound check */
  __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
  if (unlikely(__pyx_t_2)) {
    /* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */
    __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 927, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 927, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 927, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __PYX_ERR(2, 927, __pyx_L1_error)
  }
  /* resultp = bufp + index * stride; follow the suboffset indirection
   * (PEP 3118 indirect buffers) when suboffset >= 0 */
  __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
  __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
  if (__pyx_t_2) {
    __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
  }
  __pyx_r = __pyx_v_resultp;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* transpose_memslice: generated from "View.MemoryView":939
 * (cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0).
 * Reverses the dimension order in place by swapping shape[i]/shape[j] and
 * strides[i]/strides[j] for i < ndim/2, j = ndim-1-i. Raises ValueError for
 * indirect (suboffset >= 0) dimensions. Returns 1 on success, 0 on error. */
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
  int __pyx_v_ndim;
  Py_ssize_t *__pyx_v_shape;
  Py_ssize_t *__pyx_v_strides;
  int __pyx_v_i;
  int __pyx_v_j;
  int __pyx_r;
  int __pyx_t_1;
  Py_ssize_t *__pyx_t_2;
  long __pyx_t_3;
  long __pyx_t_4;
  Py_ssize_t __pyx_t_5;
  Py_ssize_t __pyx_t_6;
  int __pyx_t_7;
  int __pyx_t_8;
  int __pyx_t_9;
  __pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
  __pyx_v_ndim = __pyx_t_1;
  __pyx_t_2 = __pyx_v_memslice->shape;
  __pyx_v_shape = __pyx_t_2;
  __pyx_t_2 = __pyx_v_memslice->strides;
  __pyx_v_strides = __pyx_t_2;
  /* for i in range(ndim / 2): swap ends toward the middle */
  __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2);
  __pyx_t_4 = __pyx_t_3;
  for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) {
    __pyx_v_i = __pyx_t_1;
    __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
    /* strides[i], strides[j] = strides[j], strides[i] */
    __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]);
    __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]);
    (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5;
    (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6;
    /* shape[i], shape[j] = shape[j], shape[i] */
    __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]);
    __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]);
    (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6;
    (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5;
    /* short-circuit `or`: suboffsets[i] >= 0 or suboffsets[j] >= 0 */
    __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
    if (!__pyx_t_8) {
    } else {
      __pyx_t_7 = __pyx_t_8;
      goto __pyx_L6_bool_binop_done;
    }
    __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
    __pyx_t_7 = __pyx_t_8;
    __pyx_L6_bool_binop_done:;
    if (__pyx_t_7) {
      /* _err(ValueError, ...) — raises under the GIL via helper */
      __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 953, __pyx_L1_error)
    }
  }
  __pyx_r = 1;
  goto __pyx_L0;
  /* function exit code: re-acquire the GIL to record the traceback, since
   * this function runs nogil */
  __pyx_L1_error:;
  {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    #endif
    __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
    #ifdef WITH_THREAD
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
    #endif
  }
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}

/* _memoryviewslice.__dealloc__ — generated from "View.MemoryView":972.
 * Python wrapper: casts the PyObject* and forwards to the impl. */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* impl: release the slice's reference on the underlying memoryview */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__", 0);
  __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}

/* "View.MemoryView":975
 * cdef convert_item_to_object(self, char *itemp):
 * if
self.to_object_func != NULL: * return self.to_object_func(itemp) */
/* NOTE(review): machine-generated by Cython ("View.MemoryView" utility code
 * for the _memoryviewslice type). Do not hand-edit the logic; regenerate from
 * the .pyx source instead. Comments below are review notes. */

/* convert_item_to_object: turn the raw item at `itemp` into a Python object.
 * Uses the dtype-specific converter captured in `to_object_func` when set;
 * otherwise falls back to the base memoryview implementation. */
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  __Pyx_RefNannySetupContext("convert_item_to_object", 0);
  __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
  if (__pyx_t_1) {
    /* return self.to_object_func(itemp) */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 977, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_r = __pyx_t_2;
    __pyx_t_2 = 0;
    goto __pyx_L0;
  }
  /*else*/ {
    /* return memoryview.convert_item_to_object(self, itemp) */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 979, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_r = __pyx_t_2;
    __pyx_t_2 = 0;
    goto __pyx_L0;
  }
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* assign_item_from_object ("View.MemoryView":981): store Python object
 * `value` into the raw item at `itemp`, via the dtype-specific writer
 * `to_dtype_func` when set, else via the base memoryview implementation.
 * Returns None (borrowed Py_None pattern) or 0 on error. */
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  __Pyx_RefNannySetupContext("assign_item_from_object", 0);
  __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
  if (__pyx_t_1) {
    /* to_dtype_func returns 0 on error (declared `except 0`) */
    __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(2, 983, __pyx_L1_error)
    goto __pyx_L3;
  }
  /*else*/ {
    /* memoryview.assign_item_from_object(self, itemp, value) — result ignored */
    __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 985, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  }
  __pyx_L3:;
  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* property `base` ("View.MemoryView":988): returns self.from_object.
 * Python wrapper forwards to the impl below. */
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  /* return self.from_object (new reference) */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->from_object);
  __pyx_r = __pyx_v_self->from_object;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __reduce_cython__ ("(tree fragment)":1): pickling is unsupported for this
 * type (non-trivial __cinit__); always raises TypeError. */
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
  __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  __Pyx_RefNannySetupContext("__reduce_cython__", 0);
  /* raise TypeError("no default __reduce__ due to non-trivial __cinit__") */
  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __PYX_ERR(2, 2, __pyx_L1_error)
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* __setstate_cython__ ("(tree fragment)":3): unpickling likewise
 * unsupported; always raises TypeError. Wrapper forwards to the impl. */
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
  __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  __Pyx_RefNannySetupContext("__setstate_cython__", 0);
  /* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
   * — error-exit code for this impl continues on the following line */
  __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_Raise(__pyx_t_1, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __PYX_ERR(2, 4, __pyx_L1_error)
  /* "(tree fragment)":3
 * def __reduce_cython__(self):
 * raise TypeError("no default
__reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":995 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":1003 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":1004 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "View.MemoryView":1003 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1009 * * * result = _memoryviewslice(None, 0, dtype_is_object) # 
<<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1009, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1009, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1009, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1011 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1012 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1014 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1014, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1015 * * result.from_object = (<memoryview> memviewslice.memview).base * 
result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1017 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1018 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1019 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1020 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1021 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: */ Py_INCREF(Py_None); /* "View.MemoryView":1023 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); if (__pyx_t_1) { /* "View.MemoryView":1024 * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: * result.flags = PyBUF_RECORDS # 
<<<<<<<<<<<<<< * else: * result.flags = PyBUF_RECORDS_RO */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1023 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ goto __pyx_L4; } /* "View.MemoryView":1026 * result.flags = PyBUF_RECORDS * else: * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ /*else*/ { __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; } __pyx_L4:; /* "View.MemoryView":1028 * result.flags = PyBUF_RECORDS_RO * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1029 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1032 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1033 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1034 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> 
result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1035 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1036 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L6_break; /* "View.MemoryView":1034 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L6_break:; /* "View.MemoryView":1038 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1039 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1039, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1040 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1040, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1040, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 1040, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1042 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1043 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1045 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":995 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1048 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ static __Pyx_memviewslice 
*__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1051 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1052 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(2, 1052, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1053 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1051 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1055 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1056 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1048 * * 
@cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1059 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1063 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1064 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1065 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1067 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * 
dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1068 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1070 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_dim = __pyx_t_4; /* "View.MemoryView":1071 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1072 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1073 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_5 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; } /* "View.MemoryView":1059 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1076 * * @cname('__pyx_memoryview_copy_object') * cdef 
memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1079 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1080 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1080, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1076 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1083 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1090 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1091 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1092 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1090 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1094 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = 
NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1095 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1097 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1099 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 1097, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1083 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1105 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1106 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1107 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1106 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1109 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1105 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1112 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1117 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1118 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1120 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1121 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1122 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1123 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1121 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1125 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_1; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1126 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * 
break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1127 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1128 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1126 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1130 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1131 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1130 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1133 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1112 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1136 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; /* "View.MemoryView":1143 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1144 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1145 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1146 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1148 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # 
<<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1149 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1150 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1149 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1151 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); /* "View.MemoryView":1149 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1153 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for 
i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1154 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); /* "View.MemoryView":1155 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1156 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1148 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1158 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1159 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1163 * src_shape + 1, 
dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1164 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1136 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1166 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1169 * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1166 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1173 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice 
*__pyx_v_src, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1176 * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i * cdef Py_ssize_t size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1178 * cdef Py_ssize_t size = src.memview.view.itemsize * * for i in range(ndim): # <<<<<<<<<<<<<< * size *= src.shape[i] * */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1179 * * for i in range(ndim): * size *= src.shape[i] # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i])); } /* "View.MemoryView":1181 * size *= src.shape[i] * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1173 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1184 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1193 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * 
strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1194 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_idx = __pyx_t_4; /* "View.MemoryView":1195 * if order == 'F': * for idx in range(ndim): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1196 * for idx in range(ndim): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1193 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1198 * stride = stride * shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1199 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1200 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1202 * stride = stride * shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1184 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * 
Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1205 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_t_6; /* "View.MemoryView":1216 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1217 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1219 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1220 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1221 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 1221, __pyx_L1_error) /* "View.MemoryView":1220 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* "View.MemoryView":1224 
* * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1225 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1226 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1227 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1228 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1230 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); /* "View.MemoryView":1234 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1235 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = 
(((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1236 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1235 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1238 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1239 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); /* "View.MemoryView":1238 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1241 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1243 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":1205 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef 
WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1248 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1251 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 1251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1250 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, 
extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1250, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 1250, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(2, 1250, __pyx_L1_error) /* "View.MemoryView":1248 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1254 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1255 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * 
@cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 1255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1255, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(2, 1255, __pyx_L1_error) /* "View.MemoryView":1254 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* 
"View.MemoryView":1258 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1259 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":1260 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1260, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(2, 1260, __pyx_L1_error) /* "View.MemoryView":1259 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1262 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(2, 1262, __pyx_L1_error) } /* "View.MemoryView":1258 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1265 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice 
__pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; void *__pyx_t_7; int __pyx_t_8; /* "View.MemoryView":1273 * Check for overlapping memory and verify the shapes. * """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1274 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1276 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1277 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1278 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1281 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1282 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1281 * cdef 
__Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1283 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1284 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1283 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1286 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1288 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_5; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1289 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); if (__pyx_t_2) { /* "View.MemoryView":1290 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * 
src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1291 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1292 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1290 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1294 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(2, 1294, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1289 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1296 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1297 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(2, 1297, __pyx_L1_error) /* "View.MemoryView":1296 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", 
i) * */ } } /* "View.MemoryView":1299 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1301 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1302 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1301 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1304 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(2, 1304, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_7; /* "View.MemoryView":1305 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1299 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1307 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1310 * * * 
if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1311 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1310 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1312 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1313 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1312 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1315 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1317 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * 
refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1318 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); /* "View.MemoryView":1319 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1320 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1321 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1315 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1307 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1323 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_8 = (__pyx_t_2 != 0); if (__pyx_t_8) { /* "View.MemoryView":1326 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) 
__PYX_ERR(2, 1326, __pyx_L1_error) /* "View.MemoryView":1327 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(2, 1327, __pyx_L1_error) /* "View.MemoryView":1323 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1329 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1330 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1331 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1333 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1334 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1265 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save 
= __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1337 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1341 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1343 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1344 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1345 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1346 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = 
mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1348 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1349 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1350 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1351 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1337 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1359 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1363 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1364 * * if dtype_is_object: * 
refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1363 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1359 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1368 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1371 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1368 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1374 * * 
@cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1378 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1379 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_4) { /* "View.MemoryView":1380 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_4 = (__pyx_v_inc != 0); if (__pyx_t_4) { /* "View.MemoryView":1381 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1380 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1383 * Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1379 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto 
__pyx_L5; } /* "View.MemoryView":1385 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1386 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1388 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1374 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1394 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1397 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1398 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, 
__pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1400 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1394 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1404 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; /* "View.MemoryView":1408 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1409 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1411 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1412 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = 
__pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1413 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); /* "View.MemoryView":1414 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1411 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1416 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1417 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1419 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1404 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ 
/* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(2, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(2, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if 
(unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(2, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(2, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 1, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = 0; PyObject *__pyx_v___pyx_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % 
__pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":5 * cdef object __pyx_result * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* 
function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(2, 6, __pyx_L1_error) /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":7 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_6 = (__pyx_t_1 != 0); if (__pyx_t_6) { /* "(tree fragment)":9 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(2, 9, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":10 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef 
__pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); /* "(tree fragment)":12 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(2, 12, 
__pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(2, 13, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(2, 13, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 13, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":14 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(2, 14, 
__pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject 
*a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_array___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static 
PyMethodDef __pyx_methods_array[] = { {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "GPy.util.choleskies_cython.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ 
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } 
static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "GPy.util.choleskies_cython.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if 
(unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = 
Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", 
(PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, 
/*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "GPy.util.choleskies_cython.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; 
Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { 
PyVarObject_HEAD_INIT(0, 0) "GPy.util.choleskies_cython._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_choleskies_cython(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, 
(void*)__pyx_pymod_exec_choleskies_cython}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "choleskies_cython", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_D, __pyx_k_D, sizeof(__pyx_k_D), 0, 0, 1, 1}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 
1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, {&__pyx_n_s_GPy_util_choleskies_cython, __pyx_k_GPy_util_choleskies_cython, sizeof(__pyx_k_GPy_util_choleskies_cython), 0, 0, 1, 1}, {&__pyx_kp_s_GPy_util_choleskies_cython_pyx, __pyx_k_GPy_util_choleskies_cython_pyx, sizeof(__pyx_k_GPy_util_choleskies_cython_pyx), 0, 0, 1, 0}, {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_L, __pyx_k_L, sizeof(__pyx_k_L), 0, 0, 1, 1}, {&__pyx_n_s_L_cont, __pyx_k_L_cont, sizeof(__pyx_k_L_cont), 0, 0, 1, 1}, {&__pyx_n_s_M, __pyx_k_M, sizeof(__pyx_k_M), 0, 0, 1, 1}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_s_N, __pyx_k_N, sizeof(__pyx_k_N), 0, 0, 1, 1}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, 
{&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_asarray, __pyx_k_asarray, sizeof(__pyx_k_asarray), 0, 0, 1, 1}, {&__pyx_n_s_ascontiguousarray, __pyx_k_ascontiguousarray, sizeof(__pyx_k_ascontiguousarray), 0, 0, 1, 1}, {&__pyx_n_s_backprop_gradient, __pyx_k_backprop_gradient, sizeof(__pyx_k_backprop_gradient), 0, 0, 1, 1}, {&__pyx_n_s_backprop_gradient_par, __pyx_k_backprop_gradient_par, sizeof(__pyx_k_backprop_gradient_par), 0, 0, 1, 1}, {&__pyx_n_s_backprop_gradient_par_c, __pyx_k_backprop_gradient_par_c, sizeof(__pyx_k_backprop_gradient_par_c), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, 
{&__pyx_n_s_count, __pyx_k_count, sizeof(__pyx_k_count), 0, 0, 1, 1}, {&__pyx_n_s_d, __pyx_k_d, sizeof(__pyx_k_d), 0, 0, 1, 1}, {&__pyx_n_s_dL, __pyx_k_dL, sizeof(__pyx_k_dL), 0, 0, 1, 1}, {&__pyx_n_s_dL_dK, __pyx_k_dL_dK, sizeof(__pyx_k_dL_dK), 0, 0, 1, 1}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_empty, __pyx_k_empty, sizeof(__pyx_k_empty), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_flat, __pyx_k_flat, sizeof(__pyx_k_flat), 0, 0, 1, 1}, {&__pyx_n_s_flat_to_triang, __pyx_k_flat_to_triang, sizeof(__pyx_k_flat_to_triang), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, {&__pyx_n_s_m, __pyx_k_m, sizeof(__pyx_k_m), 0, 0, 1, 1}, 
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mm, __pyx_k_mm, sizeof(__pyx_k_mm), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0}, {&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, 
{&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_ret, __pyx_k_ret, sizeof(__pyx_k_ret), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_triang_to_flat, __pyx_k_triang_to_flat, sizeof(__pyx_k_triang_to_flat), 0, 0, 1, 1}, {&__pyx_n_s_tril, __pyx_k_tril, 
sizeof(__pyx_k_tril), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_xrange, __pyx_k_xrange, sizeof(__pyx_k_xrange), 0, 0, 1, 1}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 27, __pyx_L1_error) #if PY_MAJOR_VERSION >= 3 __pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_xrange) __PYX_ERR(0, 101, __pyx_L1_error) #else __pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_xrange); if (!__pyx_builtin_xrange) __PYX_ERR(0, 101, __pyx_L1_error) #endif __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 272, __pyx_L1_error) __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 856, __pyx_L1_error) __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 1038, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(2, 148, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(2, 151, __pyx_L1_error) __pyx_builtin_TypeError = 
__Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(2, 2, __pyx_L1_error) __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(2, 400, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(2, 609, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(2, 828, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 272, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 276, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":306 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not 
little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 306, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":856 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 856, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":880 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 880, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1038 * _import_array() * except Exception: * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_umath() except -1: */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 1038, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "../../../.conda/envs/bays-dev-py2/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":1044 * _import_umath() * except 
Exception: * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< * * cdef inline int import_ufunc() except -1: */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(2, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(2, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(2, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(2, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = 
self.data * info.len = self.len */ __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(2, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(2, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(2, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "View.MemoryView":414 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(2, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); /* "View.MemoryView":491 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(2, 491, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_tuple__16); __Pyx_GIVEREF(__pyx_tuple__16); /* "View.MemoryView":516 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(2, 516, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "View.MemoryView":566 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(2, 566, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "View.MemoryView":573 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__19 = PyTuple_New(1); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(2, 573, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__19, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__19); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(2, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__20); __Pyx_GIVEREF(__pyx_tuple__20); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to 
non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(2, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); /* "View.MemoryView":678 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__22 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__22)) __PYX_ERR(2, 678, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__22); __Pyx_GIVEREF(__pyx_slice__22); /* "View.MemoryView":699 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(2, 699, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(2, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(2, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); 
__Pyx_GIVEREF(__pyx_tuple__25); /* "GPy/util/choleskies_cython.pyx":14 * np.import_array() * * def flat_to_triang(double[:, :] flat, int M): # <<<<<<<<<<<<<< * """take a matrix N x D and return a D X M x M array where * */ __pyx_tuple__26 = PyTuple_Pack(9, __pyx_n_s_flat, __pyx_n_s_M, __pyx_n_s_D, __pyx_n_s_N, __pyx_n_s_count, __pyx_n_s_ret, __pyx_n_s_d, __pyx_n_s_m, __pyx_n_s_mm); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); __Pyx_GIVEREF(__pyx_tuple__26); __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(2, 0, 9, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_GPy_util_choleskies_cython_pyx, __pyx_n_s_flat_to_triang, 14, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) __PYX_ERR(0, 14, __pyx_L1_error) /* "GPy/util/choleskies_cython.pyx":35 * return ret * * def triang_to_flat(double[:, :, :] L): # <<<<<<<<<<<<<< * cdef int D = L.shape[0] * cdef int M = L.shape[1] */ __pyx_tuple__28 = PyTuple_Pack(10, __pyx_n_s_L, __pyx_n_s_L, __pyx_n_s_D, __pyx_n_s_M, __pyx_n_s_N, __pyx_n_s_count, __pyx_n_s_flat, __pyx_n_s_d, __pyx_n_s_m, __pyx_n_s_mm); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(0, 35, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__28); __Pyx_GIVEREF(__pyx_tuple__28); __pyx_codeobj__29 = (PyObject*)__Pyx_PyCode_New(1, 0, 10, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__28, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_GPy_util_choleskies_cython_pyx, __pyx_n_s_triang_to_flat, 35, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__29)) __PYX_ERR(0, 35, __pyx_L1_error) /* "GPy/util/choleskies_cython.pyx":51 * return flat * * def backprop_gradient(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<< * cdef double[:, ::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] */ __pyx_tuple__30 = PyTuple_Pack(7, __pyx_n_s_dL, __pyx_n_s_L, __pyx_n_s_dL_dK, __pyx_n_s_N, 
__pyx_n_s_k, __pyx_n_s_j, __pyx_n_s_i); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(0, 51, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__30); __Pyx_GIVEREF(__pyx_tuple__30); __pyx_codeobj__31 = (PyObject*)__Pyx_PyCode_New(2, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__30, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_GPy_util_choleskies_cython_pyx, __pyx_n_s_backprop_gradient, 51, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__31)) __PYX_ERR(0, 51, __pyx_L1_error) /* "GPy/util/choleskies_cython.pyx":67 * return dL_dK * * def backprop_gradient_par(double[:,:] dL, double[:,:] L): # <<<<<<<<<<<<<< * cdef double[:,::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] */ __pyx_tuple__32 = PyTuple_Pack(7, __pyx_n_s_dL, __pyx_n_s_L, __pyx_n_s_dL_dK, __pyx_n_s_N, __pyx_n_s_k, __pyx_n_s_j, __pyx_n_s_i); if (unlikely(!__pyx_tuple__32)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__32); __Pyx_GIVEREF(__pyx_tuple__32); __pyx_codeobj__33 = (PyObject*)__Pyx_PyCode_New(2, 0, 7, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__32, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_GPy_util_choleskies_cython_pyx, __pyx_n_s_backprop_gradient_par, 67, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__33)) __PYX_ERR(0, 67, __pyx_L1_error) /* "GPy/util/choleskies_cython.pyx":110 * dL[k, k] /= (2.0 * L[k, k]) * * def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<< * cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) */ __pyx_tuple__34 = PyTuple_Pack(5, __pyx_n_s_dL, __pyx_n_s_L, __pyx_n_s_dL_dK, __pyx_n_s_L_cont, __pyx_n_s_N); if (unlikely(!__pyx_tuple__34)) __PYX_ERR(0, 110, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__34); __Pyx_GIVEREF(__pyx_tuple__34); __pyx_codeobj__35 = (PyObject*)__Pyx_PyCode_New(2, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, 
__pyx_empty_tuple, __pyx_tuple__34, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_GPy_util_choleskies_cython_pyx, __pyx_n_s_backprop_gradient_par_c, 110, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__35)) __PYX_ERR(0, 110, __pyx_L1_error) /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__36 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__36)) __PYX_ERR(2, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__36); __Pyx_GIVEREF(__pyx_tuple__36); /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__37 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__37)) __PYX_ERR(2, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__37); __Pyx_GIVEREF(__pyx_tuple__37); /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__38 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__38)) __PYX_ERR(2, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__38); __Pyx_GIVEREF(__pyx_tuple__38); /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__39 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__39)) __PYX_ERR(2, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__39); __Pyx_GIVEREF(__pyx_tuple__39); /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and 
indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__40 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__40)) __PYX_ERR(2, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__40); __Pyx_GIVEREF(__pyx_tuple__40); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_tuple__41 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__41)) __PYX_ERR(2, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__41); __Pyx_GIVEREF(__pyx_tuple__41); __pyx_codeobj__42 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__41, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__42)) __PYX_ERR(2, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); 
/*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error) __pyx_type___pyx_array.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if 
(PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(2, 279, __pyx_L1_error) __pyx_type___pyx_MemviewEnum.tp_print = 0; if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(2, 279, __pyx_L1_error) __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error) __pyx_type___pyx_memoryview.tp_print = 0; if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { 
__pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(2, 961, __pyx_L1_error) __pyx_type___pyx_memoryviewslice.tp_print = 0; if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(2, 961, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(2, 961, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_type_import_code(void) { __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); /*--- Type import code ---*/ __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(3, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(3, 9, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 206, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore); if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(1, 206, __pyx_L1_error) __pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(1, 229, __pyx_L1_error) __pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(1, 233, __pyx_L1_error) __pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore); if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(1, 242, __pyx_L1_error) __pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Warn); if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(1, 918, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_modinit_variable_import_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); /*--- Variable import code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int 
__Pyx_modinit_function_import_code(void) { __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); /*--- Function import code ---*/ __pyx_t_1 = PyImport_ImportModule("scipy.linalg.cython_blas"); if (!__pyx_t_1) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_1, "ddot", (void (**)(void))&__pyx_f_5scipy_6linalg_11cython_blas_ddot, "__pyx_t_5scipy_6linalg_11cython_blas_d (int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_1, "dscal", (void (**)(void))&__pyx_f_5scipy_6linalg_11cython_blas_dscal, "void (int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_ImportFunction(__pyx_t_1, "dsymv", (void (**)(void))&__pyx_f_5scipy_6linalg_11cython_blas_dsymv, "void (char *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *, __pyx_t_5scipy_6linalg_11cython_blas_d *, __pyx_t_5scipy_6linalg_11cython_blas_d *, int *)") < 0) __PYX_ERR(0, 1, __pyx_L1_error) Py_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_RefNannyFinishContext(); return -1; } #if PY_MAJOR_VERSION < 3 #ifdef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC void #else #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #endif #else #ifdef CYTHON_NO_PYINIT_EXPORT #define __Pyx_PyMODINIT_FUNC PyObject * #else #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC #endif #endif #if PY_MAJOR_VERSION < 3 __Pyx_PyMODINIT_FUNC initcholeskies_cython(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC initcholeskies_cython(void) #else __Pyx_PyMODINIT_FUNC PyInit_choleskies_cython(void) CYTHON_SMALL_CODE; /*proto*/ __Pyx_PyMODINIT_FUNC PyInit_choleskies_cython(void) #if 
CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { #if PY_VERSION_HEX >= 0x030700A1 static PY_INT64_T main_interpreter_id = -1; PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); if (main_interpreter_id == -1) { main_interpreter_id = current_id; return (unlikely(current_id == -1)) ? -1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if 
(unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_choleskies_cython(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; int __pyx_t_2; static PyThread_type_lock __pyx_t_3[8]; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'choleskies_cython' has already been imported. Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_choleskies_cython(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if 
(__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("choleskies_cython", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_GPy__util__choleskies_cython) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "GPy.util.choleskies_cython")) { if (unlikely(PyDict_SetItemString(modules, "GPy.util.choleskies_cython", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() != 0)) goto __pyx_L1_error; if (unlikely(__Pyx_modinit_type_import_code() != 0)) goto __pyx_L1_error; (void)__Pyx_modinit_variable_import_code(); if (unlikely(__Pyx_modinit_function_import_code() != 0)) goto __pyx_L1_error; /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "GPy/util/choleskies_cython.pyx":7 * # Copyright James Hensman and Alan Saul 2015 * * import numpy as np # <<<<<<<<<<<<<< * from cython.parallel import prange, parallel * cimport numpy as np */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 7, __pyx_L1_error) 
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "GPy/util/choleskies_cython.pyx":12 * cimport scipy.linalg.cython_blas as cblas * * np.import_array() # <<<<<<<<<<<<<< * * def flat_to_triang(double[:, :] flat, int M): */ __pyx_t_2 = __pyx_f_5numpy_import_array(); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 12, __pyx_L1_error) /* "GPy/util/choleskies_cython.pyx":14 * np.import_array() * * def flat_to_triang(double[:, :] flat, int M): # <<<<<<<<<<<<<< * """take a matrix N x D and return a D X M x M array where * */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4util_17choleskies_cython_1flat_to_triang, NULL, __pyx_n_s_GPy_util_choleskies_cython); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_flat_to_triang, __pyx_t_1) < 0) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "GPy/util/choleskies_cython.pyx":35 * return ret * * def triang_to_flat(double[:, :, :] L): # <<<<<<<<<<<<<< * cdef int D = L.shape[0] * cdef int M = L.shape[1] */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4util_17choleskies_cython_3triang_to_flat, NULL, __pyx_n_s_GPy_util_choleskies_cython); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 35, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_triang_to_flat, __pyx_t_1) < 0) __PYX_ERR(0, 35, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "GPy/util/choleskies_cython.pyx":51 * return flat * * def backprop_gradient(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<< * cdef double[:, ::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4util_17choleskies_cython_5backprop_gradient, NULL, __pyx_n_s_GPy_util_choleskies_cython); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 51, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_backprop_gradient, __pyx_t_1) < 0) __PYX_ERR(0, 51, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* 
"GPy/util/choleskies_cython.pyx":67 * return dL_dK * * def backprop_gradient_par(double[:,:] dL, double[:,:] L): # <<<<<<<<<<<<<< * cdef double[:,::1] dL_dK = np.tril(dL) * cdef int N = L.shape[0] */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4util_17choleskies_cython_7backprop_gradient_par, NULL, __pyx_n_s_GPy_util_choleskies_cython); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_backprop_gradient_par, __pyx_t_1) < 0) __PYX_ERR(0, 67, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "GPy/util/choleskies_cython.pyx":110 * dL[k, k] /= (2.0 * L[k, k]) * * def backprop_gradient_par_c(double[:, :] dL, double[:, :] L): # <<<<<<<<<<<<<< * cdef double[:, ::1] dL_dK = np.tril(dL) # makes a copy, c-contig * cdef double[:, ::1] L_cont = np.ascontiguousarray(L) */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3GPy_4util_17choleskies_cython_9backprop_gradient_par_c, NULL, __pyx_n_s_GPy_util_choleskies_cython); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 110, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_backprop_gradient_par_c, __pyx_t_1) < 0) __PYX_ERR(0, 110, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "GPy/util/choleskies_cython.pyx":1 * #cython: wraparaound=False # <<<<<<<<<<<<<< * #cython: boundscheck=False * #cython: nonecheck=False */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":209 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 209, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(2, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__36, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__37, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__38, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__39, NULL); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(2, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__40, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_3[0] = PyThread_allocate_lock(); __pyx_t_3[1] = PyThread_allocate_lock(); __pyx_t_3[2] = PyThread_allocate_lock(); __pyx_t_3[3] = PyThread_allocate_lock(); __pyx_t_3[4] = PyThread_allocate_lock(); __pyx_t_3[5] = PyThread_allocate_lock(); __pyx_t_3[6] = PyThread_allocate_lock(); __pyx_t_3[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_3, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":545 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 545, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(2, 545, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":991 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 991, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(2, 991, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(2, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init GPy.util.choleskies_cython", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init 
GPy.util.choleskies_cython"); } __pyx_L0:;
    /* NOTE(review): Cython-generated code -- do not hand-edit; regenerate from
     * GPy/util/choleskies_cython.pyx instead.  The fragment above closes the
     * error path of the module-init function whose body begins in an earlier
     * chunk; below is its success/exit epilogue. */
    __Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
    return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
    return __pyx_m;
#else
    return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
/* Import the reference-count debugging API: loads module `modname`,
 * reads its "RefNannyAPI" attribute, and converts it to a raw pointer.
 * Returns NULL on any failure (callers handle that). */
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
    PyObject *m = NULL, *p = NULL;
    void *r = NULL;
    m = PyImport_ImportModule(modname);
    if (!m) goto end;
    p = PyObject_GetAttrString(m, "RefNannyAPI");
    if (!p) goto end;
    r = PyLong_AsVoidPtr(p);
end:
    Py_XDECREF(p);
    Py_XDECREF(m);
    return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
/* Attribute lookup that goes straight through the type's getattr slots
 * (tp_getattro, then the legacy tp_getattr on Python 2) before falling
 * back to the generic PyObject_GetAttr. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
    PyTypeObject* tp = Py_TYPE(obj);
    if (likely(tp->tp_getattro))
        return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
    if (likely(tp->tp_getattr))
        return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
    return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
/* Look up `name` on the builtins module (__pyx_b); on failure set a
 * NameError matching CPython's "name '...' is not defined" message. */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
    PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
    if (unlikely(!result)) {
        PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
            "name '%U' is not defined", name);
#else
            "name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
    }
    return result;
}
/* RaiseArgTupleInvalid */
/* Raise a TypeError describing a wrong number of positional arguments:
 * chooses "at least"/"at most" from the min/max bounds, or "exactly"
 * when `exact` is set.  (The PyErr_Format call is completed in the next
 * chunk of this file.) */
static void __Pyx_RaiseArgtupleInvalid(
    const char* func_name,
    int exact,
    Py_ssize_t num_min,
    Py_ssize_t num_max,
    Py_ssize_t num_found)
{
    Py_ssize_t num_expected;
    const char *more_or_less;
    if (num_found < num_min) {
        num_expected = num_min;
        more_or_less = "at least";
    } else {
        num_expected = num_max;
        more_or_less = "at most";
    }
    if (exact) {
        more_or_less = "exactly";
    }
    PyErr_Format(PyExc_TypeError,
                 "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
                 func_name, more_or_less, num_expected,
                 (num_expected == 1) ?
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 
0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL 
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. 
*/ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = 
PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject 
*arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (!buf) { PyErr_SetString(PyExc_ValueError, "buf is NULL."); goto fail; } else if (memviewslice->memview || 
memviewslice->data) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview || (PyObject *) memview == Py_None) return; if (__pyx_get_slice_count(memview) < 0) __pyx_fatalerror("Acquisition count is %d (line %d)", 
__pyx_get_slice_count(memview), lineno);
    /* NOTE(review): Cython-generated memoryview runtime -- do not hand-edit;
     * regenerate from the .pyx.  The fragment above finishes the
     * __pyx_fatalerror call inside __Pyx_INC_MEMVIEW (begun in the previous
     * chunk); below is the remainder of that function's body. */
    first_time = __pyx_add_acquisition_count(memview) == 0;
    if (first_time) {
        if (have_gil) {
            Py_INCREF((PyObject *) memview);
        } else {
            /* no GIL held: acquire it just for the refcount bump */
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_INCREF((PyObject *) memview);
            PyGILState_Release(_gilstate);
        }
    }
}
/* Release one acquisition of a memoryview slice.  Decrements the slice's
 * acquisition count; when the last acquisition goes away, drops the owned
 * reference to the memoryview object (taking the GIL if the caller does
 * not hold it).  Aborts via __pyx_fatalerror if the count is already <= 0. */
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
                                             int have_gil, int lineno) {
    int last_time;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (!memview ) {
        return;
    } else if ((PyObject *) memview == Py_None) {
        /* None placeholder: just clear the field, nothing to release */
        memslice->memview = NULL;
        return;
    }
    if (__pyx_get_slice_count(memview) <= 0)
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);
    last_time = __pyx_sub_acquisition_count(memview) == 1;
    memslice->data = NULL;
    if (last_time) {
        if (have_gil) {
            Py_CLEAR(memslice->memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_CLEAR(memslice->memview);
            PyGILState_Release(_gilstate);
        }
    } else {
        memslice->memview = NULL;
    }
}
/* None */
/* Floor division for long: starts from C's truncating division and
 * subtracts 1 from the quotient when the remainder is nonzero and has the
 * opposite sign of the divisor ((r ^ b) < 0), matching Python's // semantics. */
static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
    long q = a / b;
    long r = a - q*b;
    q -= ((r != 0) & ((r ^ b) < 0));
    return q;
}
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
/* Fast-path PyErr_Restore: install (type, value, tb) as the current
 * exception directly on the thread state, stealing the references, and
 * release whatever was there before. */
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    tmp_type = tstate->curexc_type;
    tmp_value = tstate->curexc_value;
    tmp_tb = tstate->curexc_traceback;
    tstate->curexc_type = type;
    tstate->curexc_value = value;
    tstate->curexc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
}
/* Fast-path PyErr_Fetch: move the current exception out of the thread
 * state into *type/*value/*tb (ownership transfers to the caller) and
 * clear the thread-state fields. */
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    *type = tstate->curexc_type;
    *value = tstate->curexc_value;
    *tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
}
#endif
/* WriteUnraisableException */
static void
__Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_PyThreadState_declare #ifdef WITH_THREAD PyGILState_STATE state; if (nogil) state = PyGILState_Ensure(); #ifdef _MSC_VER else state = (PyGILState_STATE)-1; #endif #endif __Pyx_PyThreadState_assign __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } #ifdef WITH_THREAD if (nogil) PyGILState_Release(state); #endif } /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, 
value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must 
derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* DictGetItem */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { if (unlikely(PyTuple_Check(key))) { PyObject* args = PyTuple_Pack(1, key); if (likely(args)) { PyErr_SetObject(PyExc_KeyError, args); Py_DECREF(args); } } else { PyErr_SetObject(PyExc_KeyError, key); } } return NULL; } Py_INCREF(value); return value; } #endif /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); }  /* tail of __Pyx_RaiseNeedMoreValuesError: pluralise "value(s)" */
/* RaiseNoneIterError */
/* Raise the standard TypeError used when None is iterated/unpacked. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* ExtTypeTest */
/* Return 1 if obj is an instance of type (per __Pyx_TypeCheck); otherwise set
 * a TypeError (or SystemError if type is NULL) and return 0. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (likely(__Pyx_TypeCheck(obj, type)))
        return 1;
    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                 Py_TYPE(obj)->tp_name, type->tp_name);
    return 0;
}
/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
/* Walk tstate->exc_info past frames whose exc_type is NULL or None and return
 * the first frame holding a real in-flight exception (never returns NULL:
 * stops at the last frame when previous_item is NULL). */
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
    _PyErr_StackItem *exc_info = tstate->exc_info;
    while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
           exc_info->previous_item != NULL)
    {
        exc_info = exc_info->previous_item;
    }
    return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
/* Copy the currently-handled exception triple out of the thread state into
 * *type/*value/*tb, increfing each (entries may be NULL). */
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    #if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
    *type = exc_info->exc_type;
    *value = exc_info->exc_value;
    *tb = exc_info->exc_traceback;
    #else
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
    #endif
    /* The thread state keeps its own references; give the caller owned ones. */
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
}
/* Install type/value/tb as the handled exception (references are stored
 * directly, i.e. stolen); the previously installed triple is stashed in
 * tmp_* and released just after this fragment (Py_XDECREFs follow on the
 * next source line). */
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    #if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = tstate->exc_info;
    tmp_type = exc_info->exc_type;
    tmp_value = exc_info->exc_value;
    tmp_tb = exc_info->exc_traceback;
    exc_info->exc_type = type;
    exc_info->exc_value = value;
    exc_info->exc_traceback = tb;
    #else
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = type;
    tstate->exc_value = value;
tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if 
CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = 
((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = 
((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* None */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { Py_ssize_t q = a / b; Py_ssize_t r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int 
boundscheck) {
/* (tail of __Pyx_GetItemInt_List_Fast: fast o[i] for lists) */
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    Py_ssize_t wrapped_i = i;
    /* Emulate Python's negative-index wraparound only when requested. */
    if (wraparound & unlikely(i < 0)) {
        wrapped_i += PyList_GET_SIZE(o);
    }
    if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
        PyObject *r = PyList_GET_ITEM(o, wrapped_i);  /* borrowed ref */
        Py_INCREF(r);                                 /* return a new ref */
        return r;
    }
    /* Out of bounds: take the generic path so the normal exception is set. */
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* Fast o[i] for tuples: inline PyTuple_GET_ITEM with optional wraparound and
 * bounds checking; falls back to the generic item lookup when out of range,
 * or to PySequence_GetItem when unsafe macros are disabled. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
                                                           CYTHON_NCP_UNUSED int wraparound,
                                                           CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
    Py_ssize_t wrapped_i = i;
    if (wraparound & unlikely(i < 0)) {
        wrapped_i += PyTuple_GET_SIZE(o);
    }
    if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
        PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);  /* borrowed ref */
        Py_INCREF(r);
        return r;
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* Dispatching o[i]: tries the list fast path (when is_list or an exact list),
 * then the tuple fast path, then the type's sq_item slot (with manual
 * negative-index adjustment via sq_length); final fallback is the generic
 * PyObject_GetItem.  (head only; continues on the next source line) */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
                                                     CYTHON_NCP_UNUSED int wraparound,
                                                     CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
    if (is_list || PyList_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
        if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
            PyObject *r = PyList_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    } else if (PyTuple_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ?
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) 
PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } length = stop - start; if (unlikely(length <= 0)) return PyUnicode_FromUnicode(NULL, 0); cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? r : __Pyx_GetAttr3Default(d); } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, 
int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare 
__Pyx_PyThreadState_assign
    /* (tail of the Py2 __Pyx_inner_PyErr_GivenExceptionMatches2: the current
     * error indicator is saved around PyObject_IsSubclass, which may itself
     * raise, and restored afterwards) */
    __Pyx_ErrFetch(&exception, &value, &tb);
    res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
    if (unlikely(res == -1)) {
        /* subclass check failed internally: report and treat as no-match */
        PyErr_WriteUnraisable(err);
        res = 0;
    }
    if (!res) {
        res = PyObject_IsSubclass(err, exc_type2);
        if (unlikely(res == -1)) {
            PyErr_WriteUnraisable(err);
            res = 0;
        }
    }
    __Pyx_ErrRestore(exception, value, tb);
    return res;
}
#else
/* Py3 variant: exception classes are real types, so a plain MRO walk via
 * __Pyx_IsSubtype suffices and no error state needs saving. */
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
    int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
    if (!res) {
        res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
    }
    return res;
}
#endif
/* Return 1 if exc_type matches any entry of the tuple (identity first, then
 * subclass), mirroring "except (A, B)" semantics. */
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
    Py_ssize_t i, n;
    assert(PyExceptionClass_Check(exc_type));
    n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
    /* quick identity scan before the more expensive subclass checks */
    for (i=0; i<n; i++) {
        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
    }
#endif
    for (i=0; i<n; i++) {
        PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
        if (likely(exc_type == t)) return 1;
#endif
        if (likely(PyExceptionClass_Check(t))) {
            if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
        } else {
            /* non-class tuple entries are silently ignored */
        }
    }
    return 0;
}
/* Fast replacement for PyErr_GivenExceptionMatches: identity, single-class
 * and tuple cases handled inline; anything else defers to CPython. */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
    if (likely(err == exc_type)) return 1;
    if (likely(PyExceptionClass_Check(err))) {
        if (likely(PyExceptionClass_Check(exc_type))) {
            return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
        } else if (likely(PyTuple_Check(exc_type))) {
            return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
        } else {
            /* fall through to the generic CPython check below */
        }
    }
    return PyErr_GivenExceptionMatches(err, exc_type);
}
/* Two-class variant used by generated except-clauses: matches err against
 * either exc_type1 or exc_type2.  (head only; body continues on the next
 * source line) */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
    assert(PyExceptionClass_Check(exc_type1));
    assert(PyExceptionClass_Check(exc_type2));
    if (likely(err == exc_type1 || err == exc_type2)) return 1;
    if
(likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) { #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << 
PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* PyObject_GenericGetAttrNoDict */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { PyErr_Format(PyExc_AttributeError, #if PY_MAJOR_VERSION >= 3 "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name); #else "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name)); #endif return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { PyObject *descr; PyTypeObject *tp = Py_TYPE(obj); if (unlikely(!PyString_Check(attr_name))) { return PyObject_GenericGetAttr(obj, attr_name); } assert(!tp->tp_dictoffset); descr = _PyType_Lookup(tp, attr_name); if (unlikely(!descr)) { return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); } Py_INCREF(descr); #if PY_MAJOR_VERSION < 3 if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) #endif { descrgetfunc f = 
Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; #endif reduce_ex 
= __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_cython); if (unlikely(!reduce_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto BAD; setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate_cython); if (unlikely(!setstate_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto BAD; } PyType_Modified((PyTypeObject*)type_obj); } } goto GOOD; BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define 
__PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size) { PyObject *result = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif result = PyObject_GetAttrString(module, class_name); if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if ((size_t)basicsize < size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s size changed, may indicate binary incompatibility. " "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); goto bad; } else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. 
" "Expected %zd from C header, got %zd from PyObject", module_name, class_name, size, basicsize); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(result); return NULL; } #endif /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = 
__pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, 
/*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} else if (__Pyx_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); view->obj = NULL; Py_DECREF(obj); } 
#endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; 
ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? 
"'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * 
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } 
ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; 
break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void *)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim 
!= ndim - 1) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (spec & (__Pyx_MEMVIEW_PTR)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (buf->suboffsets) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (buf->suboffsets && buf->suboffsets[dim] >= 0) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (!buf->suboffsets || (buf->suboffsets && buf->suboffsets[dim] < 0)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; 
__Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (buf->ndim != ndim) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned) buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (!__pyx_check_strides(buf, i, ndim, spec)) goto fail; if (!__pyx_check_suboffsets(buf, i, ndim, spec)) goto fail; } if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS_RO | writable_flag, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return 
(target_type) value;\ } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS_RO | writable_flag, 3, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if 
(sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* MemviewDtypeToObject */ static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp) { return (PyObject *) PyFloat_FromDouble(*(double *) itemp); } static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj) { double value = __pyx_PyFloat_AsDouble(obj); if ((value == (double)-1) && PyErr_Occurred()) return 0; *(double *) itemp = value; return 1; } /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif /* Arithmetic */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; 
z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } #if 1 static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabsf(b.real) >= fabsf(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { float r = b.imag / b.real; float s = 1.0 / (b.real + b.imag * r); return __pyx_t_float_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { float r = b.real / b.imag; float s = 1.0 / (b.imag + b.real * r); return __pyx_t_float_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } } #else static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { if (b.imag == 0) { return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); } else { float denom = b.real * b.real + b.imag * b.imag; return __pyx_t_float_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * b.imag) / denom); } } #endif static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { __pyx_t_float_complex 
z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(a, a); case 3: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, a); case 4: z = __Pyx_c_prod_float(a, a); return __Pyx_c_prod_float(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = powf(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2f(0, -1); } } else { r = __Pyx_c_abs_float(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif /* Declarations */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif /* Arithmetic */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE 
int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } #if 1 static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else if (fabs(b.real) >= fabs(b.imag)) { if (b.real == 0 && b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); } else { double r = b.imag / b.real; double s = 1.0 / (b.real + b.imag * r); return __pyx_t_double_complex_from_parts( (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); } } else { double r = b.real / b.imag; double s = 1.0 / (b.imag + b.real * r); return __pyx_t_double_complex_from_parts( (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); } } #else static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { if (b.imag == 0) { return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); } else { double denom = b.real * b.real + b.imag * b.imag; return __pyx_t_double_complex_from_parts( (a.real * b.real + a.imag * b.imag) / denom, (a.imag * b.real - a.real * b.imag) / denom); } } #endif static CYTHON_INLINE __pyx_t_double_complex 
__Pyx_c_neg_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(a, a); case 3: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(z, a); case 4: z = __Pyx_c_prod_double(a, a); return __Pyx_c_prod_double(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } else if (b.imag == 0) { z.real = pow(a.real, b.real); z.imag = 0; return z; } else if (a.real > 0) { r = a.real; theta = 0; } else { r = -a.real; theta = atan2(0, -1); } } else { r = __Pyx_c_abs_double(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { const enum NPY_TYPES neg_one = (enum NPY_TYPES) ((enum NPY_TYPES) 0 - (enum NPY_TYPES) 1), const_zero = (enum NPY_TYPES) 0; const int 
is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(enum NPY_TYPES) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(enum NPY_TYPES) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), little, !is_unsigned); } } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (from_mvs->suboffsets[i] >= 0) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, 
sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | 
(unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * 
sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned 
long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && 
unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { 
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned 
long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char 
*bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) 
(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, 
unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned 
long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct 
__pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_dc_double(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 2, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* FunctionImport */ #ifndef __PYX_HAVE_RT_ImportFunction #define __PYX_HAVE_RT_ImportFunction static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig) { PyObject *d = 0; PyObject *cobj = 0; union { void (*fp)(void); void *p; } tmp; d = PyObject_GetAttrString(module, (char 
*)"__pyx_capi__"); if (!d) goto bad; cobj = PyDict_GetItemString(d, funcname); if (!cobj) { PyErr_Format(PyExc_ImportError, "%.200s does not export expected C function %.200s", PyModule_GetName(module), funcname); goto bad; } #if PY_VERSION_HEX >= 0x02070000 if (!PyCapsule_IsValid(cobj, sig)) { PyErr_Format(PyExc_TypeError, "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj)); goto bad; } tmp.p = PyCapsule_GetPointer(cobj, sig); #else {const char *desc, *s1, *s2; desc = (const char *)PyCObject_GetDesc(cobj); if (!desc) goto bad; s1 = desc; s2 = sig; while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; } if (*s1 != *s2) { PyErr_Format(PyExc_TypeError, "C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)", PyModule_GetName(module), funcname, sig, desc); goto bad; } tmp.p = PyCObject_AsVoidPtr(cobj);} #endif *f = tmp.fp; if (!(*f)) goto bad; Py_DECREF(d); return 0; bad: Py_XDECREF(d); return -1; } #endif /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return 
__Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; 
else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if 
(likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
/* ===== dds.c (ImageMagick DDS coder) begins below ===== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright @ 2008 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/module.h" #include "MagickCore/transform.h" /* Definitions */ #define DDSD_CAPS 0x00000001 #define DDSD_HEIGHT 0x00000002 #define DDSD_WIDTH 0x00000004 #define DDSD_PITCH 0x00000008 #define DDSD_PIXELFORMAT 0x00001000 #define DDSD_MIPMAPCOUNT 0x00020000 #define DDSD_LINEARSIZE 0x00080000 #define DDSD_DEPTH 0x00800000 #define DDPF_ALPHAPIXELS 0x00000001 #define DDPF_FOURCC 0x00000004 #define DDPF_RGB 0x00000040 #define DDPF_LUMINANCE 0x00020000 #define FOURCC_DXT1 0x31545844 #define FOURCC_DXT3 0x33545844 #define FOURCC_DXT5 0x35545844 #define FOURCC_DX10 0x30315844 #define DDSCAPS_COMPLEX 0x00000008 #define DDSCAPS_TEXTURE 0x00001000 #define DDSCAPS_MIPMAP 0x00400000 #define DDSCAPS2_CUBEMAP 0x00000200 #define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400 #define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800 #define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000 #define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000 #define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000 #define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000 #define DDSCAPS2_VOLUME 0x00200000 #define DDSEXT_DIMENSION_TEX2D 0x00000003 #define 
DDSEXTFLAGS_CUBEMAP 0x00000004 typedef enum DXGI_FORMAT { DXGI_FORMAT_UNKNOWN, DXGI_FORMAT_R32G32B32A32_TYPELESS, DXGI_FORMAT_R32G32B32A32_FLOAT, DXGI_FORMAT_R32G32B32A32_UINT, DXGI_FORMAT_R32G32B32A32_SINT, DXGI_FORMAT_R32G32B32_TYPELESS, DXGI_FORMAT_R32G32B32_FLOAT, DXGI_FORMAT_R32G32B32_UINT, DXGI_FORMAT_R32G32B32_SINT, DXGI_FORMAT_R16G16B16A16_TYPELESS, DXGI_FORMAT_R16G16B16A16_FLOAT, DXGI_FORMAT_R16G16B16A16_UNORM, DXGI_FORMAT_R16G16B16A16_UINT, DXGI_FORMAT_R16G16B16A16_SNORM, DXGI_FORMAT_R16G16B16A16_SINT, DXGI_FORMAT_R32G32_TYPELESS, DXGI_FORMAT_R32G32_FLOAT, DXGI_FORMAT_R32G32_UINT, DXGI_FORMAT_R32G32_SINT, DXGI_FORMAT_R32G8X24_TYPELESS, DXGI_FORMAT_D32_FLOAT_S8X24_UINT, DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS, DXGI_FORMAT_X32_TYPELESS_G8X24_UINT, DXGI_FORMAT_R10G10B10A2_TYPELESS, DXGI_FORMAT_R10G10B10A2_UNORM, DXGI_FORMAT_R10G10B10A2_UINT, DXGI_FORMAT_R11G11B10_FLOAT, DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_UNORM, DXGI_FORMAT_R8G8B8A8_UNORM_SRGB, DXGI_FORMAT_R8G8B8A8_UINT, DXGI_FORMAT_R8G8B8A8_SNORM, DXGI_FORMAT_R8G8B8A8_SINT, DXGI_FORMAT_R16G16_TYPELESS, DXGI_FORMAT_R16G16_FLOAT, DXGI_FORMAT_R16G16_UNORM, DXGI_FORMAT_R16G16_UINT, DXGI_FORMAT_R16G16_SNORM, DXGI_FORMAT_R16G16_SINT, DXGI_FORMAT_R32_TYPELESS, DXGI_FORMAT_D32_FLOAT, DXGI_FORMAT_R32_FLOAT, DXGI_FORMAT_R32_UINT, DXGI_FORMAT_R32_SINT, DXGI_FORMAT_R24G8_TYPELESS, DXGI_FORMAT_D24_UNORM_S8_UINT, DXGI_FORMAT_R24_UNORM_X8_TYPELESS, DXGI_FORMAT_X24_TYPELESS_G8_UINT, DXGI_FORMAT_R8G8_TYPELESS, DXGI_FORMAT_R8G8_UNORM, DXGI_FORMAT_R8G8_UINT, DXGI_FORMAT_R8G8_SNORM, DXGI_FORMAT_R8G8_SINT, DXGI_FORMAT_R16_TYPELESS, DXGI_FORMAT_R16_FLOAT, DXGI_FORMAT_D16_UNORM, DXGI_FORMAT_R16_UNORM, DXGI_FORMAT_R16_UINT, DXGI_FORMAT_R16_SNORM, DXGI_FORMAT_R16_SINT, DXGI_FORMAT_R8_TYPELESS, DXGI_FORMAT_R8_UNORM, DXGI_FORMAT_R8_UINT, DXGI_FORMAT_R8_SNORM, DXGI_FORMAT_R8_SINT, DXGI_FORMAT_A8_UNORM, DXGI_FORMAT_R1_UNORM, DXGI_FORMAT_R9G9B9E5_SHAREDEXP, DXGI_FORMAT_R8G8_B8G8_UNORM, DXGI_FORMAT_G8R8_G8B8_UNORM, 
DXGI_FORMAT_BC1_TYPELESS, DXGI_FORMAT_BC1_UNORM, DXGI_FORMAT_BC1_UNORM_SRGB, DXGI_FORMAT_BC2_TYPELESS, DXGI_FORMAT_BC2_UNORM, DXGI_FORMAT_BC2_UNORM_SRGB, DXGI_FORMAT_BC3_TYPELESS, DXGI_FORMAT_BC3_UNORM, DXGI_FORMAT_BC3_UNORM_SRGB, DXGI_FORMAT_BC4_TYPELESS, DXGI_FORMAT_BC4_UNORM, DXGI_FORMAT_BC4_SNORM, DXGI_FORMAT_BC5_TYPELESS, DXGI_FORMAT_BC5_UNORM, DXGI_FORMAT_BC5_SNORM, DXGI_FORMAT_B5G6R5_UNORM, DXGI_FORMAT_B5G5R5A1_UNORM, DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B8G8R8X8_UNORM, DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM, DXGI_FORMAT_B8G8R8A8_TYPELESS, DXGI_FORMAT_B8G8R8A8_UNORM_SRGB, DXGI_FORMAT_B8G8R8X8_TYPELESS, DXGI_FORMAT_B8G8R8X8_UNORM_SRGB, DXGI_FORMAT_BC6H_TYPELESS, DXGI_FORMAT_BC6H_UF16, DXGI_FORMAT_BC6H_SF16, DXGI_FORMAT_BC7_TYPELESS, DXGI_FORMAT_BC7_UNORM, DXGI_FORMAT_BC7_UNORM_SRGB, DXGI_FORMAT_AYUV, DXGI_FORMAT_Y410, DXGI_FORMAT_Y416, DXGI_FORMAT_NV12, DXGI_FORMAT_P010, DXGI_FORMAT_P016, DXGI_FORMAT_420_OPAQUE, DXGI_FORMAT_YUY2, DXGI_FORMAT_Y210, DXGI_FORMAT_Y216, DXGI_FORMAT_NV11, DXGI_FORMAT_AI44, DXGI_FORMAT_IA44, DXGI_FORMAT_P8, DXGI_FORMAT_A8P8, DXGI_FORMAT_B4G4R4A4_UNORM, DXGI_FORMAT_P208, DXGI_FORMAT_V208, DXGI_FORMAT_V408, DXGI_FORMAT_SAMPLER_FEEDBACK_MIN_MIP_OPAQUE, DXGI_FORMAT_SAMPLER_FEEDBACK_MIP_REGION_USED_OPAQUE, DXGI_FORMAT_FORCE_UINT } DXGI_FORMAT; #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif /* Structure declarations. 
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2, extFormat, extDimension, extFlags, extArraySize, extFlags2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _BC7Colors { unsigned char r[6], g[6], b[6], a[6]; } BC7Colors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColorLookup { DDSSourceBlock sources[2]; } DDSSingleColorLookup; typedef struct _BC7ModeInfo { unsigned char partition_bits, num_subsets, color_precision, alpha_precision, num_pbits, index_precision, index2_precision; } BC7ModeInfo; typedef MagickBooleanType DDSDecoder(const ImageInfo *,Image *,const DDSInfo *,const MagickBooleanType, ExceptionInfo *); typedef MagickBooleanType DDSPixelDecoder(Image *,const DDSInfo *,ExceptionInfo *); static const DDSSingleColorLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 
8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 } } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 29, 0 } } 
}, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { { { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 
3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } } }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { { { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 
}, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 }, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } }, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { { 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 
}, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 }, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } }, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 31, 0, 0 }, { 31, 31, 0 } } } };
/*
  Single-colour lookup for the 6-bit (green) 565 channel: for each of the
  64 possible channel values, the best (start, end, error) endpoint pair
  that reproduces it through the DXT interpolation.
*/
static const DDSSingleColorLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 },
{ 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } }
}, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { { { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } }, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } }, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { {
37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } }, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { {
51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, { { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } }, { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } }, { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } }, { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } }, { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } }, { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } }, { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } }, { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } }, { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } }, { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } }, { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } }, { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } }, { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } }, { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } }, { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } }, { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } }, { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } }, { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } }, { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } }, { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } }, { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } } };
/* Per-channel lookup selector: index 0=red (5 bit), 1=green (6 bit), 2=blue (5 bit). */
static const DDSSingleColorLookup* DDS_LOOKUP[] = { DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4 };
/* BC7 interpolation weights for 2-bit indices (continues on the next line). */
static const unsigned char BC7_weight2[] = { 0, 21,
43, 64 };
/* BC7 interpolation weights for 3-bit indices. */
static const unsigned char BC7_weight3[] = { 0, 9, 18, 27, 37, 46, 55, 64 };
/* BC7 interpolation weights for 4-bit indices. */
static const unsigned char BC7_weight4[] = { 0, 4, 9, 13, 17, 21, 26, 30, 34, 38, 43, 47, 51, 55, 60, 64 };
/* stores info for each mode of BC7 */
static const BC7ModeInfo BC7_mode_info[8] = {
  { 4, 3, 4, 0, 6, 3, 0 }, /* mode 0 */
  { 6, 2, 6, 0, 2, 3, 0 }, /* mode 1 */
  { 6, 3, 5, 0, 0, 2, 0 }, /* mode 2 */
  { 6, 2, 7, 0, 4, 2, 0 }, /* mode 3 */
  { 0, 1, 5, 6, 0, 2, 3 }, /* mode 4 */
  { 0, 1, 7, 8, 0, 2, 2 }, /* mode 5 */
  { 0, 1, 7, 7, 2, 4, 0 }, /* mode 6 */
  { 6, 2, 5, 5, 4, 2, 0 }, /* mode 7 */
};
/*
  BC7 partition shapes: for each partition id (0..63), the subset index of
  each of the 16 pixels in the block.  First table is for 2-subset modes,
  second for 3-subset modes.
*/
static const unsigned char BC7_partition_table[2][64][16] = { { /* BC7 Partition Set for 2 Subsets */ { 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 }, { 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1 }, { 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1 }, { 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1 }, { 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1 }, { 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1 }, { 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1 }, { 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1 }, { 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0 }, { 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0 }, { 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0 }, { 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1 }, { 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0 }, { 0, 0, 0,
0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0 }, { 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0 }, { 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0 }, { 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0 }, { 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 }, { 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0 }, { 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0 }, { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 }, { 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1 }, { 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0 }, { 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0 }, { 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0 }, { 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0 }, { 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1 }, { 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1 }, { 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0 }, { 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0 }, { 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0 }, { 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0 }, { 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0 }, { 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1 }, { 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1 }, { 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0 }, { 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0 }, { 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0 }, { 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0 }, { 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1 }, { 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1 }, { 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0 }, { 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0 }, { 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1 }, { 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1 }, { 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1 }, { 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1 }, { 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 }, { 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 }, { 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0 }, { 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1,
1, 0, 1, 1, 1 } }, { /* BC7 Partition Set for 3 Subsets */ { 0, 0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 1, 2, 2, 2, 2 }, { 0, 0, 0, 1, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 2, 1 }, { 0, 0, 0, 0, 2, 0, 0, 1, 2, 2, 1, 1, 2, 2, 1, 1 }, { 0, 2, 2, 2, 0, 0, 2, 2, 0, 0, 1, 1, 0, 1, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2 }, { 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2 }, { 0, 0, 2, 2, 0, 0, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1 }, { 0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1 }, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2 }, { 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2 }, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2 }, { 0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2 }, { 0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2 }, { 0, 1, 2, 2, 0, 1, 2, 2, 0, 1, 2, 2, 0, 1, 2, 2 }, { 0, 0, 1, 1, 0, 1, 1, 2, 1, 1, 2, 2, 1, 2, 2, 2 }, { 0, 0, 1, 1, 2, 0, 0, 1, 2, 2, 0, 0, 2, 2, 2, 0 }, { 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 2, 1, 1, 2, 2 }, { 0, 1, 1, 1, 0, 0, 1, 1, 2, 0, 0, 1, 2, 2, 0, 0 }, { 0, 0, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2 }, { 0, 0, 2, 2, 0, 0, 2, 2, 0, 0, 2, 2, 1, 1, 1, 1 }, { 0, 1, 1, 1, 0, 1, 1, 1, 0, 2, 2, 2, 0, 2, 2, 2 }, { 0, 0, 0, 1, 0, 0, 0, 1, 2, 2, 2, 1, 2, 2, 2, 1 }, { 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 2, 2, 0, 1, 2, 2 }, { 0, 0, 0, 0, 1, 1, 0, 0, 2, 2, 1, 0, 2, 2, 1, 0 }, { 0, 1, 2, 2, 0, 1, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0 }, { 0, 0, 1, 2, 0, 0, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2 }, { 0, 1, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1, 0, 1, 1, 0 }, { 0, 0, 0, 0, 0, 1, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1 }, { 0, 0, 2, 2, 1, 1, 0, 2, 1, 1, 0, 2, 0, 0, 2, 2 }, { 0, 1, 1, 0, 0, 1, 1, 0, 2, 0, 0, 2, 2, 2, 2, 2 }, { 0, 0, 1, 1, 0, 1, 2, 2, 0, 1, 2, 2, 0, 0, 1, 1 }, { 0, 0, 0, 0, 2, 0, 0, 0, 2, 2, 1, 1, 2, 2, 2, 1 }, { 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 2, 2, 2 }, { 0, 2, 2, 2, 0, 0, 2, 2, 0, 0, 1, 2, 0, 0, 1, 1 }, { 0, 0, 1, 1, 0, 0, 1, 2, 0, 0, 2, 2, 0, 2, 2, 2 }, { 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0 }, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0 }, { 0, 1, 2, 0, 1,
2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0 }, { 0, 1, 2, 0, 2, 0, 1, 2, 1, 2, 0, 1, 0, 1, 2, 0 }, { 0, 0, 1, 1, 2, 2, 0, 0, 1, 1, 2, 2, 0, 0, 1, 1 }, { 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 1, 1 }, { 0, 1, 0, 1, 0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2 }, { 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 2, 1, 2, 1, 2, 1 }, { 0, 0, 2, 2, 1, 1, 2, 2, 0, 0, 2, 2, 1, 1, 2, 2 }, { 0, 0, 2, 2, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 1, 1 }, { 0, 2, 2, 0, 1, 2, 2, 1, 0, 2, 2, 0, 1, 2, 2, 1 }, { 0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 0, 1, 0, 1 }, { 0, 0, 0, 0, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1 }, { 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 2, 2, 2 }, { 0, 2, 2, 2, 0, 1, 1, 1, 0, 2, 2, 2, 0, 1, 1, 1 }, { 0, 0, 0, 2, 1, 1, 1, 2, 0, 0, 0, 2, 1, 1, 1, 2 }, { 0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2 }, { 0, 2, 2, 2, 0, 1, 1, 1, 0, 1, 1, 1, 0, 2, 2, 2 }, { 0, 0, 0, 2, 1, 1, 1, 2, 1, 1, 1, 2, 0, 0, 0, 2 }, { 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 2, 2 }, { 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 1, 2 }, { 0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 2 }, { 0, 0, 2, 2, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 2, 2 }, { 0, 0, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 0, 0, 2, 2 }, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2 }, { 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1 }, { 0, 2, 2, 2, 1, 2, 2, 2, 0, 2, 2, 2, 1, 2, 2, 2 }, { 0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 }, { 0, 1, 1, 1, 2, 0, 1, 1, 2, 2, 0, 1, 2, 2, 2, 0 } } };
/*
  BC7 anchor ("fix-up") index positions: the pixel in each subset whose
  index is encoded with one less bit, per partition id.
*/
static const unsigned char BC7_anchor_index_table[4][64] = { /* Anchor index values for the first subset */ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* Anchor index values for the second subset of two-subset partitioning */ { 15,15,15,15,15,15,15,15, 15,15,15,15,15,15,15,15, 15, 2, 8, 2, 2, 8, 8,15, 2, 8, 2, 2, 8, 8, 2, 2, 15,15, 6, 8, 2, 8,15,15, 2, 8, 2, 2, 2,15,15, 6, 6, 2, 6, 8,15,15, 2, 2, 15,15,15,15,15, 2, 2,15 }, /* Anchor index values for
the second subset of three-subset partitioning */ { 3, 3,15,15, 8, 3,15,15, 8, 8, 6, 6, 6, 5, 3, 3, 3, 3, 8,15, 3, 3, 6,10, 5, 8, 8, 6, 8, 5,15,15, 8,15, 3, 5, 6,10, 8,15, 15, 3,15, 5,15,15,15,15, 3,15, 5, 5, 5, 8, 5,10, 5,10, 8,13,15,12, 3, 3 }, /* Anchor index values for the third subset of three-subset partitioning */ { 15, 8, 8, 3,15,15, 3, 8, 15,15,15,15,15,15,15, 8, 15, 8,15, 3,15, 8,15, 8, 3,15, 6,10,15,15,10, 8, 15, 3,15,10,10, 8, 9,10, 6,15, 8,15, 3, 6, 6, 8, 15, 3,15,15,15,15,15,15, 15,15,15,15, 3,15,15, 8 } };

/*
  Macros
*/
/* Extract the raw 5/6/5 fields from a packed RGB565 value. */
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
/* Expand 5/6-bit fields to 8 bits by replicating the top bits. */
#define C565_red(x)   ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x)  ( (C565_b(x) << 3 | C565_b(x) >> 2))
/* Halve a mipmap dimension, never going below 1. */
#define DIV2(x)  ((x) > 1 ? ((x) >> 1) : 1)
/* Widen [min,max] to span at least `steps` levels, clamped to 0..255. */
#define FixRange(min, max, steps) \
if (min > max) \
  min = max; \
if ((ssize_t) max - min < steps) \
  max = MagickMin(min + steps, 255); \
if ((ssize_t) max - min < steps) \
  min = MagickMax(0, (ssize_t) max - steps)

#define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z)

#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
  = value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value

/* True when a pixel-format bitmask matches the given channel masks exactly. */
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
  g && mask.b_bitmask == b && mask.alpha_bitmask == a)

/*
  Forward declarations
*/
static MagickBooleanType
  WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *);

/* Component-wise add of two 4-vectors. */
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
  DDSVector4 *destination)
{
  destination->x = left.x + right.x;
  destination->y = left.y + right.y;
  destination->z = left.z + right.z;
  destination->w = left.w + right.w;
}

/* Clamp each component of a 4-vector to [0,1]. */
static inline void VectorClamp(DDSVector4 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
  value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
}

/* Clamp each component of a 3-vector to [0,1]. */
static inline void VectorClamp3(DDSVector3 *value)
{
  value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
  value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
  value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
}

/* Copy the xyz components of a 4-vector into a 3-vector (w is dropped). */
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
}

/* Copy all four components of a 4-vector. */
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  destination->x = source.x;
  destination->y = source.y;
  destination->z = source.z;
  destination->w = source.w;
}

/* destination = c - a*b, component-wise. */
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = c.x - (a.x * b.x);
  destination->y = c.y - (a.y * b.y);
  destination->z = c.z - (a.z * b.z);
  destination->w = c.w - (a.w * b.w);
}

/* Component-wise product of two 4-vectors. */
static inline void VectorMultiply(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
  destination->w = left.w * right.w;
}

/* Component-wise product of two 3-vectors. */
static inline void VectorMultiply3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x * right.x;
  destination->y = left.y * right.y;
  destination->z = left.z * right.z;
}

/* destination = a*b + c, component-wise (fused multiply-add, 4-vector). */
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
  destination->w = (a.w * b.w) + c.w;
}

/* destination = a*b + c, component-wise (3-vector). */
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *destination)
{
  destination->x = (a.x * b.x) + c.x;
  destination->y = (a.y * b.y) + c.y;
  destination->z = (a.z * b.z) + c.z;
}

/* Component-wise reciprocal; no guard against zero components. */
static inline void VectorReciprocal(const DDSVector4 value, DDSVector4
  *destination)
{
  destination->x = 1.0f / value.x;
  destination->y = 1.0f / value.y;
  destination->z = 1.0f / value.z;
  destination->w = 1.0f / value.w;
}

/* Component-wise difference of two 4-vectors. */
static inline void VectorSubtract(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
  destination->w = left.w - right.w;
}

/* Component-wise difference of two 3-vectors. */
static inline void VectorSubtract3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->x = left.x - right.x;
  destination->y = left.y - right.y;
  destination->z = left.z - right.z;
}

/* Truncate each component toward zero (floor for positive, ceil for
   negative). */
static inline void VectorTruncate(DDSVector4 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
  value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w);
}

/* 3-vector variant of VectorTruncate. */
static inline void VectorTruncate3(DDSVector3 *value)
{
  value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
  value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
  value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
}

/*
  Round `value` to the nearest integer and clamp to [0, limit].
  NOTE(review): `result` is size_t (unsigned), so `result < 0.0f` can never
  be true; a negative `value` wraps to a huge unsigned number and is caught
  by the `result > limit` check instead, yielding `limit` rather than 0 —
  confirm whether that is the intended behavior for negative inputs.
*/
static inline size_t ClampToLimit(const float value, const size_t limit)
{
  size_t result = (int) (value + 0.5f);
  if (result < 0.0f)
    return(0);
  if (result > limit)
    return(limit);
  return result;
}

/* Pack a [0,1] RGB colour into a 16-bit RGB565 value. */
static inline size_t ColorTo565(const DDSVector3 point)
{
  size_t r = ClampToLimit(31.0f*point.x,31);
  size_t g = ClampToLimit(63.0f*point.y,63);
  size_t b = ClampToLimit(31.0f*point.z,31);
  return (r << 11) | (g << 5) | b;
}

/* BC7: subset index of a pixel for a given partition; 0 for 1-subset
   modes. */
static inline unsigned char GetSubsetIndex(unsigned char numSubsets,
  unsigned char partition_id,size_t pixelIndex)
{
  if (numSubsets == 2)
    return BC7_partition_table[0][partition_id][pixelIndex];
  if (numSubsets == 3)
    return BC7_partition_table[1][partition_id][pixelIndex];
  return 0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s D D S                                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsDDS() returns MagickTrue if the image format type, identified by the
%  magick string, is DDS.
%
%  The format of the IsDDS method is:
%
%      MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
%  A description of each parameter follows:
%
%    o magick: compare image format pattern against these bytes.
%
%    o length: Specifies the length of the magick string.
% */ static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((char *) magick,"DDS ", 4) == 0) return(MagickTrue); return(MagickFalse); } static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info) { size_t hdr_size, required; /* Seek to start of header */ (void) SeekBlob(image, 4, SEEK_SET); /* Check header field */ hdr_size = ReadBlobLSBLong(image); if (hdr_size != 124) return MagickFalse; /* Fill in DDS info struct */ dds_info->flags = ReadBlobLSBLong(image); /* Check required flags */ required=(size_t) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); if ((dds_info->flags & required) != required) return MagickFalse; dds_info->height = ReadBlobLSBLong(image); dds_info->width = ReadBlobLSBLong(image); dds_info->pitchOrLinearSize = ReadBlobLSBLong(image); dds_info->depth = ReadBlobLSBLong(image); dds_info->mipmapcount = ReadBlobLSBLong(image); (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */ /* Read pixel format structure */ hdr_size = ReadBlobLSBLong(image); if (hdr_size != 32) return MagickFalse; dds_info->pixelformat.flags = ReadBlobLSBLong(image); dds_info->pixelformat.fourcc = ReadBlobLSBLong(image); dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image); dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image); dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image); dds_info->ddscaps1 = ReadBlobLSBLong(image); dds_info->ddscaps2 = ReadBlobLSBLong(image); (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */ /* Read optional DX10 header if available */ if ((dds_info->pixelformat.flags & DDPF_FOURCC) && (dds_info->pixelformat.fourcc == FOURCC_DX10)) { dds_info->extFormat = ReadBlobLSBLong(image); dds_info->extDimension = ReadBlobLSBLong(image); dds_info->extFlags = ReadBlobLSBLong(image); 
dds_info->extArraySize = ReadBlobLSBLong(image); dds_info->extFlags2 = ReadBlobLSBLong(image); } else { dds_info->extFormat = 0; dds_info->extDimension = 0; dds_info->extFlags = 0; dds_info->extArraySize = 0; dds_info->extFlags2 = 0; } return(MagickTrue); } static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y, DDSColors colors,size_t bits,Quantum *q) { ssize_t i; ssize_t j; unsigned char code; for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows) { code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3); SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q); if ((colors.a[code] != 0) && (image->alpha_trait == UndefinedPixelTrait)) return(MagickFalse); q+=GetPixelChannels(image); } } } return(MagickTrue); } static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image, const DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception) { MagickBooleanType status; /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } status=MagickTrue; if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { AcquireNextImage(image_info,image,exception); if (image->next == (Image *) NULL) return(MagickFalse); image->next->alpha_trait=image->alpha_trait; image=SyncNextImageInList(image); status=SetImageExtent(image,w,h,exception); if (status == MagickFalse) break; 
status=decoder(image,dds_info,exception); if (status == MagickFalse) break; if ((w == 1) && (h == 1)) break; w=DIV2(w); h=DIV2(h); } } return(status); } static void CalculateColors(unsigned short c0, unsigned short c1, DDSColors *c, MagickBooleanType ignoreAlpha) { c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0; c->r[0] = (unsigned char) C565_red(c0); c->g[0] = (unsigned char) C565_green(c0); c->b[0] = (unsigned char) C565_blue(c0); c->r[1] = (unsigned char) C565_red(c1); c->g[1] = (unsigned char) C565_green(c1); c->b[1] = (unsigned char) C565_blue(c1); if (ignoreAlpha != MagickFalse || c0 > c1) { c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3); c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3); c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3); c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3); c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3); c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3); } else { c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2); c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2); c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2); c->r[3] = c->g[3] = c->b[3] = 0; c->a[3] = 255; } } static MagickBooleanType ReadDXT1Pixels(Image *image, const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { DDSColors colors; Quantum *q; ssize_t x; size_t bits; ssize_t y; unsigned short c0, c1; magick_unreferenced(dds_info); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read 8 bytes of data from the image */ c0=ReadBlobLSBShort(image); c1=ReadBlobLSBShort(image); bits=ReadBlobLSBLong(image); CalculateColors(c0,c1,&colors,MagickFalse); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Write the pixels */ if (SetDXT1Pixels(image,x,y,colors,bits,q) == 
MagickFalse) { /* Correct alpha */ SetImageAlpha(image,QuantumRange,exception); q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x), MagickMin(4,image->rows-y),exception); if (q != (Quantum *) NULL) SetDXT1Pixels(image,x,y,colors,bits,q); } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); } if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } /* Skip the mipmap images for compressed (DXTn) dds files */ static MagickBooleanType SkipDXTMipmaps(Image *image,const DDSInfo *dds_info, int texel_size,ExceptionInfo *exception) { /* Only skip mipmaps for textures and cube maps */ if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile", image->filename); return(MagickFalse); } if (dds_info->ddscaps1 & DDSCAPS_MIPMAP && (dds_info->ddscaps1 & DDSCAPS_TEXTURE || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)) { MagickOffsetType offset; ssize_t i; size_t h, w; w=DIV2(dds_info->width); h=DIV2(dds_info->height); /* Mipmapcount includes the main image, so start from one */ for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++) { offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size; if (SeekBlob(image,offset,SEEK_CUR) < 0) break; w=DIV2(w); h=DIV2(h); if ((w == 1) && (h == 1)) break; } } return(MagickTrue); } static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image, const DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,8,exception)); } static MagickBooleanType ReadDXT3Pixels(Image *image, const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception) { DDSColors colors; Quantum *q; ssize_t i, x; unsigned char alpha; size_t a0, a1, bits, code; ssize_t j, y; unsigned short c0, c1; 
magick_unreferenced(dds_info); for (y = 0; y < (ssize_t) image->rows; y += 4) { for (x = 0; x < (ssize_t) image->columns; x += 4) { /* Get 4x4 patch of pixels to write on */ q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x), MagickMin(4, image->rows - y),exception); if (q == (Quantum *) NULL) return(MagickFalse); /* Read alpha values (8 bytes) */ a0 = ReadBlobLSBLong(image); a1 = ReadBlobLSBLong(image); /* Read 8 bytes of data from the image */ c0 = ReadBlobLSBShort(image); c1 = ReadBlobLSBShort(image); bits = ReadBlobLSBLong(image); CalculateColors(c0, c1, &colors, MagickTrue); if (EOFBlob(image) != MagickFalse) return(MagickFalse); /* Write the pixels */ for (j = 0; j < 4; j++) { for (i = 0; i < 4; i++) { if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows) { code = (bits >> ((4*j+i)*2)) & 0x3; SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q); SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q); SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q); /* Extract alpha value: multiply 0..15 by 17 to get range 0..255 */ if (j < 2) alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf); else alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf); SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q); q+=GetPixelChannels(image); } } } if (SyncAuthenticPixels(image,exception) == MagickFalse) return(MagickFalse); } if (EOFBlob(image) != MagickFalse) return(MagickFalse); } return(MagickTrue); } static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image, const DDSInfo *dds_info,const MagickBooleanType read_mipmaps, ExceptionInfo *exception) { if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse) return(MagickFalse); if (read_mipmaps != MagickFalse) return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception)); else return(SkipDXTMipmaps(image,dds_info,16,exception)); } static MagickBooleanType ReadDXT5Pixels(Image *image, const DDSInfo 
  *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  MagickSizeType
    alpha_bits;

  Quantum
    *q;

  ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);

  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read alpha values (8 bytes) */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);
      /* Assemble the 48-bit little-endian alpha selector field (4 + 2 bytes). */
      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /*
                Extract alpha value per the BC3 spec: codes 0/1 select the
                endpoints; with a0 > a1 the remaining 6 codes interpolate,
                otherwise 4 codes interpolate and 6/7 map to 0 and 255.
              */
              alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
              if (alpha_code == 0)
                alpha = a0;
              else if (alpha_code == 1)
                alpha = a1;
              else if (a0 > a1)
                alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
              else if (alpha_code == 6)
                alpha = 0;
              else if (alpha_code == 7)
                alpha = 255;
              else
                alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
  Decode a DXT5 (BC3) surface: main image plus mipmaps (decoded or skipped;
  16 bytes per block).
*/
static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception));
  else
    return(SkipDXTMipmaps(image,dds_info,16,exception));
}

/*
  Read one bit from a 16-byte (128-bit) BC7 block at *start_bit and advance
  the cursor.  Reads past bit 127 return 0.
*/
static unsigned char GetBit(const unsigned char *block,size_t *start_bit)
{
  size_t
    base,
    index;

  index=(*start_bit) >> 3;       /* byte offset */
  base=(*start_bit) - (index << 3);  /* bit offset within the byte */
  (*start_bit)++;
  if (index > 15)
    return(0);
  return((block[index] >> base) & 0x01);
}

/*
  Read num_bits (LSB-first, possibly straddling a byte boundary) from a
  16-byte BC7 block at *start_bit and advance the cursor.

  NOTE(review): when index == 15 and base + num_bits > 8 this reads
  block[16], one byte past the 16-byte block buffer — confirm callers can
  never position a straddling read in the last byte, or bound the access.
*/
static unsigned char GetBits(const unsigned char *block,size_t *start_bit,
  unsigned char num_bits)
{
  size_t
    base,
    first_bits,
    index,
    next_bits;

  unsigned char
    ret;

  index=(*start_bit) >> 3;
  base=(*start_bit)-(index << 3);
  if (index > 15)
    return(0);
  if (base + num_bits > 8)
    {
      /* Read straddles a byte boundary: combine the two partial reads. */
      first_bits=8-base;
      next_bits=num_bits-first_bits;
      ret=((block[index] >> base) |
        (((block[index + 1]) & ((1u << next_bits) - 1)) << first_bits));
    }
  else
    {
      ret=((block[index] >> base) & ((1 << num_bits) - 1));
    }
  (*start_bit)+=num_bits;
  return(ret);
}

/*
  Return MagickTrue when pixelIndex is the anchor index of its subset for the
  given BC7 partition; anchor indices are stored with one fewer bit.
*/
static MagickBooleanType IsPixelAnchorIndex(unsigned char subset_index,
  unsigned char num_subsets,size_t pixelIndex,unsigned char partition_id)
{
  size_t
    table_index;

  /* for first subset */
  if (subset_index == 0)
    table_index=0;
  /* for second subset of two subset partitioning */
  else if ((subset_index == 1) && (num_subsets == 2))
    table_index=1;
  /* for second subset of three subset partitioning */
  else if ((subset_index == 1) && (num_subsets == 3))
    table_index=2;
  /* for third subset of three subset partitioning */
  else
    table_index=3;
  if (BC7_anchor_index_table[table_index][partition_id] == pixelIndex)
    return(MagickTrue);
  else
    return(MagickFalse);
}

/*
  Extract the BC7 endpoint colors for every subset of the block: raw R/G/B
  (and alpha for modes >= 4) fields, optional shared or per-endpoint p-bits,
  then replicate bits so each channel fills the full 0..255 range.
*/
static void ReadEndpoints(BC7Colors *endpoints,const unsigned char *block,
  size_t mode,size_t *start_bit)
{
  MagickBooleanType
    has_alpha,
    has_pbits;

  unsigned char
    alpha_bits,
    color_bits,
    pbit,
    pbit0,
    pbit1;

  size_t
    num_subsets,
    i;

  num_subsets=(size_t) BC7_mode_info[mode].num_subsets;
  color_bits=BC7_mode_info[mode].color_precision;
  /* Endpoints are stored channel-major: all reds, all greens, all blues. */
  /* red */
  for (i=0; i < num_subsets * 2; i++)
    endpoints->r[i]=GetBits(block,start_bit,color_bits);
  /* green */
  for (i=0; i < num_subsets * 2; i++)
    endpoints->g[i]=GetBits(block,start_bit,color_bits);
  /* blue */
  for (i=0; i < num_subsets * 2; i++)
    endpoints->b[i]=GetBits(block,start_bit,color_bits);
  /* alpha */
  alpha_bits=BC7_mode_info[mode].alpha_precision;
  has_alpha=mode >= 4 ? MagickTrue : MagickFalse;
  if (has_alpha != MagickFalse)
    {
      for (i=0; i < num_subsets * 2; i++)
        endpoints->a[i]=GetBits(block,start_bit,alpha_bits);
    }
  /* handle modes that have p bits */
  has_pbits=(mode == 0) || (mode == 1) || (mode == 3) || (mode == 6) ||
    (mode == 7) ? MagickTrue : MagickFalse;
  if (has_pbits != MagickFalse)
    {
      /* Make room for the p-bit in the least significant position. */
      for (i=0; i < num_subsets * 2; i++)
      {
        endpoints->r[i] <<= 1;
        endpoints->g[i] <<= 1;
        endpoints->b[i] <<= 1;
        endpoints->a[i] <<= 1;
      }
      /* mode 1 shares a p-bit for both endpoints */
      if (mode == 1)
        {
          pbit0=GetBit(block,start_bit);
          pbit1=GetBit(block,start_bit);
          endpoints->r[0] |= pbit0;
          endpoints->g[0] |= pbit0;
          endpoints->b[0] |= pbit0;
          endpoints->r[1] |= pbit0;
          endpoints->g[1] |= pbit0;
          endpoints->b[1] |= pbit0;
          endpoints->r[2] |= pbit1;
          endpoints->g[2] |= pbit1;
          endpoints->b[2] |= pbit1;
          endpoints->r[3] |= pbit1;
          endpoints->g[3] |= pbit1;
          endpoints->b[3] |= pbit1;
        }
      else
        {
          /* All other p-bit modes store one p-bit per endpoint. */
          for (i=0; i < num_subsets * 2; i++)
          {
            pbit=GetBit(block,start_bit);
            endpoints->r[i] |= pbit;
            endpoints->g[i] |= pbit;
            endpoints->b[i] |= pbit;
            endpoints->a[i] |= pbit;
          }
        }
    }
  /* 1 bit increased due to the pbit */
  if (has_pbits != MagickFalse)
    {
      color_bits++;
      alpha_bits++;
    }
  /* color and alpha bit shifting so that MSB lies in bit 7 */
  for (i=0; i < num_subsets * 2; i++)
  {
    endpoints->r[i] <<= (8 - color_bits);
    endpoints->g[i] <<= (8 - color_bits);
    endpoints->b[i] <<= (8 - color_bits);
    endpoints->a[i] <<= (8 - alpha_bits);
    /* Replicate the top bits into the low bits (standard bit expansion). */
    endpoints->r[i]=endpoints->r[i] | (endpoints->r[i] >> color_bits);
    endpoints->g[i]=endpoints->g[i] | (endpoints->g[i] >> color_bits);
    endpoints->b[i]=endpoints->b[i] | (endpoints->b[i] >> color_bits);
    endpoints->a[i]=endpoints->a[i] | (endpoints->a[i] >> alpha_bits);
  }
  /* Opaque modes (< 4) carry no alpha field: force fully opaque. */
  if (has_alpha == MagickFalse)
    {
      for (i=0; i < num_subsets * 2; i++)
        endpoints->a[i]=255;
    }
}

/*
  Decode BC7 pixel data: for each 16-byte block determine the mode from the
  leading unary prefix, then read partition/rotation/selector fields,
  endpoints and index fields, and interpolate the 4x4 texel colors.
*/
static MagickBooleanType ReadBC7Pixels(Image *image,
  const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  BC7Colors
    colors;

  Quantum
    *q;

  size_t
    mode,
    start_bit;

  ssize_t
    count,
    i,
    x,
    y;

  unsigned char
    a,
    alpha_indices[16],
    b,
    block[16],
    c0,
    c1,
    color_indices[16],
    g,
    index_prec,
    index2_prec,
    num_bits,
    num_subsets,
    partition_id,
    r,
    rotation,
    selector_bit,
    subset_indices[16],
    weight;

  magick_unreferenced(dds_info);

  memset(alpha_indices,0,sizeof(alpha_indices));
  memset(block,0,sizeof(block));
  memset(color_indices,0,sizeof(color_indices));
  memset(subset_indices,0,sizeof(subset_indices));
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      size_t
        area;

      /* Get 4x4 patch of pixels to write on */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read 16 bytes of data from the image */
      count=ReadBlob(image,16,block);
      if (count != 16)
        return(MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /*
        Get the mode of the block: the mode number is the position of the
        first set bit (unary prefix of up to 8 bits).
      */
      start_bit=0;
      while (start_bit <= 8 && !GetBit(block, &start_bit)) {}
      mode=start_bit-1;
      if (mode > 7)
        return(MagickFalse);
      num_subsets=BC7_mode_info[mode].num_subsets;
      partition_id=0;
      /* only these modes have more than 1 subset */
      if ((mode == 0) || (mode == 1) || (mode == 2) || (mode == 3) ||
          (mode == 7))
        {
          partition_id=GetBits(block,&start_bit,
            BC7_mode_info[mode].partition_bits);
          if (partition_id > 63)
            return(MagickFalse);
        }
      rotation=0;
      if ((mode == 4) || (mode == 5))
        rotation=GetBits(block,&start_bit,2);
      selector_bit=0;
      if (mode == 4)
        selector_bit=GetBit(block, &start_bit);
      ReadEndpoints(&colors,block,mode,&start_bit);
      index_prec=BC7_mode_info[mode].index_precision;
      index2_prec=BC7_mode_info[mode].index2_precision;
      /*
        Mode 4 with selector bit set swaps the index widths: 3-bit color
        indices and 2-bit alpha indices (anchor alpha index is 1 bit).
      */
      if ((mode == 4) && (selector_bit == 1))
        {
          index_prec=3;
          alpha_indices[0]=GetBit(block,&start_bit);
          for (i = 1; i < 16; i++)
            alpha_indices[i]=GetBits(block,&start_bit,2);
        }
      /* get color and subset indices */
      for (i=0; i < 16; i++)
      {
        subset_indices[i]=GetSubsetIndex(num_subsets,partition_id,i);
        num_bits=index_prec;
        /* Anchor indices are stored with one fewer bit (implicit MSB = 0). */
        if (IsPixelAnchorIndex(subset_indices[i],num_subsets,i,partition_id))
          num_bits--;
        color_indices[i]=GetBits(block,&start_bit,num_bits);
      }
      /* get alpha indices if the block has it */
      if ((mode == 5) || ((mode == 4) && (selector_bit == 0)))
        {
          alpha_indices[0]=GetBits(block,&start_bit,index2_prec - 1);
          for (i=1; i < 16; i++)
            alpha_indices[i]=GetBits(block,&start_bit,index2_prec);
        }
      /* Write the pixels */
      area=MagickMin(MagickMin(4,image->columns-x)*MagickMin(4,image->rows-y),
        16);
      for (i=0; i < (ssize_t) area; i++)
      {
        unsigned char
          c2;

        /* c0/c1 are the low/high endpoints of this texel's subset. */
        c0=2 * subset_indices[i];
        c1=(2 * subset_indices[i]) + 1;
        c2=color_indices[i];
        weight=64;
        /* Color Interpolation */
        switch(index_prec)
        {
          case 2:
            if (c2 < sizeof(BC7_weight2))
              weight=BC7_weight2[c2];
            break;
          case 3:
            if (c2 < sizeof(BC7_weight3))
              weight=BC7_weight3[c2];
            break;
          default:
            if (c2 < sizeof(BC7_weight4))
              weight=BC7_weight4[c2];
        }
        /* Fixed-point lerp between endpoints: (1-w)*e0 + w*e1 in 1/64ths. */
        r=((64 - weight) * colors.r[c0] + weight * colors.r[c1] + 32) >> 6;
        g=((64 - weight) * colors.g[c0] + weight * colors.g[c1] + 32) >> 6;
        b=((64 - weight) * colors.b[c0] + weight * colors.b[c1] + 32) >> 6;
        a=((64 - weight) * colors.a[c0] + weight * colors.a[c1] + 32) >> 6;
        /* Interpolate alpha for mode 4 and 5 blocks */
        if (mode == 4 || mode == 5)
          {
            unsigned char
              a0;

            a0=alpha_indices[i];
            if (a0 < sizeof(BC7_weight2))
              weight=BC7_weight2[a0];
            if ((mode == 4) && (selector_bit == 0) &&
                (a0 < sizeof(BC7_weight3)))
              weight=BC7_weight3[a0];
            if ((c0 < sizeof(colors.a)) && (c1 < sizeof(colors.a)))
              a=((64 - weight) * colors.a[c0] + weight * colors.a[c1] + 32)
                >> 6;
          }
        /* Undo the channel rotation stored in mode 4/5 blocks. */
        switch (rotation)
        {
          case 1:
            Swap(a,r);
            break;
          case 2:
            Swap(a,g);
            break;
          case 3:
            Swap(a,b);
            break;
        }
        SetPixelRed(image,ScaleCharToQuantum((unsigned char)r),q);
        SetPixelGreen(image,ScaleCharToQuantum((unsigned char)g),q);
        SetPixelBlue(image,ScaleCharToQuantum((unsigned char)b),q);
        SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)a),q);
        q+=GetPixelChannels(image);
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
  Decode a BC7 surface: main image plus mipmaps (decoded or skipped; 16 bytes
  per block).
*/
static MagickBooleanType ReadBC7(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadBC7Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadBC7Pixels,exception));
  else
    return(SkipDXTMipmaps(image,dds_info,16,exception));
}

/*
  Decode uncompressed RGB (or grayscale / RGB565) pixel data, one scanline at
  a time.  The per-pixel layout is chosen from rgb_bitcount / extFormat:
  8-bit gray, 16-bit RGB565, or byte-ordered BGR (with a padding byte for
  32-bit BGRX).
*/
static MagickBooleanType ReadUncompressedRGBPixels(Image *image,
  const DDSInfo *dds_info,ExceptionInfo *exception)
{
  Quantum
    *q;

  ssize_t
    x,
    y;

  unsigned short
    color;

  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 8 ||
          dds_info->extFormat == DXGI_FORMAT_R8_UNORM)
        SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q);
      else if (dds_info->pixelformat.rgb_bitcount == 16 ||
               dds_info->extFormat == DXGI_FORMAT_B5G6R5_UNORM)
        {
          /* RGB565: isolate each field by shifting, scale to 0..255. */
          color=ReadBlobShort(image);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
        }
      else
        {
          /* 24/32-bit data is stored BGR(X) in the file. */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          if (dds_info->pixelformat.rgb_bitcount == 32 ||
              dds_info->extFormat == DXGI_FORMAT_B8G8R8X8_UNORM)
            (void) ReadBlobByte(image);  /* discard the X padding byte */
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
  Skip the mipmap images for uncompressed (RGB or RGBA) dds files.

  Same structure as SkipDXTMipmaps but sized in whole pixels (pixel_size
  bytes each) rather than 4x4 blocks.

  NOTE(review): reports CorruptImageError here where SkipDXTMipmaps reports
  CorruptImageWarning for the same condition — confirm the severity
  difference is intended.
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,const DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  /* Only skip mipmaps for textures and cube maps */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /* Mipmapcount includes the main image, so start from one */
      for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        offset=(MagickOffsetType)w*h*pixel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}

/*
  Decode an uncompressed RGB surface.  Grayscale sources switch the image
  type; 16-bit sources must match the RGB565 bit masks.

  NOTE(review): the skip path always assumes 3 bytes/pixel even for the
  8-bit and 16-bit layouts — verify mipmap offsets for those formats.
*/
static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info,
  Image *image,const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (dds_info->pixelformat.rgb_bitcount == 8 ||
      dds_info->extFormat == DXGI_FORMAT_R8_UNORM)
    (void) SetImageType(image,GrayscaleType,exception);
  else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
      dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
    ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
      image->filename);
  if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels,
      exception));
  else
    return(SkipRGBMipmaps(image,dds_info,3,exception));
}

/*
  Decode uncompressed RGBA pixel data.  16-bit sources are classified by
  their bit masks into 1-, 2- (gray+alpha) or 4-bit alpha layouts; byte
  sources are either RGBA (DXGI / 0xff000000-alpha mask) or BGRA.
*/
static MagickBooleanType ReadUncompressedRGBAPixels(Image *image,
  const DDSInfo *dds_info,ExceptionInfo *exception)
{
  Quantum
    *q;

  ssize_t
    alphaBits,
    x,
    y;

  unsigned short
    color;

  /* Classify the 16-bit layout by its channel masks. */
  alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;          /* A1R5G5B5 */
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          alphaBits=2;        /* 8-bit gray + 8-bit alpha */
          (void) SetImageType(image,GrayscaleAlphaType,exception);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;          /* A4R4G4B4 */
      else
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }
  if (dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM)
    alphaBits=1;
  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16 ||
          dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM)
        {
          color=ReadBlobShort(image);
          if (alphaBits == 1)
            {
              /* A1R5G5B5: top bit is coverage alpha. */
              SetPixelAlpha(image,(color & (1 << 15)) ?
  *image,const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  /*
    Decode an uncompressed RGBA surface: main image plus mipmaps (decoded or
    skipped at 4 bytes per pixel).
  */
  if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels,
      exception));
  else
    return(SkipRGBMipmaps(image,dds_info,4,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d D D S I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadDDSImage() reads a DirectDraw Surface image file and returns it.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the ReadDDSImage method is:
%
%      Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: The image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  const char
    *option;

  CompressionType
    compression;

  DDSInfo
    dds_info;

  DDSDecoder
    *decoder;

  Image
    *image;

  MagickBooleanType
    status,
    cubemap,
    volume,
    read_mipmaps;

  PixelTrait
    alpha_trait;

  size_t
    n,
    num_images;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cubemap=MagickFalse, volume=MagickFalse, read_mipmaps=MagickFalse;
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Initialize image structure.
  */
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;
  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;
  /*
    Determine pixel format: choose the decoder callback, the compression
    type recorded on the image, and whether the image carries alpha.
  */
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          alpha_trait = BlendPixelTrait;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          alpha_trait = UndefinedPixelTrait;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        case FOURCC_DX10:
        {
          /* DX10 extension header: only plain 2D textures are supported. */
          if (dds_info.extDimension != DDSEXT_DIMENSION_TEX2D)
            {
              ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
            }
          switch (dds_info.extFormat)
          {
            case DXGI_FORMAT_R8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_B5G6R5_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_B5G5R5A1_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_B8G8R8A8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_R8G8B8A8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_B8G8R8X8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_BC1_UNORM:
            {
              alpha_trait = UndefinedPixelTrait;
              compression = DXT1Compression;
              decoder = ReadDXT1;
              break;
            }
            case DXGI_FORMAT_BC2_UNORM:
            {
              alpha_trait = BlendPixelTrait;
              compression = DXT3Compression;
              decoder = ReadDXT3;
              break;
            }
            case DXGI_FORMAT_BC3_UNORM:
            {
              alpha_trait = BlendPixelTrait;
              compression = DXT5Compression;
              decoder = ReadDXT5;
              break;
            }
            case DXGI_FORMAT_BC7_UNORM:
            case DXGI_FORMAT_BC7_UNORM_SRGB:
            {
              alpha_trait = BlendPixelTrait;
              compression = BC7Compression;
              decoder = ReadBC7;
              break;
            }
            default:
            {
              /* Unknown format */
              ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
            }
          }
          if (dds_info.extFlags & DDSEXTFLAGS_CUBEMAP)
            cubemap = MagickTrue;
          /*
            NOTE(review): this assignment is overwritten by the unconditional
            "num_images = 1" below before it is ever used — confirm whether
            DX10 texture arrays are meant to honor extArraySize here.
          */
          num_images = dds_info.extArraySize;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /* Neither compressed nor uncompressed... thus unsupported */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }
  num_images = 1;
  if (cubemap)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }
  if (volume)
    num_images = dds_info.depth;
  /* Sanity-check the frame count against the blob size before allocating. */
  if ((num_images == 0) || (num_images > GetBlobSize(image)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
    ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");
  /* Mipmaps are decoded as extra frames unless dds:skip-mipmaps is set. */
  option=GetImageOption(image_info,"dds:skip-mipmaps");
  if (IsStringFalse(option) != MagickFalse)
    read_mipmaps=MagickTrue;
  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        /* Start a new image */
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }
    image->alpha_trait=alpha_trait;
    image->compression=compression;
    image->columns=dds_info.width;
    image->rows=dds_info.height;
    image->storage_class=DirectClass;
    image->endian=LSBEndian;
    image->depth=8;
    if (image_info->ping != MagickFalse)
      {
        /* Ping mode: header information only, no pixel decoding. */
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows,exception);
    if (status == MagickFalse)
      return(DestroyImageList(image));
    (void) SetImageBackgroundColor(image,exception);
    /* Invoke the decoder selected above for this frame. */
    status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
    if (status == MagickFalse)
      {
        /* Keep frames decoded so far, unless the very first one failed. */
        (void) CloseBlob(image);
        if (n == 0)
          return(DestroyImageList(image));
        return(GetFirstImageInList(image));
      }
  }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e g i s t e r D D S I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterDDSImage() adds attributes for the DDS image format to
%  the list of supported formats.  The attributes include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterDDSImage method is:
%
%      RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
  MagickInfo
    *entry;

  /* Register the "DDS", "DXT1" and "DXT5" aliases with the same handlers. */
  entry = AcquireMagickInfo("DDS","DDS","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry = AcquireMagickInfo("DDS","DXT1","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry = AcquireMagickInfo("DDS","DXT5","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
  Remap 16 per-pixel index values through an ordering map; entries mapped to
  -1 (transparent pixels) are forced to selector 3.
*/
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  ssize_t
    i;

  for (i = 0; i < 16; i++)
  {
    if (map[i] == -1)
      target[i] = 3;
    else
      target[i] = source[map[i]];
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r D D S I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterDDSImage() removes format registrations made by the
%  DDS module from the list of supported formats.
%
%  The format of the UnregisterDDSImage method is:
%
%      UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
  (void) UnregisterMagickInfo("DDS");
  (void) UnregisterMagickInfo("DXT1");
  (void) UnregisterMagickInfo("DXT5");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e D D S I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format.
%
%  The format of the WriteDDSImage method is:
%
%      MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image:  The image.
%
*/

/*
  Quantize 16 alpha samples against the 8-entry DXT5 alpha codebook built
  from endpoints min/max ("steps" interpolated codes, plus fixed codes 0 and
  255 in slots 6/7).  Writes the selector for each sample into indices
  (transparent samples, alphas[i] == -1, map to code 0) and returns the
  total squared quantization error.
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  unsigned char
    codes[8];

  ssize_t
    i;

  size_t
    error,
    index,
    j,
    least,
    value;

  codes[0] = (unsigned char) min;
  codes[1] = (unsigned char) max;
  codes[6] = 0;
  codes[7] = 255;
  for (i=1; i <  (ssize_t) steps; i++)
    codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);
  error = 0;
  for (i=0; i<16; i++)
  {
    if (alphas[i] == -1)
      {
        indices[i] = 0;
        continue;
      }
    value = alphas[i];
    least = SIZE_MAX;
    index = 0;
    /* Pick the nearest codebook entry by squared distance. */
    for (j=0; j<8; j++)
    {
      size_t
        dist;

      dist = value - (size_t)codes[j];
      dist *= dist;   /* unsigned wraparound squares to the correct value */
      if (dist < least)
        {
          least = dist;
          index = j;
        }
    }
    indices[i] = (unsigned char)index;
    error += least;
  }
  return error;
}

/*
  Order the points by their projection onto "axis" (insertion sort on the
  dot products) and accumulate the weighted points and their sum for the
  cluster-fit solver.  Returns MagickFalse when the resulting ordering
  duplicates one produced in an earlier iteration (nothing new to try).
*/
static MagickBooleanType ConstructOrdering(const size_t count,
  const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
  DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
  float
    dps[16],
    f;

  ssize_t
    i;

  size_t
    j;

  unsigned char
    c,
    *o,
    *p;

  /* Each iteration records its ordering in a 16-entry slice of "order". */
  o = order + (16*iteration);
  for (i=0; i < (ssize_t) count; i++)
  {
    dps[i] = Dot(points[i],axis);
    o[i] = (unsigned char)i;
  }
  /* Insertion sort of dps[], permuting o[] in step. */
  for (i=0; i < (ssize_t) count; i++)
  {
    for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
    {
      f = dps[j];
      dps[j] = dps[j - 1];
      dps[j - 1] = f;
      c = o[j];
      o[j] = o[j - 1];
      o[j - 1] = c;
    }
  }
  /* Bail out if this ordering was already tried in a previous iteration. */
  for (i=0; i < (ssize_t) iteration; i++)
  {
    MagickBooleanType
      same;

    p = order + (16*i);
    same = MagickTrue;
    for (j=0; j < count; j++)
    {
      if (o[j] != p[j])
        {
          same = MagickFalse;
          break;
        }
    }
    if (same != MagickFalse)
      return MagickFalse;
  }
  /* Weighted points in sorted order, plus their running sum. */
  xSumwSum->x = 0;
  xSumwSum->y = 0;
  xSumwSum->z = 0;
  xSumwSum->w = 0;
  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      v;

    j = (size_t) o[i];
    v.x = points[j].w * points[j].x;
    v.y = points[j].w * points[j].y;
    v.z = points[j].w * points[j].z;
    v.w = points[j].w * 1.0f;
    VectorCopy44(v,&pointsWeights[i]);
    VectorAdd(*xSumwSum,v,xSumwSum);
  }
  return MagickTrue;
}

/*
  Least-squares cluster-fit compressor for a DXT color block: searches the
  partitions of the projected point ordering into the four selector groups
  for the endpoint pair minimizing the metric-weighted error.
*/
static void CompressClusterFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const
  DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start,
  DDSVector3* end, unsigned char *indices)
{
  DDSVector3
    axis;

  DDSVector4
    grid,
    gridrcp,
    half,
    onethird_onethird2,
    pointsWeights[16],
    two,
    twonineths,
    twothirds_twothirds2,
    xSumwSum;

  float
    bestError = 1e+37f;

  size_t
    bestIteration = 0,
    besti = 0,
    bestj = 0,
    bestk = 0,
    iterationIndex;

  ssize_t
    i;

  unsigned char
    *o,
    order[128],
    unordered[16];

  /* Constant vectors for the closed-form two-endpoint least-squares fit. */
  VectorInit(half,0.5f);
  VectorInit(two,2.0f);
  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);
  /* RGB565 quantization grid (and its reciprocal) for snapping endpoints. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;
  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;
  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;
  /* Initial ordering along the principal axis. */
  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);
  for (iterationIndex = 0;;)
  {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,1) \
    num_threads(GetMagickResourceLimit(ThreadResource))
#endif
    /*
      Enumerate all splits of the sorted points into the four selector
      groups: [0,i) -> code 0, [i,j) -> 2/3 blend, [j,k) -> 1/3 blend,
      [k,count) -> code 1; solve for the best endpoints of each split.
    */
    for (i=0; i < (ssize_t) count; i++)
    {
      DDSVector4
        part0,
        part1,
        part2;

      size_t
        ii,
        j,
        k,
        kmin;

      VectorInit(part0,0.0f);
      for(ii=0; ii < (size_t) i; ii++)
        VectorAdd(pointsWeights[ii],part0,&part0);
      VectorInit(part1,0.0f);
      for (j=(size_t) i;;)
      {
        if (j == 0)
          {
            VectorCopy44(pointsWeights[0],&part2);
            kmin = 1;
          }
        else
          {
            VectorInit(part2,0.0f);
            kmin = j;
          }
        for (k=kmin;;)
        {
          DDSVector4
            a,
            alpha2_sum,
            alphax_sum,
            alphabeta_sum,
            b,
            beta2_sum,
            betax_sum,
            e1,
            e2,
            factor,
            part3;

          float
            error;

          /* part3 = remaining points (code 1 group). */
          VectorSubtract(xSumwSum,part2,&part3);
          VectorSubtract(part3,part1,&part3);
          VectorSubtract(part3,part0,&part3);
          /* Accumulate the alpha/beta normal-equation terms. */
          VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
          VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
          VectorInit(alpha2_sum,alphax_sum.w);
          VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
          VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
          VectorInit(beta2_sum,betax_sum.w);
          VectorAdd(part1,part2,&alphabeta_sum);
          VectorInit(alphabeta_sum,alphabeta_sum.w);
          VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);
          /* Solve the 2x2 system for endpoints a (start) and b (end). */
          VectorMultiply(alpha2_sum,beta2_sum,&factor);
          VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
            &factor);
          VectorReciprocal(factor,&factor);
          VectorMultiply(alphax_sum,beta2_sum,&a);
          VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
          VectorMultiply(a,factor,&a);
          VectorMultiply(betax_sum,alpha2_sum,&b);
          VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
          VectorMultiply(b,factor,&b);
          /* Clamp and snap both endpoints to the RGB565 grid. */
          VectorClamp(&a);
          VectorMultiplyAdd(grid,a,half,&a);
          VectorTruncate(&a);
          VectorMultiply(a,gridrcp,&a);
          VectorClamp(&b);
          VectorMultiplyAdd(grid,b,half,&b);
          VectorTruncate(&b);
          VectorMultiply(b,gridrcp,&b);
          /* Metric-weighted squared error of this endpoint pair. */
          VectorMultiply(b,b,&e1);
          VectorMultiply(e1,beta2_sum,&e1);
          VectorMultiply(a,a,&e2);
          VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);
          VectorMultiply(a,b,&e2);
          VectorMultiply(e2,alphabeta_sum,&e2);
          VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
          VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
          VectorMultiplyAdd(two,e2,e1,&e2);
          VectorMultiply(e2,metric,&e2);
          error = e2.x + e2.y + e2.z;
          if (error < bestError)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (DDS_CompressClusterFit)
#endif
              {
                /* Re-check under the lock: another thread may have won. */
                if (error < bestError)
                  {
                    VectorCopy43(a,start);
                    VectorCopy43(b,end);
                    bestError = error;
                    besti = i;
                    bestj = j;
                    bestk = k;
                    bestIteration = iterationIndex;
                  }
              }
            }
          if (k == count)
            break;
          VectorAdd(pointsWeights[k],part2,&part2);
          k++;
        }
        if (j == count)
          break;
        VectorAdd(pointsWeights[j],part1,&part1);
        j++;
      }
    }
    /* Stop when an iteration fails to improve, or after 8 re-orderings. */
    if (bestIteration != iterationIndex)
      break;
    iterationIndex++;
    if (iterationIndex == 8)
      break;
    /* Re-order along the current best endpoint axis and try again. */
    VectorSubtract3(*end,*start,&axis);
    if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
        iterationIndex) == MagickFalse)
      break;
  }
  /* Translate the best split back into per-pixel selectors. */
  o = order + (16*bestIteration);
  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for (i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;
  RemapIndices(map,unordered,indices);
}

/*
  Range-fit compressor for a DXT color block: pick the extreme points along
  the principal axis as endpoints, snap them to the RGB565 grid, and assign
  each point the nearest of the four interpolated codes.
*/
static void CompressRangeFit(const size_t count,
  const DDSVector4* points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
  unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;

  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;

  ssize_t
    i;

  size_t
    bestj,
    j;

  unsigned char
    closest[16];

  VectorInit3(half,0.5f);
  /* RGB565 quantization grid. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  if (count > 0)
    {
      /* Endpoints = min/max projections onto the principal axis. */
      VectorCopy43(points[0],start);
      VectorCopy43(points[0],end);
      min = max = Dot(points[0],principle);
      for (i=1; i < (ssize_t) count; i++)
      {
        val = Dot(points[i],principle);
        if (val < min)
          {
            VectorCopy43(points[i],start);
            min = val;
          }
        else if (val > max)
          {
            VectorCopy43(points[i],end);
            max = val;
          }
      }
    }
  /* Snap both endpoints to the RGB565 grid. */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);
  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);
  /* Four-entry codebook: endpoints plus the two 1/3-2/3 blends. */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));
  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    /* Nearest codebook entry under the metric-weighted distance. */
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;
      d = Dot(dist,dist);
      if (d < bestDist)
        {
          bestDist =
d;
          bestj = j;
        }
    }
    closest[i] = (unsigned char) bestj;
  }
  RemapIndices(map, closest, indices);
}

/*
  Look up the best precomputed DXT endpoints for a single flat color; tries
  both lookup variants (i=0,1) and keeps the one with the smaller squared
  per-channel error.  *index is the palette index to use for every texel.
*/
static void ComputeEndPoints(const DDSSingleColorLookup *lookup[],
  const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
  unsigned char *index)
{
  ssize_t
    i;

  size_t
    c,
    maxError = SIZE_MAX;

  for (i=0; i < 2; i++)
  {
    const DDSSourceBlock*
      sources[3];

    size_t
      error = 0;

    for (c=0; c < 3; c++)
    {
      sources[c] = &lookup[c][color[c]].sources[i];
      error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
    }
    if (error > maxError)
      continue;
    /* Convert table entries back to normalized RGB565 endpoint colors. */
    start->x = (float) sources[0]->start / 31.0f;
    start->y = (float) sources[1]->start / 63.0f;
    start->z = (float) sources[2]->start / 31.0f;
    end->x = (float) sources[0]->end / 31.0f;
    end->y = (float) sources[1]->end / 63.0f;
    end->z = (float) sources[2]->end / 31.0f;
    *index = (unsigned char) (2*i);
    maxError = error;
  }
}

/*
  Dominant eigenvector of the symmetric 3x3 covariance matrix (packed as 6
  floats) via 8 rounds of power iteration, normalizing by the largest
  component each round.
*/
static void ComputePrincipleComponent(const float *covariance,
  DDSVector3 *principle)
{
  DDSVector4
    row0,
    row1,
    row2,
    v;

  ssize_t
    i;

  row0.x = covariance[0];
  row0.y = covariance[1];
  row0.z = covariance[2];
  row0.w = 0.0f;

  row1.x = covariance[1];
  row1.y = covariance[3];
  row1.z = covariance[4];
  row1.w = 0.0f;

  row2.x = covariance[2];
  row2.y = covariance[4];
  row2.z = covariance[5];
  row2.w = 0.0f;

  VectorInit(v,1.0f);

  for (i=0; i < 8; i++)
  {
    DDSVector4
      w;

    float
      a;

    /* w = M * v */
    w.x = row0.x * v.x;
    w.y = row0.y * v.x;
    w.z = row0.z * v.x;
    w.w = row0.w * v.x;

    w.x = (row1.x * v.y) + w.x;
    w.y = (row1.y * v.y) + w.y;
    w.z = (row1.z * v.y) + w.z;
    w.w = (row1.w * v.y) + w.w;

    w.x = (row2.x * v.z) + w.x;
    w.y = (row2.y * v.z) + w.y;
    w.z = (row2.z * v.z) + w.z;
    w.w = (row2.w * v.z) + w.w;

    /* Normalize by the largest component to keep the iteration bounded. */
    a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z)));

    v.x = w.x * a;
    v.y = w.y * a;
    v.z = w.z * a;
    v.w = w.w * a;
  }

  VectorCopy43(v,principle);
}

/*
  Weight-weighted 3x3 covariance of the RGB point cloud about its weighted
  centroid, packed into covariance[0..5] (xx,xy,xz,yy,yz,zz).  The weight of
  each point lives in its .w component.
*/
static void ComputeWeightedCovariance(const size_t count,
  const DDSVector4 *points, float *covariance)
{
  DDSVector3
    centroid;

  float
    total;

  size_t
    i;

  total = 0.0f;
  VectorInit3(centroid,0.0f);

  for (i=0; i < count; i++)
  {
    total += points[i].w;
    centroid.x += (points[i].x * points[i].w);
    centroid.y += (points[i].y * points[i].w);
    centroid.z += (points[i].z * points[i].w);
  }

  /* Guard against division by (near) zero total weight (FLT_EPSILON). */
  if( total > 1.192092896e-07F)
    {
      centroid.x /= total;
      centroid.y /= total;
      centroid.z /= total;
    }

  for (i=0; i < 6; i++)
    covariance[i] = 0.0f;

  for (i = 0; i < count; i++)
  {
    DDSVector3
      a,
      b;

    a.x = points[i].x - centroid.x;
    a.y = points[i].y - centroid.y;
    a.z = points[i].z - centroid.z;

    b.x = points[i].w * a.x;
    b.y = points[i].w * a.y;
    b.z = points[i].w * a.z;

    covariance[0] += a.x*b.x;
    covariance[1] += a.x*b.y;
    covariance[2] += a.x*b.z;
    covariance[3] += a.y*b.y;
    covariance[4] += a.y*b.z;
    covariance[5] += a.z*b.z;
  }
}

/*
  Compress and emit the 16 alpha values of a DXT5 block.  Tries both the
  5-index and the 7-index alpha modes, keeps whichever has lower error, and
  writes min/max plus sixteen 3-bit indices packed into six bytes.
*/
static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5,
  size_t max5, size_t min7, size_t max7)
{
  ssize_t
    i;

  size_t
    err5,
    err7,
    j;

  unsigned char
    indices5[16],
    indices7[16];

  FixRange(min5,max5,5);
  err5 = CompressAlpha(min5,max5,5,alphas,indices5);

  FixRange(min7,max7,7);
  err7 = CompressAlpha(min7,max7,7,alphas,indices7);

  if (err7 < err5)
    {
      /* 7-mode won: translate its indices into the equivalent 5-mode
         index layout (endpoints swapped, interior order reversed). */
      for (i=0; i < 16; i++)
      {
        unsigned char
          index;

        index = indices7[i];
        if( index == 0 )
          indices5[i] = 1;
        else if (index == 1)
          indices5[i] = 0;
        else
          indices5[i] = 9 - index;
      }

      min5 = max7;
      max5 = min7;
    }

  (void) WriteBlobByte(image,(unsigned char) min5);
  (void) WriteBlobByte(image,(unsigned char) max5);

  /* Pack eight 3-bit indices into 24 bits, twice. */
  for(i=0; i < 2; i++)
  {
    size_t
      value = 0;

    for (j=0; j < 8; j++)
    {
      size_t index = (size_t) indices5[j + i*8];
      value |= ( index << 3*j );
    }

    for (j=0; j < 3; j++)
    {
      size_t byte = (value >> 8*j) & 0xff;
      (void) WriteBlobByte(image,(unsigned char) byte);
    }
  }
}

/*
  Emit a DXT color block: the two RGB565 endpoints followed by sixteen 2-bit
  palette indices.  DXT convention requires endpoint a > b for the 4-color
  mode, so indices are remapped and endpoints swapped when a < b.
*/
static void WriteIndices(Image *image, const DDSVector3 start,
  const DDSVector3 end, unsigned char *indices)
{
  ssize_t
    i;

  size_t
    a,
    b;

  unsigned char
    remapped[16];

  const unsigned char
    *ind;

  a = ColorTo565(start);
  b = ColorTo565(end);

  for (i=0; i<16; i++)
  {
    if( a < b )
      remapped[i] = (indices[i] ^ 0x1) & 0x3;
    else if( a == b )
      remapped[i] = 0;
    else
      remapped[i] = indices[i];
  }

  if( a < b )
    Swap(a,b);

  (void)
WriteBlobByte(image,(unsigned char) (a & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (a >> 8));
  (void) WriteBlobByte(image,(unsigned char) (b & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (b >> 8));

  /* Four bytes of packed 2-bit indices, one row of four texels each. */
  for (i=0; i<4; i++)
  {
    ind = remapped + 4*i;
    (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
      (ind[3] << 6));
  }
}

/*
  Compress one block of up to 16 unique weighted colors: compute the
  principle axis of the point cloud, then run either the fast range fit or
  the exhaustive cluster fit, and emit the resulting DXT color block.
*/
static void WriteCompressed(Image *image, const size_t count,
  DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit)
{
  float
    covariance[16];

  DDSVector3
    end,
    principle,
    start;

  DDSVector4
    metric;

  unsigned char
    indices[16];

  VectorInit(metric,1.0f);
  VectorInit3(start,0.0f);
  VectorInit3(end,0.0f);

  ComputeWeightedCovariance(count,points,covariance);
  ComputePrincipleComponent(covariance,&principle);

  if ((clusterFit == MagickFalse) || (count == 0))
    CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
  else
    CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);

  WriteIndices(image,start,end,indices);
}

/*
  Special-case a block that contains exactly one unique color: use the
  precomputed single-color endpoint lookup tables instead of fitting.
*/
static void WriteSingleColorFit(Image *image, const DDSVector4 *points,
  const ssize_t *map)
{
  DDSVector3
    start,
    end;

  ssize_t
    i;

  unsigned char
    color[3],
    index,
    indexes[16],
    indices[16];

  color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
  color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
  color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);

  index=0;
  ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);

  for (i=0; i< 16; i++)
    indexes[i]=index;
  RemapIndices(map,indexes,indices);
  WriteIndices(image,start,end,indices);
}

/*
  Encode the whole image as DXT1/DXT5 4x4 blocks.  For each block: gather
  the (partial, at image edges) texels, deduplicate identical colors while
  accumulating their weights, optionally compress the alpha channel (DXT5),
  and emit the color block via single-color, range, or cluster fit.
*/
static void WriteFourCC(Image *image, const size_t compression,
  const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
  ExceptionInfo *exception)
{
  ssize_t
    x;

  ssize_t
    i,
    y,
    bx,
    by;

  const Quantum
    *p;

  for (y=0; y < (ssize_t) image->rows; y+=4)
  {
    for (x=0; x < (ssize_t) image->columns; x+=4)
    {
      MagickBooleanType
        match;

      DDSVector4
        point,
        points[16];

      size_t
        count = 0,
        max5 = 0,
        max7 = 0,
        min5 = 255,
        min7 = 255,
        columns = 4,
        rows = 4;

      ssize_t
        alphas[16],
        map[16];

      unsigned char
        alpha;

      /* Clamp the block to the image boundary. */
      if (x + columns >= image->columns)
        columns = image->columns - x;
      if (y + rows >= image->rows)
        rows = image->rows - y;

      p=GetVirtualPixels(image,x,y,columns,rows,exception);
      if (p == (const Quantum *) NULL)
        break;

      for (i=0; i<16; i++)
      {
        map[i] = -1;
        alphas[i] = -1;
      }

      for (by=0; by < (ssize_t) rows; by++)
      {
        for (bx=0; bx < (ssize_t) columns; bx++)
        {
          if (compression == FOURCC_DXT5)
            alpha = ScaleQuantumToChar(GetPixelAlpha(image,p));
          else
            alpha = 255;

          if (compression == FOURCC_DXT5)
            {
              /* Track both the full range (7-mode) and the range excluding
                 the reserved 0/255 codes (5-mode). */
              if (alpha < min7)
                min7 = alpha;
              if (alpha > max7)
                max7 = alpha;
              if (alpha != 0 && alpha < min5)
                min5 = alpha;
              if (alpha != 255 && alpha > max5)
                max5 = alpha;
            }

          alphas[4*by + bx] = (size_t)alpha;

          point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f;
          point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f;
          point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f;
          point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
          p+=GetPixelChannels(image);

          /* Deduplicate identical colors; their weights accumulate so the
             fit favors frequently occurring colors. */
          match = MagickFalse;
          for (i=0; i < (ssize_t) count; i++)
          {
            if ((points[i].x == point.x) &&
                (points[i].y == point.y) &&
                (points[i].z == point.z) &&
                (alpha >= 128 || compression == FOURCC_DXT5))
              {
                points[i].w += point.w;
                map[4*by + bx] = i;
                match = MagickTrue;
                break;
              }
          }

          if (match != MagickFalse)
            continue;

          points[count].x = point.x;
          points[count].y = point.y;
          points[count].z = point.z;
          points[count].w = point.w;
          map[4*by + bx] = count;
          count++;
        }
      }

      for (i=0; i < (ssize_t) count; i++)
        points[i].w = sqrt(points[i].w);

      if (compression == FOURCC_DXT5)
        WriteAlphas(image,alphas,min5,max5,min7,max7);

      if (count == 1)
        WriteSingleColorFit(image,points,map);
      else
        WriteCompressed(image,count,points,map,clusterFit);
    }
  }
}

/*
  Emit the image as raw BGR(A) bytes, one row of virtual pixels at a time.
*/
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
  const Quantum
    *p;

  ssize_t
    x;

  ssize_t
    y;

  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *)
NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,p))); if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,p))); p+=GetPixelChannels(image); } } } static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); } static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info, const size_t pixelFormat,const size_t compression,const size_t mipmaps, const MagickBooleanType fromlist,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,ExceptionInfo *exception) { const char *option; Image *mipmap_image, *resize_image; MagickBooleanType fast_mipmaps, status; ssize_t i; size_t columns, rows; columns=DIV2(image->columns); rows=DIV2(image->rows); option=GetImageOption(image_info,"dds:fast-mipmaps"); fast_mipmaps=IsStringTrue(option); mipmap_image=image; resize_image=image; status=MagickTrue; for (i=0; i < (ssize_t) mipmaps; i++) { if (fromlist == MagickFalse) { mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter, exception); if (mipmap_image == (Image *) NULL) { status=MagickFalse; break; } } else { mipmap_image=mipmap_image->next; if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows)) ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported", image->filename); } DestroyBlob(mipmap_image); mipmap_image->blob=ReferenceBlob(image->blob); WriteImageData(mipmap_image,pixelFormat,compression,weightByAlpha, clusterFit,exception); if (fromlist == MagickFalse) { if 
(fast_mipmaps == MagickFalse)
          mipmap_image=DestroyImage(mipmap_image);
        else
          {
            /* fast-mipmaps: keep this level so the next one is resized
               from it instead of from the full-size base image. */
            if (resize_image != image)
              resize_image=DestroyImage(resize_image);
            resize_image=mipmap_image;
          }
      }

    columns=DIV2(columns);
    rows=DIV2(rows);
  }
  if (resize_image != image)
    resize_image=DestroyImage(resize_image);
  return(status);
}

/*
  Emit the 128-byte DDS file header: magic, DDS_HEADER flags/dimensions/
  pitch-or-linear-size/mipmap count, software marker, DDPIXELFORMAT, and
  capability bits.  All multi-byte fields are little-endian.
*/
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
  const size_t compression, const size_t mipmaps)
{
  char
    software[MagickPathExtent];

  ssize_t
    i;

  unsigned int
    format,
    caps,
    flags;

  flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
    DDSD_PIXELFORMAT);
  caps=(unsigned int) DDSCAPS_TEXTURE;
  format=(unsigned int) pixelFormat;

  if (format == DDPF_FOURCC)
      flags=flags | DDSD_LINEARSIZE;
  else
      flags=flags | DDSD_PITCH;

  if (mipmaps > 0)
    {
      flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
      caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
    }

  if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait)
    format=format | DDPF_ALPHAPIXELS;

  (void) WriteBlob(image,4,(unsigned char *) "DDS ");
  (void) WriteBlobLSBLong(image,124);
  (void) WriteBlobLSBLong(image,flags);
  (void) WriteBlobLSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobLSBLong(image,(unsigned int) image->columns);

  if (pixelFormat == DDPF_FOURCC)
    {
      /* Compressed DDS requires linear compressed size of first image */
      if (compression == FOURCC_DXT1)
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
      else /* DXT5 */
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
    }
  else
    {
      /* Uncompressed DDS requires byte pitch of first image */
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4));
      else
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3));
    }

  (void) WriteBlobLSBLong(image,0x00);
  (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);
  (void) memset(software,0,sizeof(software));
  (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent);
  (void) WriteBlob(image,44,(unsigned char *) software);

  (void) WriteBlobLSBLong(image,32);
  (void) WriteBlobLSBLong(image,format);

  if (pixelFormat == DDPF_FOURCC)
    {
      (void) WriteBlobLSBLong(image,(unsigned int) compression);
      for(i=0;i < 5;i++) /* bitcount / masks */
        (void) WriteBlobLSBLong(image,0x00);
    }
  else
    {
      (void) WriteBlobLSBLong(image,0x00);
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          /* 32-bit BGRA channel masks. */
          (void) WriteBlobLSBLong(image,32);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0xff000000);
        }
      else
        {
          /* 24-bit BGR channel masks. */
          (void) WriteBlobLSBLong(image,24);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0x00);
        }
    }

  (void) WriteBlobLSBLong(image,caps);
  for(i=0;i < 4;i++) /* ddscaps2 + reserved region */
    (void) WriteBlobLSBLong(image,0x00);
}

/*
  Top-level DDS encoder entry point: choose pixel format and compression
  from the image, the magick name, and the dds:* options; compute or count
  the mipmap chain; write header (unless dds:raw), base image data, and
  mipmaps.  Automatic mipmap generation only applies to power-of-two
  dimensions.
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
  Image *image, ExceptionInfo *exception)
{
  const char
    *option;

  size_t
    compression,
    columns,
    maxMipmaps,
    mipmaps,
    pixelFormat,
    rows;

  MagickBooleanType
    clusterFit,
    fromlist,
    status,
    weightByAlpha;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);

  /* Default: DXT5 with alpha, DXT1 without. */
  pixelFormat=DDPF_FOURCC;
  compression=FOURCC_DXT5;
  if (image->alpha_trait == UndefinedPixelTrait)
    compression=FOURCC_DXT1;

  if (LocaleCompare(image_info->magick,"dxt1") == 0)
    compression=FOURCC_DXT1;

  if (image_info->compression == DXT1Compression)
    compression=FOURCC_DXT1;
  else if (image_info->compression == NoCompression)
    pixelFormat=DDPF_RGB;

  option=GetImageOption(image_info,"dds:compression");
  if (option != (char *) NULL)
    {
       if (LocaleCompare(option,"dxt1") == 0)
         compression=FOURCC_DXT1;
       if (LocaleCompare(option,"none") == 0)
         pixelFormat=DDPF_RGB;
    }

  clusterFit=MagickFalse;
  weightByAlpha=MagickFalse;

  if (pixelFormat == DDPF_FOURCC)
    {
      option=GetImageOption(image_info,"dds:cluster-fit");
      if (IsStringTrue(option) != MagickFalse)
        {
          clusterFit=MagickTrue;
          if (compression != FOURCC_DXT1)
            {
              option=GetImageOption(image_info,"dds:weight-by-alpha");
              if (IsStringTrue(option) != MagickFalse)
                weightByAlpha=MagickTrue;
            }
        }
    }

  mipmaps=0;
  fromlist=MagickFalse;
  option=GetImageOption(image_info,"dds:mipmaps");
  if (option != (char *) NULL)
    {
      if (LocaleNCompare(option,"fromlist",8) == 0)
        {
          Image
            *next;

          /* Mipmap levels are supplied as the rest of the image list. */
          fromlist=MagickTrue;
          next=image->next;
          while(next != (Image *) NULL)
          {
            mipmaps++;
            next=next->next;
          }
        }
    }

  /* Auto-generate mipmaps only for power-of-two dimensions. */
  if ((mipmaps == 0) &&
      ((image->columns & (image->columns - 1)) == 0) &&
      ((image->rows & (image->rows - 1)) == 0))
    {
      maxMipmaps=SIZE_MAX;
      if (option != (char *) NULL)
        maxMipmaps=StringToUnsignedLong(option);

      if (maxMipmaps != 0)
        {
          columns=image->columns;
          rows=image->rows;
          while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps)
          {
            columns=DIV2(columns);
            rows=DIV2(rows);
            mipmaps++;
          }
        }
    }

  option=GetImageOption(image_info,"dds:raw");
  if (IsStringTrue(option) == MagickFalse)
    WriteDDSInfo(image,pixelFormat,compression,mipmaps);
  else
    mipmaps=0;
  WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
    exception);
  if ((mipmaps > 0) &&
      (WriteMipmaps(image,image_info,pixelFormat,compression,
        mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse))
    return(MagickFalse);
  (void) CloseBlob(image);
  return(MagickTrue);
}
extract_image_patches.h
/* Copyright 2018 The Blueoil Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================*/ #ifndef DLK_FUNC_EXTRACT_IMAGE_PATCHES #define DLK_FUNC_EXTRACT_IMAGE_PATCHES #include <algorithm> #include "global.h" #include "tensor_view.h" #include "time_measurement.h" #include "pack_input_to_qwords.h" #include <limits.h> #ifdef USE_NEON #include <arm_neon.h> #endif template <typename T> void func_ExtractImagePatches( const TensorView<T, MemoryLayout::NHWC>& input, const TensorView<T, MemoryLayout::NHWC>& output, T_UINT kernel_size, T_UINT stride) { Measurement::Start("ExtractImagePatches"); const auto in_shape = input.get_shape(); const T_UINT input_width = in_shape[2]; const T_UINT input_channels = in_shape[3]; const auto out_shape = output.get_shape(); const T_UINT out_height = out_shape[1]; const T_UINT out_width = out_shape[2]; const T_UINT out_channels = out_shape[3]; for(T_UINT kz = 0; kz < input_channels; ++kz) for(T_UINT wi = 0; wi < out_height; wi++) for(T_UINT wj = 0; wj < out_width; wj++) for(T_UINT ki = 0; ki < kernel_size; ki++) for(T_UINT kj = 0; kj < kernel_size; kj++) { T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; const auto ch = kz + (ki * kernel_size + kj) * input_channels; const auto out_idx = wi * out_width * out_channels + wj * out_channels + ch; const auto in_idx = row * input_width * input_channels + col * input_channels + kz; output.data()[out_idx] = 
input.data()[in_idx]; } Measurement::Stop(); } inline void func_ExtractImagePatches( const TensorView<QUANTIZED_PACKED, MemoryLayout::HWChBCl>& input, const TensorView<QUANTIZED_PACKED, MemoryLayout::HWChBCl>& output, T_UINT kernel_size, T_UINT stride) { Measurement::Start("ExtractImagePatches"); const auto in_shape = input.get_shape(); const T_UINT input_width = in_shape[1]; const T_UINT input_channels = in_shape[2]; const T_UINT bits_per_input = in_shape[3]; const auto out_shape = output.get_shape(); const T_UINT out_height = out_shape[0]; const T_UINT out_width = out_shape[1]; const T_UINT out_channels = out_shape[2]; T_UINT output_index = 0; if (out_channels < kernel_size * kernel_size) { int bit_shift = out_channels * QUANTIZED_PACKED::BitCount / (kernel_size * kernel_size); const QUANTIZED_PACKED::base_t mask((QUANTIZED_PACKED::base_t(1) << bit_shift) - 1); std::fill(output.data(), output.data() + output.size(), QUANTIZED_PACKED(0)); for(T_UINT wi = 0; wi < out_height; wi++) for(T_UINT wj = 0; wj < out_width; wj++) for(T_UINT ki = 0; ki < kernel_size; ki++) for(T_UINT kj = 0; kj < kernel_size; kj++) { T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; T_UINT ch = (ki * kernel_size + kj) * bit_shift; T_UINT ch_high = ch / QUANTIZED_PACKED::BitCount; T_UINT ch_low = ch % QUANTIZED_PACKED::BitCount; #ifdef USE_NEON const auto out_idx = wi * out_width * out_channels * bits_per_input + wj * out_channels * bits_per_input + ch_high * bits_per_input; const auto in_idx = row * input_width * input_channels * bits_per_input + col * input_channels * bits_per_input; const auto in = vld1_u32(reinterpret_cast<uint32_t*>(input.data() + in_idx)); const auto masked = vand_u32(vdup_n_u32(mask), in); #ifdef AARCH32 const auto shifted = vshl_u32(masked, vdup_n_s32(ch_low)); #else const auto shifted = vshl_n_u32(masked, ch_low); #endif const auto out_old = vld1_u32(reinterpret_cast<uint32_t*>(output.data() + out_idx)); const auto out_new = vorr_u32(out_old, shifted); 
vst1_u32(reinterpret_cast<uint32_t*>(output.data() + out_idx), out_new); #else for(T_UINT digit = 0; digit < bits_per_input; ++digit) { const auto out_idx = wi * out_width * out_channels * bits_per_input + wj * out_channels * bits_per_input + ch_high * bits_per_input + digit; const auto in_idx = row * input_width * input_channels * bits_per_input + col * input_channels * bits_per_input + digit; output.data()[out_idx] |= QUANTIZED_PACKED((mask & input.data()[in_idx].Raw()) << ch_low); } #endif } } else { for(T_UINT ih = 0; ih < input_channels; ++ih) for(T_UINT wi = 0; wi < out_height; wi++) for(T_UINT wj = 0; wj < out_width; wj++) for(T_UINT ki = 0; ki < kernel_size; ki++) for(T_UINT kj = 0; kj < kernel_size; kj++) { T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; #ifdef USE_NEON const auto ch_high = ih + (ki * kernel_size + kj) * input_channels; const auto out_idx = wi * out_width * out_channels * bits_per_input + wj * out_channels * bits_per_input + ch_high * bits_per_input; const auto in_idx = row * input_width * input_channels * bits_per_input + col * input_channels * bits_per_input + ih * bits_per_input; const auto in = vld1_u32(reinterpret_cast<uint32_t*>(input.data() + in_idx)); vst1_u32(reinterpret_cast<uint32_t*>(output.data() + out_idx), in); #else for(T_UINT digit = 0; digit < bits_per_input; ++digit) { const auto ch_high = ih + (ki * kernel_size + kj) * input_channels; const auto out_idx = wi * out_width * out_channels * bits_per_input + wj * out_channels * bits_per_input + ch_high * bits_per_input + digit; const auto in_idx = row * input_width * input_channels * bits_per_input + col * input_channels * bits_per_input + ih * bits_per_input + digit; output.data()[out_idx] = input.data()[in_idx]; } #endif } } Measurement::Stop(); } inline void func_ExtractImagePatches( const TensorView<QUANTIZED_PACKED, MemoryLayout::ChHWBCl>& input, const TensorView<QUANTIZED_PACKED, MemoryLayout::ChHWBCl>& output, T_UINT kernel_size, T_UINT stride) { 
Measurement::Start("ExtractImagePatches"); const auto in_shape = input.get_shape(); const T_UINT input_height = in_shape[1]; const T_UINT input_width = in_shape[2]; const T_UINT input_channels = in_shape[0]; const T_UINT bits_per_input = in_shape[3]; const auto out_shape = output.get_shape(); const T_UINT out_height = out_shape[1]; const T_UINT out_width = out_shape[2]; const T_UINT out_channels = out_shape[0]; T_UINT output_index = 0; if (out_channels < kernel_size * kernel_size) { const T_UINT kernel_area = kernel_size * kernel_size; const T_UINT bit_shift = out_channels * QUANTIZED_PACKED::BitCount / kernel_area; const QUANTIZED_PACKED::base_t mask((QUANTIZED_PACKED::base_t(1) << bit_shift) - 1); const T_UINT lb_kernel_size = __builtin_ctz(kernel_size); const T_UINT kernel_mask = (1 << lb_kernel_size) - 1; #ifdef USE_NEON const auto shift_ref = vcombine_s32(vdup_n_s32(0), vdup_n_s32(bit_shift)); const auto add = vdupq_n_s32(bit_shift * 2); const auto mask_v = vdupq_n_u32(mask); #else const uint64_t mask64 = mask * 0x1'0000'0001ull; #endif const T_UINT blocks = kernel_area / out_channels; #pragma omp parallel for for(T_UINT wi = 0; wi < out_height; wi++) for(T_UINT wj = 0; wj < out_width; wj++) #ifdef USE_NEON for(T_UINT k = 0; k < out_channels; ++k) { auto tmp = vdupq_n_u32(0); auto shift = shift_ref; for(T_UINT i = 0; i < blocks; i += 2) { T_UINT ki = (k * blocks + i) >> lb_kernel_size; T_UINT kj = (k * blocks + i) & kernel_mask; T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; const auto in_idx = row * input_width * bits_per_input + col * bits_per_input; const auto in = vld1q_u32(reinterpret_cast<uint32_t*>(input.data() + in_idx)); const auto masked = vandq_u32(mask_v, in); const auto shifted = vshlq_u32(masked, shift); shift += add; tmp |= shifted; } const auto out = vorr_u32(vget_low_u32(tmp), vget_high_u32(tmp)); const auto out_idx = k * out_height * out_width * bits_per_input + wi * out_width * bits_per_input + wj * bits_per_input; 
vst1_u32(reinterpret_cast<uint32_t*>(output.data() + out_idx), out); } #else for(T_UINT k = 0; k < out_channels; ++k) { uint64_t out = 0; for(T_UINT i = 0; i < blocks; ++i) { T_UINT ki = (k * blocks + i) >> lb_kernel_size; T_UINT kj = (k * blocks + i) & kernel_mask; T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; const auto in_idx = row * input_width * bits_per_input + col * bits_per_input; const auto in = *reinterpret_cast<uint64_t*>(input.data() + in_idx); out |= (mask64 & in) << (i * bit_shift); } const auto out_idx = k * out_height * out_width * bits_per_input + wi * out_width * bits_per_input + wj * bits_per_input; *reinterpret_cast<uint64_t*>(output.data() + out_idx) = out; } #endif } else { for(T_UINT ih = 0; ih < input_channels; ++ih) for(T_UINT wi = 0; wi < out_height; wi++) for(T_UINT wj = 0; wj < out_width; wj++) for(T_UINT ki = 0; ki < kernel_size; ki++) for(T_UINT kj = 0; kj < kernel_size; kj++) { T_INT row = (wi * stride) + ki; T_INT col = (wj * stride) + kj; const auto ch_high = ih + (ki * kernel_size + kj) * input_channels; const auto out_idx = ch_high * out_height * out_width * bits_per_input + wi * out_width * bits_per_input + wj * bits_per_input; const auto in_idx = ih * input_height * input_width * bits_per_input + row * input_width * bits_per_input + col * bits_per_input; #ifdef USE_NEON const auto in = vld1_u32(reinterpret_cast<uint32_t*>(input.data() + in_idx)); vst1_u32(reinterpret_cast<uint32_t*>(output.data() + out_idx), in); #else *reinterpret_cast<uint64_t*>(output.data() + out_idx) = *reinterpret_cast<uint64_t*>(input.data() + in_idx); #endif } } Measurement::Stop(); } #endif // DLK_FUNC_EXTRACT_IMAGE_PATCHES
fws_parfor.c
/* Standard implementation of the Floyd-Warshall Algorithm using OpenMP parallel for. */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "util.h" #include <stdio.h> #include <omp.h> inline int min(int a, int b); int main(int argc, char **argv) { int **A; int i,j,k; struct timeval t1, t2; double time; int N=1024; if (argc != 2) { fprintf(stdout,"Usage: %s N\n", argv[0]); exit(0); } N=atoi(argv[1]); A = (int **) malloc(N*sizeof(int *)); for(i=0; i<N; i++) A[i] = (int *) malloc(N*sizeof(int)); graph_init_random(A,-1,N,128*N); gettimeofday(&t1,0); for(k=0;k<N;k++) #pragma omp parallel for private(i, j) shared(A, k, N) for(i=0; i<N; i++) for(j=0; j<N; j++) A[i][j]=min(A[i][j], A[i][k] + A[k][j]); gettimeofday(&t2,0); time=(double)((t2.tv_sec-t1.tv_sec)*1000000+t2.tv_usec-t1.tv_usec)/1000000; printf("FW,%d,%.4f\n", N, time); /* for(i=0; i<N; i++) for(j=0; j<N; j++) fprintf(stdout,"%d\n", A[i][j]); */ return 0; } inline int min(int a, int b) { if(a<=b)return a; else return b; }