source
stringlengths
3
92
c
stringlengths
26
2.25M
Proyek Akhir Semester Kelompok 12 Pemrograman Lanjut-2.c
// KryptoPal - Mining Simulator
// By: Group 12 for Advanced Programming, Computer Engineering FTUI (ENCE602003)
//   Afif Yudhistira - 2006522631
//   Binar Qalbu Cimuema - 2006526296
//   M. Hafiz Widyawan - 2006468762
//   Syamsul Erisandy Arief - 2006577611
// Contributions:
//   Afif Yudhistira: mining function
//   Binar Qalbu Cimuema: main function (with help from Syamsul Erisandy Arief)
//   M. Hafiz Widyawan: dispPanduan and menu display
//   Syamsul Erisandy Arief: account management (with help from Binar Qalbu Cimuema and M. Hafiz Widyawan)
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <omp.h>

// One node of the singly linked list of user accounts.
// NOTE(review): the password is stored and saved to disk in plain text.
struct listAkun {
    int urutan;                 // account number, assigned sequentially from 1
    char nama[100];
    char uname[100];
    char pass[100];
    float saldo;                // mined balance
    struct listAkun *next;
} *head;                        // global list head (newest account first)

// Print the user guide for every menu option.
void dispPanduan() {
    printf("\tKryptoPal - Simulator Mining Cryptocurrency\n");
    printf("\tSelamat datang di halaman panduan program kami.\n");
    printf("\tPanduan Program\n");
    printf("\tUntuk menggunakan program ini, harap mengikuti instruksi di bawah ini:\n");
    printf("\tOpsi 1\n");
    printf("\t Pada opsi ini, Anda akan membuat akun baru.\n");
    printf("\t Jika membuat akun baru terpilih, data data yang diminta adalah:\n");
    printf("\t - Nama, yaitu nama anda.\n");
    printf("\t - Username, yaitu nama pengguna Anda.\n");
    printf("\t - Password, yaitu kata sandi Anda\n");
    printf("\t Setelah itu, untuk kembali ke tampilan utama, Anda dapat memasukkan angka 0.\n");
    printf("\tOpsi 2\n");
    printf("\t Pada opsi ini, Anda bisa mencari akun yang sudah Anda buat sebelumnya.\n");
    printf("\t Setelah itu, untuk kembali ke tampilan utama, Anda dapat memasukkan angka 0.\n");
    printf("\tOpsi 3\n");
    printf("\t Pada opsi ini, Anda bisa menghapus akun yang sudah Anda buat sebelumnya.\n");
    printf("\t Setelah itu, untuk kembali ke tampilan utama, Anda dapat memasukkan angka 0.\n");
    printf("\tOpsi 4\n");
    printf("\t Pada opsi ini, Anda dapat mengubah username atau kata sandi akun Anda.\n");
    printf("\t Pilih opsi 1 untuk mengubah username dan opsi 2 untuk mengubah kata sandi.\n");
    printf("\t Anda akan diminta username atau kata sandi yang lama serta barunya.\n");
    printf("\t Penggantian berhasil dilakukan jika muncul kata 'Berhasil' pada program.\n");
    printf("\t Setelah itu, untuk kembali ke tampilan user, Anda dapat memasukkan angka 0.\n");
    printf("\tOpsi 5\n");
    printf("\t Pada opsi ini, Anda bisa melihat semua akun yang sudah Anda buat sebelumnya.\n");
    printf("\t Setelah itu, untuk kembali ke tampilan utama, Anda dapat memasukkan angka 0.\n");
    printf("\tOpsi 6\n");
    printf("\t Pada opsi ini, Anda bisa melakukan mining Cryptocurrency.\n");
    printf("\t Anda akan diminta untuk memasukkan nomor urut Anda dan jumlah proses mining yang ingin Anda lakukan\n");
    printf("\t Disarankan untuk melakukan 10.000 proses ke atas.\n");
    printf("\t Mining berhasil dilakukan jika muncul kata 'Berhasil' pada program.\n");
    printf("\t PERINGATAN : Mining akan membebankan cpu hingga 100%* sampai proses mining selesai.\n");
    printf("\t *CATATAN : Beban 100% hanya didapatkan jika penggunakan intel .\n");
    printf("\t Setelah itu, untuk kembali ke tampilan user, Anda dapat memasukkan angka 0.\n");
    printf("\tOpsi 7\n");
    printf("\t Pada opsi ini, Anda bisa menyimpan data akun anda ke file.txt.\n");
    printf("\t Setelah itu, untuk kembali ke tampilan utama, Anda dapat memasukkan angka 0.\n");
    printf("\tOpsi 8\n");
    printf("\t Pada opsi ini, akan dimunculkan panduan dalam menjalankan program. \n");
    printf("\t Setelah itu, untuk kembali ke tampilan utama, Anda dapat memasukkan angka 0.\n");
    printf("\tOpsi 0\n");
    printf("\t Pada opsi ini, Anda dapat keluar dari program.\n");
}

// Print the main menu and the option prompt.
void displayMenu() {
    printf("==========KryptoPal - Cryptocurrency Mining Simulation==========\n");
    printf("1. Buat Akun\n");
    printf("2. Pencarian Akun\n");
    printf("3. Hapus Akun\n");
    printf("4. Update Akun\n");
    printf("5. Display Akun\n");
    printf("6. Mulai Mining\n");
    printf("7. Simpan Data Akun ke File .txt\n");
    printf("8. Display Panduan\n");
    printf("0. Keluar\n");
    printf("\nSilahkan pilih opsi sesuai nomor: ");
}

// Prepend a new account node to the global list.
void masuk(int urutan, char *nama, char *uname, char *pass, float saldo) {
    struct listAkun *insert = (struct listAkun *) malloc(sizeof(struct listAkun));
    if (insert == NULL) {
        return;  // allocation failed; leave the list unchanged
    }
    insert->urutan = urutan;
    strcpy(insert->nama, nama);    // sources are bounded to 99 chars by scanf("%99s") in main
    strcpy(insert->uname, uname);
    strcpy(insert->pass, pass);
    insert->saldo = saldo;
    // prepending works identically whether or not the list is empty
    insert->next = head;
    head = insert;
}

// Find an account by its number and print its details (password omitted).
void cari(int urutan) {
    struct listAkun *temp = head;
    while (temp != NULL) {
        if (temp->urutan == urutan) {
            printf("Detail Akun %d\n", temp->urutan);
            printf("Name: %s\n", temp->nama);
            printf("Username: %s\n", temp->uname);
            printf("Saldo: %.8f\n", temp->saldo);
            return;
        }
        temp = temp->next;
    }
    printf("Data akun tidak ditemukan\n");
}

// Interactively replace the name/username/password of an account.
void update(int urutan) {
    struct listAkun *temp = head;
    while (temp != NULL) {
        if (temp->urutan == urutan) {
            printf("Akun %d ditemukan\n", urutan);
            printf("Enter new name: ");
            scanf("%99s", temp->nama);
            printf("Enter new username: ");
            scanf("%99s", temp->uname);
            printf("Enter new password: ");
            scanf("%99s", temp->pass);
            return;
        }
        temp = temp->next;
    }
    printf("Data akun tidak ditemukan\n");
}

// Add (not replace) saldobaru to the balance of the given account.
void updatesaldo(int urutan, float saldobaru) {
    struct listAkun *temp = head;
    while (temp != NULL) {
        if (temp->urutan == urutan) {
            temp->saldo += saldobaru;
            printf("Saldo Anda telah berhasil diperbarui!\n");
            return;
        }
        temp = temp->next;
    }
    printf("Data akun tidak ditemukan\n");
}

// Unlink and free the account with the given number.
void hapus(int urutan) {
    struct listAkun *temp1 = head;
    struct listAkun *temp2 = head;  // trails one node behind temp1
    while (temp1 != NULL) {
        if (temp1->urutan == urutan) {
            if (temp1 == temp2) {
                head = head->next;  // deleting the head node
            } else {
                temp2->next = temp1->next;
            }
            free(temp1);
            printf("Data telah dihapus\n");
            return;
        }
        temp2 = temp1;
        temp1 = temp1->next;
    }
    printf("Data akun tidak ditemukan\n");
}

// Print every account in the list, including the password.
void display() {
    struct listAkun *temp = head;
    while (temp != NULL) {
        printf("No. Urut %d\n", temp->urutan);
        printf("Nama: %s\n", temp->nama);
        printf("Username: %s\n", temp->uname);
        printf("Password: %s\n", temp->pass);
        printf("Saldo: %0.8f\n\n", temp->saldo);
        temp = temp->next;
    }
}

// Simulate mining by counting primes up to `limit` with an OpenMP-parallel
// trial-division loop. Returns the number of primes found.
// (Fix: the function was declared int but previously returned nothing.)
int mining(int limit) {
    double start = omp_get_wtime();
    int primes = 0;
    #pragma omp parallel for schedule(dynamic) reduction(+: primes)
    for (int num = 1; num <= limit; num++) {
        int i = 2;
        while (i <= num) {
            if (num % i == 0)
                break;
            i++;
        }
        if (i == num)  // only true when no divisor < num was found (1 is excluded)
            primes++;
    }
    double runTime = omp_get_wtime() - start;
    printf("Mining telah menghitung %d bilangan prima dibawah %d dalam %g detik\n",
           primes, limit, runTime);
    return primes;
}

// Append every account in the list to the save file.
// (Fix: the original kept one FILE* open for the whole program, fclose()d it on
// the first save, then wrote to the closed stream on the next save — UB. It also
// saved only the last-typed name/password with a possibly-uninitialized balance
// instead of the actual account list.)
static void simpanAkun(void) {
    FILE *kpw = fopen("D:\\KryptoPal.txt", "a");
    if (kpw == NULL) {
        printf("Gagal membuka file!\n");
        return;
    }
    for (struct listAkun *t = head; t != NULL; t = t->next) {
        fprintf(kpw, "\nDetail Akun %d:\nName: %s\nUsername: %s\nPassword: %s\nSaldo%d: %.8f\nEOA%d",
                t->urutan, t->nama, t->uname, t->pass, t->urutan, t->saldo, t->urutan);
    }
    fclose(kpw);
    printf("Data Anda telah berhasil disimpan!\n");
}

// Menu-driven entry point: loops until the user chooses option 0.
int main() {
    head = NULL;
    int opsi;
    char nama[100];
    char uname[100];
    char pass[100];
    int urutan = 0;      // running account counter; only incremented on creation
    int target;          // account number typed by the user for lookup/delete/update
                         // (fix: the original reused `urutan` here, corrupting the counter)
    long limcount = 0;   // total mining work performed (fix: was uninitialized)
    float limit, saldonew;

    do {
        displayMenu();
        if (scanf("%d", &opsi) != 1) {
            // discard invalid (non-numeric) input instead of fflush(stdin), which is UB
            int c;
            while ((c = getchar()) != '\n' && c != EOF) { }
            opsi = -1;
        }
        system("cls");
        switch (opsi) {
        case 1:
            printf("Enter name: ");
            scanf("%99s", nama);      // width-limited: the buffers are 100 bytes
            printf("Enter username: ");
            scanf("%99s", uname);
            printf("Enter password: ");
            scanf("%99s", pass);
            urutan += 1;
            masuk(urutan, nama, uname, pass, 0.0f);
            break;
        case 2:
            printf("Masukkan no. urut yang ingin dicari: ");
            scanf("%d", &target);
            cari(target);
            break;
        case 3:
            printf("Masukkan no.urut yang ingin dihapus: ");
            scanf("%d", &target);
            hapus(target);
            break;
        case 4:
            printf("Masukkan no.urut yang ingin diperbarui: ");
            scanf("%d", &target);
            update(target);
            break;
        case 5:
            display();
            break;
        case 6:
            printf("Masukkan no. urutan Anda: ");
            scanf("%d", &target);
            printf("Masukkan jumlah proses yang Anda ingin lakukan (proses bil.prima): ");
            scanf("%f", &limit);
            mining((int) limit);
            limcount += (long) limit;
            saldonew = limit / 200000;  // reward: 1 coin per 200k trial divisions requested
            updatesaldo(target, saldonew);
            break;
        case 7:
            simpanAkun();
            break;
        case 8:
            dispPanduan();
            break;
        }
    } while (opsi != 0);
    return 0;
}
hdp.c
#include <math.h> #include <stdio.h> #include <stdlib.h> #include <float.h> #include <inttypes.h> #include "hdp.h" #include "hdp_math_utils.h" #include "sonLib.h" #include "ranlib.h" #define N_IG_NUM_PARAMS 4 #ifndef MINUS_INF #define MINUS_INF -0.5 * DBL_MAX #endif #ifndef M_PI #define M_PI 3.14159265358979323846264338 #endif typedef struct Factor Factor; typedef struct DirichletProcess DirichletProcess; typedef enum FactorType { BASE, MIDDLE, DATA_PT } FactorType; struct Factor { FactorType factor_type; struct Factor* parent; stSet* children; double* factor_data; struct DirichletProcess* dp; }; struct DirichletProcess { int64_t id; struct HierarchicalDirichletProcess* hdp; double* gamma; int64_t depth; struct DirichletProcess* parent; stList* children; stSet* factors; int64_t num_factor_children; double base_factor_wt; double* posterior_predictive; double* spline_slopes; double cached_factor_mean; double cached_factor_sum_sq_dev; int64_t cached_factor_size; bool observed; }; struct HierarchicalDirichletProcess { bool finalized; double* data; int64_t* data_pt_dp_id; int64_t data_length; struct DirichletProcess* base_dp; struct DirichletProcess** dps; int64_t num_dps; // normal-inverse gamma parameters double mu; double nu; double two_alpha; double beta; double* sampling_grid; int64_t grid_length; int64_t samples_taken; bool splines_finalized; // TODO: replace this with my new offset log gamma memo //struct SumOfLogsMemo* log_sum_memo; int64_t depth; bool sample_gamma; double* gamma; double* gamma_alpha; double* gamma_beta; double* w_aux_vector; bool* s_aux_vector; stSet* distr_metric_memos; }; struct DistributionMetricMemo { int64_t num_distrs; double* memo_matrix; HierarchicalDirichletProcess* hdp; double (*metric_func) (HierarchicalDirichletProcess*, int64_t, int64_t); }; bool is_structure_finalized(HierarchicalDirichletProcess* hdp) { return hdp->finalized; } bool is_gamma_random(HierarchicalDirichletProcess* hdp) { return hdp->sample_gamma; } bool 
is_sampling_finalized(HierarchicalDirichletProcess* hdp) { return hdp->splines_finalized; } int64_t get_num_dir_proc(HierarchicalDirichletProcess* hdp) { return hdp->num_dps; } int64_t get_depth(HierarchicalDirichletProcess* hdp) { return hdp->depth; } int64_t get_num_data(HierarchicalDirichletProcess* hdp) { return hdp->data_length; } double* get_data_copy(HierarchicalDirichletProcess* hdp) { int64_t data_length = hdp->data_length; double* data = (double*) malloc(sizeof(double) * data_length); for (int64_t i = 0; i < data_length; i++) { data[i] = hdp->data[i]; } return data; } int64_t* get_data_pt_dp_ids_copy(HierarchicalDirichletProcess* hdp) { int64_t data_length = hdp->data_length; int64_t* dp_ids = (int64_t*) malloc(sizeof(int64_t) * data_length); for (int64_t i = 0; i < data_length; i++) { dp_ids[i] = hdp->data_pt_dp_id[i]; } return dp_ids; } double* get_gamma_params_copy(HierarchicalDirichletProcess* hdp) { int64_t depth = hdp->depth; double* gamma_params = (double*) malloc(sizeof(double) * depth); for (int64_t i = 0; i < depth; i++) { gamma_params[i] = hdp->gamma[i]; } return gamma_params; } double get_mu(HierarchicalDirichletProcess* hdp) { return hdp->mu; } double get_nu(HierarchicalDirichletProcess* hdp) { return hdp->nu; } double get_alpha(HierarchicalDirichletProcess* hdp) { return hdp->two_alpha / 2.0; } double get_beta(HierarchicalDirichletProcess* hdp) { return hdp->beta; } int64_t get_grid_length(HierarchicalDirichletProcess* hdp) { return hdp->grid_length; } double* get_sampling_grid_copy(HierarchicalDirichletProcess* hdp) { int64_t grid_length = hdp->grid_length; double* sampling_grid = (double*) malloc(sizeof(double) * grid_length); for (int64_t i = 0; i < grid_length; i++) { sampling_grid[i] = hdp->sampling_grid[i]; } return sampling_grid; } double* get_gamma_alpha_params_copy(HierarchicalDirichletProcess* hdp) { if (!hdp->sample_gamma) { fprintf(stderr, "Hierarchical Dirichlet process is not sampling gamma parameters."); exit(EXIT_FAILURE); } 
int64_t depth = hdp->depth; double* gamma_alpha = (double*) malloc(sizeof(double) * depth); for (int64_t i = 0; i < depth; i++) { gamma_alpha[i] = hdp->gamma_alpha[i]; } return gamma_alpha; } double* get_gamma_beta_params_copy(HierarchicalDirichletProcess* hdp) { if (!hdp->sample_gamma) { fprintf(stderr, "Hierarchical Dirichlet process is not sampling gamma parameters."); exit(EXIT_FAILURE); } int64_t depth = hdp->depth; double* gamma_beta = (double*) malloc(sizeof(double) * depth); for (int64_t i = 0; i < depth; i++) { gamma_beta[i] = hdp->gamma_beta[i]; } return gamma_beta; } int64_t get_dir_proc_num_factors(HierarchicalDirichletProcess* hdp, int64_t dp_id) { if (dp_id < 0 || dp_id >= hdp->num_dps) { fprintf(stderr, "Hierarchical Dirichlet process has no Dirichlet process with this ID.\n"); exit(EXIT_FAILURE); } DirichletProcess* dp = hdp->dps[dp_id]; return stSet_size(dp->factors); } int64_t get_dir_proc_parent_id(HierarchicalDirichletProcess* hdp, int64_t dp_id) { if (dp_id < 0 || dp_id >= hdp->num_dps) { fprintf(stderr, "Hierarchical Dirichlet process has no Dirichlet process with this ID.\n"); exit(EXIT_FAILURE); } DirichletProcess* dp = hdp->dps[dp_id]; if (dp->parent == NULL) { return -1; } else { return dp->parent->id; } } DistributionMetricMemo* new_distr_metric_memo(HierarchicalDirichletProcess* hdp, double (*metric_func) (HierarchicalDirichletProcess*, int64_t, int64_t)) { DistributionMetricMemo* memo = (DistributionMetricMemo*) malloc(sizeof(DistributionMetricMemo)); int64_t num_dps = hdp->num_dps; memo->num_distrs = num_dps; int64_t num_entries = ((num_dps - 1) * num_dps) / 2; double* memo_matrix = (double*) malloc(sizeof(double) * num_entries); memo->memo_matrix = memo_matrix; for (int64_t i = 0; i < num_entries; i++) { memo_matrix[i] = -1.0; } memo->hdp = hdp; memo->metric_func = metric_func; stSet_insert(hdp->distr_metric_memos, memo); return memo; } void destroy_distr_metric_memo(void* memo) { DistributionMetricMemo* metric_memo = 
(DistributionMetricMemo*) memo; free(metric_memo->memo_matrix); free(metric_memo); } void cache_base_factor_params(Factor* fctr, double mu, double nu, double two_alpha, double beta, double log_post_term) { if (fctr->factor_type != BASE) { fprintf(stderr, "Can only cache parameters for base factors.\n"); exit(EXIT_FAILURE); } double* param_array = fctr->factor_data; param_array[0] = mu; param_array[1] = nu; param_array[2] = two_alpha; param_array[3] = beta; param_array[4] = log_post_term; } Factor* new_base_factor(HierarchicalDirichletProcess* hdp) { Factor* fctr = (Factor*) malloc(sizeof(Factor)); fctr->factor_type = BASE; fctr->factor_data = (double*) malloc(sizeof(double) * (N_IG_NUM_PARAMS + 1)); cache_base_factor_params(fctr, hdp->mu, hdp->nu, hdp->two_alpha, hdp->beta, 1.0); fctr->parent = NULL; fctr->children = stSet_construct(); DirichletProcess* base_dp = hdp->base_dp; fctr->dp = base_dp; stSet_insert(base_dp->factors, (void*) fctr); return fctr; } Factor* new_middle_factor(DirichletProcess* dp) { if (dp->parent == NULL) { fprintf(stderr, "Attempted to create middle factor in root Dirichlet process.\n"); exit(EXIT_FAILURE); } Factor* fctr = (Factor*) malloc(sizeof(Factor)); fctr->factor_type = MIDDLE; fctr->factor_data = NULL; // note: assigning to parent handled externally fctr->parent = NULL; fctr->children = stSet_construct(); fctr->dp = dp; stSet_insert(dp->factors, (void*) fctr); return fctr; } Factor* new_data_pt_factor(HierarchicalDirichletProcess* hdp, int64_t data_pt_idx) { Factor* fctr = (Factor*) malloc(sizeof(Factor)); fctr->factor_type = DATA_PT; fctr->factor_data = &(hdp->data[data_pt_idx]); // note: assigning to parent handled externally fctr->parent = NULL; fctr->children = NULL; fctr->dp = NULL; return fctr; } void destroy_factor(Factor* fctr) { stSet* children = fctr->children; if (children != NULL) { if (stSet_size(children) > 0) { fprintf(stderr, "Attempted to destroy factor that still has children.\n"); exit(EXIT_FAILURE); } 
stSet_destruct(children); } Factor* parent = fctr->parent; if (parent != NULL) { stSet_remove(parent->children, (void*) fctr); (parent->dp->num_factor_children)--; if (stSet_size(parent->children) == 0) { destroy_factor(parent); } } if (fctr->factor_type == BASE) { free(fctr->factor_data); } DirichletProcess* dp = fctr->dp; if (dp != NULL) { stSet_remove(dp->factors, (void*) fctr); } free(fctr); } Factor* get_base_factor(Factor* fctr) { while (fctr->factor_type != BASE) { fctr = fctr->parent; if (fctr == NULL) { break; } } return fctr; } double get_factor_data_pt(Factor* fctr) { if (fctr->factor_type != DATA_PT) { fprintf(stderr, "Attempted to access data point from non-leaf factor.\n"); exit(EXIT_FAILURE); } return *(fctr->factor_data); } void get_factor_sum_internal(Factor* fctr, double* sum, int64_t* num_data) { if (fctr->factor_type == DATA_PT) { *sum += get_factor_data_pt(fctr); // TODO: there should be a way to use the parent's counters instead of recounting the data pts (*num_data)++; } else { stSetIterator* child_iter = stSet_getIterator(fctr->children); Factor* child_fctr = (Factor*) stSet_getNext(child_iter); while (child_fctr != NULL) { get_factor_sum_internal(child_fctr, sum, num_data); child_fctr = (Factor*) stSet_getNext(child_iter); } stSet_destructIterator(child_iter); } } void get_factor_ssd_internal(Factor* fctr, double center, double* sum_sq_dev) { if (fctr->factor_type == DATA_PT) { double dev = get_factor_data_pt(fctr) - center; *sum_sq_dev += dev * dev; } else { stSetIterator* child_iter = stSet_getIterator(fctr->children); Factor* child_fctr = (Factor*) stSet_getNext(child_iter); while (child_fctr != NULL) { get_factor_ssd_internal(child_fctr, center, sum_sq_dev); child_fctr = (Factor*) stSet_getNext(child_iter); } stSet_destructIterator(child_iter); } } void get_factor_stats(Factor* fctr, double* mean_out, double* sum_sq_dev_out, int64_t* num_data_out) { *mean_out = 0.0; *sum_sq_dev_out = 0.0; *num_data_out = 0; get_factor_sum_internal(fctr, 
mean_out, num_data_out); *mean_out /= (double) *num_data_out; get_factor_ssd_internal(fctr, *mean_out, sum_sq_dev_out); } void add_update_base_factor_params(Factor* fctr, double mean, double sum_sq_devs, double num_data) { double* param_array = fctr->factor_data; double mu_prev = param_array[0]; double nu_prev = param_array[1]; double two_alpha_prev = param_array[2]; double beta_prev = param_array[3]; double nu_post = nu_prev + num_data; double mu_post = (mu_prev * nu_prev + mean * num_data) / nu_post; double two_alpha_post = two_alpha_prev + num_data; double mean_dev = mean - mu_prev; double sq_mean_dev = nu_prev * num_data * mean_dev * mean_dev / nu_post; double beta_post = beta_prev + .5 * (sum_sq_devs + sq_mean_dev); double log_post_term = log_posterior_conditional_term(nu_post, two_alpha_post, beta_post);//, //fctr->dp->hdp->log_sum_memo); cache_base_factor_params(fctr, mu_post, nu_post, two_alpha_post, beta_post, log_post_term); } void remove_update_base_factor_params(Factor* fctr, double mean, double sum_sq_devs, double num_data) { double* param_array = fctr->factor_data; double mu_post = param_array[0]; double nu_post = param_array[1]; double two_alpha_post = param_array[2]; double beta_post = param_array[3]; double nu_prev = nu_post - num_data; double mu_prev = (mu_post * nu_post - mean * num_data) / nu_prev; double two_alpha_prev = two_alpha_post - num_data; double mean_dev = mean - mu_prev; double sq_mean_dev = nu_prev * num_data * mean_dev * mean_dev / nu_post; double beta_prev = beta_post - 0.5 * (sum_sq_devs + sq_mean_dev); double log_post_term = log_posterior_conditional_term(nu_prev, two_alpha_prev, beta_prev);//, //fctr->dp->hdp->log_sum_memo); cache_base_factor_params(fctr, mu_prev, nu_prev, two_alpha_prev, beta_prev, log_post_term); } double factor_parent_joint_log_likelihood(Factor* fctr, Factor* parent) { Factor* base_fctr = get_base_factor(parent); DirichletProcess* dp = fctr->dp; double num_reassign = (double) dp->cached_factor_size; double 
mean_reassign = dp->cached_factor_mean; double sum_sq_devs = dp->cached_factor_sum_sq_dev; double* param_array = base_fctr->factor_data; double mu_denom = param_array[0]; double nu_denom = param_array[1]; double two_alpha_denom = param_array[2]; double beta_denom = param_array[3]; double nu_numer = nu_denom + num_reassign; double two_alpha_numer = two_alpha_denom + num_reassign; double mean_dev = mean_reassign - mu_denom; double sq_mean_dev = nu_denom * num_reassign * mean_dev * mean_dev / nu_numer; double beta_numer = beta_denom + 0.5 * (sum_sq_devs + sq_mean_dev); double log_denom = param_array[4]; double log_numer = log_posterior_conditional_term(nu_numer, two_alpha_numer, beta_numer);//, //dp->hdp->log_sum_memo); return -0.5 * num_reassign * log(2.0 * M_PI) + log_numer - log_denom; } double data_pt_factor_parent_likelihood(Factor* data_pt_fctr, Factor* parent) { if (data_pt_fctr->factor_type != DATA_PT) { fprintf(stderr, "Can only access data point likelihood for data point factors.\n"); exit(EXIT_FAILURE); } double data_pt = get_factor_data_pt(data_pt_fctr); Factor* base_fctr = get_base_factor(parent); double* param_array = base_fctr->factor_data; double mu_denom = param_array[0]; double nu_denom = param_array[1]; double two_alpha_denom = param_array[2]; double beta_denom = param_array[3]; double nu_numer = nu_denom + 1.0; double mean_dev = data_pt - mu_denom; double sq_mean_dev = nu_denom * mean_dev * mean_dev / nu_numer; double two_alpha_numer = two_alpha_denom + 1.0; double beta_numer = beta_denom + 0.5 * sq_mean_dev; double log_denom = param_array[4]; double log_numer = log_posterior_conditional_term(nu_numer, two_alpha_numer, beta_numer);//, //base_fctr->dp->hdp->log_sum_memo); return (1.0 / sqrt(2.0 * M_PI)) * exp(log_numer - log_denom); } void evaluate_posterior_predictive(Factor* base_fctr, double* x, double* pdf_out, int64_t length) {//, //SumOfLogsMemo* log_sum_memo) { if (base_fctr->factor_type != BASE) { fprintf(stderr, "Can only evaluate posterior 
predictive of base factors.\n"); exit(EXIT_FAILURE); } double* param_array = base_fctr->factor_data; double mu_denom = param_array[0]; double nu_denom = param_array[1]; double two_alpha_denom = param_array[2]; double beta_denom = param_array[3]; double log_denom = param_array[4]; double nu_numer = nu_denom + 1.0; double two_alpha_numer = two_alpha_denom + 1.0; double nu_ratio = nu_denom / nu_numer; double pi_factor = 1.0 / sqrt(2.0 * M_PI); double mean_dev; double sq_mean_dev; double beta_numer; double log_numer; for (int64_t i = 0; i < length; i++) { mean_dev = x[i] - mu_denom; sq_mean_dev = nu_ratio * mean_dev * mean_dev; beta_numer = beta_denom + 0.5 * sq_mean_dev; log_numer = log_posterior_conditional_term(nu_numer, two_alpha_numer, beta_numer);//, //log_sum_memo); pdf_out[i] = pi_factor * exp(log_numer - log_denom); } } void evaluate_prior_predictive(HierarchicalDirichletProcess* hdp, double* x, double* pdf_out, int64_t length) { //TODO: this could be made more efficient with some precomputed variables stashed in HDP double mu = hdp->mu; double nu = hdp->nu; double two_alpha = hdp->two_alpha; double beta = hdp->beta; double nu_factor = nu / (2.0 * (nu + 1.0) * beta); //double alpha_term = exp(log_gamma_half(two_alpha + 1, hdp->log_sum_memo) // - log_gamma_half(two_alpha, hdp->log_sum_memo)); double alpha_term = exp(lgamma(.5 * (two_alpha + 1.0)) - lgamma(.5 * two_alpha)); double beta_term = sqrt(nu_factor / M_PI); double constant_term = alpha_term * beta_term; double alpha_power = -0.5 * (two_alpha + 1.0); for (int64_t i = 0; i < length; i++) { double dev = x[i] - mu; double var_term = pow(1.0 + nu_factor * dev * dev, alpha_power); pdf_out[i] = constant_term * var_term; } } double prior_likelihood(HierarchicalDirichletProcess* hdp, Factor* fctr) { if (fctr->factor_type != DATA_PT) { fprintf(stderr, "Cannot calculate point prior likelihood from non-data point factor.\n"); } //TODO: this could be made more efficient with some precomputed variables stashed in HDP 
double mu = hdp->mu; double nu = hdp->nu; double dbl_two_alpha = hdp->two_alpha; //int64_t two_alpha = (int64_t) dbl_two_alpha; double beta = hdp->beta; double data_pt = get_factor_data_pt(fctr); double dev = data_pt - mu; //double alpha_term = exp(log_gamma_half(two_alpha + 1, hdp->log_sum_memo) // - log_gamma_half(two_alpha, hdp->log_sum_memo)); double alpha_term = exp(lgamma(.5 * (dbl_two_alpha + 1.0)) - lgamma(.5 * dbl_two_alpha)); double nu_term = nu / (2.0 * (nu + 1.0) * beta); double beta_term = pow(1.0 + nu_term * dev * dev, -0.5 * (dbl_two_alpha + 1.0)); return alpha_term * sqrt(nu_term / M_PI) * beta_term; } double prior_joint_log_likelihood(HierarchicalDirichletProcess* hdp, Factor* fctr) { if (fctr->factor_type != MIDDLE) { fprintf(stderr, "Cannot calculate joint prior likelihood from non-middle factor.\n"); } double mu = hdp->mu; double nu = hdp->nu; double dbl_two_alpha = hdp->two_alpha; //int64_t two_alpha = (int64_t) dbl_two_alpha; double beta = hdp->beta; DirichletProcess* dp = fctr->dp; int64_t num_reassign = dp->cached_factor_size; double dbl_reassign = (double) num_reassign; double mean_reassign = dp->cached_factor_mean; double sum_sq_devs = dp->cached_factor_sum_sq_dev; double mean_dev = mean_reassign - mu; double sq_mean_dev = nu * dbl_reassign * mean_dev * mean_dev / (nu + dbl_reassign); //double log_alpha_term = log_gamma_half(two_alpha + num_reassign, hdp->log_sum_memo) // - log_gamma_half(two_alpha, hdp->log_sum_memo); double log_alpha_term = lgamma(.5 * (dbl_two_alpha + dbl_reassign)) - lgamma(.5 * dbl_two_alpha); double log_nu_term = 0.5 * (log(nu) - log(nu + dbl_reassign)); double log_pi_term = 0.5 * dbl_reassign * log(2.0 * M_PI); double log_beta_term_1 = dbl_two_alpha * log(beta); double log_beta_term_2 = (dbl_two_alpha + dbl_reassign) * log(beta + 0.5 * (sum_sq_devs + sq_mean_dev)); return log_alpha_term + log_nu_term - log_pi_term + 0.5 * (log_beta_term_1 - log_beta_term_2); } // TODO: figure out how to break into chunks and spin up 
threads to reduce the sum behind the iterator double unobserved_factor_likelihood(Factor* fctr, DirichletProcess* dp) { DirichletProcess* parent_dp = dp->parent; if (parent_dp == NULL) { return prior_likelihood(dp->hdp, fctr); } else { double parent_gamma = *(parent_dp->gamma); double likelihood = 0.0; double next_height_unobs_likelihood; int64_t num_parent_fctrs = stSet_size(parent_dp->factors); Factor** parent_fctrs = (Factor**) malloc(sizeof(Factor*) * num_parent_fctrs); #pragma omp parallel shared(likelihood,next_height_unobs_likelihood,parent_dp,num_parent_fctrs,parent_fctrs) { #pragma omp single nowait next_height_unobs_likelihood = unobserved_factor_likelihood(fctr, parent_dp); #pragma omp single { stSetIterator* parent_fctr_iter = stSet_getIterator(parent_dp->factors); for (int64_t i = 0; i < num_parent_fctrs; i++) { parent_fctrs[i] = (Factor*) stSet_getNext(parent_fctr_iter); } stSet_destructIterator(parent_fctr_iter); } double local_likelihood = 0.0; Factor* parent_fctr; #pragma omp for nowait for (int64_t i = 0; i < num_parent_fctrs; i++) { parent_fctr = parent_fctrs[i]; local_likelihood += stSet_size(parent_fctr->children) * data_pt_factor_parent_likelihood(fctr, parent_fctr); } #pragma omp critical likelihood += local_likelihood; } free(parent_fctrs); likelihood += parent_gamma * next_height_unobs_likelihood; likelihood /= (parent_gamma + (double) parent_dp->num_factor_children); return likelihood; } } //double unobserved_factor_likelihood(Factor* fctr, DirichletProcess* dp) { // DirichletProcess* parent_dp = dp->parent; // if (parent_dp == NULL) { // return prior_likelihood(dp->hdp, fctr); // } // else { // double parent_gamma = *(parent_dp->gamma); // double likelihood = 0.0; // // stSetIterator* parent_fctr_iter = stSet_getIterator(parent_dp->factors); // // Factor* parent_fctr = (Factor*) stSet_getNext(parent_fctr_iter); // double fctr_size; // while (parent_fctr != NULL) { // fctr_size = (double) stSet_size(parent_fctr->children); // likelihood += 
fctr_size * data_pt_factor_parent_likelihood(fctr, parent_fctr); // parent_fctr = (Factor*) stSet_getNext(parent_fctr_iter); // } // stSet_destructIterator(parent_fctr_iter); // // likelihood += parent_gamma * unobserved_factor_likelihood(fctr, parent_dp); // // likelihood /= (parent_gamma + (double) parent_dp->num_factor_children); // // return likelihood; // } //} double unobserved_factor_joint_log_likelihood(Factor* fctr, DirichletProcess* dp) { DirichletProcess* parent_dp = dp->parent; if (parent_dp == NULL) { return prior_joint_log_likelihood(dp->hdp, fctr); } else { double parent_gamma = *(parent_dp->gamma); double log_likelihood = MINUS_INF; int64_t num_parent_fctrs = stSet_size(parent_dp->factors); Factor** parent_fctrs = (Factor**) malloc(sizeof(Factor*) * num_parent_fctrs); double next_height_unobs_log_likelihood; #pragma omp parallel shared(log_likelihood,next_height_unobs_log_likelihood,parent_dp,num_parent_fctrs,parent_fctrs) { #pragma omp single nowait next_height_unobs_log_likelihood = unobserved_factor_joint_log_likelihood(fctr, parent_dp); #pragma omp single { stSetIterator* parent_fctr_iter = stSet_getIterator(parent_dp->factors); for (int64_t i = 0; i < num_parent_fctrs; i++) { parent_fctrs[i] = (Factor*) stSet_getNext(parent_fctr_iter); } stSet_destructIterator(parent_fctr_iter); } double local_log_likelihood = MINUS_INF; double log_fctr_size; Factor* parent_fctr; #pragma omp for nowait for (int64_t i = 0; i < num_parent_fctrs; i++) { parent_fctr = parent_fctrs[i]; log_fctr_size = log(stSet_size(parent_fctr->children)); local_log_likelihood = add_logs(local_log_likelihood, log_fctr_size + factor_parent_joint_log_likelihood(fctr, parent_fctr)); } #pragma omp critical log_likelihood = add_logs(log_likelihood, local_log_likelihood); } free(parent_fctrs); log_likelihood = add_logs(log_likelihood, log(parent_gamma) + next_height_unobs_log_likelihood); log_likelihood -= log(parent_gamma + parent_dp->num_factor_children); return log_likelihood; } } 
//double unobserved_factor_joint_log_likelihood(Factor* fctr, DirichletProcess* dp) { // DirichletProcess* parent_dp = dp->parent; // if (parent_dp == NULL) { // return prior_joint_log_likelihood(dp->hdp, fctr); // } // else { // double parent_gamma = *(parent_dp->gamma); // // double log_likelihood = MINUS_INF; // stSetIterator* parent_fctr_iter = stSet_getIterator(parent_dp->factors); // Factor* parent_fctr = (Factor*) stSet_getNext(parent_fctr_iter); // double log_fctr_size; // while (parent_fctr != NULL) { // log_fctr_size = log((double) stSet_size(parent_fctr->children)); // log_likelihood = add_logs(log_likelihood, // log_fctr_size + factor_parent_joint_log_likelihood(fctr, parent_fctr)); // parent_fctr = (Factor*) stSet_getNext(parent_fctr_iter); // } // stSet_destructIterator(parent_fctr_iter); // // log_likelihood = add_logs(log_likelihood, // log(parent_gamma) + unobserved_factor_joint_log_likelihood(fctr, parent_dp)); // // log_likelihood -= log(parent_gamma + (double) parent_dp->num_factor_children); // // return log_likelihood; // } //} DirichletProcess* new_dir_proc() { DirichletProcess* dp = (DirichletProcess*) malloc(sizeof(DirichletProcess)); dp->gamma = NULL; dp->depth = 0; dp->parent = NULL; dp->children = stList_construct(); dp->factors = stSet_construct(); dp->num_factor_children = 0; dp->cached_factor_mean = 0.0; dp->cached_factor_sum_sq_dev = 0.0; dp->cached_factor_size = 0; dp->base_factor_wt = 0.0; dp->posterior_predictive = NULL; dp->spline_slopes = NULL; dp->observed = false; return dp; } void clear_factor_tree(Factor* fctr) { if (fctr->children != NULL) { stSetIterator* child_fctr_iter = stSet_getIterator(fctr->children); Factor* child_fctr = (Factor*) stSet_getNext(child_fctr_iter); while (child_fctr != NULL) { clear_factor_tree(child_fctr); child_fctr = (Factor*) stSet_getNext(child_fctr_iter); } stSet_destructIterator(child_fctr_iter); } else { // note: this will trigger automatic destruction of parent factors destroy_factor(fctr); } 
} void destroy_dir_proc_factor_tree(DirichletProcess* dp) { if (stSet_size(dp->factors) == 0) { return; } stSetIterator* fctr_iter = stSet_getIterator(dp->factors); Factor* fctr = (Factor*) stSet_getNext(fctr_iter); while (fctr != NULL) { clear_factor_tree(fctr); fctr = (Factor*) stSet_getNext(fctr_iter); } stSet_destructIterator(fctr_iter); } void destroy_dir_proc(DirichletProcess* dp) { destroy_dir_proc_factor_tree(dp); stSet_destruct(dp->factors); if (dp->children != NULL) { stListIterator* st_iterator = stList_getIterator(dp->children); DirichletProcess* dp_child = (DirichletProcess*) stList_getNext(st_iterator); while (dp_child != NULL) { destroy_dir_proc(dp_child); dp_child = (DirichletProcess*) stList_getNext(st_iterator); } stList_destructIterator(st_iterator); stList_destruct(dp->children); } if (dp->parent != NULL) { stList_removeItem(dp->parent->children, (void*) dp); } free(dp->posterior_predictive); free(dp->spline_slopes); free(dp); } // fixed concentration parameters HierarchicalDirichletProcess* new_hier_dir_proc(int64_t num_dps, int64_t depth, double* gamma, double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length, double mu, double nu, double alpha, double beta) { if (nu <= 0.0) { fprintf(stderr, "nu parameter of Normal-Inverse Gamma distribution must be positive.\n"); exit(EXIT_FAILURE); } //if (alpha <= 0.0) { // fprintf(stderr, "alpha parameter of Normal-Inverse Gamma distribution must be positive.\n"); // exit(EXIT_FAILURE); //} if (beta <= 0.0) { fprintf(stderr, "beta parameter of Normal-Inverse Gamma distribution must be positive.\n"); exit(EXIT_FAILURE); } if (2 * alpha != (int64_t) 2 * alpha || alpha <= 1.0) { fprintf(stderr, "Normal-Inverse Gamma parameter 'alpha' must be integer or half-integer > 1.0.\n"); exit(EXIT_FAILURE); } if (gamma != NULL) { for (int64_t i = 0; i < depth; i++) { if (gamma[i] <= 0) { fprintf(stderr, "Concentration parameter gamma must be postive.\n"); exit(EXIT_FAILURE); } } } if (num_dps 
< 2) { fprintf(stderr, "Hierarchical Dirichlet process formalism requires >= 2 Dirichlet Processes.\n"); exit(EXIT_FAILURE); } double* grid = linspace(sampling_grid_start, sampling_grid_stop, sampling_grid_length); HierarchicalDirichletProcess* hdp = (HierarchicalDirichletProcess*) malloc(sizeof(HierarchicalDirichletProcess)); // normal-inverse gamma parameters hdp->mu = mu; hdp->nu = nu; hdp->two_alpha = 2.0 * alpha; hdp->beta = beta; hdp->gamma = gamma; hdp->depth = depth; hdp->finalized = false; hdp->num_dps = num_dps; DirichletProcess** dps = (DirichletProcess**) malloc(sizeof(DirichletProcess*) * num_dps); for (int64_t i = 0; i < num_dps; i++) { DirichletProcess* dp = new_dir_proc(); dp->id = i; dp->hdp = hdp; dps[i] = dp; } hdp->dps = dps; hdp->base_dp = NULL; hdp->sampling_grid = grid; hdp->grid_length = sampling_grid_length; hdp->samples_taken = 0; hdp->splines_finalized = false; hdp->data = NULL; hdp->data_pt_dp_id = NULL; hdp->data_length = 0; //hdp->log_sum_memo = new_log_sum_memo(); hdp->sample_gamma = false; hdp->gamma_alpha = NULL; hdp->gamma_beta = NULL; hdp->s_aux_vector = NULL; hdp->w_aux_vector = NULL; hdp->distr_metric_memos = stSet_construct2(&destroy_distr_metric_memo); return hdp; } // Gamma prior on concentration parameters HierarchicalDirichletProcess* new_hier_dir_proc_2(int64_t num_dps, int64_t depth, double* gamma_alpha, double* gamma_beta, double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length, double mu, double nu, double alpha, double beta) { for (int64_t i = 0; i < depth; i++) { if (gamma_alpha[i] <= 0.0) { fprintf(stderr, "alpha parameter of Gamma distribution must be positive.\n"); exit(EXIT_FAILURE); } if (gamma_beta[i] <= 0.0) { fprintf(stderr, "beta parameter of Gamma distribution must be positive.\n"); exit(EXIT_FAILURE); } } HierarchicalDirichletProcess* hdp = new_hier_dir_proc(num_dps, depth, NULL, sampling_grid_start, sampling_grid_stop, sampling_grid_length, mu, nu, alpha, beta); 
hdp->sample_gamma = true;
    hdp->gamma_alpha = gamma_alpha;
    hdp->gamma_beta = gamma_beta;
    // auxiliary variables for the gamma resampling scheme, one pair per DP
    double* w = (double*) malloc(sizeof(double) * num_dps);
    hdp->w_aux_vector = w;
    bool* s = (bool*) malloc(sizeof(bool) * num_dps);
    hdp->s_aux_vector = s;
    for (int64_t i = 0; i < num_dps; i++) {
        w[i] = 1.0;
        s[i] = false;
    }
    // init to prior expected value
    double* gamma = (double*) malloc(sizeof(double) * depth);
    hdp->gamma = gamma;
    for (int64_t i = 0; i < depth; i++) {
        gamma[i] = gamma_alpha[i] / gamma_beta[i];
    }
    return hdp;
}

// Free the HDP and everything it owns (DP tree, gamma arrays, data arrays,
// sampling grid, metric memos).
// NOTE(review): this frees hdp->gamma/gamma_alpha/gamma_beta/data even when they
// were caller-supplied -- ownership is assumed to transfer to the HDP; confirm
// callers do not free them too.
void destroy_hier_dir_proc(HierarchicalDirichletProcess* hdp) {
    destroy_dir_proc(hdp->base_dp);
    free(hdp->gamma);
    free(hdp->data);
    free(hdp->data_pt_dp_id);
    free(hdp->dps);
    free(hdp->sampling_grid);
    //destroy_log_sum_memo(hdp->log_sum_memo);
    free(hdp->gamma_alpha);
    free(hdp->gamma_beta);
    free(hdp->w_aux_vector);
    free(hdp->s_aux_vector);
    stSet_destruct(hdp->distr_metric_memos);
    free(hdp);
}

// Find the single parentless DP and record it as the base; error out if there
// are zero or more than one.
void establish_base_dp(HierarchicalDirichletProcess* hdp) {
    DirichletProcess** dps = hdp->dps;
    int64_t num_dps = hdp->num_dps;
    DirichletProcess* dp;
    for (int64_t i = 0; i < num_dps; i++) {
        dp = dps[i];
        if (dp->parent == NULL) {
            if (hdp->base_dp == NULL) {
                hdp->base_dp = dp;
            }
            else {
                fprintf(stderr, "Hierarchical Dirichlet process contains orphaned Dirichlet process.\n");
                exit(EXIT_FAILURE);
            }
        }
    }
    if (hdp->base_dp == NULL) {
        fprintf(stderr, "Hierarchical Dirichlet process does not contain base Dirichlet process.\n");
        exit(EXIT_FAILURE);
    }
}

// DFS to verify that Dirichlet processes follow tree structure
// (i.e. no DP is reachable from the base along two different paths)
void verify_dp_tree(HierarchicalDirichletProcess* hdp) {
    int64_t num_dps = hdp->num_dps;
    bool* visited = (bool*) malloc(sizeof(bool) * num_dps);
    for (int64_t i = 0; i < num_dps; i++) {
        visited[i] = false;
    }
    DirichletProcess* base_dp = hdp->base_dp;
    stList* stck = stList_construct();  // explicit DFS stack
    stList_append(stck, (void*) base_dp);
    DirichletProcess* dp;
    while (stList_length(stck) > 0) {
        dp = (DirichletProcess*) stList_pop(stck);
        if (visited[dp->id]) {
            fprintf(stderr, "Hierarchical Dirichlet process does not have tree structure.\n");
            exit(EXIT_FAILURE);
        }
        visited[dp->id] = true;
        stListIterator* child_iter = stList_getIterator(dp->children);
        DirichletProcess* child = (DirichletProcess*) stList_getNext(child_iter);
        while (child != NULL) {
            stList_append(stck, (void*) child);
            child = (DirichletProcess*) stList_getNext(child_iter);
        }
        stList_destructIterator(child_iter);
    }
    stList_destruct(stck);
    free(visited);
}

// Check every leaf sits at `leaf_depth`; as a side effect, records each DP's
// depth and points its gamma at the per-depth entry of hdp->gamma.
void verify_tree_depth(HierarchicalDirichletProcess* hdp, DirichletProcess* dp, int64_t current_depth,
                       int64_t leaf_depth) {
    dp->gamma = &(hdp->gamma[current_depth]);
    dp->depth = current_depth;
    if (stList_length(dp->children) == 0) {
        if (current_depth != leaf_depth) {
            fprintf(stderr, "Hierarchical Dirichlet process has leaf Dirichlet process at incorrect depth.\n");
            exit(EXIT_FAILURE);
        }
    }
    else {
        stListIterator* st_iterator = stList_getIterator(dp->children);
        DirichletProcess* child = (DirichletProcess*) stList_getNext(st_iterator);
        while (child != NULL) {
            verify_tree_depth(hdp, child, current_depth + 1, leaf_depth);
            child = (DirichletProcess*) stList_getNext(st_iterator);
        }
        stList_destructIterator(st_iterator);
    }
}

// Validate that every data point maps to an existing *leaf* DP.
void verify_valid_dp_assignments(HierarchicalDirichletProcess* hdp) {
    int64_t length = hdp->data_length;
    int64_t num_dps = hdp->num_dps;
    DirichletProcess** dps = hdp->dps;
    int64_t* dp_ids = hdp->data_pt_dp_id;
    int64_t id;
    DirichletProcess* dp;
    for (int64_t i = 0; i < length; i++) {
        id = dp_ids[i];
        if (id >= num_dps || id < 0) {
            fprintf(stderr, "Data point is assigned to non-existent Dirichlet process.\n");
            exit(EXIT_FAILURE);
        }
        dp = dps[id];
        if (stList_length(dp->children) > 0) {
            fprintf(stderr, "Data point cannot be assigned to non-leaf Dirichlet process.\n");
            exit(EXIT_FAILURE);
        }
    }
}

// Flag every DP on the path from each data point's leaf DP up to the root as
// observed, allocating a zeroed posterior-predictive grid for each.
void mark_observed_dps(HierarchicalDirichletProcess* hdp) {
    // mark newly observed dps
    int64_t length = hdp->data_length;
    DirichletProcess** dps = hdp->dps;
    int64_t* dp_ids = hdp->data_pt_dp_id;
    int64_t grid_length = hdp->grid_length;
    DirichletProcess* dp;
    double* pdf;
    int64_t id;
    for (int64_t i = 0; i < length; i++) {
        id = dp_ids[i];
        dp = dps[id];
        while (dp != NULL) {
            if (dp->observed) {
                break;  // ancestors of an observed DP are already marked
            }
            dp->observed = true;
            pdf = (double*) malloc(sizeof(double) * grid_length);
            dp->posterior_predictive = pdf;
            for (int64_t j = 0; j < grid_length; j++) {
                pdf[j] = 0.0;
            }
            dp = dp->parent;
        }
    }
}

// 1-D k-means (Lloyd's algorithm) with random restarts; keeps the restart with
// the lowest total absolute deviation.  Outputs are malloc'd into
// *assignments_out / *centroids_out; caller frees.
void k_means(int64_t k, double* data, int64_t length, int64_t max_iters, int64_t num_restarts,
             int64_t** assignments_out, double** centroids_out) {
    if (k > length) {
        fprintf(stderr, "Must have at least as many data points as clusters.\n");
        exit(EXIT_FAILURE);
    }
    if (k <= 0) {
        fprintf(stderr, "Must have at least one cluster.\n");
        exit(EXIT_FAILURE);
    }
    int64_t* best_assignments = NULL;
    double* best_centroids = NULL;
    double best_sum_dist = DBL_MAX;
    int64_t* centroid_counts = (int64_t*) malloc(sizeof(int64_t) * k);
    for (int64_t restart = 0; restart < num_restarts; restart++) {
        double* centroids = (double*) malloc(sizeof(double) * k);
        int64_t* assignments = (int64_t*) malloc(sizeof(int64_t) * length);
        for (int64_t i = 0; i < length; i++) {
            assignments[i] = -1;
        }
        // Forgy init: seed centroids from random data points (duplicates possible)
        for (int64_t i = 0; i < k; i++) {
            centroids[i] = data[rand() % length];
        }
        for (int64_t iter = 0; iter < max_iters; iter++) {
            bool converged = true;
            // assignment step
            for (int64_t i = 0; i < length; i++) {
                double data_pt = data[i];
                double dist = fabs(data_pt - centroids[0]);
                double closest_dist = dist;
                int64_t closest_centroid = 0;
                for (int64_t j = 1; j < k; j++) {
                    dist = fabs(data_pt - centroids[j]);
                    if (dist < closest_dist) {
                        closest_centroid = j;
                        closest_dist = dist;
                    }
                }
                if (assignments[i] != closest_centroid) {
                    converged = false;
                }
                assignments[i] = closest_centroid;
            }
            if (converged) {
                break;
            }
            // update step
            for (int64_t i = 0; i < k; i++) {
                centroids[i] = 0.0;
                centroid_counts[i] = 0;
            }
            int64_t assignment;
            for (int64_t i = 0; i < length; i++) {
                assignment = assignments[i];
                centroids[assignment] += data[i];
                centroid_counts[assignment]++;
            }
            for (int64_t i = 0; i < k; i++) {
                if
(centroid_counts[i] > 0) {
                    centroids[i] /= centroid_counts[i];
                }
                else {
                    // empty cluster: re-seed from a random data point
                    centroids[i] = data[rand() % length];
                }
            }
        }
        // score this restart by total absolute deviation
        double sum_dist = 0.0;
        for (int64_t i = 0; i < length; i++) {
            sum_dist += fabs(data[i] - centroids[assignments[i]]);
        }
        if (sum_dist < best_sum_dist) {
            free(best_centroids);
            free(best_assignments);
            best_centroids = centroids;
            best_assignments = assignments;
            best_sum_dist = sum_dist;
        }
        else {
            free(centroids);
            free(assignments);
        }
    }
    free(centroid_counts);
    *centroids_out = best_centroids;
    *assignments_out = best_assignments;
}

// (A commented-out, abandoned draft of fill_k_means_factor_bank was removed
// here; see version control history.)

// Record the depth of `dp` and (recursively) of its subtree into dp_depths,
// indexed by DP id.
void get_dp_depths_internal(int64_t* dp_depths, DirichletProcess* dp, int64_t depth) {
    dp_depths[dp->id] = depth;
    stListIterator* dp_child_iter = stList_getIterator(dp->children);
    DirichletProcess* dp_child = stList_getNext(dp_child_iter);
    while (dp_child != NULL) {
        get_dp_depths_internal(dp_depths, dp_child, depth + 1);
        dp_child = stList_getNext(dp_child_iter);
    }
    stList_destructIterator(dp_child_iter);
}

// Return a malloc'd array mapping DP id -> depth in the tree (base = 0).
int64_t* get_dp_depths(HierarchicalDirichletProcess* hdp) {
    int64_t* dp_depths = (int64_t*) malloc(sizeof(int64_t) * hdp->num_dps);
    get_dp_depths_internal(dp_depths, hdp->base_dp, 0);
    return dp_depths;
}

// Alternative factor initialization: cluster the data with k-means at each tree
// level (clustering the previous level's centroids as you go up), then wire the
// resulting clusters into a factor tree.  The per-level cluster count is the
// CRP-style expected number of tables, gamma*log(1 + n/gamma), split across the
// DPs at that level.  (Currently unused by finalize_data.)
void k_means_init_factors(HierarchicalDirichletProcess* hdp, int64_t max_iters, int64_t num_restarts) {
    int64_t tree_depth = hdp->depth;
    double* gamma_params = hdp->gamma;
    int64_t num_data = hdp->data_length;
    int64_t* data_pt_dp_id = hdp->data_pt_dp_id;
    int64_t num_dps = hdp->num_dps;
    int64_t* dp_depths = get_dp_depths(hdp);
    int64_t* depth_dp_counts = (int64_t*) malloc(sizeof(int64_t) * tree_depth);
    for (int64_t i = 0; i < tree_depth; i++) {
        depth_dp_counts[i] = 0;
    }
    for (int64_t i = 0; i < num_dps; i++) {
        depth_dp_counts[dp_depths[i]]++;
    }
    // expected_num_factors[i] = expected factors per DP, i levels above the leaves
    int64_t* expected_num_factors = (int64_t*) malloc(sizeof(int64_t) * tree_depth);
    double stat_expect = gamma_params[0] * log(1.0 + num_data / gamma_params[0]);
    expected_num_factors[0] = ((int64_t) stat_expect / depth_dp_counts[tree_depth - 1]) + 1;
    for (int64_t i = 1; i < tree_depth; i++) {
        int64_t num_lower_factors = expected_num_factors[i - 1];
        stat_expect = gamma_params[i] * log(1.0 + num_lower_factors / gamma_params[i]);
        expected_num_factors[i] = ((int64_t) stat_expect / depth_dp_counts[tree_depth - i - 1]) + 1;
        if (expected_num_factors[i] > num_lower_factors) {
            expected_num_factors[i] = num_lower_factors;
        }
    }
    // hierarchical clustering: level 0 clusters the data, each higher level
    // clusters the centroids of the level below
    int64_t** cluster_assignments = (int64_t**) malloc(sizeof(int64_t*) * tree_depth);
    double** factor_centers = (double**) malloc(sizeof(double*) * tree_depth);
    k_means(expected_num_factors[0], hdp->data, num_data, max_iters, num_restarts,
            &cluster_assignments[0], &factor_centers[0]);
    for (int64_t i = 1; i < tree_depth; i++) {
        k_means(expected_num_factors[i], factor_centers[i - 1], expected_num_factors[i - 1],
                max_iters, num_restarts, &cluster_assignments[i], &factor_centers[i]);
    }
    // lazily-filled bank of factors per DP, indexed by cluster number
    Factor*** fctr_bank = (Factor***) malloc(sizeof(Factor**) * num_dps);
    int64_t num_potential_factors;
    int64_t depth;
    Factor** dp_fctr_bank;
    for (int64_t i = 0; i < num_dps; i++) {
        depth = dp_depths[i];
        num_potential_factors = expected_num_factors[tree_depth - depth - 1];
        dp_fctr_bank = (Factor**) malloc(sizeof(Factor*) * num_potential_factors);
        for (int64_t j = 0; j < num_potential_factors; j++) {
            dp_fctr_bank[j] = NULL;
        }
        fctr_bank[i] = dp_fctr_bank;
    }
    // attach each data point to its cluster's factor in its leaf DP
    DirichletProcess** dps = hdp->dps;
    DirichletProcess* dp;
    int64_t dp_id;
    Factor* data_pt_fctr;
    Factor* parent_fctr;
    int64_t parent_fctr_num;
    for (int64_t i = 0; i < num_data; i++) {
        data_pt_fctr = new_data_pt_factor(hdp, i);
        dp_id = data_pt_dp_id[i];
        dp = dps[dp_id];
        parent_fctr_num = cluster_assignments[0][i];
        parent_fctr = fctr_bank[dp_id][parent_fctr_num];
        if (parent_fctr == NULL) {
            parent_fctr = new_middle_factor(dps[dp_id]);
            fctr_bank[dp_id][parent_fctr_num] = parent_fctr;
        }
        data_pt_fctr->parent = parent_fctr;
        stSet_insert(parent_fctr->children, (void*) data_pt_fctr);
        (dp->num_factor_children)++;
    }
    DirichletProcess* parent_dp;
    Factor** parent_dp_fctr_bank;
    int64_t* assignments;
    int64_t expected_num;
    Factor* fctr;
    // could make this faster with recursion instead of multiple passes
    for (int64_t depth = tree_depth - 1; depth > 0; depth--) {
        assignments = cluster_assignments[tree_depth - depth];
        expected_num = expected_num_factors[tree_depth - depth - 1];
        for (int64_t i = 0; i < num_dps; i++) {
            if (dp_depths[i] != depth) {
                continue;
            }
            dp = dps[i];
            parent_dp = dp->parent;
            dp_fctr_bank = fctr_bank[i];
            parent_dp_fctr_bank = fctr_bank[parent_dp->id];
            for (int64_t j = 0; j < expected_num; j++) {
                fctr = dp_fctr_bank[j];
                if (fctr == NULL) {
                    continue;
                }
                parent_fctr_num = assignments[j];
                parent_fctr = parent_dp_fctr_bank[parent_fctr_num];
                if (parent_fctr == NULL) {
                    if (depth > 1) {
                        parent_fctr = new_middle_factor(parent_dp);
                    }
                    else {
                        parent_fctr = new_base_factor(hdp);
                    }
                    parent_dp_fctr_bank[parent_fctr_num] = parent_fctr;
                }
                fctr->parent = parent_fctr;
                stSet_insert(parent_fctr->children, (void*) fctr);
                (parent_dp->num_factor_children)++;
            }
        }
    }
    // seed the base factors' sufficient statistics from their data
    double mean, sum_sq_devs;
    int64_t num_fctr_data;
    stSetIterator* base_fctr_iter = stSet_getIterator(hdp->base_dp->factors);
    Factor* base_fctr = stSet_getNext(base_fctr_iter);
    while (base_fctr != NULL) {
        get_factor_stats(base_fctr, &mean, &sum_sq_devs, &num_fctr_data);
        add_update_base_factor_params(base_fctr, mean, sum_sq_devs, (double) num_fctr_data);
        base_fctr = stSet_getNext(base_fctr_iter);
    }
    stSet_destructIterator(base_fctr_iter);
    for (int64_t i = 0; i < num_dps; i++) {
        free(fctr_bank[i]);
    }
    for (int64_t i = 0; i < tree_depth; i++) {
        free(cluster_assignments[i]);
        free(factor_centers[i]);
    }
    free(cluster_assignments);
    free(factor_centers);
    free(expected_num_factors);
    free(fctr_bank);
    free(dp_depths);
    free(depth_dp_counts);
}

// Build the initial factor chain for `dp` (one middle factor per observed DP),
// hanging it under `parent_fctr`; at leaf DPs, attach that DP's data point
// factors from data_pt_fctr_lists.
void init_factors_internal(DirichletProcess* dp, Factor* parent_fctr, stList** data_pt_fctr_lists) {
    if (!dp->observed) {
        return;  // unobserved subtrees get no factors
    }
    Factor* fctr = new_middle_factor(dp);
    fctr->parent = parent_fctr;
    stSet_insert(parent_fctr->children, (void*) fctr);
    if (stList_length(dp->children) == 0) {
        // leaf DP: adopt all of its data point factors
        stSet* children = fctr->children;
        stListIterator* data_pt_fctr_iter = stList_getIterator(data_pt_fctr_lists[dp->id]);
        Factor* data_pt_fctr = (Factor*) stList_getNext(data_pt_fctr_iter);
        while (data_pt_fctr != NULL) {
            data_pt_fctr->parent = fctr;
            stSet_insert(children, (void*) data_pt_fctr);
            data_pt_fctr = (Factor*) stList_getNext(data_pt_fctr_iter);
        }
        stList_destructIterator(data_pt_fctr_iter);
    }
    else {
        stListIterator* child_dp_iter = stList_getIterator(dp->children);
        DirichletProcess* child_dp = (DirichletProcess*) stList_getNext(child_dp_iter);
        while (child_dp != NULL) {
            init_factors_internal(child_dp, fctr, data_pt_fctr_lists);
            child_dp = (DirichletProcess*) stList_getNext(child_dp_iter);
        }
        stList_destructIterator(child_dp_iter);
    }
}

// Default factor initialization: every data point under a single chain of
// factors leading to one base factor (one middle factor per observed DP).
void init_factors(HierarchicalDirichletProcess* hdp) {
    int64_t data_length = hdp->data_length;
    int64_t* data_pt_dp_id = hdp->data_pt_dp_id;
    DirichletProcess** dps = hdp->dps;
    int64_t num_dps = hdp->num_dps;
    // bucket the data point factors by leaf DP id
    stList** data_pt_fctr_lists = (stList**) malloc(sizeof(stList*) * num_dps);
    for (int64_t i = 0; i < num_dps; i++) {
        data_pt_fctr_lists[i] = NULL;
    }
    Factor* data_pt_fctr;
    int64_t dp_id;
    stList* fctr_list;
    for (int64_t data_pt_idx = 0; data_pt_idx < data_length; data_pt_idx++) {
        dp_id = data_pt_dp_id[data_pt_idx];
        fctr_list = data_pt_fctr_lists[dp_id];
        if (fctr_list == NULL) {
            fctr_list = stList_construct();
            data_pt_fctr_lists[dp_id] = fctr_list;
        }
        data_pt_fctr = new_data_pt_factor(hdp, data_pt_idx);
        stList_append(fctr_list, (void*) data_pt_fctr);
    }
    DirichletProcess* base_dp =
hdp->base_dp;
    Factor* root_factor = new_base_factor(hdp);
    stListIterator* child_dp_iter = stList_getIterator(base_dp->children);
    DirichletProcess* child_dp = (DirichletProcess*) stList_getNext(child_dp_iter);
    while (child_dp != NULL) {
        init_factors_internal(child_dp, root_factor, data_pt_fctr_lists);
        child_dp = (DirichletProcess*) stList_getNext(child_dp_iter);
    }
    stList_destructIterator(child_dp_iter);
    for (int64_t i = 0; i < num_dps; i++) {
        if (data_pt_fctr_lists[i] != NULL) {
            stList_destruct(data_pt_fctr_lists[i]);
        }
    }
    free(data_pt_fctr_lists);
    // seed the single base factor's sufficient statistics from all data
    double mean, sum_sq_devs;
    int64_t num_data;
    get_factor_stats(root_factor, &mean, &sum_sq_devs, &num_data);
    add_update_base_factor_params(root_factor, mean, sum_sq_devs, (double) num_data);
    // recompute each DP's total count of factor children
    int64_t fctr_child_count;
    DirichletProcess* dp;
    Factor* fctr;
    stSetIterator* fctr_iter;
    for (int64_t i = 0; i < num_dps; i++) {
        dp = dps[i];
        fctr_child_count = 0;
        fctr_iter = stSet_getIterator(dp->factors);
        fctr = (Factor*) stSet_getNext(fctr_iter);
        while (fctr != NULL) {
            fctr_child_count += stSet_size(fctr->children);
            fctr = (Factor*) stSet_getNext(fctr_iter);
        }
        stSet_destructIterator(fctr_iter);
        dp->num_factor_children = fctr_child_count;
    }
}

// Validate the data/DP assignments and build the initial factor state.
void finalize_data(HierarchicalDirichletProcess* hdp) {
    verify_valid_dp_assignments(hdp);
    mark_observed_dps(hdp);
    init_factors(hdp);
    //k_means_init_factors(hdp, 500, 5);
}

// Declare `parent_id` the parent of `child_id`; only legal before the structure
// is finalized and while the child has no parent.
void set_dir_proc_parent(HierarchicalDirichletProcess* hdp, int64_t child_id, int64_t parent_id) {
    if (hdp->finalized) {
        fprintf(stderr, "Hierarchical Dirichlet process structure has been finalized. Cannot set new parent.\n");
        exit(EXIT_FAILURE);
    }
    if (child_id >= hdp->num_dps || parent_id >= hdp->num_dps || child_id < 0 || parent_id < 0) {
        fprintf(stderr, "Dirichlet process ID does not exist.\n");
        exit(EXIT_FAILURE);
    }
    DirichletProcess* child_dp = hdp->dps[child_id];
    DirichletProcess* parent_dp = hdp->dps[parent_id];
    if (child_dp->parent != NULL) {
        fprintf(stderr, "Dirichlet process already has parent.\n");
        exit(EXIT_FAILURE);
    }
    child_dp->parent = parent_dp;
    stList_append(parent_dp->children, (void*) child_dp);
}

// Hand a data array and its per-point leaf-DP ids to the HDP (pointers are
// stored, not copied).  If the structure is already finalized, the factor state
// is initialized immediately.
void pass_data_to_hdp(HierarchicalDirichletProcess* hdp, double* data, int64_t* dp_ids, int64_t length) {
    if (hdp->data != NULL) {
        fprintf(stderr, "Hierarchical Dirichlet process must be reset before passing new data.\n");
        exit(EXIT_FAILURE);
    }
    hdp->data = data;
    hdp->data_pt_dp_id = dp_ids;
    hdp->data_length = length;
    if (hdp->finalized) {
        finalize_data(hdp);
    }
}

// Lock in the DP tree: identify the base, check tree-ness and leaf depth, and
// (if data is already present) initialize the factors.
void finalize_hdp_structure(HierarchicalDirichletProcess* hdp) {
    establish_base_dp(hdp);
    verify_dp_tree(hdp);
    verify_tree_depth(hdp, hdp->base_dp, 0, hdp->depth - 1);
    if (hdp->data != NULL) {
        finalize_data(hdp);
    }
    hdp->finalized = true;
}

// Invalidate all cached pairwise distribution distances (-1.0 = "not computed";
// the memo matrix stores the strict upper triangle of the symmetric matrix).
void reset_distr_metric_memo(DistributionMetricMemo* memo) {
    int64_t num_distrs = memo->num_distrs;
    int64_t num_entries = ((num_distrs - 1) * num_distrs) / 2;
    double* memo_entries = memo->memo_matrix;
    for (int64_t i = 0; i < num_entries; i++) {
        memo_entries[i] = -1.0;
    }
}

// Drop all data and derived state (factors, posterior predictives, splines,
// metric memos) so new data can be passed in; resampled gammas and their
// auxiliary variables are reset to their initial values.
void reset_hdp_data(HierarchicalDirichletProcess* hdp) {
    if (hdp->data == NULL && hdp->data_pt_dp_id == NULL) {
        return;
    }
    free(hdp->data);
    hdp->data = NULL;
    free(hdp->data_pt_dp_id);
    hdp->data_pt_dp_id = NULL;
    DirichletProcess** dps = hdp->dps;
    int64_t num_dps = hdp->num_dps;
    destroy_dir_proc_factor_tree(hdp->base_dp);
    DirichletProcess* dp;
    for (int64_t i = 0; i < num_dps; i++) {
        dp = dps[i];
        free(dp->posterior_predictive);
        dp->posterior_predictive = NULL;
        free(dp->spline_slopes);
        dp->spline_slopes = NULL;
        dp->observed = false;
    }
    stSetIterator* memo_iter = stSet_getIterator(hdp->distr_metric_memos);
    DistributionMetricMemo* memo = stSet_getNext(memo_iter);
    while (memo != NULL) {
        reset_distr_metric_memo(memo);
        memo = stSet_getNext(memo_iter);
    }
    stSet_destructIterator(memo_iter);
    hdp->splines_finalized = false;
    hdp->samples_taken = 0;
    if (hdp->sample_gamma) {
        // restore gammas to their prior means and reset the aux variables
        double* gamma = hdp->gamma;
        double* gamma_alpha = hdp->gamma_alpha;
        double* gamma_beta = hdp->gamma_beta;
        for (int64_t depth = 0; depth < hdp->depth; depth++) {
            gamma[depth] = gamma_alpha[depth] / gamma_beta[depth];
        }
        double* w = hdp->w_aux_vector;
        bool* s = hdp->s_aux_vector;
        for (int64_t i = 0; i < num_dps; i++) {
            w[i] = 1.0;
            s[i] = false;
        }
    }
}

// Detach `fctr` from its parent for Gibbs resampling: remove it from the parent
// (destroying the parent if it becomes empty), subtract its sufficient
// statistics from the base factor (if that still exists), and cache the stats
// on fctr's DP for the subsequent re-assignment.
void unassign_from_parent(Factor* fctr) {
    if (fctr->factor_type == BASE) {
        fprintf(stderr, "Cannot unassign base factor's parent.\n");
        exit(EXIT_FAILURE);
    }
    Factor* parent = fctr->parent;
    Factor* base_fctr = get_base_factor(parent);
    DirichletProcess* base_dp = base_fctr->dp;
    stSet_remove(parent->children, (void*) fctr);
    fctr->parent = NULL;
    (parent->dp->num_factor_children)--;
    if (stSet_size(parent->children) == 0) {
        destroy_factor(parent);
    }
    int64_t num_reassign;
    double mean_reassign;
    double sum_sq_devs;
    get_factor_stats(fctr, &mean_reassign, &sum_sq_devs, &num_reassign);
    // check to see if base factor has been destroyed
    if (stSet_search(base_dp->factors, (void*) base_fctr) != NULL) {
        remove_update_base_factor_params(base_fctr, mean_reassign, sum_sq_devs, (double) num_reassign);
    }
    DirichletProcess* dp = fctr->dp;
    if (dp != NULL) {
        dp->cached_factor_mean = mean_reassign;
        dp->cached_factor_size = num_reassign;
        dp->cached_factor_sum_sq_dev = sum_sq_devs;
    }
}

// Attach `fctr` under `parent`, optionally folding its (cached) sufficient
// statistics into the base factor's parameters.
void assign_to_parent(Factor* fctr, Factor* parent, bool update_params) {
    if (fctr->factor_type == BASE) {
        fprintf(stderr, "Cannot assign base factor to a parent.\n");
        exit(EXIT_FAILURE);
    }
    if (parent->factor_type == DATA_PT) {
        fprintf(stderr, "Cannot assign data point factor to be parent.\n");
        exit(EXIT_FAILURE);
    }
    fctr->parent = parent;
    stSet_insert(parent->children,
(void*) fctr); (parent->dp->num_factor_children)++; Factor* base_fctr = get_base_factor(parent); if (!update_params) { return; } if (fctr->factor_type == DATA_PT) { double data_pt = get_factor_data_pt(fctr); add_update_base_factor_params(base_fctr, data_pt, 0.0, 1.0); } else { DirichletProcess* dp = fctr->dp; add_update_base_factor_params(base_fctr, dp->cached_factor_mean, dp->cached_factor_sum_sq_dev, (double) dp->cached_factor_size); } } //Factor* sample_from_data_pt_factor(Factor* fctr, DirichletProcess* dp) { // if (fctr->factor_type != DATA_PT) { // fprintf(stderr, "Attempted a data point factor sample from non-data point factor.\n"); // exit(EXIT_FAILURE); // } // // stSet* pool = dp->factors; // int64_t num_fctrs = stSet_size(pool); // // Factor** fctr_order = (Factor**) malloc(sizeof(Factor*) * num_fctrs); // // double* cdf = (double*) malloc(sizeof(double) * (num_fctrs + 1)); // double cumul = 0.0; // // stSetIterator* pool_iter = stSet_getIterator(pool); // Factor* fctr_option; // double fctr_size; // for (int64_t i = 0; i < num_fctrs; i++) { // fctr_option = (Factor*) stSet_getNext(pool_iter); // fctr_order[i] = fctr_option; // // fctr_size = (double) stSet_size(fctr_option->children); // cumul += fctr_size * data_pt_factor_parent_likelihood(fctr, fctr_option); // cdf[i] = cumul; // } // stSet_destructIterator(pool_iter); // // double gamma_param = *(dp->gamma); // cumul += gamma_param * unobserved_factor_likelihood(fctr, dp); // cdf[num_fctrs] = cumul; // // int64_t choice_idx = bisect_left(rand_uniform(cumul), cdf, num_fctrs + 1); // // Factor* fctr_choice; // if (choice_idx == num_fctrs) { // free(fctr_order); // DirichletProcess* parent_dp = dp->parent; // if (parent_dp == NULL) { // fctr_choice = new_base_factor(dp->hdp); // } // else { // fctr_choice = new_middle_factor(dp); // Factor* new_fctr_parent = sample_from_data_pt_factor(fctr, parent_dp); // assign_to_parent(fctr_choice, new_fctr_parent, false); // } // } // else { // fctr_choice = 
fctr_order[choice_idx]; // free(fctr_order); // } // // return fctr_choice; //} Factor* sample_from_data_pt_factor(Factor* fctr, DirichletProcess* dp) { if (fctr->factor_type != DATA_PT) { fprintf(stderr, "Attempted a data point factor sample from non-data point factor.\n"); exit(EXIT_FAILURE); } stSet* pool = dp->factors; int64_t num_fctrs = stSet_size(pool); Factor** fctr_order = (Factor**) malloc(sizeof(Factor*) * num_fctrs); stSetIterator* pool_iter = stSet_getIterator(pool); for (int64_t i = 0; i < num_fctrs; i++) { Factor* fctr_option = (Factor*) stSet_getNext(pool_iter); fctr_order[i] = fctr_option; } stSet_destructIterator(pool_iter); double* probs = (double*) malloc(sizeof(double) * num_fctrs); double new_fctr_prob; #pragma omp parallel shared(new_fctr_prob,probs) { #pragma omp single nowait new_fctr_prob = (*(dp->gamma)) * unobserved_factor_likelihood(fctr, dp); Factor* fctr_option; #pragma omp for for (int64_t i = 0; i < num_fctrs; i++) { fctr_option = fctr_order[i]; probs[i] = stSet_size(fctr_option->children) * data_pt_factor_parent_likelihood(fctr, fctr_option); } } double* cdf = (double*) malloc(sizeof(double) * (num_fctrs + 1)); parallel_cdf(cdf, probs, num_fctrs, 10); cdf[num_fctrs] = cdf[num_fctrs - 1] + new_fctr_prob; int64_t choice_idx = bisect_left(rand_uniform(cdf[num_fctrs]), cdf, num_fctrs + 1); Factor* fctr_choice; if (choice_idx == num_fctrs) { free(fctr_order); DirichletProcess* parent_dp = dp->parent; if (parent_dp == NULL) { fctr_choice = new_base_factor(dp->hdp); } else { fctr_choice = new_middle_factor(dp); Factor* new_fctr_parent = sample_from_data_pt_factor(fctr, parent_dp); assign_to_parent(fctr_choice, new_fctr_parent, false); } } else { fctr_choice = fctr_order[choice_idx]; free(fctr_order); } return fctr_choice; } //Factor* sample_from_middle_factor(Factor* fctr, DirichletProcess* dp) { // if (fctr->factor_type != MIDDLE) { // fprintf(stderr, "Attempted a middle factor sample from non-middle factor.\n"); // exit(EXIT_FAILURE); 
// }
//
//    stSet* pool = dp->factors;
//    int64_t num_fctrs = stSet_size(pool);
//    int64_t num_choices = num_fctrs + 1;
//
//    Factor** fctr_order = (Factor**) malloc(sizeof(Factor*) * num_fctrs);
//    double* log_probs = (double*) malloc(sizeof(double) * num_choices);
//
//    stSetIterator* pool_iter = stSet_getIterator(pool);
//    for (int64_t i = 0; i < num_fctrs; i++) {
//        Factor* fctr_option = (Factor*) stSet_getNext(pool_iter);
//        fctr_order[i] = fctr_option;
//        log_probs[i] = log((double) stSet_size(fctr_option->children))
//                       + factor_parent_joint_log_likelihood(fctr, fctr_option);
//    }
//    stSet_destructIterator(pool_iter);
//
//    log_probs[num_fctrs] = log(*(dp->gamma)) + unobserved_factor_joint_log_likelihood(fctr, dp);
//
//    double* cdf = (double*) malloc(sizeof(double) * num_choices);
//    double cumul = 0.0;
//    double normalizing_const = max(log_probs, num_choices);
//
//    for (int64_t i = 0; i < num_choices; i++) {
//        cumul += exp(log_probs[i] - normalizing_const);
//        cdf[i] = cumul;
//    }
//
//    free(log_probs);
//
//    int64_t choice_idx = bisect_left(rand_uniform(cumul), cdf, num_choices);
//    free(cdf);
//
//    Factor* fctr_choice;
//    if (choice_idx == num_fctrs) {
//        free(fctr_order);
//        DirichletProcess* parent_dp = dp->parent;
//        if (parent_dp == NULL) {
//            fctr_choice = new_base_factor(dp->hdp);
//        }
//        else {
//            fctr_choice = new_middle_factor(dp);
//            Factor* new_fctr_parent = sample_from_middle_factor(fctr, parent_dp);
//            assign_to_parent(fctr_choice, new_fctr_parent, false);
//        }
//    }
//    else {
//        fctr_choice = fctr_order[choice_idx];
//        free(fctr_order);
//    }
//
//    return fctr_choice;
//}

// Gibbs-sample a (possibly new) parent for a MIDDLE factor.  Same scheme as the
// data-point version above, but weights are computed in log space (joint likelihoods
// of the whole sub-tree of data under fctr) and normalized by the max before
// exponentiating, to avoid underflow.  Likelihoods are computed in an OpenMP
// parallel region; the final slot is the "new factor" option weighted by gamma.
Factor* sample_from_middle_factor(Factor* fctr, DirichletProcess* dp) {
    if (fctr->factor_type != MIDDLE) {
        fprintf(stderr, "Attempted a middle factor sample from non-middle factor.\n");
        exit(EXIT_FAILURE);
    }
    stSet* pool = dp->factors;
    int64_t num_fctrs = stSet_size(pool);
    int64_t num_choices = num_fctrs + 1;
    Factor** fctr_order = (Factor**) malloc(sizeof(Factor*) * num_fctrs);
    double* log_probs = (double*) malloc(sizeof(double) * num_choices);
    // snapshot the pool so the likelihood loop can be parallelized safely
    stSetIterator* pool_iter = stSet_getIterator(pool);
    for (int64_t i = 0; i < num_fctrs; i++) {
        fctr_order[i] = (Factor*) stSet_getNext(pool_iter);
    }
    stSet_destructIterator(pool_iter);
    double new_fctr_log_prob;
#pragma omp parallel shared(new_fctr_log_prob,log_probs)
    {
        // one thread computes the new-factor weight; nowait lets the others start the loop
#pragma omp single nowait
        new_fctr_log_prob = log(*(dp->gamma)) + unobserved_factor_joint_log_likelihood(fctr, dp);
#pragma omp for
        for (int64_t i = 0; i < num_fctrs; i++) {
            Factor* fctr_option = fctr_order[i];
            // log(table size) + joint log likelihood of fctr's data under this parent
            log_probs[i] = log((double) stSet_size(fctr_option->children))
                + factor_parent_joint_log_likelihood(fctr, fctr_option);
        }
    }
    log_probs[num_fctrs] = new_fctr_log_prob;
    // log-sum-exp style normalization before building the CDF
    double normalizing_const = parallel_max(log_probs, num_choices);
    parallel_add(-normalizing_const, log_probs, num_choices);
    parallel_exp(log_probs, num_choices);
    double* cdf = (double*) malloc(sizeof(double) * num_choices);
    parallel_cdf(cdf, log_probs, num_choices, 10);
    free(log_probs);
    int64_t choice_idx = bisect_left(rand_uniform(cdf[num_fctrs]), cdf, num_choices);
    free(cdf);
    Factor* fctr_choice;
    if (choice_idx == num_fctrs) {
        // new-factor slot: create a middle factor here and recurse upward for its parent
        free(fctr_order);
        DirichletProcess* parent_dp = dp->parent;
        if (parent_dp == NULL) {
            fctr_choice = new_base_factor(dp->hdp);
        }
        else {
            fctr_choice = new_middle_factor(dp);
            Factor* new_fctr_parent = sample_from_middle_factor(fctr, parent_dp);
            assign_to_parent(fctr_choice, new_fctr_parent, false);
        }
    }
    else {
        fctr_choice = fctr_order[choice_idx];
        free(fctr_order);
    }
    return fctr_choice;
}

// Dispatch parent sampling on the factor's type; BASE factors have no parent to sample.
Factor* sample_factor(Factor* fctr, DirichletProcess* dp) {
    if (fctr->factor_type == DATA_PT) {
        return sample_from_data_pt_factor(fctr, dp);
    }
    else if (fctr->factor_type == MIDDLE) {
        return sample_from_middle_factor(fctr, dp);
    }
    else {
        fprintf(stderr, "Cannot sample base factor parent assignments.\n");
        exit(EXIT_FAILURE);
    }
}

// One Gibbs step for a single factor: detach it from its current parent, resample
// a parent from the conditional distribution, and reattach (updating base-factor
// parameters, per the 'true' flag).
void gibbs_factor_iteration(Factor* fctr) {
    DirichletProcess* parent_dp = fctr->parent->dp;
    unassign_from_parent(fctr);
    Factor* new_parent = sample_factor(fctr, parent_dp);
    assign_to_parent(fctr, new_parent, true);
}

// Recursively accumulate into each observed descendant DP the prior mass that flows
// down from an ancestor: at each level the pass-through weight is
// gamma / (gamma + n_children), multiplied along the path (parent_prior_prod).
void cache_prior_contribution(DirichletProcess* dp, double parent_prior_prod) {
    if (!(dp->observed)) {
        return;
    }
    double gamma_param = *(dp->gamma);
    double total_children = (double) dp->num_factor_children;
    double prior_prod = (gamma_param / (gamma_param + total_children)) * parent_prior_prod;
    dp->base_factor_wt += prior_prod;
    stListIterator* child_iter = stList_getIterator(dp->children);
    DirichletProcess* child = (DirichletProcess*) stList_getNext(child_iter);
    while (child != NULL) {
        cache_prior_contribution(child, prior_prod);
        child = (DirichletProcess*) stList_getNext(child_iter);
    }
    stList_destructIterator(child_iter);
}

// Accumulate the posterior weight of one base factor down through the factor tree:
// at each DP the factor's weight is (table size) / (gamma + n_children); child
// factors recurse, and child DPs with no table for this factor receive the prior
// pass-through contribution instead.
void cache_base_factor_weight(Factor* fctr) {
    DirichletProcess* dp = fctr->dp;
    double gamma_param = *(dp->gamma);
    double total_children = (double) dp->num_factor_children;
    double wt = ((double) stSet_size(fctr->children)) / (gamma_param + total_children);
    dp->base_factor_wt += wt;
    if (stList_length(dp->children) > 0) {
        stSetIterator* child_fctr_iter = stSet_getIterator(fctr->children);
        Factor* child_fctr = (Factor*) stSet_getNext(child_fctr_iter);
        while (child_fctr != NULL) {
            cache_base_factor_weight(child_fctr);
            child_fctr = (Factor*) stSet_getNext(child_fctr_iter);
        }
        stSet_destructIterator(child_fctr_iter);
        stListIterator* child_dp_iter = stList_getIterator(dp->children);
        DirichletProcess* child_dp = (DirichletProcess*) stList_getNext(child_dp_iter);
        while (child_dp != NULL) {
            cache_prior_contribution(child_dp, wt);
            child_dp = (DirichletProcess*) stList_getNext(child_dp_iter);
        }
        stList_destructIterator(child_dp_iter);
    }
}

// Add wt * distr into this DP's posterior-predictive accumulator, reset the cached
// weight, and push the same distribution down to observed child DPs.
void push_factor_distr(DirichletProcess* dp, double* distr, int64_t length) {
    double* sample_collector = dp->posterior_predictive;
    double wt = dp->base_factor_wt;
    for (int64_t i = 0; i < length; i++) {
        sample_collector[i] += wt * distr[i];
    }
    dp->base_factor_wt = 0.0;
    stListIterator* child_iter = stList_getIterator(dp->children);
DirichletProcess* child = (DirichletProcess*) stList_getNext(child_iter); while (child != NULL) { if (child->observed) { push_factor_distr(child, distr, length); } child = (DirichletProcess*) stList_getNext(child_iter); } stList_destructIterator(child_iter); } void take_distr_sample(HierarchicalDirichletProcess* hdp) { DirichletProcess* base_dp = hdp->base_dp; double* grid = hdp->sampling_grid; int64_t length = hdp->grid_length; double* pdf = (double*) malloc(sizeof(double) * length); //SumOfLogsMemo* log_sum_memo = hdp->log_sum_memo; stSetIterator* base_fctr_iter = stSet_getIterator(base_dp->factors); Factor* base_fctr = (Factor*) stSet_getNext(base_fctr_iter); while (base_fctr != NULL) { cache_base_factor_weight(base_fctr); evaluate_posterior_predictive(base_fctr, grid, pdf, length);//, log_sum_memo); push_factor_distr(base_dp, pdf, length); base_fctr = (Factor*) stSet_getNext(base_fctr_iter); } stSet_destructIterator(base_fctr_iter); cache_prior_contribution(base_dp, 1.0); evaluate_prior_predictive(hdp, grid, pdf, length); push_factor_distr(base_dp, pdf, length); (hdp->samples_taken)++; free(pdf); } // Knuth shuffle algorithm DirichletProcess** get_shuffled_dps(HierarchicalDirichletProcess* hdp) { int64_t num_dps = hdp->num_dps; DirichletProcess** dps = hdp->dps; DirichletProcess** shuffled_dps = (DirichletProcess**) malloc(sizeof(DirichletProcess*) * num_dps); int64_t pos; for (int64_t i = 0; i < num_dps; i++) { pos = rand() % (i + 1); shuffled_dps[i] = shuffled_dps[pos]; shuffled_dps[pos] = dps[i]; } return shuffled_dps; } void sample_dp_factors(DirichletProcess* dp, int64_t* iter_counter, int64_t burn_in, int64_t thinning, int64_t* sample_counter, int64_t num_samples) { if (!dp->observed) { return; } int64_t iter = *iter_counter; int64_t samples_taken = *sample_counter; // have to pre-allocate the array of sampling factors in case reassignment triggers // destruction of the set the iterator is iterating through int64_t num_factor_children = 
dp->num_factor_children;
    Factor** sampling_fctrs = (Factor**) malloc(sizeof(Factor*) * num_factor_children);
    int64_t i = 0;
    // collect every grandchild factor before mutating anything (see comment above)
    stSetIterator* fctr_iter = stSet_getIterator(dp->factors);
    Factor* fctr = (Factor*) stSet_getNext(fctr_iter);
    stSetIterator* child_fctr_iter;
    Factor* child_fctr;
    while (fctr != NULL) {
        child_fctr_iter = stSet_getIterator(fctr->children);
        child_fctr = (Factor*) stSet_getNext(child_fctr_iter);
        while (child_fctr != NULL) {
            sampling_fctrs[i] = child_fctr;
            i++;
            child_fctr = (Factor*) stSet_getNext(child_fctr_iter);
        }
        stSet_destructIterator(child_fctr_iter);
        fctr = (Factor*) stSet_getNext(fctr_iter);
    }
    stSet_destructIterator(fctr_iter);
    for (int64_t j = 0; j < num_factor_children; j++) {
        gibbs_factor_iteration(sampling_fctrs[j]);
        iter++;
        // harvest a distribution sample every 'thinning' iterations after burn-in
        if (iter % thinning == 0) {
            if (iter > burn_in) {
                take_distr_sample(dp->hdp);
                samples_taken++;
                if (samples_taken >= num_samples) {
                    break;
                }
            }
        }
    }
    free(sampling_fctrs);
    *sample_counter = samples_taken;
    *iter_counter = iter;
}

// Auxiliary variable w ~ Beta(gamma + 1, n) for the concentration-parameter update.
double sample_auxilliary_w(DirichletProcess* dp) {
    return (double) genbet((float) *(dp->gamma) + 1.0, (float) dp->num_factor_children);
}

// Auxiliary variable s ~ Bernoulli(n / (n + gamma)) for the concentration update.
bool sample_auxilliary_s(DirichletProcess* dp) {
    double num_children = (double) dp->num_factor_children;
    return rand_bernoulli(num_children / (num_children + *(dp->gamma)));
}

// Draw fresh (w, s) auxiliary variables for every observed DP.
void sample_gamma_aux_vars(HierarchicalDirichletProcess* hdp) {
    double* w = hdp->w_aux_vector;
    bool* s = hdp->s_aux_vector;
    DirichletProcess** dps = hdp->dps;
    int64_t num_dps = hdp->num_dps;
    DirichletProcess* dp;
    for (int64_t id = 0; id < num_dps; id++) {
        dp = dps[id];
        if (!dp->observed) {
            continue;
        }
        w[id] = sample_auxilliary_w(dp);
        s[id] = sample_auxilliary_s(dp);
    }
}

// Resample the base (depth 0) concentration parameter from its mixture-of-gammas
// posterior given the auxiliary variables.
void sample_base_gamma_internal(HierarchicalDirichletProcess* hdp, double log_w, int64_t num_factors) {
    // Escobar and West's (1995) algorithm
    DirichletProcess* base_dp = hdp->base_dp;
    double gamma_alpha = hdp->gamma_alpha[0];
    double gamma_beta = hdp->gamma_beta[0];
    double num_children = (double)
base_dp->num_factor_children;
    double gamma_beta_post = gamma_beta - log_w;
    double gamma_alpha_post = gamma_alpha + (double) num_factors;
    // mixture weight between the two gamma components of the posterior
    double frac = (gamma_alpha_post - 1.0) / (num_children * gamma_beta_post);
    double wt = frac / (1.0 + frac);
    // note: different parameterization switches alpha and beta
    float sample_gamma = wt * gengam(gamma_beta_post, gamma_alpha_post) + (1 - wt) * gengam(gamma_beta_post, gamma_alpha_post - 1.0);
    hdp->gamma[0] = (double) sample_gamma;
}

// Resample the concentration parameter shared by all DPs at a given middle depth,
// pooling the auxiliary statistics gathered across that depth.
void sample_middle_gammas_internal(HierarchicalDirichletProcess* hdp, int64_t depth, double sum_log_w, int64_t sum_s, int64_t num_depth_fctrs) {
    double gamma_alpha = hdp->gamma_alpha[depth];
    double gamma_beta = hdp->gamma_beta[depth];
    float gamma_alpha_post = (float) (gamma_alpha + (double) (num_depth_fctrs - sum_s));
    float gamma_beta_post = (float) (gamma_beta - sum_log_w);
    // note: different parameterization switches alpha and beta
    hdp->gamma[depth] = (double) gengam(gamma_beta_post, gamma_alpha_post);
}

// Resample every per-depth concentration parameter, counting each resample as a
// Gibbs iteration for thinning/burn-in purposes.
void sample_gammas(HierarchicalDirichletProcess* hdp, int64_t* iter_counter, int64_t burn_in, int64_t thinning, int64_t* sample_counter, int64_t num_samples) {
    int64_t iter = *iter_counter;
    int64_t samples_taken = *sample_counter;
    int64_t tree_depth = hdp->depth;
    double* w = hdp->w_aux_vector;
    bool* s = hdp->s_aux_vector;
    int64_t* num_depth_fctrs = (int64_t*) malloc(sizeof(int64_t) * tree_depth);
    double* sum_log_w = (double*) malloc(sizeof(double) * tree_depth);
    int64_t* sum_s = (int64_t*) malloc(sizeof(int64_t) * tree_depth);
    for (int64_t depth = 0; depth < tree_depth; depth++) {
        num_depth_fctrs[depth] = 0;
        sum_log_w[depth] = 0.0;
        sum_s[depth] = 0;
    }
    int64_t num_dps = hdp->num_dps;
    DirichletProcess** dps = hdp->dps;
    DirichletProcess* dp;
    int64_t dp_depth;
    // pool auxiliary statistics per depth across all observed DPs
    for (int64_t id = 0; id < num_dps; id++) {
        dp = dps[id];
        if (!dp->observed) {
            continue;
        }
        dp_depth = dp->depth;
        num_depth_fctrs[dp_depth] += stSet_size(dp->factors);
        sum_log_w[dp_depth] += log(w[id]);
        if (s[id]) sum_s[dp_depth]++;
    }
    for
(int64_t depth = 0; depth < tree_depth; depth++) {
        if (depth == 0) {
            sample_base_gamma_internal(hdp, sum_log_w[depth], num_depth_fctrs[depth]);
        }
        else {
            sample_middle_gammas_internal(hdp, depth, sum_log_w[depth], sum_s[depth], num_depth_fctrs[depth]);
        }
        iter++;
        if (iter % thinning == 0) {
            if (iter > burn_in) {
                // NOTE(review): 'dp' here is whatever observed DP the pooling loop saw
                // last (dp->hdp == hdp in all cases, presumably) — and it would be
                // uninitialized if no DP were observed; confirm that cannot happen.
                take_distr_sample(dp->hdp);
                samples_taken++;
                if (samples_taken >= num_samples) {
                    break;
                }
            }
        }
    }
    free(sum_log_w);
    free(sum_s);
    free(num_depth_fctrs);
    *iter_counter = iter;
    *sample_counter = samples_taken;
}

// Full concentration-parameter update: draw auxiliary variables, then resample gammas.
void sample_gamma_params(HierarchicalDirichletProcess* hdp, int64_t* iter_counter, int64_t burn_in, int64_t thinning, int64_t* sample_counter, int64_t num_samples) {
    sample_gamma_aux_vars(hdp);
    sample_gammas(hdp, iter_counter, burn_in, thinning, sample_counter, num_samples);
}

// Log density of one factor's subtree under the current seating arrangement:
// data points contribute their parent likelihood, interior factors sum over children.
double snapshot_joint_log_density_internal(Factor* fctr) {
    if (fctr->factor_type == DATA_PT) {
        return log(data_pt_factor_parent_likelihood(fctr, fctr->parent));
    }
    else {
        double log_density = 0.0;
        stSetIterator* child_fctr_iter = stSet_getIterator(fctr->children);
        Factor* child_fctr = stSet_getNext(child_fctr_iter);
        while (child_fctr != NULL) {
            log_density += snapshot_joint_log_density_internal(child_fctr);
            child_fctr = stSet_getNext(child_fctr_iter);
        }
        stSet_destructIterator(child_fctr_iter);
        return log_density;
    }
}

// Joint log density of the entire current state, summed over all base factors.
double snapshot_joint_log_density(HierarchicalDirichletProcess* hdp) {
    double log_density = 0.0;
    stSetIterator* base_fctr_iter = stSet_getIterator(hdp->base_dp->factors);
    Factor* base_fctr = stSet_getNext(base_fctr_iter);
    while (base_fctr != NULL) {
        log_density += snapshot_joint_log_density_internal(base_fctr);
        base_fctr = stSet_getNext(base_fctr_iter);
    }
    stSet_destructIterator(base_fctr_iter);
    return log_density;
}

// Per-DP factor counts at this instant; *length_out receives the array length.
// Returns a malloc'd array the caller owns.
int64_t* snapshot_num_factors(HierarchicalDirichletProcess* hdp, int64_t* length_out) {
    int64_t length = hdp->num_dps;
    *length_out = length;
    int64_t* snapshot = (int64_t*) malloc(sizeof(int64_t) * length);
    DirichletProcess** dps = hdp->dps;
    for (int64_t i = 0;
i < length; i++) {
        snapshot[i] = (int64_t) stSet_size((dps[i])->factors);
    }
    return snapshot;
}

// Copy of the current per-depth concentration parameters; caller owns the array.
double* snapshot_gamma_params(HierarchicalDirichletProcess* hdp, int* length_out);
double* snapshot_gamma_params(HierarchicalDirichletProcess* hdp, int64_t* length_out) {
    int64_t length = hdp->depth;
    *length_out = length;
    double* snapshot = (double*) malloc(sizeof(double) * length);
    double* gammas = hdp->gamma;
    for (int64_t i = 0; i < length; i++) {
        snapshot[i] = gammas[i];
    }
    return snapshot;
}

// Log likelihood of this factor's CURRENT parent assignment relative to all the
// assignments the Gibbs sampler could have made (normalized over the parent pool
// plus the new-factor option).  Used only as a convergence diagnostic.
double snapshot_factor_log_likelihood(Factor* fctr) {
    double parent_prob;
    double cumul = 0.0;
    if (fctr->factor_type == BASE) {
        fprintf(stderr, "Cannot snapshot base factor log likelihood.\n");
        exit(EXIT_FAILURE);
    }
    else if (fctr->factor_type == DATA_PT) {
        Factor* parent_fctr = fctr->parent;
        DirichletProcess* parent_dp = parent_fctr->dp;
        stSet* pool = parent_dp->factors;
        int64_t num_fctrs = stSet_size(pool);
        stSetIterator* pool_iter = stSet_getIterator(pool);
        Factor* fctr_option;
        double fctr_size;
        double prob;
        // NOTE(review): parent_prob is only set when parent_fctr is found in the
        // pool — presumably always true for an attached factor; confirm.
        for (int64_t i = 0; i < num_fctrs; i++) {
            fctr_option = (Factor*) stSet_getNext(pool_iter);
            fctr_size = (double) stSet_size(fctr_option->children);
            prob = fctr_size * data_pt_factor_parent_likelihood(fctr, fctr_option);
            cumul += prob;
            if (fctr_option == parent_fctr) {
                parent_prob = prob;
            }
        }
        stSet_destructIterator(pool_iter);
        double gamma_param = *(parent_dp->gamma);
        cumul += gamma_param * unobserved_factor_likelihood(fctr, parent_dp);
    }
    else {
        DirichletProcess* dp = fctr->dp;
        DirichletProcess* parent_dp = dp->parent;
        Factor* parent_fctr = fctr->parent;
        // cache this factor's sufficient statistics for the likelihood calls below
        double mean, sum_sq_devs;
        int64_t num_data;
        get_factor_stats(fctr, &mean, &sum_sq_devs, &num_data);
        dp->cached_factor_mean = mean;
        dp->cached_factor_size = num_data;
        dp->cached_factor_sum_sq_dev = sum_sq_devs;
        stSet* pool = parent_dp->factors;
        int64_t num_fctrs = stSet_size(pool);
        int64_t num_choices = num_fctrs + 1;
        double* log_probs = (double*) malloc(sizeof(double) * num_choices);
        stSetIterator* pool_iter = stSet_getIterator(pool);
        Factor* fctr_option;
        double log_prob;
        double parent_log_prob;
        double
fctr_size;
        for (int64_t i = 0; i < num_fctrs; i++) {
            fctr_option = (Factor*) stSet_getNext(pool_iter);
            fctr_size = (double) stSet_size(fctr_option->children);
            log_prob = factor_parent_joint_log_likelihood(fctr, fctr_option) + log(fctr_size);
            log_probs[i] = log_prob;
            if (fctr_option == parent_fctr) {
                parent_log_prob = log_prob;
            }
        }
        stSet_destructIterator(pool_iter);
        double gamma_param = *(dp->gamma);
        log_probs[num_fctrs] = unobserved_factor_joint_log_likelihood(fctr, parent_dp) + log(gamma_param);
        // normalize in log space by the max to avoid underflow
        double normalizing_const = max(log_probs, num_choices);
        parent_prob = exp(parent_log_prob - normalizing_const);
        for (int64_t i = 0; i < num_choices; i++) {
            cumul += exp(log_probs[i] - normalizing_const);;
        }
        free(log_probs);
    }
    // TODO: this is a hack, makes it inaccurate for the early iterations
    if (parent_prob == 0.0) {
        return 0.0;
    }
    return (log(parent_prob) - log(cumul)) / 1000.0;
}

// Sum the snapshot log likelihoods of all child factors seated under this DP.
double snapshot_dir_proc_log_likelihood(DirichletProcess* dp) {
    double log_likelihood = 0.0;
    stSetIterator* fctr_iter = stSet_getIterator(dp->factors);
    Factor* fctr = (Factor*) stSet_getNext(fctr_iter);
    stSetIterator* child_fctr_iter;
    Factor* child_fctr;
    while (fctr != NULL) {
        child_fctr_iter = stSet_getIterator(fctr->children);
        child_fctr = (Factor*) stSet_getNext(child_fctr_iter);
        while (child_fctr != NULL) {
            log_likelihood += snapshot_factor_log_likelihood(child_fctr);
            child_fctr = (Factor*) stSet_getNext(child_fctr_iter);
        }
        stSet_destructIterator(child_fctr_iter);
        fctr = (Factor*) stSet_getNext(fctr_iter);
    }
    stSet_destructIterator(fctr_iter);
    return log_likelihood;
}

// Diagnostic log likelihood of the whole HDP state, over observed DPs only.
double snapshot_log_likelihood(HierarchicalDirichletProcess* hdp) {
    double log_likelihood = 0.0;
    int64_t num_dps = hdp->num_dps;
    DirichletProcess** dps = hdp->dps;
    DirichletProcess* dp;
    for (int64_t id = 0; id < num_dps; id++){
        dp = dps[id];
        if (!dp->observed) {
            continue;
        }
        log_likelihood += snapshot_dir_proc_log_likelihood(dp);
    }
    return log_likelihood;
}

// Bundle all diagnostics into out-params; arrays are malloc'd, caller owns them.
void take_snapshot(HierarchicalDirichletProcess* hdp, int64_t** num_dp_fctrs_out, int64_t* num_dps_out, double** gamma_params_out, int64_t* num_gamma_params_out, double* log_likelihood_out, double* log_density_out) {
    *num_dp_fctrs_out = snapshot_num_factors(hdp, num_dps_out);
    *gamma_params_out = snapshot_gamma_params(hdp, num_gamma_params_out);
    *log_likelihood_out = snapshot_log_likelihood(hdp);
    *log_density_out = snapshot_joint_log_density(hdp);
}

// Convenience wrapper: Gibbs sampling with no per-sweep snapshot callback.
void execute_gibbs_sampling(HierarchicalDirichletProcess* hdp, int64_t num_samples, int64_t burn_in, int64_t thinning, bool verbose) {
    execute_gibbs_sampling_with_snapshots(hdp, num_samples, burn_in, thinning, NULL, NULL, verbose);
}

// Main Gibbs driver: repeatedly sweep all DPs in random order, resampling factor
// assignments (and optionally gammas) until num_samples posterior-predictive
// samples have been collected.  snapshot_func (if non-NULL) is invoked once per
// sweep for diagnostics.  Requires data to be assigned and the structure finalized.
void execute_gibbs_sampling_with_snapshots(HierarchicalDirichletProcess* hdp, int64_t num_samples, int64_t burn_in, int64_t thinning, void (*snapshot_func)(HierarchicalDirichletProcess*, void*), void* snapshot_func_args, bool verbose) {
    if (hdp->data == NULL || hdp->data_pt_dp_id == NULL) {
        fprintf(stderr, "Cannot perform Gibbs sampling before passing data to HDP.\n");
        exit(EXIT_FAILURE);
    }
    if (!hdp->finalized) {
        fprintf(stderr, "Cannot perform Gibbs sampling before finalizing HDP structure.\n");
        exit(EXIT_FAILURE);
    }
    int64_t prev_sweep_iter_count = 0;
    int64_t sweep_counter = 1;
    int64_t iter_counter = 0;
    int64_t sample_counter = 0;
    int64_t num_dps = hdp->num_dps;
    int64_t non_data_pt_samples = 0;
    DirichletProcess** sampling_dps;
    while (sample_counter < num_samples) {
        if (verbose) {
            if (sweep_counter > 1) {
                non_data_pt_samples = iter_counter - prev_sweep_iter_count - hdp->data_length;
            }
            fprintf(stderr, "Beginning sweep %"PRId64". Performed %"PRId64" sampling iterations. Previous sweep sampled from ~%"PRId64" non-data point factors. Collected %"PRId64" of %"PRId64" distribution samples.\n", sweep_counter, iter_counter, non_data_pt_samples, sample_counter, num_samples);
            prev_sweep_iter_count = iter_counter;
            sweep_counter++;
        }
        if (snapshot_func != NULL) {
            snapshot_func(hdp, snapshot_func_args);
        }
        // fresh random DP order each sweep
        sampling_dps = get_shuffled_dps(hdp);
        for (int64_t i = 0; i < num_dps; i++) {
            sample_dp_factors(sampling_dps[i], &iter_counter, burn_in, thinning, &sample_counter, num_samples);
            if (sample_counter >= num_samples) {
                break;
            }
        }
        free(sampling_dps);
        if (hdp->sample_gamma && sample_counter < num_samples) {
            sample_gamma_params(hdp, &iter_counter, burn_in, thinning, &sample_counter, num_samples);
        }
    }
}

// Average the accumulated posterior-predictive samples and fit interpolating
// spline slopes for each observed DP.  One-shot: errors out if called twice or
// before any samples were taken.
void finalize_distributions(HierarchicalDirichletProcess* hdp) {
    if (hdp->samples_taken <= 0) {
        fprintf(stderr, "Must perform Gibbs sampling before finalizing sampled distributions.\n");
        exit(EXIT_FAILURE);
    }
    if (hdp->splines_finalized) {
        fprintf(stderr, "Distributions have already been finalized.\n");
        exit(EXIT_FAILURE);
    }
    double inv_sample_size = 1.0 / ((double) hdp->samples_taken);
    int64_t grid_length = hdp->grid_length;
    double* grid = hdp->sampling_grid;
    int64_t num_dps = hdp->num_dps;
    DirichletProcess** dps = hdp->dps;
    DirichletProcess* dp;
    double* distr;
    for (int64_t id = 0; id < num_dps; id++){
        dp = dps[id];
        if (!dp->observed) {
            continue;
        }
        distr = dp->posterior_predictive;
        for (int64_t i = 0; i < grid_length; i++) {
            distr[i] = distr[i] * inv_sample_size;
        }
        dp->spline_slopes = spline_knot_slopes(grid, distr, grid_length);
    }
    hdp->splines_finalized = true;
}

// Evaluate the finalized posterior-predictive density of a DP at point x via
// spline interpolation (clamped below at 0).  Unobserved DPs defer to their
// nearest observed ancestor.
double dir_proc_density(HierarchicalDirichletProcess* hdp, double x, int64_t dp_id) {
    if (!hdp->splines_finalized) {
        fprintf(stderr, "Must finalize distributions before querying densities.\n");
        exit(EXIT_FAILURE);
    }
    if (dp_id < 0 || dp_id >= hdp->num_dps) {
        fprintf(stderr, "Hierarchical Dirichlet process has no Dirichlet process with this ID.\n");
        exit(EXIT_FAILURE);
    }
    DirichletProcess* dp = hdp->dps[dp_id];
    while (!dp->observed) {
        dp = dp->parent;
    }
double interp = grid_spline_interp(x, hdp->sampling_grid, dp->posterior_predictive, dp->spline_slopes, hdp->grid_length); if (interp > 0.0) { return interp; } else { return 0.0; } } double get_dir_proc_distance(DistributionMetricMemo* memo, int64_t dp_id_1, int64_t dp_id_2) { int64_t num_dps = memo->num_distrs; if (dp_id_1 < 0 || dp_id_2 < 0 || dp_id_1 >= num_dps || dp_id_2 >= num_dps) { fprintf(stderr, "Invalid Dirchlet process ID.\n"); exit(EXIT_FAILURE); } if (dp_id_1 == dp_id_2) { return 0.0; } if (dp_id_1 < dp_id_2) { return get_dir_proc_distance(memo, dp_id_2, dp_id_1); } int64_t idx = ((dp_id_1 - 1) * dp_id_1) / 2 + dp_id_2; double* matrix = memo->memo_matrix; if (matrix[idx] < 0) { matrix[idx] = memo->metric_func(memo->hdp, dp_id_1, dp_id_2); } return matrix[idx]; } double dir_proc_distance(HierarchicalDirichletProcess* hdp, int64_t dp_id_1, int64_t dp_id_2, double (*dist_func)(double*, double*, double*, int64_t)) { if (!hdp->splines_finalized) { fprintf(stderr, "Cannot compute a Shannon-Jensen divergence before finalizing distributions.\n"); exit(EXIT_FAILURE); } int64_t grid_length = hdp->grid_length; double* grid = hdp->sampling_grid; DirichletProcess* dp_1 = hdp->dps[dp_id_1]; DirichletProcess* dp_2 = hdp->dps[dp_id_2]; while (!dp_1->observed) { dp_1 = dp_1->parent; } while (!dp_2->observed) { dp_2 = dp_2->parent; } double* distr_1 = dp_1->posterior_predictive; double* distr_2 = dp_2->posterior_predictive; return dist_func(grid, distr_1, distr_2, grid_length); } double kl_divergence(double* x, double* distr_1, double* distr_2, int64_t length) { double divergence = 0.0; double left_pt = distr_1[0] * log(distr_1[0] / distr_2[0]) + distr_2[0] * log(distr_2[0] / distr_1[0]); double right_pt; double dx; for (int64_t i = 1; i < length; i++) { right_pt = distr_1[i] * log(distr_1[i] / distr_2[i]) + distr_2[i] * log(distr_2[i] / distr_1[i]); dx = x[i] - x[i - 1]; divergence += 0.5 * (left_pt + right_pt) * dx; left_pt = right_pt; } return divergence; } double 
dir_proc_kl_divergence(HierarchicalDirichletProcess* hdp, int64_t dp_id_1, int64_t dp_id_2) { return dir_proc_distance(hdp, dp_id_1, dp_id_2, &kl_divergence); } DistributionMetricMemo* new_kl_divergence_memo(HierarchicalDirichletProcess* hdp) { return new_distr_metric_memo(hdp, &dir_proc_kl_divergence); } double hellinger_distance(double* x, double* distr_1, double* distr_2, int64_t length) { double integral = 0.0; double left_pt = sqrt(distr_1[0] * distr_2[0]); double right_pt; double dx; for (int64_t i = 1; i < length; i++) { right_pt = sqrt(distr_1[i] * distr_2[i]); dx = x[i] - x[i - 1]; integral += 0.5 * (left_pt + right_pt) * dx; left_pt = right_pt; } return sqrt(1.0 - integral); } double dir_proc_hellinger_distance(HierarchicalDirichletProcess* hdp, int64_t dp_id_1, int64_t dp_id_2) { return dir_proc_distance(hdp, dp_id_1, dp_id_2, &hellinger_distance); } DistributionMetricMemo* new_hellinger_distance_memo(HierarchicalDirichletProcess* hdp) { return new_distr_metric_memo(hdp, &dir_proc_hellinger_distance); } double l2_distance(double* x, double* distr_1, double* distr_2, int64_t length) { double integral = 0.0; double diff = distr_1[0] - distr_2[0]; double left_pt = diff * diff; double right_pt; double dx; for (int64_t i = 1; i < length; i++) { diff = distr_1[i] - distr_2[i]; right_pt = diff * diff; dx = x[i] - x[i - 1]; integral += 0.5 * (left_pt + right_pt) * dx; left_pt = right_pt; } return sqrt(integral); } double dir_proc_l2_distance(HierarchicalDirichletProcess* hdp, int64_t dp_id_1, int64_t dp_id_2) { return dir_proc_distance(hdp, dp_id_1, dp_id_2, &l2_distance); } DistributionMetricMemo* new_l2_distance_memo(HierarchicalDirichletProcess* hdp) { return new_distr_metric_memo(hdp, &dir_proc_l2_distance); } double shannon_jensen_distance(double* x, double* distr_1, double* distr_2, int64_t length) { double divergence = 0.0; double mean_distr_pt = 0.5 * (distr_1[0] + distr_2[0]); double left_pt = 0.5 * (distr_1[0] * log(distr_1[0] / mean_distr_pt) + 
distr_2[0] * log(distr_2[0] / mean_distr_pt)); double right_pt; double dx; for (int64_t i = 1; i < length; i++) { mean_distr_pt = 0.5 * (distr_1[i] + distr_2[i]); right_pt = 0.5 * (distr_1[i] * log(distr_1[i] / mean_distr_pt) + distr_2[i] * log(distr_2[i] / mean_distr_pt)); dx = x[i] - x[i - 1]; divergence += 0.5 * (left_pt + right_pt) * dx; left_pt = right_pt; } return sqrt(divergence); } double dir_proc_shannon_jensen_distance(HierarchicalDirichletProcess* hdp, int64_t dp_id_1, int64_t dp_id_2) { return dir_proc_distance(hdp, dp_id_1, dp_id_2, &shannon_jensen_distance); } DistributionMetricMemo* new_shannon_jensen_distance_memo(HierarchicalDirichletProcess* hdp) { return new_distr_metric_memo(hdp, &dir_proc_shannon_jensen_distance); } double compare_hdp_distrs(HierarchicalDirichletProcess* hdp_1, int64_t dp_id_1, // this HDP is the master for grid samples HierarchicalDirichletProcess* hdp_2, int64_t dp_id_2, double (*dist_func)(double*, double*, double*, int64_t)) { if (!hdp_1->splines_finalized || !hdp_2->splines_finalized) { fprintf(stderr, "Must finalize distributions of both hierarchical Dirichelt processes before comparing.\n"); exit(EXIT_FAILURE); } int64_t num_dps_1 = hdp_1->num_dps; int64_t num_dps_2 = hdp_2->num_dps; if (dp_id_1 < 0 || dp_id_2 < 0 || dp_id_1 >= num_dps_1 || dp_id_2 >= num_dps_2) { fprintf(stderr, "Invalid Dirchlet process ID.\n"); exit(EXIT_FAILURE); } double* grid = hdp_1->sampling_grid; int64_t grid_length = hdp_1->grid_length; DirichletProcess* dp_1 = hdp_1->dps[dp_id_1]; while (!dp_1->observed) { dp_1 = dp_1->parent; } double* distr_1 = dp_1->posterior_predictive; double* distr_2 = (double*) malloc(sizeof(double) * grid_length); for (int64_t i = 0; i < grid_length; i++) { distr_2[i] = dir_proc_density(hdp_2, grid[i], dp_id_2); } return dist_func(grid, distr_1, distr_2, grid_length); } double compare_hdp_distrs_kl_divergence(HierarchicalDirichletProcess* hdp_1, int64_t dp_id_1, HierarchicalDirichletProcess* hdp_2, int64_t dp_id_2) { 
return compare_hdp_distrs(hdp_1, dp_id_1, hdp_2, dp_id_2, &kl_divergence); } double compare_hdp_distrs_l2_distance(HierarchicalDirichletProcess* hdp_1, int64_t dp_id_1, HierarchicalDirichletProcess* hdp_2, int64_t dp_id_2) { return compare_hdp_distrs(hdp_1, dp_id_1, hdp_2, dp_id_2, &l2_distance); } double compare_hdp_distrs_shannon_jensen_distance(HierarchicalDirichletProcess* hdp_1, int64_t dp_id_1, HierarchicalDirichletProcess* hdp_2, int64_t dp_id_2) { return compare_hdp_distrs(hdp_1, dp_id_1, hdp_2, dp_id_2, &shannon_jensen_distance); } double compare_hdp_distrs_hellinger_distance(HierarchicalDirichletProcess* hdp_1, int64_t dp_id_1, HierarchicalDirichletProcess* hdp_2, int64_t dp_id_2) { return compare_hdp_distrs(hdp_1, dp_id_1, hdp_2, dp_id_2, &hellinger_distance); } void serialize_factor_tree_internal(FILE* out, Factor* fctr, int64_t parent_id, int64_t* next_fctr_id, uintptr_t data_start) { int64_t id = *next_fctr_id; (*next_fctr_id)++; // factor type if (fctr->factor_type == BASE) { fprintf(out, "0\t"); } else if (fctr->factor_type == MIDDLE) { fprintf(out, "1\t"); } else { fprintf(out, "2\t"); } // parent id if (fctr->factor_type == BASE) { fprintf(out, "-\t"); } else { fprintf(out, "%"PRId64"\t", parent_id); } // extra data based on type if (fctr->factor_type == BASE) { // cached params double* param_array = fctr->factor_data; for (int64_t i = 0; i < N_IG_NUM_PARAMS; i++) { fprintf(out, "%.17lg;", param_array[i]); } fprintf(out, "%.17lg", param_array[N_IG_NUM_PARAMS]); } else if (fctr->factor_type == MIDDLE) { // dp id fprintf(out, "%"PRId64, fctr->dp->id); } else { // data index uintptr_t data_pos = (uintptr_t) fctr->factor_data; fprintf(out, "%"PRId64, ((int64_t) (data_pos - data_start)) / sizeof(int64_t)); } fprintf(out, "\n"); if (fctr->children != NULL) { stSetIterator* iter = stSet_getIterator(fctr->children); Factor* child_fctr = (Factor*) stSet_getNext(iter); while (child_fctr != NULL) { serialize_factor_tree_internal(out, child_fctr, id, 
next_fctr_id, data_start);
            child_fctr = (Factor*) stSet_getNext(iter);
        }
        stSet_destructIterator(iter);
    }
}

// Write the entire finalized HDP to 'out' as a line-oriented text format:
// flags, dimensions, raw data + dp assignments, model hyperparameters, sampling
// grid bounds, gamma (and, when sampled, its prior params and auxiliary vectors),
// the DP tree topology, posterior predictives, spline slopes, and the factor trees.
// deserialize_hdp below reads the same format back.
void serialize_hdp(HierarchicalDirichletProcess* hdp, FILE* out) {
    int64_t num_dps = hdp->num_dps;
    int64_t num_data = hdp->data_length;
    double* data = hdp->data;
    int64_t* dp_ids = hdp->data_pt_dp_id;
    int64_t grid_length = hdp->grid_length;
    double* grid = hdp->sampling_grid;
    int64_t depth = hdp->depth;
    double* gamma_params = hdp->gamma;
    double* gamma_alpha = hdp->gamma_alpha;
    double* gamma_beta = hdp->gamma_beta;
    double* w_aux_vector = hdp->w_aux_vector;
    bool* s_aux_vector = hdp->s_aux_vector;
    DirichletProcess** dps = hdp->dps;
    DirichletProcess* base_dp = hdp->base_dp;
    bool has_data = hdp->data != NULL;
    if (!hdp->finalized) {
        fprintf(stderr, "Can only serialize HierarchicalDirichletProcess with finalized structure");
        exit(EXIT_FAILURE);
    }
    // splines finalized
    fprintf(out, "%"PRId64"\n", (int64_t) hdp->splines_finalized);
    // has data
    fprintf(out, "%"PRId64"\n", (int64_t) has_data);
    // sample gamma
    fprintf(out, "%"PRId64"\n", (int64_t) hdp->sample_gamma);
    // num dps
    fprintf(out, "%"PRId64"\n", num_dps);
    // data
    if (has_data) {
        for (int64_t i = 0; i < num_data - 1; i++) {
            fprintf(out, "%.17lg\t", data[i]);
        }
        fprintf(out, "%.17lg\n", data[num_data - 1]);
        // dp ids
        for (int64_t i = 0; i < num_data - 1; i++) {
            fprintf(out, "%"PRId64"\t", dp_ids[i]);
        }
        fprintf(out, "%"PRId64"\n", dp_ids[num_data - 1]);
    }
    // base params (note: alpha is stored halved from two_alpha)
    fprintf(out, "%.17lg\t%.17lg\t%.17lg\t%.17lg\n", hdp->mu, hdp->nu, (hdp->two_alpha) / 2.0, hdp->beta);
    // sampling grid (endpoints + length; interior points are reconstructed on load)
    fprintf(out, "%.17lg\t%.17lg\t%"PRId64"\n", grid[0], grid[grid_length - 1], grid_length);
    // gamma
    for (int64_t i = 0; i < depth - 1; i++) {
        fprintf(out, "%.17lg\t", gamma_params[i]);
    }
    fprintf(out, "%.17lg\n", gamma_params[depth - 1]);
    // gamma distr params
    if (hdp->sample_gamma) {
        // alpha
        for (int64_t i = 0; i < depth - 1; i++) {
            fprintf(out, "%.17lg\t", gamma_alpha[i]);
        }
        fprintf(out, "%.17lg\n", gamma_alpha[depth - 1]);
        // beta
for (int64_t i = 0; i < depth - 1; i++) {
            fprintf(out, "%.17lg\t", gamma_beta[i]);
        }
        fprintf(out, "%.17lg\n", gamma_beta[depth - 1]);
        // w
        for (int64_t i = 0; i < num_dps - 1; i++) {
            fprintf(out, "%.17lg\t", w_aux_vector[i]);
        }
        fprintf(out, "%.17lg\n", w_aux_vector[num_dps - 1]);
        // s
        for (int64_t i = 0; i < num_dps - 1; i++) {
            fprintf(out, "%"PRId64"\t", (int64_t) s_aux_vector[i]);
        }
        fprintf(out, "%"PRId64"\n", (int64_t) s_aux_vector[num_dps - 1]);
    }
    // dp parents
    DirichletProcess* dp;
    for (int64_t i = 0; i < num_dps; i++) {
        dp = dps[i];
        // parent ("-" marks the base DP)
        if (dp == base_dp) {
            fprintf(out, "-\t%"PRId64"\n", dp->num_factor_children);
        }
        else {
            fprintf(out, "%"PRId64"\t%"PRId64"\n", dp->parent->id, dp->num_factor_children);
        }
    }
    // post preds (one line per DP; empty line when the DP has none)
    if (has_data) {
        double* post_pred;
        for (int64_t i = 0; i < num_dps; i++) {
            dp = dps[i];
            post_pred = dp->posterior_predictive;
            if (post_pred != NULL) {
                for (int64_t j = 0; j < grid_length - 1; j++) {
                    fprintf(out, "%.17lg\t", post_pred[j]);
                }
                fprintf(out, "%.17lg", post_pred[grid_length - 1]);
            }
            fprintf(out, "\n");
        }
    }
    // spline slopes
    if (hdp->splines_finalized) {
        double* slopes;
        for (int64_t i = 0; i < num_dps; i++) {
            dp = dps[i];
            slopes = dp->spline_slopes;
            if (slopes != NULL) {
                // NOTE(review): the inner loop variable shadows the outer 'i';
                // correct as written, but easy to misread
                for (int64_t i = 0; i < grid_length - 1; i++) {
                    fprintf(out, "%.17lg\t", slopes[i]);
                }
                fprintf(out, "%.17lg", slopes[grid_length - 1]);
            }
            fprintf(out, "\n");
        }
    }
    // factors
    if (has_data) {
        int64_t next_fctr_id = 0;
        uintptr_t data_start = (uintptr_t) hdp->data;
        stSetIterator* iter = stSet_getIterator(base_dp->factors);
        Factor* fctr = (Factor*) stSet_getNext(iter);
        while (fctr != NULL) {
            serialize_factor_tree_internal(out, fctr, -1, &next_fctr_id, data_start);
            fctr = (Factor*) stSet_getNext(iter);
        }
        stSet_destructIterator(iter);
    }
}

// Rebuild an HDP from the text format written by serialize_hdp.  Reads 'in'
// line-by-line in the same order the serializer wrote.
HierarchicalDirichletProcess* deserialize_hdp(FILE* in) {
    // splines finalized
    char* end;
    char* line = stFile_getLineFromFile(in);
    bool splines_finalized = (bool) strtol(line, &end, 10);
    free(line);
    // has data
    line =
stFile_getLineFromFile(in);
    bool has_data = (bool) strtol(line, &end, 10);
    free(line);
    // sample gamma
    line = stFile_getLineFromFile(in);
    bool sample_gamma = (bool) strtol(line, &end, 10);
    free(line);
    // num dps
    line = stFile_getLineFromFile(in);
    int64_t num_dps = (int64_t) strtol(line, &end, 10);
    free(line);
    double* data;
    int64_t* dp_ids;
    int64_t data_length;
    stList* tokens;
    if (has_data) {
        // data
        line = stFile_getLineFromFile(in);
        tokens = stString_split(line);
        data_length = stList_length(tokens);
        data = (double*) malloc(sizeof(double) * data_length);
        for (int64_t i = 0; i < data_length; i++) {
            sscanf(stList_get(tokens, i), "%lf", &(data[i]));
        }
        free(line);
        stList_destruct(tokens);
        // dp ids
        line = stFile_getLineFromFile(in);
        tokens = stString_split(line);
        dp_ids = (int64_t*) malloc(sizeof(int64_t) * data_length);
        for (int64_t i = 0; i < data_length; i++) {
            sscanf((char*) stList_get(tokens, i), "%"SCNd64, &(dp_ids[i]));
        }
        free(line);
        stList_destruct(tokens);
    }
    // base params
    line = stFile_getLineFromFile(in);
    double mu, nu, alpha, beta;
    sscanf(line, "%lg\t%lg\t%lg\t%lg", &mu, &nu, &alpha, &beta);
    free(line);
    // sampling grid
    line = stFile_getLineFromFile(in);
    double grid_start, grid_stop;
    int64_t grid_length;
    sscanf(line, "%lg\t%lg\t%"SCNd64, &grid_start, &grid_stop, &grid_length);
    free(line);
    // gamma (tree depth is implied by the number of tokens on this line)
    line = stFile_getLineFromFile(in);
    tokens = stString_split(line);
    int64_t depth = stList_length(tokens);
    double* gamma_params = (double*) malloc(sizeof(double) * depth);
    for (int64_t i = 0; i < depth; i++) {
        sscanf((char*) stList_get(tokens, i), "%lf", &(gamma_params[i]));
    }
    free(line);
    stList_destruct(tokens);
    // gamma distr params
    double* gamma_alpha;
    double* gamma_beta;
    double* w;
    bool* s;
    int64_t s_int;
    if (sample_gamma) {
        line = stFile_getLineFromFile(in);
        tokens = stString_split(line);
        // gamma alpha
        gamma_alpha = (double*) malloc(sizeof(double) * depth);
        for (int64_t i = 0; i < depth; i++) {
            sscanf((char*) stList_get(tokens, i), "%lf", &(gamma_alpha[i]));
        }
free(line); stList_destruct(tokens); // gamma beta line = stFile_getLineFromFile(in); tokens = stString_split(line); gamma_beta = (double*) malloc(sizeof(double) * depth); for (int64_t i = 0; i < depth; i++) { sscanf((char*) stList_get(tokens, i), "%lf", &(gamma_beta[i])); } free(line); stList_destruct(tokens); // w line = stFile_getLineFromFile(in); tokens = stString_split(line); w = (double*) malloc(sizeof(double) * num_dps); for (int64_t i = 0; i < num_dps; i++) { sscanf((char*) stList_get(tokens, i), "%lf", &(w[i])); } free(line); stList_destruct(tokens); // s line = stFile_getLineFromFile(in); tokens = stString_split(line); s = (bool*) malloc(sizeof(bool) * num_dps); for (int64_t i = 0; i < num_dps; i++) { sscanf((char*) stList_get(tokens, i), "%"SCNd64, &s_int); s[i] = (bool) s_int; } free(line); stList_destruct(tokens); } // construct hdp HierarchicalDirichletProcess* hdp; if (sample_gamma) { hdp = new_hier_dir_proc_2(num_dps, depth, gamma_alpha, gamma_beta, grid_start, grid_stop, grid_length, mu, nu, alpha, beta); for (int64_t i = 0; i < depth; i++) { hdp->gamma[i] = gamma_params[i]; } free(gamma_params); for (int64_t i = 0; i < num_dps; i++) { hdp->w_aux_vector[i] = w[i]; hdp->s_aux_vector[i] = s[i]; } free(w); free(s); } else { hdp = new_hier_dir_proc(num_dps, depth, gamma_params, grid_start, grid_stop, grid_length, mu, nu, alpha, beta); } DirichletProcess** dps = hdp->dps; DirichletProcess* dp; // dp parents and num children int64_t parent_id; int64_t num_factor_children; for (int64_t id = 0; id < num_dps; id++) { line = stFile_getLineFromFile(in); if (line[0] != '-') { sscanf(line, "%"SCNd64"\t%"SCNd64, &parent_id, &num_factor_children); set_dir_proc_parent(hdp, id, parent_id); (dps[id])->num_factor_children = num_factor_children; } else { sscanf(line, "-\t%"SCNd64, &num_factor_children); (dps[id])->num_factor_children = num_factor_children; } free(line); } finalize_hdp_structure(hdp); // give it data if (has_data) { // note: don't use pass_hdp_data 
because want to manually init factors hdp->data = data; hdp->data_pt_dp_id = dp_ids; hdp->data_length = data_length; verify_valid_dp_assignments(hdp); mark_observed_dps(hdp); // post predictives double* post_pred; for (int64_t id = 0; id < num_dps; id++) { dp = dps[id]; line = stFile_getLineFromFile(in); stList* tokens = stString_split(line); if (stList_length(tokens) != 0) { free(dp->posterior_predictive); dp->posterior_predictive = (double*) malloc(sizeof(double) * grid_length); post_pred = dp->posterior_predictive; for (int64_t i = 0; i < grid_length; i++) { sscanf((char*) stList_get(tokens, i), "%lf\n", &(post_pred[i])); } } free(line); stList_destruct(tokens); } } double* spline_slopes; if (splines_finalized) { hdp->splines_finalized = true; for (int64_t id = 0; id < num_dps; id++) { dp = dps[id]; line = stFile_getLineFromFile(in); stList* tokens = stString_split(line); if (stList_length(tokens) != 0) { spline_slopes = (double*) malloc(sizeof(double) * grid_length); dp->spline_slopes = spline_slopes; for (int64_t i = 0; i < grid_length; i++) { sscanf((char*) stList_get(tokens, i), "%lf", &(spline_slopes[i])); } } free(line); stList_destruct(tokens); } } if (has_data) { char* type_str; char* parent_str; char* dp_str; char* idx_str; char* params_str; int64_t type_int; int64_t dp_id; int64_t data_pt_idx; int64_t parent_idx; double* param_array; stList* params_list; Factor* fctr; Factor* parent_fctr; stList* fctr_list = stList_construct(); line = stFile_getLineFromFile(in); while (line != NULL) { tokens = stString_split(line); type_str = (char*) stList_get(tokens, 0); sscanf(type_str, "%"SCNd64, &type_int); if (type_int == 0) { fctr = new_base_factor(hdp); params_str = (char*) stList_get(tokens, 2); params_list = stString_splitByString(params_str, ";"); param_array = fctr->factor_data; for (int64_t i = 0; i < N_IG_NUM_PARAMS + 1; i++) { sscanf((char*) stList_get(params_list, i), "%lf", &param_array[i]); } stList_destruct(params_list); } else if (type_int == 1) { 
dp_str = (char*) stList_get(tokens, 2); sscanf(dp_str, "%"SCNd64, &dp_id); fctr = new_middle_factor(dps[dp_id]); } else if (type_int == 2) { idx_str = (char*) stList_get(tokens, 2);; sscanf(idx_str, "%"SCNd64, &data_pt_idx); fctr = new_data_pt_factor(hdp, data_pt_idx); } else { fprintf(stderr, "Deserialization error"); exit(EXIT_FAILURE); } stList_append(fctr_list, (void*) fctr); // set parent if appicable parent_str = (char*) stList_get(tokens, 1); if (parent_str[0] != '-') { sscanf(parent_str, "%"SCNd64, &parent_idx); parent_fctr = (Factor*) stList_get(fctr_list, parent_idx); fctr->parent = parent_fctr; stSet_insert(parent_fctr->children, (void*) fctr); } free(line); line = stFile_getLineFromFile(in); } stList_destruct(fctr_list); } return hdp; }
effects.c
#define _POSIX_C_SOURCE 200809 #define _XOPEN_SOURCE 700 #include <omp.h> #include <limits.h> #include <stdlib.h> #include <stdbool.h> #include <dlfcn.h> #include <string.h> #include <errno.h> #include <sys/wait.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #include <spawn.h> #include <time.h> #include <stdio.h> #include "effects.h" #include "log.h" // glib might or might not have already defined MIN, // depending on whether we have pixbuf or not... #ifndef MIN #define MIN(a, b) ((a) < (b) ? (a) : (b)) #endif extern char **environ; static int screen_size_to_pix(struct waylogout_effect_screen_pos size, int screensize, int scale) { if (size.is_percent) { return (size.pos / 100.0) * screensize; } else if (size.pos > 0) { return size.pos * scale; } else { return size.pos; } } static int screen_pos_to_pix(struct waylogout_effect_screen_pos pos, int screensize, int scale) { int actual; if (pos.is_percent) { actual = (pos.pos / 100.0) * screensize; } else { actual = pos.pos * scale; } if (actual < 0) { actual = screensize + actual; } return actual; } static const char *effect_name(struct waylogout_effect *effect) { switch (effect->tag) { case EFFECT_BLUR: return "blur"; case EFFECT_PIXELATE: return "pixelate"; case EFFECT_SCALE: return "scale"; case EFFECT_GREYSCALE: return "greyscale"; case EFFECT_VIGNETTE: return "vignette"; case EFFECT_COMPOSE: return "compose"; case EFFECT_CUSTOM: return effect->e.custom; } abort(); } static void screen_pos_pair_to_pix( struct waylogout_effect_screen_pos posx, struct waylogout_effect_screen_pos posy, int objwidth, int objheight, int screenwidth, int screenheight, int scale, int gravity, int *outx, int *outy) { int x = screen_pos_to_pix(posx, screenwidth, scale); int y = screen_pos_to_pix(posy, screenheight, scale); // Adjust X switch (gravity) { case EFFECT_COMPOSE_GRAV_CENTER: case EFFECT_COMPOSE_GRAV_N: case EFFECT_COMPOSE_GRAV_S: x -= objwidth / 2; break; case EFFECT_COMPOSE_GRAV_NW: case 
EFFECT_COMPOSE_GRAV_SW: case EFFECT_COMPOSE_GRAV_W: break; case EFFECT_COMPOSE_GRAV_NE: case EFFECT_COMPOSE_GRAV_SE: case EFFECT_COMPOSE_GRAV_E: x -= objwidth; break; } // Adjust Y switch (gravity) { case EFFECT_COMPOSE_GRAV_CENTER: case EFFECT_COMPOSE_GRAV_W: case EFFECT_COMPOSE_GRAV_E: y -= objheight / 2; break; case EFFECT_COMPOSE_GRAV_NW: case EFFECT_COMPOSE_GRAV_NE: case EFFECT_COMPOSE_GRAV_N: break; case EFFECT_COMPOSE_GRAV_SW: case EFFECT_COMPOSE_GRAV_SE: case EFFECT_COMPOSE_GRAV_S: y -= objheight; break; } *outx = x; *outy = y; } static uint32_t blend_pixels(float alpha, uint32_t srcpix, uint32_t destpix) { uint8_t srcr = (srcpix & 0x00ff0000) >> 16; uint8_t destr = (destpix & 0x00ff0000) >> 16; uint8_t srcg = (srcpix & 0x0000ff00) >> 8; uint8_t destg = (destpix & 0x0000ff00) >> 8; uint8_t srcb = (srcpix & 0x000000ff) >> 0; uint8_t destb = (destpix & 0x000000ff) >> 0; return (uint32_t)0 | (uint32_t)255 << 24 | (uint32_t)(srcr + destr * (1 - alpha)) << 16 | (uint32_t)(srcg + destg * (1 - alpha)) << 8 | (uint32_t)(srcb + destb * (1 - alpha)) << 0; } static void blur_h(uint32_t *dest, uint32_t *src, int width, int height, int radius) { const int minradius = radius < width ? radius : width; #pragma omp parallel for for (int y = 0; y < height; ++y) { uint32_t *srow = src + y * width; uint32_t *drow = dest + y * width; // 'range' is float, because floating point division is usually faster // than integer division. 
int r_acc = 0; int g_acc = 0; int b_acc = 0; float range = minradius; // Accumulate the range (0..radius) for (int x = 0; x < minradius; ++x) { r_acc += (srow[x] & 0xff0000) >> 16; g_acc += (srow[x] & 0x00ff00) >> 8; b_acc += (srow[x] & 0x0000ff); } // Deal with the main body for (int x = 0; x < width; ++x) { if (x >= minradius) { r_acc -= (srow[x - radius] & 0xff0000) >> 16; g_acc -= (srow[x - radius] & 0x00ff00) >> 8; b_acc -= (srow[x - radius] & 0x0000ff); range -= 1; } if (x < width - minradius) { r_acc += (srow[x + radius] & 0xff0000) >> 16; g_acc += (srow[x + radius] & 0x00ff00) >> 8; b_acc += (srow[x + radius] & 0x0000ff); range += 1; } drow[x] = 0 | (int)(r_acc / range) << 16 | (int)(g_acc / range) << 8 | (int)(b_acc / range); } } } static void blur_v(uint32_t *dest, uint32_t *src, int width, int height, int radius) { const int minradius = radius < height ? radius : height; #pragma omp parallel for for (int x = 0; x < width; ++x) { uint32_t *scol = src + x; uint32_t *dcol = dest + x; // 'range' is float, because floating point division is usually faster // than integer division. 
int r_acc = 0; int g_acc = 0; int b_acc = 0; float range = minradius; // Accumulate the range (0..radius) for (int y = 0; y < minradius; ++y) { r_acc += (scol[y * width] & 0xff0000) >> 16; g_acc += (scol[y * width] & 0x00ff00) >> 8; b_acc += (scol[y * width] & 0x0000ff); } // Deal with the main body for (int y = 0; y < height; ++y) { if (y >= minradius) { r_acc -= (scol[(y - radius) * width] & 0xff0000) >> 16; g_acc -= (scol[(y - radius) * width] & 0x00ff00) >> 8; b_acc -= (scol[(y - radius) * width] & 0x0000ff); range -= 1; } if (y < height - minradius) { r_acc += (scol[(y + radius) * width] & 0xff0000) >> 16; g_acc += (scol[(y + radius) * width] & 0x00ff00) >> 8; b_acc += (scol[(y + radius) * width] & 0x0000ff); range += 1; } dcol[y * width] = 0 | (int)(r_acc / range) << 16 | (int)(g_acc / range) << 8 | (int)(b_acc / range); } } } static void blur_once(uint32_t *dest, uint32_t *src, uint32_t *scratch, int width, int height, int radius) { blur_h(scratch, src, width, height, radius); blur_v(dest, scratch, width, height, radius); } // This effect_blur function, and the associated blur_* functions, // are my own adaptations of code in yvbbrjdr's i3lock-fancy-rapid: // https://github.com/yvbbrjdr/i3lock-fancy-rapid static void effect_blur(uint32_t *dest, uint32_t *src, int width, int height, int scale, int radius, int times) { uint32_t *origdest = dest; uint32_t *scratch = malloc(width * height * sizeof(*scratch)); blur_once(dest, src, scratch, width, height, radius * scale); for (int i = 0; i < times - 1; ++i) { uint32_t *tmp = src; src = dest; dest = tmp; blur_once(dest, src, scratch, width, height, radius * scale); } free(scratch); // We're flipping between using dest and src; // if the last buffer we used was src, copy that over to dest. 
if (dest != origdest) memcpy(origdest, dest, width * height * sizeof(*dest)); } static void effect_pixelate(uint32_t *data, int width, int height, int scale, int factor) { factor *= scale; #pragma omp parallel for for (int y = 0; y < height / factor + 1; ++y) { for (int x = 0; x < width / factor + 1; ++x) { int total_r = 0, total_g = 0, total_b = 0; int xstart = x * factor; int ystart = y * factor; int xlim = MIN(xstart + factor, width); int ylim = MIN(ystart + factor, height); // Average for (int ry = ystart; ry < ylim; ++ry) { for (int rx = xstart; rx < xlim; ++rx) { int index = ry * width + rx; total_r += (data[index] & 0xff0000) >> 16; total_g += (data[index] & 0x00ff00) >> 8; total_b += (data[index] & 0x0000ff); } } int r = total_r / (factor * factor); int g = total_g / (factor * factor); int b = total_b / (factor * factor); // Fill pixels for (int ry = ystart; ry < ylim; ++ry) { for (int rx = xstart; rx < xlim; ++rx) { int index = ry * width + rx; data[index] = r << 16 | g << 8 | b; } } } } } static void effect_scale(uint32_t *dest, uint32_t *src, int swidth, int sheight, double scale) { int dwidth = swidth * scale; int dheight = sheight * scale; double fact = 1.0 / scale; #pragma omp parallel for for (int dy = 0; dy < dheight; ++dy) { int sy = dy * fact; if (sy >= sheight) continue; for (int dx = 0; dx < dwidth; ++dx) { int sx = dx * fact; if (sx >= swidth) continue; dest[dy * dwidth + dx] = src[sy * swidth + sx]; } } } static void effect_greyscale(uint32_t *data, int width, int height) { #pragma omp parallel for for (int y = 0; y < height; ++y) { for (int x = 0; x < width; ++x) { int index = y * width + x; int r = (data[index] & 0xff0000) >> 16; int g = (data[index] & 0x00ff00) >> 8; int b = (data[index] & 0x0000ff); int luma = 0.2989 * r + 0.5870 * g + 0.1140 * b; if (luma < 0) luma = 0; if (luma > 255) luma = 255; luma &= 0xFF; data[index] = luma << 16 | luma << 8 | luma; } } } static void effect_vignette(uint32_t *data, int width, int height, double 
base, double factor) { base = fmin(1, fmax(0, base)); factor = fmin(1 - base, fmax(0, factor)); #pragma omp parallel for for (int y = 0; y < height; ++y) { for (int x = 0; x < width; ++x) { double xf = (x * 1.0) / width; double yf = (y * 1.0) / height; double vignette_factor = base + factor * 16 * xf * yf * (1.0 - xf) * (1.0 - yf); int index = y * width + x; int r = (data[index] & 0xff0000) >> 16; int g = (data[index] & 0x00ff00) >> 8; int b = (data[index] & 0x0000ff); r = (int)(r * vignette_factor) & 0xFF; g = (int)(g * vignette_factor) & 0xFF; b = (int)(b * vignette_factor) & 0xFF; data[index] = r << 16 | g << 8 | b; } } } static void effect_compose(uint32_t *data, int width, int height, int scale, struct waylogout_effect_screen_pos posx, struct waylogout_effect_screen_pos posy, struct waylogout_effect_screen_pos posw, struct waylogout_effect_screen_pos posh, int gravity, char *imgpath) { #if !HAVE_GDK_PIXBUF (void)&blend_pixels; (void)&screen_size_to_pix; (void)&screen_pos_pair_to_pix; waylogout_log(LOG_ERROR, "Compose effect: Compiled without gdk_pixbuf support.\n"); return; #else int imgw = screen_size_to_pix(posw, width, scale); int imgh = screen_size_to_pix(posh, height, scale); bool preserve_aspect = imgw < 0 || imgh < 0; GError *err = NULL; GdkPixbuf *pixbuf = gdk_pixbuf_new_from_file_at_scale( imgpath, imgw, imgh, preserve_aspect, &err); if (!pixbuf) { waylogout_log(LOG_ERROR, "Compose effect: Failed to load image file '%s' (%s).", imgpath, err->message); g_error_free(err); return; } cairo_surface_t *image = gdk_cairo_image_surface_create_from_pixbuf(pixbuf); g_object_unref(pixbuf); int bufw = cairo_image_surface_get_width(image); int bufh = cairo_image_surface_get_height(image); uint32_t *bufdata = (uint32_t *)cairo_image_surface_get_data(image); int bufstride = cairo_image_surface_get_stride(image) / 4; bool bufalpha = cairo_image_surface_get_format(image) == CAIRO_FORMAT_ARGB32; int imgx, imgy; screen_pos_pair_to_pix( posx, posy, bufw, bufh, width, 
height, scale, gravity, &imgx, &imgy); #pragma omp parallel for for (int offy = 0; offy < bufh; ++offy) { if (offy + imgy < 0 || offy + imgy > height) continue; for (int offx = 0; offx < bufw; ++offx) { if (offx + imgx < 0 || offx + imgx > width) continue; size_t idx = (size_t)(offy + imgy) * width + (offx + imgx); size_t bufidx = (size_t)offy * bufstride + (offx); if (!bufalpha) { data[idx] = bufdata[bufidx]; } else { uint8_t alpha = (bufdata[bufidx] & 0xff000000) >> 24; if (alpha == 255) { data[idx] = bufdata[bufidx]; } else if (alpha != 0) { data[idx] = blend_pixels(alpha / 255.0, bufdata[bufidx], data[idx]); } } } } cairo_surface_destroy(image); #endif } static void effect_custom_run(uint32_t *data, int width, int height, int scale, char *path) { void *dl = dlopen(path, RTLD_LAZY); if (dl == NULL) { waylogout_log(LOG_ERROR, "Custom effect: %s", dlerror()); return; } void (*effect_func)(uint32_t *data, int width, int height, int scale) = dlsym(dl, "waylogout_effect"); if (effect_func != NULL) { effect_func(data, width, height, scale); dlclose(dl); return; } uint32_t (*pixel_func)(uint32_t pix, int x, int y, int width, int height) = dlsym(dl, "waylogout_pixel"); if (pixel_func != NULL) { #pragma omp parallel for for (int y = 0; y < height; ++y) { for (int x = 0; x < width; ++x) { data[y * width + x] = pixel_func(data[y * width + x], x, y, width, height); } } dlclose(dl); return; } (void)dlsym(dl, "waylogout_effect"); // Change the result of dlerror() waylogout_log(LOG_ERROR, "Custom effect: %s", dlerror()); } static bool file_is_outdated(const char *input, const char *output) { struct stat instat, outstat; if (stat(input, &instat) < 0) { return true; } if (stat(output, &outstat) < 0) { return true; } if (instat.st_mtim.tv_sec > outstat.st_mtim.tv_sec) { return true; } if ( instat.st_mtim.tv_sec == outstat.st_mtim.tv_sec && instat.st_mtim.tv_nsec >= outstat.st_mtim.tv_nsec) { return true; } return false; } static char *effect_custom_compile(const char *path) { 
static char *cachepath = NULL; static size_t cachelen; if (!cachepath) { char *xdgdir = getenv("XDG_DATA_HOME"); if (xdgdir) { cachepath = malloc(strlen(xdgdir) + strlen("/waylogout") + 1); cachelen = sprintf(cachepath, "%s/waylogout", xdgdir); } else { char *homedir = getenv("HOME"); if (homedir == NULL) { waylogout_log(LOG_ERROR, "Can't compile custom effect; neither $HOME nor $XDG_CONFIG_HOME " "is defined."); return NULL; } cachepath = malloc(strlen(homedir) + strlen("/.cache/waylogout") + 1); cachelen = sprintf(cachepath, "%s/.cache/waylogout", homedir); } if (mkdir(cachepath, 0777) < 0 && errno != EEXIST) { waylogout_log(LOG_ERROR, "Can't compile custom effect; mkdir %s failed: %s\n", cachepath, strerror(errno)); free(cachepath); cachepath = NULL; return NULL; } } // Find the true, absolute path of the input file char *abspath = realpath(path, NULL); size_t abspathlen = strlen(abspath); char *outpath = malloc(cachelen + 1 + abspathlen + 3 + 1); size_t outlen = sprintf(outpath, "%s/%s.so", cachepath, abspath); // Sanitize for (char *ch = outpath + cachelen + 1; ch < outpath + cachelen + 1 + abspathlen; ++ch) { if (!( (*ch >= 'a' && *ch <= 'z') || (*ch >= 'A' && *ch <= 'Z') || (*ch >= '0' && *ch <= '9') || (*ch == '.'))) { *ch = '_'; } } if (!file_is_outdated(path, outpath)) { free(abspath); return outpath; } static const char *fmt = "cc -shared -g -O2 -march=native -fopenmp -o '%s' '%s' -lm"; char *cmd = malloc(strlen(fmt) + outlen - 2 + abspathlen - 2 + 1); sprintf(cmd, fmt, outpath, abspath); free(abspath); fprintf(stderr, "Compiling custom effect: %s\n", cmd); // Finally, compile. 
int ret = system(cmd); free(cmd); if (ret != 0) { if (ret == -1) { waylogout_log(LOG_ERROR, "Custom effect: system(): %s", strerror(errno)); free(outpath); return NULL; } else { waylogout_log(LOG_ERROR, "Custom effect compilation failed\n"); free(outpath); return NULL; } } return outpath; } static void effect_custom(uint32_t *data, int width, int height, int scale, char *path) { size_t pathlen = strlen(path); if (pathlen > 3 && strcmp(path + pathlen - 3, ".so") == 0) { effect_custom_run(data, width, height, scale, path); } else if (pathlen > 2 && strcmp(path + pathlen - 2, ".c") == 0) { char *compiled = effect_custom_compile(path); if (compiled != NULL) { effect_custom_run(data, width, height, scale, compiled); free(compiled); } } else { waylogout_log( LOG_ERROR, "%s: Unknown file type for custom effect (expected .c or .so)", path); } } static cairo_surface_t *run_effect(cairo_surface_t *surface, int scale, struct waylogout_effect *effect) { switch (effect->tag) { case EFFECT_BLUR: { cairo_surface_t *surf = cairo_image_surface_create( CAIRO_FORMAT_RGB24, cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface)); if (cairo_surface_status(surf) != CAIRO_STATUS_SUCCESS) { waylogout_log(LOG_ERROR, "Failed to create surface for blur effect"); cairo_surface_destroy(surf); break; } effect_blur( (uint32_t *)cairo_image_surface_get_data(surf), (uint32_t *)cairo_image_surface_get_data(surface), cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface), scale, effect->e.blur.radius, effect->e.blur.times); cairo_surface_flush(surf); cairo_surface_destroy(surface); surface = surf; break; } case EFFECT_PIXELATE: { effect_pixelate( (uint32_t *)cairo_image_surface_get_data(surface), cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface), scale, effect->e.pixelate.factor); cairo_surface_flush(surface); break; } case EFFECT_SCALE: { cairo_surface_t *surf = cairo_image_surface_create( CAIRO_FORMAT_RGB24, 
cairo_image_surface_get_width(surface) * effect->e.scale, cairo_image_surface_get_height(surface) * effect->e.scale); if (cairo_surface_status(surf) != CAIRO_STATUS_SUCCESS) { waylogout_log(LOG_ERROR, "Failed to create surface for scale effect"); cairo_surface_destroy(surf); break; } effect_scale( (uint32_t *)cairo_image_surface_get_data(surf), (uint32_t *)cairo_image_surface_get_data(surface), cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface), effect->e.scale); cairo_surface_flush(surf); cairo_surface_destroy(surface); surface = surf; break; } case EFFECT_GREYSCALE: { effect_greyscale( (uint32_t *)cairo_image_surface_get_data(surface), cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface)); cairo_surface_flush(surface); break; } case EFFECT_VIGNETTE: { effect_vignette( (uint32_t *)cairo_image_surface_get_data(surface), cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface), effect->e.vignette.base, effect->e.vignette.factor); cairo_surface_flush(surface); break; } case EFFECT_COMPOSE: { effect_compose( (uint32_t *)cairo_image_surface_get_data(surface), cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface), scale, effect->e.compose.x, effect->e.compose.y, effect->e.compose.w, effect->e.compose.h, effect->e.compose.gravity, effect->e.compose.imgpath); cairo_surface_flush(surface); break; } case EFFECT_CUSTOM: { effect_custom( (uint32_t *)cairo_image_surface_get_data(surface), cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface), scale, effect->e.custom); cairo_surface_flush(surface); break; } } return surface; } static cairo_surface_t *ensure_format(cairo_surface_t *surface) { if (cairo_image_surface_get_format(surface) == CAIRO_FORMAT_RGB24) { return surface; } waylogout_log(LOG_DEBUG, "Have to convert surface to CAIRO_FORMAT_RGB24 from %i.", (int)cairo_image_surface_get_format(surface)); cairo_surface_t *surf = 
cairo_image_surface_create( CAIRO_FORMAT_RGB24, cairo_image_surface_get_width(surface), cairo_image_surface_get_height(surface)); if (cairo_surface_status(surf) != CAIRO_STATUS_SUCCESS) { waylogout_log(LOG_ERROR, "Failed to create surface for scale effect"); cairo_surface_destroy(surf); return NULL; } memcpy( cairo_image_surface_get_data(surf), cairo_image_surface_get_data(surface), cairo_image_surface_get_stride(surface) * cairo_image_surface_get_height(surface)); cairo_surface_destroy(surface); return surf; } cairo_surface_t *waylogout_effects_run(cairo_surface_t *surface, int scale, struct waylogout_effect *effects, int count) { surface = ensure_format(surface); if (surface == NULL) return NULL; for (int i = 0; i < count; ++i) { struct waylogout_effect *effect = &effects[i]; surface = run_effect(surface, scale, effect); } return surface; } #define TIME_MSEC(tv) ((tv).tv_sec * 1000.0 + (tv).tv_nsec / 1000000.0) #define TIME_DELTA(first, last) (TIME_MSEC(last) - TIME_MSEC(first)) cairo_surface_t *waylogout_effects_run_timed(cairo_surface_t *surface, int scale, struct waylogout_effect *effects, int count) { struct timespec start_tv; clock_gettime(CLOCK_MONOTONIC, &start_tv); surface = ensure_format(surface); if (surface == NULL) return NULL; fprintf(stderr, "Running %i effects:\n", count); for (int i = 0; i < count; ++i) { struct timespec effect_start_tv; clock_gettime(CLOCK_MONOTONIC, &effect_start_tv); struct waylogout_effect *effect = &effects[i]; surface = run_effect(surface, scale, effect); struct timespec effect_end_tv; clock_gettime(CLOCK_MONOTONIC, &effect_end_tv); fprintf(stderr, " %s: %fms\n", effect_name(effect), TIME_DELTA(effect_start_tv, effect_end_tv)); } struct timespec end_tv; clock_gettime(CLOCK_MONOTONIC, &end_tv); fprintf(stderr, "Effects took %fms.\n", TIME_DELTA(start_tv, end_tv)); return surface; }
calibrator.h
#pragma once #include <fstream> #include <iostream> #include <iterator> #include <NvInfer.h> #include "common.h" #include "data_loader.h" #include "video_data_loader.h" class BaseCalibrator : public nvinfer1::IInt8EntropyCalibrator2 { protected: const size_t kBatchSize_; const size_t kImSize_; const size_t kInputSize_; size_t counter_; BatchBase inp_data_; void *device_ptr_; std::vector<char> calibration_cache_; bool read_cache_ = false; static std::string calibrationTableName() { return std::string("CalibrationTable"); // FIXME } public: BaseCalibrator(const size_t kBatchSize, const size_t kImSize) : kBatchSize_(kBatchSize), kImSize_(kImSize), kInputSize_(kBatchSize_ * kImSize_), inp_data_(kInputSize_) { counter_ = 0; cudaMalloc(&device_ptr_, kInputSize_ * sizeof(float)); } virtual ~BaseCalibrator() { cudaFree(device_ptr_); } int getBatchSize() const override { return 1; // FIXME: ONNX hack, fix once TensorRT fixes the bug } virtual void fillInpData() = 0; virtual size_t getSize() = 0; bool getBatch(void* bindings[], const char* names[], int nbBindings) override { if (counter_ / kBatchSize_ > 500 || counter_ + kBatchSize_ > getSize()) { counter_ = 0; return false; } fillInpData(); cudaMemcpy(device_ptr_, inp_data_.data(), kInputSize_ * sizeof(float), cudaMemcpyHostToDevice); bindings[0] = device_ptr_; counter_ += kBatchSize_; return true; } const void* readCalibrationCache(size_t& length) override { calibration_cache_.clear(); std::ifstream input(calibrationTableName(), std::ios::binary); input >> std::noskipws; if (read_cache_ && input.good()) { std::copy(std::istream_iterator<char>(input), std::istream_iterator<char>(), std::back_inserter(calibration_cache_)); } length = calibration_cache_.size(); return length ? 
&calibration_cache_[0] : nullptr; } void writeCalibrationCache(const void* cache, size_t length) override { std::ofstream output(calibrationTableName(), std::ios::binary); output.write(reinterpret_cast<const char*>(cache), length); } }; class ImageCalibrator : public BaseCalibrator { private: const DataLoader *kLoader_; const std::vector<CompressedImage>& kCompressedImages_; public: ImageCalibrator(const DataLoader *kLoader, const std::vector<CompressedImage>& kCompressedImages, const size_t kBatchSize) : kLoader_(kLoader), kCompressedImages_(kCompressedImages), BaseCalibrator(kBatchSize, 3 * kLoader->GetResol() * kLoader->GetResol()) {} void fillInpData() { #pragma omp parallel for for (size_t j = 0; j < kBatchSize_; j++) { size_t offset = counter_ + j; kLoader_->DecodeAndPreproc( kCompressedImages_[offset], inp_data_.data() + j * kImSize_); } } size_t getSize() { return kCompressedImages_.size(); } }; class VideoCalibrator : public BaseCalibrator { private: const VideoDataLoader *kLoader_; const std::vector<std::string>& kFileNames_; public: VideoCalibrator(const VideoDataLoader *kLoader, const std::vector<std::string>& kFileNames, const size_t kBatchSize) : kLoader_(kLoader), kFileNames_(kFileNames), // hack due to Image BaseCalibrator(1, 3 * kBatchSize * kLoader->GetResol() * kLoader->GetResol()) {} void fillInpData() { kLoader_->DecodeAndPreprocessGOP(kFileNames_[counter_], inp_data_.data()); } size_t getSize() { return kFileNames_.size(); } };
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/threshold.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() 
adaptively blurs the image by blurring less
%  intensely near image edges and more intensely far from edges. We blur the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma). For reasonable results, radius should be larger than sigma. Use a
%  radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
%  The format of the AdaptiveBlurImage method is:
%
%      Image *AdaptiveBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /* A zero sigma means no blurring: return the clone unchanged. */
  if (fabs(sigma) < MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
    The resulting edge_image acts as a per-pixel map that selects how wide a
    blur kernel to apply (narrow near edges, wide in flat regions).
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      /* On success the smoothed edge map replaces the raw one. */
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    Only even-indexed slots of kernel[] are populated; the per-pixel index j
    computed below is forced even before it is used, so odd slots stay NULL.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory(
      (size_t) (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        /* 2-D Gaussian sample; MagickSigma guards against division by ~0. */
        kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /* Fold any normalization remainder into the center tap. */
    kernel[i][(k-1)/2]+=(double) (1.0-normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /* A kernel allocation failed: unwind the ones already acquired. */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Map edge intensity to a kernel index: strong edges select a small j
        (narrow kernel), flat areas a large j. Clamped to [0,width] and
        forced even so it lands on an allocated kernel slot.
      */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      /* Offset of the window's center pixel within the fetched region. */
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const double
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        /* Copy-only channels and write-masked pixels pass through as-is. */
        if (((blur_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by the source pixel's alpha.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(blur_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveBlurImage)
#endif
        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d a p t i v e   S h a r p e n   I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveSharpenImage() adaptively sharpens the image by
sharpening more
%  intensely near image edges and less intensely far from edges. We sharpen the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma). For reasonable results, radius should be larger than sigma. Use a
%  radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
%  The format of the AdaptiveSharpenImage method is:
%
%      Image *AdaptiveSharpenImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sharp_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /* A zero sigma means no sharpening: return the clone unchanged. */
  if (fabs(sigma) < MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
    The edge map selects the per-pixel kernel width (wide near edges here,
    since sharpening is strongest at edges).
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    Only even-indexed slots are populated; j is forced even before use.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) ResetMagickMemory(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        /* Negated Gaussian taps form a sharpening (unsharp-like) kernel. */
        kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /* Center tap set so the kernel sums to -normalize (edge boost). */
    kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /* A kernel allocation failed: unwind the ones already acquired. */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Map edge intensity to a kernel index, clamped to [0,width] and
        forced even so it lands on an allocated kernel slot.
      */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      /* Offset of the window's center pixel within the fetched region. */
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          sharp_traits,
          traits;

        register const double
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        sharp_traits=GetPixelChannelTraits(sharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (sharp_traits == UndefinedPixelTrait))
          continue;
        /* Copy-only channels and write-masked pixels pass through as-is. */
        if (((sharp_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
          {
            SetPixelChannel(sharp_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((sharp_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by the source pixel's alpha.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(sharp_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveSharpenImage)
#endif
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u r   I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlurImage() blurs an image.
We convolve the image with a Gaussian operator % of the given radius and standard deviation (sigma). For reasonable results, % the radius should be larger than sigma. Use a radius of 0 and BlurImage() % selects a suitable radius for you. % % The format of the BlurImage method is: % % Image *BlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { char geometry[MagickPathExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateBlurImage(image,radius,sigma,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif (void) FormatLocaleString(geometry,MagickPathExtent, "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n v o l v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvolveImage() applies a custom convolution kernel 
to the image. % % The format of the ConvolveImage method is: % % Image *ConvolveImage(const Image *image,const KernelInfo *kernel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o kernel: the filtering kernel. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConvolveImage(const Image *image, const KernelInfo *kernel_info,ExceptionInfo *exception) { Image *convolve_image; #if defined(MAGICKCORE_OPENCL_SUPPORT) convolve_image=AccelerateConvolveImage(image,kernel_info,exception); if (convolve_image != (Image *) NULL) return(convolve_image); #endif convolve_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info, exception); return(convolve_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s p e c k l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DespeckleImage() reduces the speckle noise in an image while perserving the % edges of the original image. A speckle removing filter uses a complementary % hulling technique (raising pixels that are darker than their surrounding % neighbors, then complementarily lowering pixels that are brighter than their % surrounding neighbors) to reduce the speckle index of that image (reference % Crimmins speckle removal). % % The format of the DespeckleImage method is: % % Image *DespeckleImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  Hull() performs one directional pass of the Crimmins complementary hulling
  algorithm over a (columns+2)x(rows+2) padded buffer pair f/g. With
  polarity > 0 pixels are raised toward brighter neighbors; with polarity < 0
  they are lowered toward darker ones. (x_offset,y_offset) select the
  neighbor direction.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  register Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* Skip the one-pixel padding row; r points at the offset neighbor. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    register ssize_t
      i,
      x;

    /* Index of the first interior pixel of row y in the padded buffer. */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /* Second pass: compare against both the offset and mirrored neighbors. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*(columns+2)+x_offset);
  s=q-(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}

MagickExport Image *DespeckleImage(const
Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag  "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  register ssize_t
    i;

  size_t
    length;

  /* The four hull directions (horizontal, vertical, both diagonals). */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* OpenCL-accelerated path; fall through on failure. */
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer. Both buffers carry a one-pixel border on every
    side, hence the +2 in each dimension.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one channel at a time.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /* Copy channel i into the bordered scratch buffer. */
    (void) ResetMagickMemory(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    (void) ResetMagickMemory(buffer,0,length*sizeof(*buffer));
    /*
      Complementary hulling: for each direction, raise (polarity 1) in both
      the direction and its mirror, then lower (polarity -1) likewise.
    */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /* Copy the despeckled channel back out of the scratch buffer. */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E d g e   I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EdgeImage() finds edges in an image. Radius defines the radius of the
%  convolution filter. Use a radius of 0 and EdgeImage() selects a suitable
%  radius for you.
%
%  The format of the EdgeImage method is:
%
%      Image *EdgeImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  /*
    Acquire an empty kernel shell, then rebuild it by hand: zero the struct
    and fill in geometry, signature, and values for a Laplacian-style edge
    kernel (all -1 taps with a compensating center).
  */
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]=(-1.0);
  /* Center tap makes the kernel sum to zero: (w*h-1) - (w*h-1). */
  kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E m b o s s   I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EmbossImage() returns a grayscale image with a three-dimensional effect.
%  We convolve the image with a Gaussian operator of the given radius and
%  standard deviation (sigma).
For reasonable results, radius should be
%  larger than sigma. Use a radius of 0 and Emboss() selects a suitable
%  radius for you.
%
%  The format of the EmbossImage method is:
%
%      Image *EmbossImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Build a directional Gaussian-weighted kernel: positive taps on one side
    of the anti-diagonal (u == k), negative on the other, zero elsewhere.
    NOTE(review): relies on the MagickSigma macro #define'd earlier in this
    file remaining in effect here.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? -8.0 :
        8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /* Normalize so the kernel sums to one (PerceptibleReciprocal avoids /0). */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a u s s i a n B l u r I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussianBlurImage() blurs an image. We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, the radius should be larger than sigma. Use a
%  radius of 0 and GaussianBlurImage() selects a suitable radius for you
%
%  The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    kernel_geometry[MagickPathExtent];

  Image
    *gaussian_image;

  KernelInfo
    *kernel_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a true 2-D Gaussian kernel from the radius/sigma pair and convolve.
  */
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "gaussian:%.20gx%.20g",radius,sigma);
  kernel_info=AcquireKernelInfo(kernel_geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  gaussian_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(gaussian_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     K u w a h a r a I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  KuwaharaImage() is an edge preserving noise reduction filter.
%
%  The format of the KuwaharaImage method is:
%
%      Image *KuwaharaImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the square window radius.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
% */

/*
  GetMeanLuma() returns the Rec. 709 luma of a mean-pixel vector, indexing the
  red, green, and blue samples through the image's channel map.
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
    0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
    0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);  /* Rec709 */
}

MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag  "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Each of the four sampling quadrants is a width x width window.
  */
  width=(size_t) radius+1;
  /*
    Quadrant statistics are taken from a Gaussian-smoothed copy of the input.
  */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,gaussian_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      register size_t
        i;

      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      /*
        Examine the four windows that touch the center pixel; keep the one
        whose luma variance is smallest.
      */
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        register ssize_t
          n;

        ssize_t
          j;

        quadrant.x=x;
        quadrant.y=y;
        /*
          Shift by width-1 so the window ends (rather than starts) at the
          center pixel for the upper/left quadrants.
        */
        switch (i)
        {
          case 0:
          {
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /*
          Per-channel mean of the window.
        */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /*
          Luma variance of the window about the mean pixel.
        */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      /*
        i < 4 means a pixel fetch failed above; abort this row.
      */
      if (i < 4)
        {
          status=MagickFalse;
          break;
        }
      /*
        Write the interpolated value at the winning window's center.
      */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_KuwaharaImage)
#endif
        proceed=SetImageProgress(image,KuwaharaImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L o c a l C o n t r a s t I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LocalContrastImage() attempts to increase the appearance of large-scale
%  light-dark transitions.  Local contrast enhancement works similarly to
%  sharpening with an unsharp mask, however the mask is instead created using
%  an image with a greater blur distance.
%
%  The format of the LocalContrastImage method is:
%
%      Image *LocalContrastImage(const Image *image, const double radius,
%        const double strength,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian blur, in percentage with 100%
%      resulting in a blur radius of 20% of largest dimension.
%
%    o strength: the strength of the blur mask in percentage.
%
%    o exception: return any errors or warnings in this structure.
%
% */
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag  "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,
    *scanLinePixels,
    totalWeight;

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanLinePixels_info,
    *interImage_info;

  ssize_t
    scanLineSize,
    width;

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the OpenCL-accelerated path first; fall through on failure. */
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse)
    {
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /*
    Blur half-width: 0.2% of the largest dimension per unit of radius; the
    scan line is padded by width on each end.  One scan-line buffer is
    allocated per OpenMP thread.
  */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  scanLinePixels_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanLinePixels));
  if (scanLinePixels_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info);
  /*
    Create intermediate buffer.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  /* Normalizer for the triangular weights accumulated below. */
  totalWeight=(float) ((width+1)*(width+1));
  /*
    Vertical pass: blur each column's luma into interImage.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) \
      magick_number_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      register ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      /* Per-thread scratch scan line. */
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      pix=pixels;
      /* Fetch the column with width pixels of virtual padding at each end. */
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p+=image->number_channels;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        /* Triangular weights rising to the center then falling off. */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass: blur interImage rows, then scale each channel by the
    unsharp-style contrast multiplier.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static,4) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *pix,
        *pixels;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      /* Row (already padded by the vertical pass) into thread scratch. */
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* Apply and write */
        srcVal=(float) GetPixelLuma(image,p);
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        SetPixelRed(contrast_image,ClampToQuantum(GetPixelRed(image,p)*mult),
          q);
        SetPixelGreen(contrast_image,ClampToQuantum(GetPixelGreen(image,p)*
          mult),q);
        SetPixelBlue(contrast_image,ClampToQuantum(GetPixelBlue(image,p)*mult),
          q);
        p+=image->number_channels;
        q+=contrast_image->number_channels;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o t i o n   B l
u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MotionBlurImage() simulates motion blur. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). % For reasonable results, radius should be larger than sigma. Use a % radius of 0 and MotionBlurImage() selects a suitable radius for you. % Angle gives the angle of the blurring motion. % % Andrew Protano contributed this effect. % % The format of the MotionBlurImage method is: % % Image *MotionBlurImage(const Image *image,const double radius, % const double sigma,const double angle,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting % the center pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o angle: Apply the effect along this angle. % % o exception: return any errors or warnings in this structure. % */ static MagickRealType *GetMotionBlurKernel(const size_t width, const double sigma) { MagickRealType *kernel, normalize; register ssize_t i; /* Generate a 1-D convolution kernel. 
*/ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,sizeof(*kernel))); if (kernel == (MagickRealType *) NULL) return(kernel); normalize=0.0; for (i=0; i < (ssize_t) width; i++) { kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma* MagickSigma)))/(MagickSQ2PI*MagickSigma)); normalize+=kernel[i]; } for (i=0; i < (ssize_t) width; i++) kernel[i]/=normalize; return(kernel); } MagickExport Image *MotionBlurImage(const Image *image,const double radius, const double sigma,const double angle,ExceptionInfo *exception) { #define BlurImageTag "Blur/Image" CacheView *blur_view, *image_view, *motion_view; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; MagickRealType *kernel; OffsetInfo *offset; PointInfo point; register ssize_t i; size_t width; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); width=GetOptimalKernelWidth1D(radius,sigma); kernel=GetMotionBlurKernel(width,sigma); if (kernel == (MagickRealType *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset)); if (offset == (OffsetInfo *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } point.x=(double) width*sin(DegreesToRadians(angle)); point.y=(double) width*cos(DegreesToRadians(angle)); for (i=0; i < (ssize_t) width; i++) { offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5); offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5); } /* Motion blur image. 
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception); if (blur_image != (Image *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return(blur_image); } #endif blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (blur_image == (Image *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return((Image *) NULL); } if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); blur_image=DestroyImage(blur_image); return((Image *) NULL); } status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); motion_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const Quantum *magick_restrict r; register MagickRealType *magick_restrict k; register ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); 
blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if (((blur_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) <= (QuantumRange/2))) { SetPixelChannel(blur_image,channel,p[i],q); continue; } k=kernel; pixel=0.0; if ((blur_traits & BlendPixelTrait) == 0) { for (j=0; j < (ssize_t) width; j++) { r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+ offset[j].y,1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=(*k)*r[i]; k++; } SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q); continue; } alpha=0.0; gamma=0.0; for (j=0; j < (ssize_t) width; j++) { r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1, 1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) (QuantumScale*GetPixelAlpha(image,r)); pixel+=(*k)*alpha*r[i]; gamma+=(*k)*alpha; k++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MotionBlurImage) #endif proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); motion_view=DestroyCacheView(motion_view); image_view=DestroyCacheView(image_view); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r e v i e w I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PreviewImage() tiles 9 thumbnails of the specified image with an image
%  processing operation applied with varying parameters.  This may be helpful
%  pin-pointing an appropriate parameter for a particular image processing
%  operation.
%
%  The format of the PreviewImages method is:
%
%      Image *PreviewImages(const Image *image,const PreviewType preview,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o preview: the image processing operation.
%
%    o exception: return any errors or warnings in this structure.
%
% */
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles  9
#define PreviewImageTag  "Preview/Image"
#define DefaultPreviewGeometry  "204x204+10+10"

  char
    factor[MagickPathExtent],
    label[MagickPathExtent];

  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;

  extern const char
    DefaultTileFrame[];

  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;

  ImageInfo
    *preview_info;

  MagickBooleanType
    proceed;

  MontageInfo
    *montage_info;

  QuantizeInfo
    quantize_info;

  RectangleInfo
    geometry;

  register ssize_t
    i,
    x;

  size_t
    colors;

  ssize_t
    y;

  /*
    Open output image file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Seed the per-tile parameters; each is stepped after every tile below.
  */
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  for (i=0; i < NumberTiles; i++)
  {
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception);
    /*
      The center tile (index 4) shows the unmodified thumbnail.
    */
    if (i == (NumberTiles/2))
      {
        (void) QueryColorCompliance("#dfdfdf",AllCompliance,
          &thumbnail->matte_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    /*
      Apply the requested operation with the current parameter values.
    */
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees,
          2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case SaturationPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case BrightnessPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case GammaPreview:
      default:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        gamma+=0.4f;
        (void) GammaImage(preview_image,gamma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma);
        break;
      }
      case SpiffPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image != (Image *) NULL)
          for (x=0; x < i; x++)
            (void) ContrastImage(preview_image,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)",
          (double) i+1);
        break;
      }
      case DullPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        for (x=0; x < i; x++)
          (void) ContrastImage(preview_image,MagickFalse,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)",
          (double) i+1);
        break;
      }
      case GrayscalePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        quantize_info.colorspace=GRAYColorspace;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "-colorspace gray -colors %.20g",(double) colors);
        break;
      }
      case QuantizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g",
          (double) colors);
        break;
      }
      case DespecklePreview:
      {
        /* Apply despeckle i times: i-1 in-place rounds, then a final pass. */
        for (x=0; x < (i-1); x++)
        {
          preview_image=DespeckleImage(thumbnail,exception);
          if (preview_image == (Image *) NULL)
            break;
          thumbnail=DestroyImage(thumbnail);
          thumbnail=preview_image;
        }
        preview_image=DespeckleImage(thumbnail,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)",
          (double) i+1);
        break;
      }
      case ReduceNoisePreview:
      {
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t)
          radius,(size_t) radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius);
        break;
      }
      case AddNoisePreview:
      {
        /*
          NOTE(review): case 4 is absent and falls to default, which clobbers
          thumbnail->magick — looks intentional upstream but worth confirming.
        */
        switch ((int) i)
        {
          case 0:
          {
            (void) CopyMagickString(factor,"uniform",MagickPathExtent);
            break;
          }
          case 1:
          {
            (void) CopyMagickString(factor,"gaussian",MagickPathExtent);
            break;
          }
          case 2:
          {
            (void) CopyMagickString(factor,"multiplicative",MagickPathExtent);
            break;
          }
          case 3:
          {
            (void) CopyMagickString(factor,"impulse",MagickPathExtent);
            break;
          }
          case 5:
          {
            (void) CopyMagickString(factor,"laplacian",MagickPathExtent);
            break;
          }
          case 6:
          {
            (void) CopyMagickString(factor,"Poisson",MagickPathExtent);
            break;
          }
          default:
          {
            (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent);
            break;
          }
        }
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
          (size_t) i,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor);
        break;
      }
      case SharpenPreview:
      {
        preview_image=SharpenImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g",
          radius,sigma);
        break;
      }
      case BlurPreview:
      {
        preview_image=BlurImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius,
          sigma);
        break;
      }
      case ThresholdPreview:
      {
        /*
          NOTE(review): BilevelImage() is applied to thumbnail, not to the
          clone in preview_image — confirm this is the intended behavior.
        */
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) BilevelImage(thumbnail,(double) (percentage*((double)
          QuantumRange+1.0))/100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"threshold %g",
          (double) (percentage*((double) QuantumRange+1.0))/100.0);
        break;
      }
      case EdgeDetectPreview:
      {
        preview_image=EdgeImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius);
        break;
      }
      case SpreadPreview:
      {
        preview_image=SpreadImage(thumbnail,image->interpolate,radius,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"spread %g",
          radius+0.5);
        break;
      }
      case SolarizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/
          100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"solarize %g",
          (QuantumRange*percentage)/100.0);
        break;
      }
      case ShadePreview:
      {
        degrees+=10.0;
        preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees,
          degrees);
        break;
      }
      case RaisePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        geometry.width=(size_t) (2*i+2);
        geometry.height=(size_t) (2*i+2);
        geometry.x=(i-1)/2;
        geometry.y=(i-1)/2;
        (void) RaiseImage(preview_image,&geometry,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
          geometry.height,(double) geometry.x,(double) geometry.y);
        break;
      }
      case SegmentPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        threshold+=0.4f;
        (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
          threshold,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g",
          threshold,threshold);
        break;
      }
      case SwirlPreview:
      {
        preview_image=SwirlImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees);
        degrees+=45.0;
        break;
      }
      case ImplodePreview:
      {
        degrees+=0.1f;
        preview_image=ImplodeImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees);
        break;
      }
      case WavePreview:
      {
        degrees+=5.0f;
        preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,
          image->interpolate,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5*
          degrees,2.0*degrees);
        break;
      }
      case OilPaintPreview:
      {
        /* NOTE(review): label says "charcoal" for the oil-paint case —
           confirm whether this mismatch is intended. */
        preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case CharcoalDrawingPreview:
      {
        preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case JPEGPreview:
      {
        char
          filename[MagickPathExtent];

        int
          file;

        MagickBooleanType
          status;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        preview_info->quality=(size_t) percentage;
        (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double)
          preview_info->quality);
        /*
          Round-trip through a temporary JPEG file to show the quality loss.
        */
        file=AcquireUniqueFileResource(filename);
        if (file != -1)
          file=close(file)-1;
        (void) FormatLocaleString(preview_image->filename,MagickPathExtent,
          "jpeg:%s",filename);
        status=WriteImage(preview_info,preview_image,exception);
        if (status != MagickFalse)
          {
            Image
              *quality_image;

            (void) CopyMagickString(preview_info->filename,
              preview_image->filename,MagickPathExtent);
            quality_image=ReadImage(preview_info,exception);
            if (quality_image != (Image *) NULL)
              {
                preview_image=DestroyImage(preview_image);
                preview_image=quality_image;
              }
          }
        (void) RelinquishUniqueFileResource(preview_image->filename);
        /* Label with the compressed size in mb/kb/bytes as appropriate. */
        if ((GetBlobSize(preview_image)/1024) >= 1024)
          (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ",
            factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
            1024.0/1024.0);
        else
          if (GetBlobSize(preview_image) >= 1024)
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image))/1024.0);
          else
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(thumbnail)));
        break;
      }
    }
    thumbnail=DestroyImage(thumbnail);
    /* Step the parameters for the next tile. */
    percentage+=12.5;
    radius+=0.5;
    sigma+=0.25;
    if (preview_image == (Image *) NULL)
      break;
    (void) DeleteImageProperty(preview_image,"label");
    (void) SetImageProperty(preview_image,"label",label,exception);
    AppendImageToList(&images,preview_image);
    proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
      NumberTiles);
    if (proceed == MagickFalse)
      break;
  }
  if (images == (Image *) NULL)
    {
      preview_info=DestroyImageInfo(preview_info);
      return((Image *) NULL);
    }
  /*
    Create the montage.
  */
  montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
  (void) CopyMagickString(montage_info->filename,image->filename,
    MagickPathExtent);
  montage_info->shadow=MagickTrue;
  (void) CloneString(&montage_info->tile,"3x3");
  (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
  (void) CloneString(&montage_info->frame,DefaultTileFrame);
  montage_image=MontageImages(images,montage_info,exception);
  montage_info=DestroyMontageInfo(montage_info);
  images=DestroyImageList(images);
  if (montage_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (montage_image->montage != (char *) NULL)
    {
      /*
        Free image directory.
      */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      if (image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R o t a t i o n a l B l u r I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotationalBlurImage() applies a radial blur to the image.
%
%  Andrew Protano contributed this effect.
%
%  The format of the RotationalBlurImage method is:
%
%    Image *RotationalBlurImage(const Image *image,const double angle,
%      ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o angle: the angle of the radial blur.
%
%    o blur: the blur.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view,
    *radial_view;

  double
    blur_radius,
    *cos_theta,
    offset,
    *sin_theta,
    theta;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    blur_center;

  register ssize_t
    i;

  size_t
    n;

  ssize_t
    y;

  /*
    Allocate blur image.
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateRotationalBlurImage(image,angle,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } blur_center.x=(double) (image->columns-1)/2.0; blur_center.y=(double) (image->rows-1)/2.0; blur_radius=hypot(blur_center.x,blur_center.y); n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL); theta=DegreesToRadians(angle)/(double) (n-1); cos_theta=(double *) AcquireQuantumMemory((size_t) n, sizeof(*cos_theta)); sin_theta=(double *) AcquireQuantumMemory((size_t) n, sizeof(*sin_theta)); if ((cos_theta == (double *) NULL) || (sin_theta == (double *) NULL)) { if (cos_theta != (double *) NULL) cos_theta=(double *) RelinquishMagickMemory(cos_theta); if (sin_theta != (double *) NULL) sin_theta=(double *) RelinquishMagickMemory(sin_theta); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } offset=theta*(double) (n-1)/2.0; for (i=0; i < (ssize_t) n; i++) { cos_theta[i]=cos((double) (theta*i-offset)); sin_theta[i]=sin((double) (theta*i-offset)); } /* Radial blur image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); radial_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double radius; PointInfo center; register ssize_t i; size_t step; center.x=(double) x-blur_center.x; center.y=(double) y-blur_center.y; radius=hypot((double) center.x,center.y); if (radius == 0) step=1; else { step=(size_t) (blur_radius/radius); if (step == 0) step=1; else if (step >= n) step=n-1; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const Quantum *magick_restrict r; register ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if (((blur_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) <= (QuantumRange/2))) { SetPixelChannel(blur_image,channel,p[i],q); continue; } gamma=0.0; pixel=0.0; if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) || (channel == AlphaPixelChannel)) { for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ 
center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=r[i]; gamma++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { double alpha; r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) QuantumScale*GetPixelAlpha(image,r); pixel+=alpha*r[i]; gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RotationalBlurImage) #endif proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); radial_view=DestroyCacheView(radial_view); image_view=DestroyCacheView(image_view); cos_theta=(double *) RelinquishMagickMemory(cos_theta); sin_theta=(double *) RelinquishMagickMemory(sin_theta); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e l e c t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SelectiveBlurImage() selectively blur pixels within a contrast threshold. 
% It is similar to the unsharpen mask that sharpens everything with contrast % above a certain threshold. % % The format of the SelectiveBlurImage method is: % % Image *SelectiveBlurImage(const Image *image,const double radius, % const double sigma,const double threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o threshold: only pixels within this contrast threshold are included % in the blur operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SelectiveBlurImage(const Image *image,const double radius, const double sigma,const double threshold,ExceptionInfo *exception) { #define SelectiveBlurImageTag "SelectiveBlur/Image" CacheView *blur_view, *image_view, *luminance_view; Image *blur_image, *luminance_image; MagickBooleanType status; MagickOffsetType progress; MagickRealType *kernel; register ssize_t i; size_t width; ssize_t center, j, u, v, y; /* Initialize blur image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,sigma); kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,width*sizeof(*kernel))); if (kernel == (MagickRealType *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); j=(ssize_t) (width-1)/2; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); } if (image->debug != MagickFalse) { char format[MagickPathExtent], *message; register const MagickRealType *k; ssize_t u, v; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double) width); message=AcquireString(""); k=kernel; for (v=0; v < (ssize_t) width; v++) { *message='\0'; (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < (ssize_t) width; u++) { (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double) *k++); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } blur_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } luminance_image=CloneImage(image,0,0,MagickTrue,exception); if (luminance_image == (Image *) NULL) { blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) 
RelinquishAlignedMemory(kernel); return((Image *) NULL); } status=TransformImageColorspace(luminance_image,GRAYColorspace,exception); if (status == MagickFalse) { luminance_image=DestroyImage(luminance_image); blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } /* Threshold blur image. */ status=MagickTrue; progress=0; center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)* ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L)); image_view=AcquireVirtualCacheView(image,exception); luminance_view=AcquireVirtualCacheView(luminance_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double contrast; MagickBooleanType sync; register const Quantum *magick_restrict l, *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t) ((width-1)/2L),image->columns+width,width,exception); l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y- (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double intensity; register ssize_t i; intensity=GetPixelIntensity(image,p+center); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict luminance_pixels, *magick_restrict pixels; 
register ssize_t u; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if (((blur_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p+center) <= (QuantumRange/2))) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } k=kernel; pixel=0.0; pixels=p; luminance_pixels=l; gamma=0.0; if ((blur_traits & BlendPixelTrait) == 0) { for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,luminance_pixels)- intensity; if (fabs(contrast) < threshold) { pixel+=(*k)*pixels[i]; gamma+=(*k); } k++; pixels+=GetPixelChannels(image); luminance_pixels+=GetPixelChannels(luminance_image); } pixels+=GetPixelChannels(image)*image->columns; luminance_pixels+=GetPixelChannels(luminance_image)* luminance_image->columns; } if (fabs((double) gamma) < MagickEpsilon) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(image,pixels)-intensity; if (fabs(contrast) < threshold) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; } k++; pixels+=GetPixelChannels(image); luminance_pixels+=GetPixelChannels(luminance_image); } pixels+=GetPixelChannels(image)*image->columns; luminance_pixels+=GetPixelChannels(luminance_image)* luminance_image->columns; } if (fabs((double) gamma) < MagickEpsilon) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); l+=GetPixelChannels(luminance_image); 
q+=GetPixelChannels(blur_image); } sync=SyncCacheViewAuthenticPixels(blur_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SelectiveBlurImage) #endif proceed=SetImageProgress(image,SelectiveBlurImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); luminance_image=DestroyImage(luminance_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShadeImage() shines a distant light on an image to create a % three-dimensional effect. You control the positioning of the light with % azimuth and elevation; azimuth is measured in degrees off the x axis % and elevation is measured in pixels above the Z axis. % % The format of the ShadeImage method is: % % Image *ShadeImage(const Image *image,const MagickBooleanType gray, % const double azimuth,const double elevation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o gray: A value other than zero shades the intensity of each pixel. % % o azimuth, elevation: Define the light source direction. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define ShadeImageTag  "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    double
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    register const Quantum
      *magick_restrict center,
      *magick_restrict p,
      *magick_restrict post,
      *magick_restrict pre;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a 3-row window (previous, current, next row, each padded by one
      column) so the surface normal can be evaluated for every pixel.
    */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine the surface normal and compute shading.  pre/center/post
        point at the pixel above, at, and below the current position.
      */
      pre=p+GetPixelChannels(linear_image);
      center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
      post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
      normal.x=(double) (
        GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,center-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,center+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image)));
      normal.y=(double) (
        GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,post)+
        GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre)-
        GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image)));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface: lit by the Z component only */
      else
        {
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+
                normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          shade_traits,
          traits;

        channel=GetPixelChannelChannel(linear_image,i);
        traits=GetPixelChannelTraits(linear_image,channel);
        shade_traits=GetPixelChannelTraits(shade_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (shade_traits == UndefinedPixelTrait))
          continue;
        if (((shade_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(linear_image,center) <= (QuantumRange/2)))
          {
            /* Channel is copy-only or the pixel is write-protected. */
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if ((traits & UpdatePixelTrait) == 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if (gray != MagickFalse)
          {
            /* gray: write the raw shade value instead of modulating. */
            SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
            continue;
          }
        SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
          center[i]),q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(shade_image);
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ShadeImage)
#endif
        proceed=SetImageProgress(image,ShadeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a r p e n   I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SharpenImage() sharpens the image.  We convolve the image with a Gaussian
%  operator of the given radius and standard deviation (sigma).  For
%  reasonable results, radius should be larger than sigma.  Use a radius of 0
%  and SharpenImage() selects a suitable radius for you.
%
%  Using a separable kernel would be faster, but the negative weights cancel
%  out on the corners of the kernel producing often undesirable ringing in the
%  filtered result; this can be avoided by using a 2D gaussian shaped image
%  sharpening kernel instead.
%
%  The format of the SharpenImage method is:
%
%      Image *SharpenImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Fill the kernel with a negated Gaussian; the center weight is patched
    below to turn it into a sharpening operator.
  */
  normalize=0.0;
  j=(ssize_t) (kernel_info->width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  }
  /*
    After the loops i == width*width, so i/2 indexes the kernel center
    (width is odd).
  */
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  /*
    Normalize so the kernel weights sum to one.
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  sharp_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S p r e a d   I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpreadImage() is a special effects method that randomly displaces each
%  pixel in a square area defined by the radius parameter.
%
%  The format of the SpreadImage method is:
%
%      Image *SpreadImage(const Image *image,
%        const PixelInterpolateMethod method,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: interpolation method.
%
%    o radius: choose a random pixel in a neighborhood of this extent.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
  const PixelInterpolateMethod method,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag  "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
    {
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.
*/
  status=MagickTrue;
  progress=0;
  width=GetOptimalKernelWidth1D(radius,0.5);
  random_info=AcquireRandomInfoThreadSet();  /* one RNG state per thread */
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  /*
    Threads are enabled only when key == ~0UL; presumably this keeps seeded
    (reproducible) RNG runs single-threaded -- TODO confirm against
    GetRandomSecretKey semantics.
  */
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(image,spread_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PointInfo
        point;

      /*
        Pick a random source position within a width x width neighborhood of
        (x,y) and interpolate that pixel into the destination.
      */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      status=InterpolatePixelChannels(image,image_view,spread_image,method,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
        exception);
      q+=GetPixelChannels(spread_image);
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SpreadImage)
#endif
        proceed=SetImageProgress(image,SpreadImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     U n s h a r p   M a s k   I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnsharpMaskImage() sharpens
one or more image channels. We convolve the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and UnsharpMaskImage() selects a suitable radius for you. % % The format of the UnsharpMaskImage method is: % % Image *UnsharpMaskImage(const Image *image,const double radius, % const double sigma,const double amount,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o gain: the percentage of the difference between the original and the % blur image that is added back into the original. % % o threshold: the threshold in pixels needed to apply the diffence gain. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *UnsharpMaskImage(const Image *image,const double radius, const double sigma,const double gain,const double threshold, ExceptionInfo *exception) { #define SharpenImageTag "Sharpen/Image" CacheView *image_view, *unsharp_view; Image *unsharp_image; MagickBooleanType status; MagickOffsetType progress; double quantum_threshold; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold, exception); if (unsharp_image != (Image *) NULL) return(unsharp_image); #endif unsharp_image=BlurImage(image,radius,sigma,exception); if (unsharp_image == (Image *) NULL) return((Image *) NULL); quantum_threshold=(double) QuantumRange*threshold; /* Unsharp-mask image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(image,unsharp_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelChannel channel; PixelTrait traits, unsharp_traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); unsharp_traits=GetPixelChannelTraits(unsharp_image,channel); if ((traits == UndefinedPixelTrait) || (unsharp_traits == UndefinedPixelTrait)) continue; if (((unsharp_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) <= (QuantumRange/2))) { SetPixelChannel(unsharp_image,channel,p[i],q); continue; } pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q); if (fabs(2.0*pixel) < quantum_threshold) pixel=(double) p[i]; else pixel=(double) p[i]+gain*pixel; SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(unsharp_image); } if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_UnsharpMaskImage) #endif proceed=SetImageProgress(image,SharpenImageTag,progress++,image->rows); if 
(proceed == MagickFalse) status=MagickFalse; } } unsharp_image->type=image->type; unsharp_view=DestroyCacheView(unsharp_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) unsharp_image=DestroyImage(unsharp_image); return(unsharp_image); }
transpose.c
#include <stdio.h> #include <stdlib.h> #include <assert.h> #include <string.h> #ifdef _OPENMP # include <omp.h> # define OMP_MIN_SIZE 100 #endif #ifdef MPI # include <mpi.h> #endif #include "getticks.c" #include "safemalloc.h" #define ALIGNMENT 128 #define REPEAT 16 int main(int argc, char* argv[]) { int rank = 0; #ifdef MPI int rc; int provided; MPI_Init_thread( &argc , &argv , MPI_THREAD_FUNNELED , &provided ); MPI_Comm_rank( MPI_COMM_WORLD , &rank ); #endif int min = ( argc>1 ? atoi(argv[1]) : 2 ); int max = ( argc>2 ? atoi(argv[2]) : 400 ); int inc = ( argc>3 ? atoi(argv[3]) : 1 ); if ( rank == 0 ) { unsigned long long * d0 = safemalloc( max * sizeof(unsigned long long) ); unsigned long long * d1 = safemalloc( max * sizeof(unsigned long long) ); unsigned long long * d2 = safemalloc( max * sizeof(unsigned long long) ); unsigned long long * d3 = safemalloc( max * sizeof(unsigned long long) ); unsigned long long * d4 = safemalloc( max * sizeof(unsigned long long) ); unsigned long long * d5 = safemalloc( max * sizeof(unsigned long long) ); unsigned long long * d6 = safemalloc( max * sizeof(unsigned long long) ); /* 12345678901234567890123456789012 */ char * name0 = "memcpy"; char * name1 = "loop copy"; char * name2 = "basic + s1S"; char * name3 = "basic + s1L"; char * name4 = "pur 4x4 + s1L"; char * name5 = "mur 4x4 + s1L"; char * name6 = "murv 4x4 + s1L"; #ifdef OMP printf( "# starting test: OpenMP ON with %d threads... \n", omp_get_max_threads() ); fprintf( stderr , "starting test: OpenMP ON with %d threads... \n", omp_get_max_threads() ); #else printf( "# starting test: OpenMP OFF... \n" ); fprintf( stderr , "starting test: OpenMP OFF... 
\n" ); #endif printf( "# sN[S/L] = stride-N [store/load] \n" ); printf( "# pur = pragma unroll \n" ); printf( "# mur = manual unroll \n" ); printf( "# murv = manual unroll and vectorize \n" ); #ifdef DEBUG fprintf( stderr , "sN[S/L] = stride-N [store/load] \n" ); fprintf( stderr , "pur = pragma unroll \n" ); fprintf( stderr , "mur = manual unroll \n" ); fprintf( stderr , "murv = manual unroll and vectorize \n" ); fprintf( stderr , "%4s %8s %16s %16s %16s %16s %16s %16s \n" , "n" , name0 , name1 , name2 , name3 , name4 , name5 , name6); fflush( stderr ); #endif for ( int n=min ; n<max ; n+=inc ) { const int N = (n*n); double * A; double * B; unsigned long long t0, t1; A = safemalloc( N * sizeof(double) ); B = safemalloc( N * sizeof(double) ); int k = 0; #ifdef OMP #pragma omp parallel for if(n>OMP_MIN_SIZE) #endif for ( int i=0 ; i<n ; i++ ) for ( int j=0 ; j<n ; j++ ) A[i*n+j] = (double)(k++); #ifdef OMP #pragma omp parallel for if(n>OMP_MIN_SIZE) #endif for ( int i=0 ; i<n ; i++ ) for ( int j=0 ; j<n ; j++ ) B[i*n+j] = 0.0; /* reference - memcpy */ t0 = getticks(); for ( int t=0 ; t<REPEAT ; t++ ) memcpy( B , A , N ); t1 = getticks(); d0[n] = (t1-t0)/REPEAT; /* reference - direct copy */ t0 = getticks(); for ( int t=0 ; t<REPEAT ; t++ ) { #ifdef OMP #pragma omp parallel for if(n>OMP_MIN_SIZE) #endif for ( int i=0 ; i<n ; i++ ) for ( int j=0 ; j<n ; j++ ) B[i*n+j] = A[i*n+j]; } t1 = getticks(); d1[n] = (t1-t0)/REPEAT; /* basic w/ stride-1 stores */ t0 = getticks(); for ( int t=0 ; t<REPEAT ; t++ ) { #ifdef OMP #pragma omp parallel for if(n>OMP_MIN_SIZE) #endif for ( int i=0 ; i<n ; i++ ) for ( int j=0 ; j<n ; j++ ) B[i*n+j] = A[j*n+i]; } t1 = getticks(); d2[n] = (t1-t0)/REPEAT; /* verify */ for ( int j=0 ; j<n ; j++ ) for ( int i=0 ; i<n ; i++ ) assert( B[i*n+j] == A[j*n+i] ); /* basic w/ stride-1 loads */ t0 = getticks(); for ( int t=0 ; t<REPEAT ; t++ ) { #ifdef OMP #pragma omp parallel for if(n>OMP_MIN_SIZE) #endif for ( int j=0 ; j<n ; j++ ) for ( int i=0 ; i<n 
; i++ ) B[i*n+j] = A[j*n+i]; } t1 = getticks(); d3[n] = (t1-t0)/REPEAT; /* verify */ for ( int j=0 ; j<n ; j++ ) for ( int i=0 ; i<n ; i++ ) assert( B[i*n+j] == A[j*n+i] ); /* pragma unroll 4x4 + s1 loads */ t0 = getticks(); for ( int t=0 ; t<REPEAT ; t++ ) { #ifdef OMP #pragma omp parallel for if(n>OMP_MIN_SIZE) #endif //#pragma unroll(4) #pragma unroll_and_jam for ( int j=0 ; j<n ; j++ ) //#pragma unroll(4) #pragma unroll_and_jam for ( int i=0 ; i<n ; i++ ) B[i*n+j] = A[j*n+i]; } t1 = getticks(); d4[n] = (t1-t0)/REPEAT; /* verify */ for ( int j=0 ; j<n ; j++ ) for ( int i=0 ; i<n ; i++ ) assert( B[i*n+j] == A[j*n+i] ); /* manual unroll 4x4 + s1 loads */ t0 = getticks(); for ( int t=0 ; t<REPEAT ; t++ ) { #ifdef OMP #pragma omp parallel if(n>OMP_MIN_SIZE) #endif { int n4 = n-(n%4); /* divisible-by-4 part */ #ifdef OMP #pragma omp for private(i,j,n4) #endif for ( int j=0 ; j<n4 ; j+=4 ) { for ( int i=0 ; i<n4 ; i+=4 ) { B[(i )*n+j ] = A[(j )*n+i ]; B[(i )*n+j+1] = A[(j+1)*n+i ]; B[(i )*n+j+2] = A[(j+2)*n+i ]; B[(i )*n+j+3] = A[(j+3)*n+i ]; B[(i+1)*n+j ] = A[(j )*n+i+1]; B[(i+1)*n+j+1] = A[(j+1)*n+i+1]; B[(i+1)*n+j+2] = A[(j+2)*n+i+1]; B[(i+1)*n+j+3] = A[(j+3)*n+i+1]; B[(i+2)*n+j ] = A[(j )*n+i+2]; B[(i+2)*n+j+1] = A[(j+1)*n+i+2]; B[(i+2)*n+j+2] = A[(j+2)*n+i+2]; B[(i+2)*n+j+3] = A[(j+3)*n+i+2]; B[(i+3)*n+j ] = A[(j )*n+i+3]; B[(i+3)*n+j+1] = A[(j+1)*n+i+3]; B[(i+3)*n+j+2] = A[(j+2)*n+i+3]; B[(i+3)*n+j+3] = A[(j+3)*n+i+3]; } for ( int i=n4 ; i<n ; i++ ) B[i*n+j] = A[j*n+i]; } for ( int j=n4 ; j<n ; j++ ) { for ( int i=0 ; i<n4 ; i+=4 ) { B[(i )*n+j] = A[j*n+i ]; B[(i+1)*n+j] = A[j*n+i+1]; B[(i+2)*n+j] = A[j*n+i+2]; B[(i+3)*n+j] = A[j*n+i+3]; } for ( int i=n4 ; i<n ; i++ ) B[i*n+j] = A[j*n+i]; } } } t1 = getticks(); d5[n] = (t1-t0)/REPEAT; /* verify */ for ( int j=0 ; j<n ; j++ ) for ( int i=0 ; i<n ; i++ ) assert( B[i*n+j] == A[j*n+i] ); /* manual unroll 4x4 and vectorize + s1 loads */ t0 = getticks(); for ( int t=0 ; t<REPEAT ; t++ ) { #ifdef OMP #pragma omp 
parallel if(n>OMP_MIN_SIZE) #endif { int n4 = n-(n%4); /* divisible-by-4 part */ #ifdef OMP #pragma omp for private(i,j,n4) #endif for ( int j=0 ; j<n4 ; j+=4 ) { for ( int i=0 ; i<n4 ; i+=4 ) { double a00, a01, a02, a03; double a10, a11, a12, a13; double a20, a21, a22, a23; double a30, a31, a32, a33; double b00, b01, b02, b03; double b10, b11, b12, b13; double b20, b21, b22, b23; double b30, b31, b32, b33; a00 = A[(j )*n+i ]; a01 = A[(j )*n+i+1]; a02 = A[(j )*n+i+2]; a03 = A[(j )*n+i+3]; a10 = A[(j+1)*n+i ]; a11 = A[(j+1)*n+i+1]; a12 = A[(j+1)*n+i+2]; a13 = A[(j+1)*n+i+3]; a20 = A[(j+2)*n+i ]; a21 = A[(j+2)*n+i+1]; a22 = A[(j+2)*n+i+2]; a23 = A[(j+2)*n+i+3]; a30 = A[(j+3)*n+i ]; a31 = A[(j+3)*n+i+1]; a32 = A[(j+3)*n+i+2]; a33 = A[(j+3)*n+i+3]; b00=a00; b01=a10; b02=a20; b03=a30; b10=a01; b11=a11; b12=a21; b13=a31; b20=a02; b21=a12; b22=a22; b23=a32; b30=a03; b31=a13; b32=a23; b33=a33; B[(i )*n+j ] = b00; B[(i )*n+j+1] = b01; B[(i )*n+j+2] = b02; B[(i )*n+j+3] = b03; B[(i+1)*n+j ] = b10; B[(i+1)*n+j+1] = b11; B[(i+1)*n+j+2] = b12; B[(i+1)*n+j+3] = b13; B[(i+2)*n+j ] = b20; B[(i+2)*n+j+1] = b21; B[(i+2)*n+j+2] = b22; B[(i+2)*n+j+3] = b23; B[(i+3)*n+j ] = b30; B[(i+3)*n+j+1] = b31; B[(i+3)*n+j+2] = b32; B[(i+3)*n+j+3] = b33; } for ( int i=n4 ; i<n ; i++ ) B[i*n+j] = A[j*n+i]; } for ( int j=n4 ; j<n ; j++ ) { for ( int i=0 ; i<n4 ; i+=4 ) { int a00, a01, a02, a03; int b00, b10, b20, b30; a00 = A[j*n+i ]; a01 = A[j*n+i+1]; a02 = A[j*n+i+2]; a03 = A[j*n+i+3]; b00=a00; b10=a01; b20=a02; b30=a03; B[(i )*n+j] = b00; B[(i+1)*n+j] = b10; B[(i+2)*n+j] = b20; B[(i+3)*n+j] = b30; } for ( int i=n4 ; i<n ; i++ ) B[i*n+j] = A[j*n+i]; } } } t1 = getticks(); d6[n] = (t1-t0)/REPEAT; /* verify */ for ( int j=0 ; j<n ; j++ ) for ( int i=0 ; i<n ; i++ ) assert( B[i*n+j] == A[j*n+i] ); #ifdef DEBUG if ( n<11 ) { printf( "A: \n" ); for ( int i=0 ; i<n ; i++ ) { for ( int j=0 ; j<n ; j++ ) printf( "%12.1lf" , A[i*n+j]); printf( "\n" ); } printf( "B: \n" ); for ( int i=0 ; i<n ; i++ ) { for 
( int j=0 ; j<n ; j++ ) printf( "%12.1lf" , B[i*n+j]); printf( "\n" ); } } /* this is just for the neurotic person who cannot wait until the end for data */ double c = 1.0 / d0[n]; fprintf( stderr , "%4d %8llu %8llu (%5.1lf) %8llu (%5.1lf) %8llu (%5.1lf) %8llu (%5.1lf) %8llu (%5.1lf) %8llu (%5.1lf) \n" , n , d0[n] , d1[n] , c*d1[n] , d2[n] , c*d2[n] , d3[n] , c*d3[n] , d4[n] , c*d4[n] , d5[n] , c*d5[n] , d6[n] , c*d6[n] ); free(B); free(A); fflush( stderr ); #endif } /* print analysis */ printf( "# timing in cycles (ratio relative to memcpy) \n" ); printf( "%4s %8s %16s %16s %16s %16s %16s %16s \n" , "# n" , name0 , name1 , name2 , name3 , name4 , name5 , name6); for ( int n=min ; n<max ; n+=inc) { double c = 1.0 / d0[n]; printf( "%4d %8llu %8llu (%5.1lf) %8llu (%5.1lf) %8llu (%5.1lf) %8llu (%5.1lf) %8llu (%5.1lf) %8llu (%5.1lf) \n" , n , d0[n] , d1[n] , c*d1[n] , d2[n] , c*d2[n] , d3[n] , c*d3[n] , d4[n] , c*d4[n] , d5[n] , c*d5[n] , d6[n] , c*d6[n] ); } printf( "# ...the end \n" ); fflush( stdout ); free(d6); free(d5); free(d4); free(d3); free(d2); free(d1); free(d0); } #ifdef MPI MPI_Finalize(); #endif return 0; }
config.h
/* config.h. Generated from config.in by configure. */
/* config.in. Generated from configure.ac by autoheader. */
/* NOTE: machine-generated autoconf configuration header for the "gold"
   linker.  Do not edit by hand -- change configure.ac / config.in and
   re-run configure instead. */

/* Define if building universal (internal helper macro) */
/* #undef AC_APPLE_UNIVERSAL_BUILD */

/* Define to 1 if translation of program messages to the user's native
   language is requested. */
#define ENABLE_NLS 1

/* Define to enable linker plugins */
#define ENABLE_PLUGINS 1

/* Define to do multi-threaded linking */
/* #undef ENABLE_THREADS */

/* Default big endian (true or false) */
#define GOLD_DEFAULT_BIG_ENDIAN false

/* Default machine code */
#define GOLD_DEFAULT_MACHINE EM_X86_64

/* Default OSABI code */
#define GOLD_DEFAULT_OSABI ELFOSABI_NONE

/* Default size (32 or 64) */
#define GOLD_DEFAULT_SIZE 64

/* Define to 1 if you have the <byteswap.h> header file. */
#define HAVE_BYTESWAP_H 1

/* Define to 1 if you have the `chsize' function. */
/* #undef HAVE_CHSIZE */

/* Define to 1 if you have the declaration of `asprintf', and to 0 if you
   don't. */
#define HAVE_DECL_ASPRINTF 1

/* Define to 1 if you have the declaration of `basename', and to 0 if you
   don't. */
#define HAVE_DECL_BASENAME 1

/* Define to 1 if you have the declaration of `ffs', and to 0 if you don't. */
#define HAVE_DECL_FFS 1

/* Define to 1 if you have the declaration of `memmem', and to 0 if you
   don't. */
#define HAVE_DECL_MEMMEM 1

/* Define to 1 if you have the declaration of `snprintf', and to 0 if you
   don't. */
#define HAVE_DECL_SNPRINTF 1

/* Define to 1 if you have the declaration of `strndup', and to 0 if you
   don't. */
#define HAVE_DECL_STRNDUP 1

/* Define to 1 if you have the declaration of `strverscmp', and to 0 if you
   don't. */
#define HAVE_DECL_STRVERSCMP 1

/* Define to 1 if you have the declaration of `vasprintf', and to 0 if you
   don't. */
#define HAVE_DECL_VASPRINTF 1

/* Define to 1 if you have the declaration of `vsnprintf', and to 0 if you
   don't. */
#define HAVE_DECL_VSNPRINTF 1

/* Define to 1 if you have the <ext/hash_map> header file. */
#define HAVE_EXT_HASH_MAP 1

/* Define to 1 if you have the <ext/hash_set> header file. */
#define HAVE_EXT_HASH_SET 1

/* Define to 1 if you have the `ffsll' function. */
#define HAVE_FFSLL 1

/* Define to 1 if you have the `ftruncate' function. */
#define HAVE_FTRUNCATE 1

/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1

/* Define to 1 if you have the `mallinfo' function. */
#define HAVE_MALLINFO 1

/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1

/* Define to 1 if you have the `mmap' function. */
#define HAVE_MMAP 1

/* Define to 1 if you have the mremap function with MREMAP_MAYMOVE support */
#define HAVE_MREMAP 1

/* Define if compiler supports #pragma omp threadprivate */
#define HAVE_OMP_SUPPORT 1

/* Define to 1 if you have the `posix_fallocate' function. */
#define HAVE_POSIX_FALLOCATE 1

/* Define to 1 if you have the `pread' function. */
#define HAVE_PREAD 1

/* Define to 1 if you have the `readv' function. */
#define HAVE_READV 1

/* Define if struct stat has a field st_mtim with timespec for mtime */
#define HAVE_STAT_ST_MTIM 1

/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1

/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1

/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1

/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1

/* Define to 1 if you have the `sysconf' function. */
#define HAVE_SYSCONF 1

/* Define to 1 if you have the <sys/mman.h> header file. */
#define HAVE_SYS_MMAN_H 1

/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1

/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1

/* Define to support 32-bit big-endian targets */
#define HAVE_TARGET_32_BIG 1

/* Define to support 32-bit little-endian targets */
#define HAVE_TARGET_32_LITTLE 1

/* Define to support 64-bit big-endian targets */
#define HAVE_TARGET_64_BIG 1

/* Define to support 64-bit little-endian targets */
#define HAVE_TARGET_64_LITTLE 1

/* Define if attributes work on C++ templates */
#define HAVE_TEMPLATE_ATTRIBUTES 1

/* Define to 1 if you have the `times' function. */
#define HAVE_TIMES 1

/* Define to 1 if you have the <tr1/unordered_map> header file. */
#define HAVE_TR1_UNORDERED_MAP 1

/* Define if ::std::tr1::unordered_map::rehash is usable */
#define HAVE_TR1_UNORDERED_MAP_REHASH 1

/* Define to 1 if you have the <tr1/unordered_set> header file. */
#define HAVE_TR1_UNORDERED_SET 1

/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1

/* Define to 1 if you have the <zlib.h> header file. */
#define HAVE_ZLIB_H 1

/* Name of package */
#define PACKAGE "gold"

/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT ""

/* Define to the full name of this package. */
#define PACKAGE_NAME "gold"

/* Define to the full name and version of this package. */
#define PACKAGE_STRING "gold 0.1"

/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "gold"

/* Define to the home page for this package. */
#define PACKAGE_URL ""

/* Define to the version of this package. */
#define PACKAGE_VERSION "0.1"

/* Define to 1 if you have the ANSI C header files. */
#define STDC_HEADERS 1

/* System root for target files */
#define TARGET_SYSTEM_ROOT ""

/* Whether the system root can be relocated */
#define TARGET_SYSTEM_ROOT_RELOCATABLE 0

/* Enable extensions on AIX 3, Interix.  */
#ifndef _ALL_SOURCE
# define _ALL_SOURCE 1
#endif
/* Enable GNU extensions on systems that have them.  */
#ifndef _GNU_SOURCE
# define _GNU_SOURCE 1
#endif
/* Enable threading extensions on Solaris.  */
#ifndef _POSIX_PTHREAD_SEMANTICS
# define _POSIX_PTHREAD_SEMANTICS 1
#endif
/* Enable extensions on HP NonStop.  */
#ifndef _TANDEM_SOURCE
# define _TANDEM_SOURCE 1
#endif
/* Enable general extensions on Solaris.  */
#ifndef __EXTENSIONS__
# define __EXTENSIONS__ 1
#endif

/* Version number of package */
#define VERSION "0.1"

/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
   significant byte first (like Motorola and SPARC, unlike Intel). */
#if defined AC_APPLE_UNIVERSAL_BUILD
# if defined __BIG_ENDIAN__
#  define WORDS_BIGENDIAN 1
# endif
#else
# ifndef WORDS_BIGENDIAN
/* #  undef WORDS_BIGENDIAN */
# endif
#endif

/* Define to 1 if on MINIX. */
/* #undef _MINIX */

/* Define to 2 if the system does not provide POSIX.1 features except with
   this defined. */
/* #undef _POSIX_1_SOURCE */

/* Define to 1 if you need to in order for `stat' and other things to work. */
/* #undef _POSIX_SOURCE */
atomic-18.c
int i, v; float f; void foo (int j) { #pragma omp atomic update,update /* { dg-error "too many atomic clauses" } */ i = i + 1; #pragma omp atomic seq_cst release /* { dg-error "too many memory order clauses" } */ i = i + 1; #pragma omp atomic read,release /* { dg-error "incompatible with 'acq_rel' or 'release' clauses" } */ v = i; #pragma omp atomic acq_rel read /* { dg-error "incompatible with 'acq_rel' or 'release' clauses" } */ v = i; #pragma omp atomic write acq_rel /* { dg-error "incompatible with 'acq_rel' or 'acquire' clauses" } */ i = v; #pragma omp atomic acquire , write /* { dg-error "incompatible with 'acq_rel' or 'acquire' clauses" } */ i = v; #pragma omp atomic update ,acquire /* { dg-error "incompatible with 'acq_rel' or 'acquire' clauses" } */ i = i + 1; #pragma omp atomic acq_rel update /* { dg-error "incompatible with 'acq_rel' or 'acquire' clauses" } */ i = i + 1; #pragma omp atomic acq_rel,hint(0) /* { dg-error "incompatible with 'acq_rel' or 'acquire' clauses" } */ i = i + 1; #pragma omp atomic acquire /* { dg-error "incompatible with 'acq_rel' or 'acquire' clauses" } */ i = i + 1; #pragma omp atomic capture hint (0) capture /* { dg-error "too many atomic clauses" } */ v = i = i + 1; #pragma omp atomic hint(j + 2) /* { dg-error "constant integer expression" } */ i = i + 1; #pragma omp atomic hint(f) /* { dg-error "integ" } */ i = i + 1; #pragma omp atomic foobar /* { dg-error "expected 'read', 'write', 'update', 'capture', 'seq_cst', 'acq_rel', 'release', 'relaxed' or 'hint' clause" } */ i = i + 1; /* { dg-error "expected end of line before" "" { target *-*-* } .-1 } */ }
rawKeccak_256_fmt_plug.c
/* Keccak-256 cracker patch for JtR. Hacked together during May of 2013
 * by Dhiru Kholia <dhiru.kholia at gmail.com>.
 *
 * Usage: john --format:raw-keccak-256 <hash file>
 *
 * This file is part of John the Ripper password cracker,
 * Copyright (c) 2012 by Solar Designer
 *
 * based on rawMD4_fmt.c code, with trivial changes by groszek.
 */

/* Standard JtR plugin stanza: when included with FMT_EXTERNS_H or
 * FMT_REGISTERS_H set, only the declaration/registration is emitted;
 * otherwise the full implementation follows. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawKeccak_256;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawKeccak_256);
#else

#include <string.h>

#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "KeccakHash.h"

#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE               2048	/* keys-per-crypt multiplier per thread */
#endif
#include <omp.h>
#endif

#include "memdbg.h"

#define FORMAT_TAG		"$keccak256$"
#define TAG_LENGTH		(sizeof(FORMAT_TAG)-1)
#define FORMAT_LABEL		"Raw-Keccak-256"
#define FORMAT_NAME		""
#define ALGORITHM_NAME		"32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT	""
#define BENCHMARK_LENGTH	-1
#define PLAINTEXT_LENGTH	125
#define CIPHERTEXT_LENGTH	64	/* hex digits of a 256-bit digest */
#define BINARY_SIZE		32	/* raw digest bytes */
#define SALT_SIZE		0	/* unsalted format */
#define BINARY_ALIGN		4
#define SALT_ALIGN		1
#define MIN_KEYS_PER_CRYPT	1
#define MAX_KEYS_PER_CRYPT	1

/* Self-test vectors: bare hex and tagged forms of the same hashes. */
static struct fmt_tests tests[] = {
	{"4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45", "abc"},
	{"$keccak256$4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45", "abc"},
	{"$keccak256$3b673b24a64aebb286f193e5c985c8e528db8590f997d9130889ca7f5f4cfe6e", "passWOrd"},
	{"$keccak256$2a359feeb8e488a1af2c03b908b3ed7990400555db73e1421181d97cac004d48", "123456789"},
	{"$keccak256$c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", ""},
	{NULL}
};

/* Per-candidate plaintext lengths (allocated in init). */
static int (*saved_len);
// the Keccak function can read up to next even 8 byte offset.
// making the buffer larger avoid reading past end of buffer
static char (*saved_key)[(((PLAINTEXT_LENGTH+1)+7)/8)*8];
/* Per-candidate computed digests, 32-bit aligned. */
static uint32_t (*crypt_out)
    [(BINARY_SIZE + sizeof(uint32_t) - 1) / sizeof(uint32_t)];

/* Allocate the key/length/digest arrays; under OpenMP, scale the
 * keys-per-crypt limits by thread count * OMP_SCALE. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t;

	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_len = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_len));
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
}

/* Release the arrays allocated by init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
	MEM_FREE(saved_len);
}

/* Accept a hash if, after the optional tag, it is exactly 64 hex digits. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	q = p;
	/* atoi16[] maps non-hex characters to 0x7F */
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q && q - p == CIPHERTEXT_LENGTH;
}

/* Canonicalize: ensure the tag is present and the hex digits are lowercase. */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
	static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1];

	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;

	memcpy(out, FORMAT_TAG, TAG_LENGTH);
	/* +1 copies the terminating NUL (valid() guarantees the length) */
	memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1);
	strlwr(out + TAG_LENGTH);
	return out;
}

/* Decode the hex ciphertext (past the tag) into BINARY_SIZE raw bytes. */
static void *get_binary(char *ciphertext)
{
	static unsigned char *out;
	char *p;
	int i;

	if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

	p = ciphertext + TAG_LENGTH;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] =
		    (atoi16[ARCH_INDEX(*p)] << 4) |
		    atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

/* Hash-table bucket functions: low bits of the first digest word. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/* Store a candidate password, truncated to PLAINTEXT_LENGTH. */
static void set_key(char *key, int index)
{
	int len = strlen(key);
	saved_len[index] = len;
	if (len > PLAINTEXT_LENGTH)
		len = saved_len[index] = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, len);
}

/* Return the stored candidate, NUL-terminated on demand. */
static char *get_key(int index)
{
	saved_key[index][saved_len[index]] = 0;
	return saved_key[index];
}

/* Hash all pending candidates.  NOTE(review): the 1088/512/256/0x01
 * parameters select original Keccak-256 (0x01 domain suffix), not NIST
 * SHA3-256 (which uses 0x06); the Update length is in bits. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		Keccak_HashInstance hash;
		Keccak_HashInitialize(&hash, 1088, 512, 256, 0x01);
		Keccak_HashUpdate(&hash, (unsigned char*)saved_key[index], saved_len[index] * 8);
		Keccak_HashFinal(&hash, (unsigned char*)crypt_out[index]);
	}
	return count;
}

/* Quick scan: compare only the first ARCH_SIZE bytes of each digest
 * (presumably the native word size -- see arch.h); cmp_one() does the
 * full-length comparison. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

/* Full digest comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Nothing beyond cmp_one() is needed for an unsalted raw hash. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Format descriptor registered with the JtR core. */
struct fmt_main fmt_rawKeccak_256 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		"Keccak 256 " ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
ParFriends.h
/****************************************************************/ /* Parallel Combinatorial BLAS Library (for Graph Computations) */ /* version 1.6 -------------------------------------------------*/ /* date: 6/15/2017 ---------------------------------------------*/ /* authors: Ariful Azad, Aydin Buluc --------------------------*/ /****************************************************************/ /* Copyright (c) 2010-2017, The Regents of the University of California Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #ifndef _PAR_FRIENDS_H_ #define _PAR_FRIENDS_H_ #include "mpi.h" #include <iostream> #include <cstdarg> #include "SpParMat.h" #include "SpParMat3D.h" #include "SpParHelper.h" #include "MPIType.h" #include "Friends.h" #include "OptBuf.h" #include "mtSpGEMM.h" #include "MultiwayMerge.h" #include <unistd.h> #include <type_traits> namespace combblas { template <class IT, class NT, class DER> class SpParMat; /*************************************************************************************************/ /**************************** FRIEND FUNCTIONS FOR PARALLEL CLASSES ******************************/ /*************************************************************************************************/ /** ** Concatenate all the FullyDistVec<IT,NT> objects into a single one **/ template <typename IT, typename NT> FullyDistVec<IT,NT> Concatenate ( std::vector< FullyDistVec<IT,NT> > & vecs) { if(vecs.size() < 1) { SpParHelper::Print("Warning: Nothing to concatenate, returning empty "); return FullyDistVec<IT,NT>(); } else if (vecs.size() < 2) { return vecs[1]; } else { typename std::vector< FullyDistVec<IT,NT> >::iterator it = vecs.begin(); std::shared_ptr<CommGrid> commGridPtr = it->getcommgrid(); MPI_Comm World = commGridPtr->GetWorld(); IT nglen = it->TotalLength(); // new global length IT cumloclen = it->MyLocLength(); // existing cumulative local lengths ++it; for(; it != vecs.end(); ++it) { if(*(commGridPtr) != *(it->getcommgrid())) { SpParHelper::Print("Grids are not comparable for FullyDistVec<IT,NT>::EWiseApply\n"); MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); } nglen += it->TotalLength(); cumloclen += it->MyLocLength(); } FullyDistVec<IT,NT> ConCat (commGridPtr, nglen, NT()); int nprocs = commGridPtr->GetSize(); std::vector< std::vector< NT > > data(nprocs); std::vector< std::vector< IT > > inds(nprocs); IT gloffset = 0; for(it = vecs.begin(); it != vecs.end(); ++it) { IT loclen = it->LocArrSize(); for(IT i=0; i < loclen; ++i) { IT locind; IT loffset = 
it->LengthUntil(); int owner = ConCat.Owner(gloffset+loffset+i, locind); data[owner].push_back(it->arr[i]); inds[owner].push_back(locind); } gloffset += it->TotalLength(); } int * sendcnt = new int[nprocs]; int * sdispls = new int[nprocs]; for(int i=0; i<nprocs; ++i) sendcnt[i] = (int) data[i].size(); int * rdispls = new int[nprocs]; int * recvcnt = new int[nprocs]; MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World); // share the request counts sdispls[0] = 0; rdispls[0] = 0; for(int i=0; i<nprocs-1; ++i) { sdispls[i+1] = sdispls[i] + sendcnt[i]; rdispls[i+1] = rdispls[i] + recvcnt[i]; } IT totrecv = std::accumulate(recvcnt,recvcnt+nprocs,static_cast<IT>(0)); NT * senddatabuf = new NT[cumloclen]; for(int i=0; i<nprocs; ++i) { std::copy(data[i].begin(), data[i].end(), senddatabuf+sdispls[i]); std::vector<NT>().swap(data[i]); // delete data vectors } NT * recvdatabuf = new NT[totrecv]; MPI_Alltoallv(senddatabuf, sendcnt, sdispls, MPIType<NT>(), recvdatabuf, recvcnt, rdispls, MPIType<NT>(), World); // send data delete [] senddatabuf; IT * sendindsbuf = new IT[cumloclen]; for(int i=0; i<nprocs; ++i) { std::copy(inds[i].begin(), inds[i].end(), sendindsbuf+sdispls[i]); std::vector<IT>().swap(inds[i]); // delete inds vectors } IT * recvindsbuf = new IT[totrecv]; MPI_Alltoallv(sendindsbuf, sendcnt, sdispls, MPIType<IT>(), recvindsbuf, recvcnt, rdispls, MPIType<IT>(), World); // send new inds DeleteAll(sendindsbuf, sendcnt, sdispls); for(int i=0; i<nprocs; ++i) { for(int j = rdispls[i]; j < rdispls[i] + recvcnt[i]; ++j) { ConCat.arr[recvindsbuf[j]] = recvdatabuf[j]; } } DeleteAll(recvindsbuf, recvcnt, rdispls); return ConCat; } } template <typename MATRIXA, typename MATRIXB> bool CheckSpGEMMCompliance(const MATRIXA & A, const MATRIXB & B) { if(A.getncol() != B.getnrow()) { std::ostringstream outs; outs << "Can not multiply, dimensions does not match"<< std::endl; outs << A.getncol() << " != " << B.getnrow() << std::endl; SpParHelper::Print(outs.str()); 
MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); return false; } if((void*) &A == (void*) &B) { std::ostringstream outs; outs << "Can not multiply, inputs alias (make a temporary copy of one of them first)"<< std::endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, MATRIXALIAS); return false; } return true; } // Combined logic for prune, recovery, and select template <typename IT, typename NT, typename DER> void MCLPruneRecoverySelect(SpParMat<IT,NT,DER> & A, NT hardThreshold, IT selectNum, IT recoverNum, NT recoverPct, int kselectVersion) { int myrank; MPI_Comm_rank(MPI_COMM_WORLD,&myrank); #ifdef TIMING double t0, t1; #endif // Prune and create a new pruned matrix SpParMat<IT,NT,DER> PrunedA = A.Prune(std::bind2nd(std::less_equal<NT>(), hardThreshold), false); // column-wise statistics of the pruned matrix FullyDistVec<IT,NT> colSums = PrunedA.Reduce(Column, std::plus<NT>(), 0.0); FullyDistVec<IT,NT> nnzPerColumnUnpruned = A.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;}); FullyDistVec<IT,NT> nnzPerColumn = PrunedA.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;}); //FullyDistVec<IT,NT> pruneCols(A.getcommgrid(), A.getncol(), hardThreshold); FullyDistVec<IT,NT> pruneCols(nnzPerColumn); pruneCols = hardThreshold; PrunedA.FreeMemory(); FullyDistSpVec<IT,NT> recoverCols(nnzPerColumn, std::bind2nd(std::less<NT>(), recoverNum)); // recover only when nnzs in unprunned columns are greater than nnzs in pruned column recoverCols = EWiseApply<NT>(recoverCols, nnzPerColumnUnpruned, [](NT spval, NT dval){return spval;}, [](NT spval, NT dval){return dval > spval;}, false, NT()); recoverCols = recoverPct; // columns with nnz < r AND sum < recoverPct (pct) recoverCols = EWiseApply<NT>(recoverCols, colSums, [](NT spval, NT dval){return spval;}, [](NT spval, NT dval){return dval < spval;}, false, NT()); IT nrecover = recoverCols.getnnz(); if(nrecover > 0) { #ifdef TIMING t0=MPI_Wtime(); #endif A.Kselect(recoverCols, recoverNum, kselectVersion); #ifdef 
TIMING t1=MPI_Wtime(); mcl_kselecttime += (t1-t0); #endif pruneCols.Set(recoverCols); #ifdef COMBBLAS_DEBUG std::ostringstream outs; outs << "Number of columns needing recovery: " << nrecover << std::endl; SpParHelper::Print(outs.str()); #endif } if(selectNum>0) { // remaining columns will be up for selection FullyDistSpVec<IT,NT> selectCols = EWiseApply<NT>(recoverCols, colSums, [](NT spval, NT dval){return spval;}, [](NT spval, NT dval){return spval==-1;}, true, static_cast<NT>(-1)); selectCols = selectNum; selectCols = EWiseApply<NT>(selectCols, nnzPerColumn, [](NT spval, NT dval){return spval;}, [](NT spval, NT dval){return dval > spval;}, false, NT()); IT nselect = selectCols.getnnz(); if(nselect > 0 ) { #ifdef TIMING t0=MPI_Wtime(); #endif A.Kselect(selectCols, selectNum, kselectVersion); // PrunedA would also work #ifdef TIMING t1=MPI_Wtime(); mcl_kselecttime += (t1-t0); #endif pruneCols.Set(selectCols); #ifdef COMBBLAS_DEBUG std::ostringstream outs; outs << "Number of columns needing selection: " << nselect << std::endl; SpParHelper::Print(outs.str()); #endif #ifdef TIMING t0=MPI_Wtime(); #endif SpParMat<IT,NT,DER> selectedA = A.PruneColumn(pruneCols, std::less<NT>(), false); #ifdef TIMING t1=MPI_Wtime(); mcl_prunecolumntime += (t1-t0); #endif if(recoverNum>0 ) // recovery can be attempted after selection { FullyDistVec<IT,NT> nnzPerColumn1 = selectedA.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;}); FullyDistVec<IT,NT> colSums1 = selectedA.Reduce(Column, std::plus<NT>(), 0.0); selectedA.FreeMemory(); // slected columns with nnz < recoverNum (r) selectCols = recoverNum; selectCols = EWiseApply<NT>(selectCols, nnzPerColumn1, [](NT spval, NT dval){return spval;}, [](NT spval, NT dval){return dval < spval;}, false, NT()); // selected columns with sum < recoverPct (pct) selectCols = recoverPct; selectCols = EWiseApply<NT>(selectCols, colSums1, [](NT spval, NT dval){return spval;}, [](NT spval, NT dval){return dval < spval;}, false, NT()); IT 
n_recovery_after_select = selectCols.getnnz(); if(n_recovery_after_select>0) { // mclExpandVector2 does it on the original vector // mclExpandVector1 does it one pruned vector #ifdef TIMING t0=MPI_Wtime(); #endif A.Kselect(selectCols, recoverNum, kselectVersion); // Kselect on PrunedA might give different result #ifdef TIMING t1=MPI_Wtime(); mcl_kselecttime += (t1-t0); #endif pruneCols.Set(selectCols); #ifdef COMBBLAS_DEBUG std::ostringstream outs1; outs1 << "Number of columns needing recovery after selection: " << nselect << std::endl; SpParHelper::Print(outs1.str()); #endif } } } } // final prune #ifdef TIMING t0=MPI_Wtime(); #endif A.PruneColumn(pruneCols, std::less<NT>(), true); #ifdef TIMING t1=MPI_Wtime(); mcl_prunecolumntime += (t1-t0); #endif // Add loops for empty columns if(recoverNum<=0 ) // if recoverNum>0, recovery would have added nonzeros in empty columns { FullyDistVec<IT,NT> nnzPerColumnA = A.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;}); FullyDistSpVec<IT,NT> emptyColumns(nnzPerColumnA, std::bind2nd(std::equal_to<NT>(), 0.0)); emptyColumns = 1.00; //Ariful: We need a selective AddLoops function with a sparse vector //A.AddLoops(emptyColumns); } } template <typename SR, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> IU EstimateFLOP (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false) { int myrank; MPI_Comm_rank(MPI_COMM_WORLD,&myrank); int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); IU C_m = A.spSeq->getnrow(); IU C_n = B.spSeq->getncol(); //const_cast< UDERB* >(B.spSeq)->Transpose(); // do not transpose for colum-by-column multiplication IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages); IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages); SpParHelper::GetSetSizes( 
*(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld()); // Remotely fetched matrices are stored as pointers UDERA * ARecv; UDERB * BRecv; IU local_flops = 0; int Aself = (A.commGrid)->GetRankInProcRow(); int Bself = (B.commGrid)->GetRankInProcCol(); for(int i = 0; i < stages; ++i) { std::vector<IU> ess; if(i == Aself) { ARecv = A.spSeq; // shallow-copy } else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, create the object } SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements ess.clear(); if(i == Bself) { BRecv = B.spSeq; // shallow-copy } else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv = new UDERB(); } SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements local_flops += EstimateLocalFLOP<SR> (*ARecv, *BRecv, // parameters themselves i != Aself, // 'delete A' condition i != Bself); // 'delete B' condition } if(clearA && A.spSeq != NULL) { delete A.spSeq; A.spSeq = NULL; } if(clearB && B.spSeq != NULL) { delete B.spSeq; B.spSeq = NULL; } SpHelper::deallocate2D(ARecvSizes, UDERA::esscount); SpHelper::deallocate2D(BRecvSizes, UDERB::esscount); //if(!clearB) // const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original IU global_flops = 0; MPI_Allreduce(&local_flops, &global_flops, 1, MPI_LONG_LONG_INT, MPI_SUM, A.getcommgrid()->GetWorld()); return global_flops; } /** * Broadcasts A multiple times (#phases) in order to save storage in the output * Only uses 1/phases of C memory if the threshold/max limits are proper */ template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU,NUO,UDERO> MemEfficientSpGEMM (SpParMat<IU,NU1,UDERA> & 
A, SpParMat<IU,NU2,UDERB> & B, int phases, NUO hardThreshold, IU selectNum, IU recoverNum, NUO recoverPct, int kselectVersion, int64_t perProcessMemory) { typedef typename UDERA::LocalIT LIA; typedef typename UDERB::LocalIT LIB; typedef typename UDERO::LocalIT LIC; int myrank; MPI_Comm_rank(MPI_COMM_WORLD,&myrank); if(A.getncol() != B.getnrow()) { std::ostringstream outs; outs << "Can not multiply, dimensions does not match"<< std::endl; outs << A.getncol() << " != " << B.getnrow() << std::endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); return SpParMat< IU,NUO,UDERO >(); } if(phases <1 || phases >= A.getncol()) { SpParHelper::Print("MemEfficientSpGEMM: The value of phases is too small or large. Resetting to 1.\n"); phases = 1; } int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); double t0, t1, t2, t3, t4, t5; #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); t0 = MPI_Wtime(); #endif if(perProcessMemory>0) // estimate the number of phases permitted by memory { int p; MPI_Comm World = GridC->GetWorld(); MPI_Comm_size(World,&p); int64_t perNNZMem_in = sizeof(IU)*2 + sizeof(NU1); int64_t perNNZMem_out = sizeof(IU)*2 + sizeof(NUO); // max nnz(A) in a porcess int64_t lannz = A.getlocalnnz(); int64_t gannz; MPI_Allreduce(&lannz, &gannz, 1, MPIType<int64_t>(), MPI_MAX, World); int64_t inputMem = gannz * perNNZMem_in * 4; // for four copies (two for SUMMA) // max nnz(A^2) stored by SUMMA in a porcess int64_t asquareNNZ = EstPerProcessNnzSUMMA(A,B, false); int64_t asquareMem = asquareNNZ * perNNZMem_out * 2; // an extra copy in multiway merge and in selection/recovery step // estimate kselect memory int64_t d = ceil( (asquareNNZ * sqrt(p))/ B.getlocalcols() ); // average nnz per column in A^2 (it is an overestimate because asquareNNZ is estimated based on unmerged matrices) // this is 
equivalent to (asquareNNZ * p) / B.getcol() int64_t k = std::min(int64_t(std::max(selectNum, recoverNum)), d ); int64_t kselectmem = B.getlocalcols() * k * 8 * 3; // estimate output memory int64_t outputNNZ = (B.getlocalcols() * k)/sqrt(p); int64_t outputMem = outputNNZ * perNNZMem_in * 2; //inputMem + outputMem + asquareMem/phases + kselectmem/phases < memory int64_t remainingMem = perProcessMemory*1000000000 - inputMem - outputMem; if(remainingMem > 0) { phases = 1 + (asquareMem+kselectmem) / remainingMem; } if(myrank==0) { if(remainingMem < 0) { std::cout << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n Warning: input and output memory requirement is greater than per-process avaiable memory. Keeping phase to the value supplied at the command line. The program may go out of memory and crash! \n !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" << std::endl; } #ifdef SHOW_MEMORY_USAGE int64_t maxMemory = kselectmem/phases + inputMem + outputMem + asquareMem / phases; if(maxMemory>1000000000) std::cout << "phases: " << phases << ": per process memory: " << perProcessMemory << " GB asquareMem: " << asquareMem/1000000000.00 << " GB" << " inputMem: " << inputMem/1000000000.00 << " GB" << " outputMem: " << outputMem/1000000000.00 << " GB" << " kselectmem: " << kselectmem/1000000000.00 << " GB" << std::endl; else std::cout << "phases: " << phases << ": per process memory: " << perProcessMemory << " GB asquareMem: " << asquareMem/1000000.00 << " MB" << " inputMem: " << inputMem/1000000.00 << " MB" << " outputMem: " << outputMem/1000000.00 << " MB" << " kselectmem: " << kselectmem/1000000.00 << " MB" << std::endl; #endif } } if(myrank == 0){ fprintf(stderr, "[MemEfficientSpGEMM] Running with phase: %d\n", phases); } #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); t1 = MPI_Wtime(); mcl_symbolictime += (t1-t0); #endif LIA C_m = A.spSeq->getnrow(); LIB C_n = B.spSeq->getncol(); std::vector< UDERB > PiecesOfB; UDERB CopyB = *(B.spSeq); // we allow 
alias matrices as input because of this local copy CopyB.ColSplit(phases, PiecesOfB); // CopyB's memory is destroyed at this point MPI_Barrier(GridC->GetWorld()); LIA ** ARecvSizes = SpHelper::allocate2D<LIA>(UDERA::esscount, stages); LIB ** BRecvSizes = SpHelper::allocate2D<LIB>(UDERB::esscount, stages); static_assert(std::is_same<LIA, LIB>::value, "local index types for both input matrices should be the same"); static_assert(std::is_same<LIA, LIC>::value, "local index types for input and output matrices should be the same"); SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld()); // Remotely fetched matrices are stored as pointers UDERA * ARecv; UDERB * BRecv; std::vector< UDERO > toconcatenate; int Aself = (A.commGrid)->GetRankInProcRow(); int Bself = (B.commGrid)->GetRankInProcCol(); for(int dbg = 0; dbg < 1; dbg++){ for(int p = 0; p< phases; ++p) { SpParHelper::GetSetSizes( PiecesOfB[p], BRecvSizes, (B.commGrid)->GetColWorld()); std::vector< SpTuples<LIC,NUO> *> tomerge; for(int i = 0; i < stages; ++i) { std::vector<LIA> ess; if(i == Aself) ARecv = A.spSeq; // shallow-copy else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row ARecv = new UDERA(); // first, create the object } #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); t0 = MPI_Wtime(); #endif SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); t1 = MPI_Wtime(); mcl_Abcasttime += (t1-t0); #endif ess.clear(); if(i == Bself) BRecv = &(PiecesOfB[p]); // shallow-copy else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) ess[j] = BRecvSizes[j][i]; BRecv = new UDERB(); } #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t2=MPI_Wtime(); #endif SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements #ifdef TIMING 
MPI_Barrier(A.getcommgrid()->GetWorld()); double t3=MPI_Wtime(); mcl_Bbcasttime += (t3-t2); #endif #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t4=MPI_Wtime(); #endif double vm_usage, resident_set; SpTuples<LIC,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>(*ARecv, *BRecv,i != Aself, i != Bself); #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t5=MPI_Wtime(); mcl_localspgemmtime += (t5-t4); #endif if(!C_cont->isZero()) tomerge.push_back(C_cont); else delete C_cont; } // all stages executed #ifdef SHOW_MEMORY_USAGE int64_t gcnnz_unmerged, lcnnz_unmerged = 0; for(size_t i = 0; i < tomerge.size(); ++i) { lcnnz_unmerged += tomerge[i]->getnnz(); } MPI_Allreduce(&lcnnz_unmerged, &gcnnz_unmerged, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD); int64_t summa_memory = gcnnz_unmerged*20;//(gannz*2 + phase_nnz + gcnnz_unmerged + gannz + gannz/phases) * 20; // last two for broadcasts if(myrank==0) { if(summa_memory>1000000000) std::cout << p+1 << ". unmerged: " << summa_memory/1000000000.00 << "GB " ; else std::cout << p+1 << ". 
unmerged: " << summa_memory/1000000.00 << " MB " ; } #endif #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t6=MPI_Wtime(); #endif //UDERO OnePieceOfC(MergeAll<SR>(tomerge, C_m, PiecesOfB[p].getncol(),true), false); // TODO: MultiwayMerge can directly return UDERO inorder to avoid the extra copy SpTuples<LIC,NUO> * OnePieceOfC_tuples = MultiwayMerge<SR>(tomerge, C_m, PiecesOfB[p].getncol(),true); #ifdef SHOW_MEMORY_USAGE int64_t gcnnz_merged, lcnnz_merged ; lcnnz_merged = OnePieceOfC_tuples->getnnz(); MPI_Allreduce(&lcnnz_merged, &gcnnz_merged, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD); // TODO: we can remove gcnnz_merged memory here because we don't need to concatenate anymore int64_t merge_memory = gcnnz_merged*2*20;//(gannz*2 + phase_nnz + gcnnz_unmerged + gcnnz_merged*2) * 20; if(myrank==0) { if(merge_memory>1000000000) std::cout << " merged: " << merge_memory/1000000000.00 << "GB " ; else std::cout << " merged: " << merge_memory/1000000.00 << " MB " ; } #endif #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t7=MPI_Wtime(); mcl_multiwaymergetime += (t7-t6); #endif UDERO * OnePieceOfC = new UDERO(* OnePieceOfC_tuples, false); delete OnePieceOfC_tuples; SpParMat<IU,NUO,UDERO> OnePieceOfC_mat(OnePieceOfC, GridC); MCLPruneRecoverySelect(OnePieceOfC_mat, hardThreshold, selectNum, recoverNum, recoverPct, kselectVersion); //mcl_nnzc += OnePieceOfC_mat.getnnz(); #ifdef SHOW_MEMORY_USAGE int64_t gcnnz_pruned, lcnnz_pruned ; lcnnz_pruned = OnePieceOfC_mat.getlocalnnz(); MPI_Allreduce(&lcnnz_pruned, &gcnnz_pruned, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD); // TODO: we can remove gcnnz_merged memory here because we don't need to concatenate anymore int64_t prune_memory = gcnnz_pruned*2*20;//(gannz*2 + phase_nnz + gcnnz_pruned*2) * 20 + kselectmem; // 3 extra copies of OnePieceOfC_mat, we can make it one extra copy! 
//phase_nnz += gcnnz_pruned; if(myrank==0) { if(prune_memory>1000000000) std::cout << "Prune: " << prune_memory/1000000000.00 << "GB " << std::endl ; else std::cout << "Prune: " << prune_memory/1000000.00 << " MB " << std::endl ; } #endif // ABAB: Change this to accept pointers to objects if(dbg == 0) { toconcatenate.push_back(OnePieceOfC_mat.seq()); } } //double vm_usage, resident_set; //process_mem_usage(vm_usage, resident_set); //if(myrank == 0) fprintf(stderr, "VmSize after %dth all phase: %lf %lf\n", dbg+1, vm_usage, resident_set); } UDERO * C = new UDERO(0,C_m, C_n,0); C->ColConcatenate(toconcatenate); // ABAB: Change this to accept a vector of pointers to pointers to DER objects SpHelper::deallocate2D(ARecvSizes, UDERA::esscount); SpHelper::deallocate2D(BRecvSizes, UDERA::esscount); return SpParMat<IU,NUO,UDERO> (C, GridC); } template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> int CalculateNumberOfPhases (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, NUO hardThreshold, IU selectNum, IU recoverNum, NUO recoverPct, int kselectVersion, int64_t perProcessMemory){ int phases; typedef typename UDERA::LocalIT LIA; typedef typename UDERB::LocalIT LIB; typedef typename UDERO::LocalIT LIC; int myrank; MPI_Comm_rank(MPI_COMM_WORLD,&myrank); int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); double t0, t1, t2, t3, t4, t5; int p; MPI_Comm World = GridC->GetWorld(); MPI_Comm_size(World,&p); int64_t perNNZMem_in = sizeof(IU)*2 + sizeof(NU1); int64_t perNNZMem_out = sizeof(IU)*2 + sizeof(NUO); // max nnz(A) in a porcess int64_t lannz = A.getlocalnnz(); int64_t gannz; MPI_Allreduce(&lannz, &gannz, 1, MPIType<int64_t>(), MPI_MAX, World); int64_t inputMem = gannz * perNNZMem_in * 4; // for four copies (two for SUMMA) // max nnz(A^2) stored by 
SUMMA in a porcess int64_t asquareNNZ = EstPerProcessNnzSUMMA(A,B, false); int64_t asquareMem = asquareNNZ * perNNZMem_out * 2; // an extra copy in multiway merge and in selection/recovery step // estimate kselect memory int64_t d = ceil( (asquareNNZ * sqrt(p))/ B.getlocalcols() ); // average nnz per column in A^2 (it is an overestimate because asquareNNZ is estimated based on unmerged matrices) // this is equivalent to (asquareNNZ * p) / B.getcol() int64_t k = std::min(int64_t(std::max(selectNum, recoverNum)), d ); int64_t kselectmem = B.getlocalcols() * k * 8 * 3; // estimate output memory int64_t outputNNZ = (B.getlocalcols() * d)/sqrt(p); //int64_t outputNNZ = (B.getlocalcols() * k)/sqrt(p); // if kselect is used int64_t outputMem = outputNNZ * perNNZMem_in * 2; //inputMem + outputMem + asquareMem/phases + kselectmem/phases < memory //int64_t remainingMem = perProcessMemory*1000000000 - inputMem - outputMem; int64_t remainingMem = perProcessMemory*1000000000 - inputMem; // if each phase result is discarded //if(remainingMem > 0) //{ //phases = 1 + (asquareMem+kselectmem) / remainingMem; //} phases = 1 + asquareMem / remainingMem; return phases; } /** * Parallel C = A*B routine that uses a double buffered broadcasting scheme * @pre { Input matrices, A and B, should not alias } * Most memory efficient version available. 
Total stages: 2*sqrt(p)
 * Memory requirement during first sqrt(p) stages: <= (3/2)*(nnz(A)+nnz(B))+(1/2)*nnz(C)
 * Memory requirement during second sqrt(p) stages: <= nnz(A)+nnz(B)+nnz(C)
 * Final memory requirement: nnz(C) if clearA and clearB are true
 **/
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,NUO,UDERO> Mult_AnXBn_DoubleBuff
(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false )
{
    if(!CheckSpGEMMCompliance(A,B) )
    {
        return SpParMat< IU,NUO,UDERO >();
    }
    typedef typename UDERA::LocalIT LIA;
    typedef typename UDERB::LocalIT LIB;
    typedef typename UDERO::LocalIT LIC;
    static_assert(std::is_same<LIA, LIB>::value, "local index types for both input matrices should be the same");
    static_assert(std::is_same<LIA, LIC>::value, "local index types for input and output matrices should be the same");
    int stages, dummy; 	// last two parameters of ProductGrid are ignored for Synch multiplication
    std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy);
    LIA C_m = A.spSeq->getnrow();
    LIB C_n = B.spSeq->getncol();

    // Halve the broadcast volume per round: each input is split in two and the two
    // halves are processed in two consecutive SUMMA sweeps (hence "double buffered").
    UDERA * A1seq = new UDERA();
    UDERA * A2seq = new UDERA();
    UDERB * B1seq = new UDERB();
    UDERB * B2seq = new UDERB();
    (A.spSeq)->Split( *A1seq, *A2seq);
    const_cast< UDERB* >(B.spSeq)->Transpose();
    (B.spSeq)->Split( *B1seq, *B2seq);
    // Transpose back for the column-by-column algorithm
    const_cast< UDERB* >(B1seq)->Transpose();
    const_cast< UDERB* >(B2seq)->Transpose();

    LIA ** ARecvSizes = SpHelper::allocate2D<LIA>(UDERA::esscount, stages);
    LIB ** BRecvSizes = SpHelper::allocate2D<LIB>(UDERB::esscount, stages);

    SpParHelper::GetSetSizes( *A1seq, ARecvSizes, (A.commGrid)->GetRowWorld());
    SpParHelper::GetSetSizes( *B1seq, BRecvSizes, (B.commGrid)->GetColWorld());

    // Remotely fetched matrices are stored as pointers
    UDERA * ARecv;
    UDERB * BRecv;
    std::vector< SpTuples<LIC,NUO> *> tomerge;

    int Aself = (A.commGrid)->GetRankInProcRow();
    int Bself = (B.commGrid)->GetRankInProcCol();

    // First round: SUMMA over the first halves (A1seq x B1seq)
    for(int i = 0; i < stages; ++i)
    {
        std::vector<LIA> ess;
        if(i == Aself)
        {
            ARecv = A1seq;	// shallow-copy
        }
        else
        {
            ess.resize(UDERA::esscount);
            for(int j=0; j< UDERA::esscount; ++j)
            {
                ess[j] = ARecvSizes[j][i];	// essentials of the ith matrix in this row
            }
            ARecv = new UDERA();	// first, create the object
        }
        SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);	// then, receive its elements
        ess.clear();
        if(i == Bself)
        {
            BRecv = B1seq;	// shallow-copy
        }
        else
        {
            ess.resize(UDERB::esscount);
            for(int j=0; j< UDERB::esscount; ++j)
            {
                ess[j] = BRecvSizes[j][i];
            }
            BRecv = new UDERB();
        }
        SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);	// then, receive its elements

        // before activating this remove transposing B1seq
        /*
        SpTuples<LIC,NUO> * C_cont = MultiplyReturnTuples<SR, NUO>
                                        (*ARecv, *BRecv,    // parameters themselves
                                        false, true,        // transpose information (B is transposed)
                                        i != Aself,         // 'delete A' condition
                                        i != Bself);        // 'delete B' condition
        */
        SpTuples<LIC,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>
                                        (*ARecv, *BRecv,    // parameters themselves
                                        i != Aself,         // 'delete A' condition
                                        i != Bself);        // 'delete B' condition

        if(!C_cont->isZero())
            tomerge.push_back(C_cont);
        else
            delete C_cont;
    }
    if(clearA) delete A1seq;
    if(clearB) delete B1seq;

    // Set the new dimensions
    SpParHelper::GetSetSizes( *A2seq, ARecvSizes, (A.commGrid)->GetRowWorld());
    SpParHelper::GetSetSizes( *B2seq, BRecvSizes, (B.commGrid)->GetColWorld());

    // Start the second round (A2seq x B2seq); results accumulate into the same tomerge list
    for(int i = 0; i < stages; ++i)
    {
        std::vector<LIA> ess;
        if(i == Aself)
        {
            ARecv = A2seq;	// shallow-copy
        }
        else
        {
            ess.resize(UDERA::esscount);
            for(int j=0; j< UDERA::esscount; ++j)
            {
                ess[j] = ARecvSizes[j][i];	// essentials of the ith matrix in this row
            }
            ARecv = new UDERA();	// first, create the object
        }
        SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);	// then, receive its elements
        ess.clear();
        if(i == Bself)
        {
            BRecv = B2seq;	// shallow-copy
        }
        else
        {
            ess.resize(UDERB::esscount);
            for(int j=0; j< UDERB::esscount; ++j)
            {
                ess[j] = BRecvSizes[j][i];
            }
            BRecv = new UDERB();
        }
        SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);	// then, receive its elements

        // before activating this remove transposing B2seq
        /*
        SpTuples<LIC,NUO> * C_cont = MultiplyReturnTuples<SR, NUO>
                                        (*ARecv, *BRecv,    // parameters themselves
                                        false, true,        // transpose information (B is transposed)
                                        i != Aself,         // 'delete A' condition
                                        i != Bself);        // 'delete B' condition
        */
        SpTuples<LIC,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO>
                                        (*ARecv, *BRecv,    // parameters themselves
                                        i != Aself,         // 'delete A' condition
                                        i != Bself);        // 'delete B' condition

        if(!C_cont->isZero())
            tomerge.push_back(C_cont);
        else
            delete C_cont;
    }

    SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
    SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
    // Either free the inputs, or stitch the halves back so A and B are unchanged on exit
    if(clearA)
    {
        delete A2seq;
        delete A.spSeq;
        A.spSeq = NULL;
    }
    else
    {
        (A.spSeq)->Merge(*A1seq, *A2seq);
        delete A1seq;
        delete A2seq;
    }
    if(clearB)
    {
        delete B2seq;
        delete B.spSeq;
        B.spSeq = NULL;
    }
    else
    {
        B1seq->Transpose();
        B2seq->Transpose();
        (B.spSeq)->Merge(*B1seq, *B2seq);
        delete B1seq;
        delete B2seq;
        const_cast< UDERB* >(B.spSeq)->Transpose();	// transpose back to original
    }
    UDERO * C = new UDERO(MergeAll<SR>(tomerge, C_m, C_n,true), false);
    return SpParMat<IU,NUO,UDERO> (C, GridC);	// return the result object
}

// Reports this process's virtual-memory and resident-set size (read from /proc/self/stat),
// then allreduces so every rank gets the MAX across ranks. Linux-specific.
void process_mem_usage(double& vm_usage, double& resident_set)
{
    using std::ios_base;
    using std::ifstream;
    using std::string;
    vm_usage     = 0.0;
    resident_set = 0.0;
    // NOTE(review): the declarations below appear commented out here, yet stat_stream,
    // pid..tty_nr, vsize and rss are all used further down — this looks like lost
    // newlines in this copy; confirm against the upstream CombBLAS source.
    // 'file' stat seems to give the most reliable results
    // ifstream stat_stream("/proc/self/stat",ios_base::in);
    // dummy vars for leading entries in stat that we don't care about
    // string pid, comm, state, ppid, pgrp, session, tty_nr;
    string tpgid, flags, minflt, cminflt, majflt, cmajflt;
    string utime, stime, cutime, cstime, priority, nice;
    string O, itrealvalue, starttime;
    // the two fields we want
    // unsigned long vsize; long rss;
stat_stream >> pid >> comm >> state >> ppid >> pgrp >> session >> tty_nr >> tpgid >> flags >> minflt >> cminflt >> majflt >> cmajflt >> utime >> stime >> cutime >> cstime >> priority >> nice >> O >> itrealvalue >> starttime >> vsize >> rss; // don't care about the rest stat_stream.close(); long page_size_kb = sysconf(_SC_PAGE_SIZE) / 1024; // in case x86-64 is configured to use 2MB pages //vm_usage = vsize / (1024.0 * 1024 * 1024); //resident_set = rss * page_size_kb/(1024 * 1024.0); vm_usage = vsize / (1024.0); resident_set = rss * page_size_kb/(1.0); double max_vm_usage; double max_resident_set; MPI_Allreduce(&vm_usage, &max_vm_usage, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); MPI_Allreduce(&resident_set, &max_resident_set, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD); vm_usage = max_vm_usage; resident_set = max_resident_set; } /** * Parallel A = B*C routine that uses only MPI-1 features * Relies on simple blocking broadcast * @pre { Input matrices, A and B, should not alias } **/ template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU, NUO, UDERO> Mult_AnXBn_Synch (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false ) { int myrank; MPI_Comm_rank(MPI_COMM_WORLD,&myrank); if(!CheckSpGEMMCompliance(A,B) ) { return SpParMat< IU,NUO,UDERO >(); } int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); IU C_m = A.spSeq->getnrow(); IU C_n = B.spSeq->getncol(); //const_cast< UDERB* >(B.spSeq)->Transpose(); // do not transpose for colum-by-column multiplication IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages); IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages); SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( 
*(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld()); // Remotely fetched matrices are stored as pointers UDERA * ARecv; UDERB * BRecv; std::vector< SpTuples<IU,NUO> *> tomerge; int Aself = (A.commGrid)->GetRankInProcRow(); int Bself = (B.commGrid)->GetRankInProcCol(); double Abcast_time = 0; double Bbcast_time = 0; double Local_multiplication_time = 0; for(int i = 0; i < stages; ++i) { std::vector<IU> ess; if(i == Aself) { ARecv = A.spSeq; // shallow-copy } else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, create the object } #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t0 = MPI_Wtime(); #endif SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t1 = MPI_Wtime(); mcl3d_Abcasttime += (t1-t0); Abcast_time += (t1-t0); #endif ess.clear(); if(i == Bself) { BRecv = B.spSeq; // shallow-copy } else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv = new UDERB(); } #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t2 = MPI_Wtime(); #endif SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t3 = MPI_Wtime(); mcl3d_Bbcasttime += (t3-t2); Bbcast_time += (t3-t2); #endif #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t4 = MPI_Wtime(); #endif SpTuples<IU,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO> (*ARecv, *BRecv, // parameters themselves i != Aself, // 'delete A' condition i != Bself); // 'delete B' condition #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t5 = MPI_Wtime(); mcl3d_localspgemmtime += (t5-t4); Local_multiplication_time += (t5-t4); #endif if(!C_cont->isZero()) tomerge.push_back(C_cont); #ifdef COMBBLAS_DEBUG 
std::ostringstream outs; outs << i << "th SUMMA iteration"<< std::endl; SpParHelper::Print(outs.str()); #endif } if(clearA && A.spSeq != NULL) { delete A.spSeq; A.spSeq = NULL; } if(clearB && B.spSeq != NULL) { delete B.spSeq; B.spSeq = NULL; } SpHelper::deallocate2D(ARecvSizes, UDERA::esscount); SpHelper::deallocate2D(BRecvSizes, UDERB::esscount); //UDERO * C = new UDERO(MergeAll<SR>(tomerge, C_m, C_n,true), false); // First get the result in SpTuples, then convert to UDER // the last parameter to MergeAll deletes tomerge arrays #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t0 = MPI_Wtime(); #endif SpTuples<IU,NUO> * C_tuples = MultiwayMerge<SR>(tomerge, C_m, C_n,true); #ifdef TIMING MPI_Barrier(A.getcommgrid()->GetWorld()); double t1 = MPI_Wtime(); mcl3d_SUMMAmergetime += (t1-t0); #endif UDERO * C = new UDERO(*C_tuples, false); delete C_tuples; //if(!clearB) // const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original #ifdef TIMING if(myrank == 0){ fprintf(stderr, "[Mult_AnXBn_Synch]\t Abcast_time: %lf\n", Abcast_time); fprintf(stderr, "[Mult_AnXBn_Synch]\t Bbcast_time: %lf\n", Bbcast_time); fprintf(stderr, "[Mult_AnXBn_Synch]\t Local_multiplication_time: %lf\n", Local_multiplication_time); fprintf(stderr, "[Mult_AnXBn_Synch]\t SUMMA Merge time: %lf\n", (t1-t0)); } #endif return SpParMat<IU,NUO,UDERO> (C, GridC); // return the result object } template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU, NUO, UDERO> Mult_AnXBn_Overlap (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false ) { int myrank; MPI_Comm_rank(MPI_COMM_WORLD,&myrank); if(!CheckSpGEMMCompliance(A,B) ) { return SpParMat< IU,NUO,UDERO >(); } int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); 
IU C_m = A.spSeq->getnrow(); IU C_n = B.spSeq->getncol(); //const_cast< UDERB* >(B.spSeq)->Transpose(); // do not transpose for colum-by-column multiplication IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages); IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages); SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld()); // Remotely fetched matrices are stored as pointers UDERA ** ARecv = new UDERA* [stages]; UDERB ** BRecv = new UDERB* [stages]; Arr<IU,NU1> Aarrinfo = A.seqptr()->GetArrays(); Arr<IU,NU2> Barrinfo = B.seqptr()->GetArrays(); std::vector< std::vector<MPI_Request> > ABCastIndarrayReq; std::vector< std::vector<MPI_Request> > ABCastNumarrayReq; std::vector< std::vector<MPI_Request> > BBCastIndarrayReq; std::vector< std::vector<MPI_Request> > BBCastNumarrayReq; for(int i = 0; i < stages; i++){ ABCastIndarrayReq.push_back( std::vector<MPI_Request>(Aarrinfo.indarrs.size(), MPI_REQUEST_NULL) ); ABCastNumarrayReq.push_back( std::vector<MPI_Request>(Aarrinfo.numarrs.size(), MPI_REQUEST_NULL) ); BBCastIndarrayReq.push_back( std::vector<MPI_Request>(Barrinfo.indarrs.size(), MPI_REQUEST_NULL) ); BBCastNumarrayReq.push_back( std::vector<MPI_Request>(Barrinfo.numarrs.size(), MPI_REQUEST_NULL) ); } int Aself = (A.commGrid)->GetRankInProcRow(); int Bself = (B.commGrid)->GetRankInProcCol(); std::vector< SpTuples<IU,NUO> *> tomerge; for(int i = 0; i < stages; ++i){ std::vector<IU> ess; if(i == Aself) ARecv[i] = A.spSeq; // shallow-copy else{ ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row ARecv[i] = new UDERA(); // first, create the object } SpParHelper::IBCastMatrix(GridC->GetRowWorld(), *(ARecv[i]), ess, i, ABCastIndarrayReq[i], ABCastNumarrayReq[i]); // then, receive its elements ess.clear(); if(i == Bself) BRecv[i] = B.spSeq; // shallow-copy 
else{ ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) ess[j] = BRecvSizes[j][i]; BRecv[i] = new UDERB(); } SpParHelper::IBCastMatrix(GridC->GetColWorld(), *(BRecv[i]), ess, i, BBCastIndarrayReq[i], BBCastNumarrayReq[i]); // then, receive its elements if(i > 0){ MPI_Waitall(ABCastIndarrayReq[i-1].size(), ABCastIndarrayReq[i-1].data(), MPI_STATUSES_IGNORE); MPI_Waitall(ABCastNumarrayReq[i-1].size(), ABCastNumarrayReq[i-1].data(), MPI_STATUSES_IGNORE); MPI_Waitall(BBCastIndarrayReq[i-1].size(), BBCastIndarrayReq[i-1].data(), MPI_STATUSES_IGNORE); MPI_Waitall(BBCastNumarrayReq[i-1].size(), BBCastNumarrayReq[i-1].data(), MPI_STATUSES_IGNORE); SpTuples<IU,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO> (*(ARecv[i-1]), *(BRecv[i-1]), // parameters themselves i-1 != Aself, // 'delete A' condition i-1 != Bself); // 'delete B' condition if(!C_cont->isZero()) tomerge.push_back(C_cont); SpTuples<IU,NUO> * C_tuples = MultiwayMerge<SR>(tomerge, C_m, C_n,true); std::vector< SpTuples<IU,NUO> *>().swap(tomerge); tomerge.push_back(C_tuples); } #ifdef COMBBLAS_DEBUG std::ostringstream outs; outs << i << "th SUMMA iteration"<< std::endl; SpParHelper::Print(outs.str()); #endif } MPI_Waitall(ABCastIndarrayReq[stages-1].size(), ABCastIndarrayReq[stages-1].data(), MPI_STATUSES_IGNORE); MPI_Waitall(ABCastNumarrayReq[stages-1].size(), ABCastNumarrayReq[stages-1].data(), MPI_STATUSES_IGNORE); MPI_Waitall(BBCastIndarrayReq[stages-1].size(), BBCastIndarrayReq[stages-1].data(), MPI_STATUSES_IGNORE); MPI_Waitall(BBCastNumarrayReq[stages-1].size(), BBCastNumarrayReq[stages-1].data(), MPI_STATUSES_IGNORE); SpTuples<IU,NUO> * C_cont = LocalHybridSpGEMM<SR, NUO> (*(ARecv[stages-1]), *(BRecv[stages-1]), // parameters themselves stages-1 != Aself, // 'delete A' condition stages-1 != Bself); // 'delete B' condition if(!C_cont->isZero()) tomerge.push_back(C_cont); if(clearA && A.spSeq != NULL) { delete A.spSeq; A.spSeq = NULL; } if(clearB && B.spSeq != NULL) { delete B.spSeq; B.spSeq = 
NULL; } delete ARecv; delete BRecv; SpHelper::deallocate2D(ARecvSizes, UDERA::esscount); SpHelper::deallocate2D(BRecvSizes, UDERB::esscount); // the last parameter to MergeAll deletes tomerge arrays SpTuples<IU,NUO> * C_tuples = MultiwayMerge<SR>(tomerge, C_m, C_n,true); std::vector< SpTuples<IU,NUO> *>().swap(tomerge); UDERO * C = new UDERO(*C_tuples, false); delete C_tuples; //if(!clearB) // const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original return SpParMat<IU,NUO,UDERO> (C, GridC); // return the result object } /** * Estimate the maximum nnz needed to store in a process from all stages of SUMMA before reduction * @pre { Input matrices, A and B, should not alias } **/ template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> int64_t EstPerProcessNnzSUMMA(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool hashEstimate) { typedef typename UDERA::LocalIT LIA; typedef typename UDERB::LocalIT LIB; static_assert(std::is_same<LIA, LIB>::value, "local index types for both input matrices should be the same"); double t0, t1; int64_t nnzC_SUMMA = 0; if(A.getncol() != B.getnrow()) { std::ostringstream outs; outs << "Can not multiply, dimensions does not match"<< std::endl; outs << A.getncol() << " != " << B.getnrow() << std::endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); return nnzC_SUMMA; } int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); MPI_Barrier(GridC->GetWorld()); LIA ** ARecvSizes = SpHelper::allocate2D<LIA>(UDERA::esscount, stages); LIB ** BRecvSizes = SpHelper::allocate2D<LIB>(UDERB::esscount, stages); SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld()); // Remotely fetched matrices are stored as pointers UDERA * ARecv; 
UDERB * BRecv;
int Aself = (A.commGrid)->GetRankInProcRow();
int Bself = (B.commGrid)->GetRankInProcCol();

// One synchronous (blocking-broadcast) SUMMA sweep, estimating per-stage output nnz
// instead of actually multiplying.
for(int i = 0; i < stages; ++i)
{
	std::vector<LIA> ess;
	if(i == Aself)
	{
		ARecv = A.spSeq;	// shallow-copy
	}
	else
	{
		ess.resize(UDERA::esscount);
		for(int j=0; j< UDERA::esscount; ++j)
		{
			ess[j] = ARecvSizes[j][i];	// essentials of the ith matrix in this row
		}
		ARecv = new UDERA();	// first, create the object
	}
#ifdef TIMING
	t0 = MPI_Wtime();
#endif
	SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i);	// then, receive its elements
#ifdef TIMING
	t1 = MPI_Wtime();
	sym_Abcasttime += t1-t0;
#endif
	ess.clear();

	if(i == Bself)
	{
		BRecv = B.spSeq;	// shallow-copy
	}
	else
	{
		ess.resize(UDERB::esscount);
		for(int j=0; j< UDERB::esscount; ++j)
		{
			ess[j] = BRecvSizes[j][i];
		}
		BRecv = new UDERB();
	}
#ifdef TIMING
	MPI_Barrier(GridC->GetWorld());
	t0 = MPI_Wtime();
#endif
	SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i);	// then, receive its elements
#ifdef TIMING
	t1 = MPI_Wtime();
	sym_Bbcasttime += t1-t0;
#endif

	// no need to keep entries of colnnzC in larger precision
	// because colnnzC is of length nzc and estimates nnzs per column
	// @OGUZ-EDIT Using hash spgemm for estimation
	//LIB * colnnzC = estimateNNZ(*ARecv, *BRecv);
#ifdef TIMING
	t0 = MPI_Wtime();
#endif
	LIB* flopC = estimateFLOP(*ARecv, *BRecv);
#ifdef TIMING
	t1 = MPI_Wtime();
	sym_estimatefloptime += t1-t0;
#endif
#ifdef TIMING
	t0 = MPI_Wtime();
#endif
	LIB* colnnzC = estimateNNZ_Hash(*ARecv, *BRecv, flopC);
#ifdef TIMING
	t1 = MPI_Wtime();
	sym_estimatennztime += t1-t0;
#endif
	LIB nzc = BRecv->GetDCSC()->nzc;
	int64_t nnzC_stage = 0;
#ifdef TIMING
	int64_t stage_proc_flop = 0;
#ifdef THREADED
#pragma omp parallel for reduction (+:stage_proc_flop)
#endif
	for (LIB k=0; k<nzc; k++)
	{
		stage_proc_flop = stage_proc_flop + flopC[k];
	}
	mcl3d_proc_flop += stage_proc_flop;
#endif
	if (flopC)
		delete [] flopC;
#ifdef TIMING
	t0 = MPI_Wtime();
#endif
	// Reduce the per-column nnz estimates into this stage's total
#ifdef THREADED
#pragma omp parallel for reduction (+:nnzC_stage)
#endif
	for (LIB k=0; k<nzc; k++)
	{
		nnzC_stage = nnzC_stage + colnnzC[k];
	}
	nnzC_SUMMA += nnzC_stage;
#ifdef TIMING
	t1 = MPI_Wtime();
	sym_SUMMAnnzreductiontime += t1-t0;
#endif
	if(colnnzC)
		delete [] colnnzC;

	// sampling-based estimation (comment the estimation above, and
	// comment out below to use)
	// int64_t nnzC_stage = estimateNNZ_sampling(*ARecv, *BRecv);
	// nnzC_SUMMA += nnzC_stage;

	// delete received data
	if(i != Aself)
		delete ARecv;
	if(i != Bself)
		delete BRecv;
}
SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);

// Report the worst-case (maximum) estimate across all processes
int64_t nnzC_SUMMA_max = 0;
MPI_Allreduce(&nnzC_SUMMA, &nnzC_SUMMA_max, 1, MPIType<int64_t>(), MPI_MAX, GridC->GetWorld());
return nnzC_SUMMA_max;
}

// Sanity checks shared by all SpMV variants: dimensions must match and the
// matrix/vector must live on comparable communication grids; aborts on violation.
template <typename MATRIX, typename VECTOR>
void CheckSpMVCompliance(const MATRIX & A, const VECTOR & x)
{
	if(A.getncol() != x.TotalLength())
	{
		std::ostringstream outs;
		outs << "Can not multiply, dimensions does not match"<< std::endl;
		outs << A.getncol() << " != " << x.TotalLength() << std::endl;
		SpParHelper::Print(outs.str());
		MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
	}
	if(!
( *(A.getcommgrid()) == *(x.getcommgrid())) ) { std::cout << "Grids are not comparable for SpMV" << std::endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); } } template <typename SR, typename IU, typename NUM, typename UDER> FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf); template <typename SR, typename IU, typename NUM, typename UDER> FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue) { typedef typename promote_trait<NUM,IU>::T_promote T_promote; OptBuf<int32_t, T_promote > optbuf = OptBuf<int32_t, T_promote >(); return SpMV<SR>(A, x, indexisvalue, optbuf); } /** * Step 1 of the sparse SpMV algorithm * @param[in,out] trxlocnz, lenuntil,trxinds,trxnums { set or allocated } * @param[in] indexisvalue **/ template<typename IU, typename NV> void TransposeVector(MPI_Comm & World, const FullyDistSpVec<IU,NV> & x, int32_t & trxlocnz, IU & lenuntil, int32_t * & trxinds, NV * & trxnums, bool indexisvalue) { int32_t xlocnz = (int32_t) x.getlocnnz(); int32_t roffst = (int32_t) x.RowLenUntil(); // since trxinds is int32_t int32_t roffset; IU luntil = x.LengthUntil(); int diagneigh = x.commGrid->GetComplementRank(); MPI_Status status; MPI_Sendrecv(&roffst, 1, MPIType<int32_t>(), diagneigh, TROST, &roffset, 1, MPIType<int32_t>(), diagneigh, TROST, World, &status); MPI_Sendrecv(&xlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, &trxlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, World, &status); MPI_Sendrecv(&luntil, 1, MPIType<IU>(), diagneigh, TRLUT, &lenuntil, 1, MPIType<IU>(), diagneigh, TRLUT, World, &status); // ABAB: Important observation is that local indices (given by x.ind) is 32-bit addressible // Copy them to 32 bit integers and transfer that to save 50% of off-node bandwidth trxinds = new 
int32_t[trxlocnz]; int32_t * temp_xind = new int32_t[xlocnz]; #ifdef THREADED #pragma omp parallel for #endif for(int i=0; i< xlocnz; ++i) temp_xind[i] = (int32_t) x.ind[i]; MPI_Sendrecv(temp_xind, xlocnz, MPIType<int32_t>(), diagneigh, TRI, trxinds, trxlocnz, MPIType<int32_t>(), diagneigh, TRI, World, &status); delete [] temp_xind; if(!indexisvalue) { trxnums = new NV[trxlocnz]; MPI_Sendrecv(const_cast<NV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NV>(), diagneigh, TRX, World, &status); } std::transform(trxinds, trxinds+trxlocnz, trxinds, std::bind2nd(std::plus<int32_t>(), roffset)); // fullydist indexing (p pieces) -> matrix indexing (sqrt(p) pieces) } /** * Step 2 of the sparse SpMV algorithm * @param[in,out] trxinds, trxnums { deallocated } * @param[in,out] indacc, numacc { allocated } * @param[in,out] accnz { set } * @param[in] trxlocnz, lenuntil, indexisvalue **/ template<typename IU, typename NV> void AllGatherVector(MPI_Comm & ColWorld, int trxlocnz, IU lenuntil, int32_t * & trxinds, NV * & trxnums, int32_t * & indacc, NV * & numacc, int & accnz, bool indexisvalue) { int colneighs, colrank; MPI_Comm_size(ColWorld, &colneighs); MPI_Comm_rank(ColWorld, &colrank); int * colnz = new int[colneighs]; colnz[colrank] = trxlocnz; MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld); int * dpls = new int[colneighs](); // displacements (zero initialized pid) std::partial_sum(colnz, colnz+colneighs-1, dpls+1); accnz = std::accumulate(colnz, colnz+colneighs, 0); indacc = new int32_t[accnz]; numacc = new NV[accnz]; // ABAB: Future issues here, colnz is of type int (MPI limitation) // What if the aggregate vector size along the processor row/column is not 32-bit addressible? 
// This will happen when n/sqrt(p) > 2^31 // Currently we can solve a small problem (scale 32) with 4096 processor // For a medium problem (scale 35), we'll need 32K processors which gives sqrt(p) ~ 180 // 2^35 / 180 ~ 2^29 / 3 which is not an issue ! #ifdef TIMING double t0=MPI_Wtime(); #endif MPI_Allgatherv(trxinds, trxlocnz, MPIType<int32_t>(), indacc, colnz, dpls, MPIType<int32_t>(), ColWorld); delete [] trxinds; if(indexisvalue) { IU lenuntilcol; if(colrank == 0) lenuntilcol = lenuntil; MPI_Bcast(&lenuntilcol, 1, MPIType<IU>(), 0, ColWorld); for(int i=0; i< accnz; ++i) // fill numerical values from indices { numacc[i] = indacc[i] + lenuntilcol; } } else { MPI_Allgatherv(trxnums, trxlocnz, MPIType<NV>(), numacc, colnz, dpls, MPIType<NV>(), ColWorld); delete [] trxnums; } #ifdef TIMING double t1=MPI_Wtime(); cblas_allgathertime += (t1-t0); #endif DeleteAll(colnz,dpls); } /** * Step 3 of the sparse SpMV algorithm, with the semiring * @param[in,out] optbuf {scratch space for all-to-all (fold) communication} * @param[in,out] indacc, numacc {index and values of the input vector, deleted upon exit} * @param[in,out] sendindbuf, sendnumbuf {index and values of the output vector, created} **/ template<typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER> void LocalSpMV(const SpParMat<IU,NUM,UDER> & A, int rowneighs, OptBuf<int32_t, OVT > & optbuf, int32_t * & indacc, IVT * & numacc, int32_t * & sendindbuf, OVT * & sendnumbuf, int * & sdispls, int * sendcnt, int accnz, bool indexisvalue, PreAllocatedSPA<OVT> & SPA) { if(optbuf.totmax > 0) // graph500 optimization enabled { if(A.spSeq->getnsplit() > 0) { // optbuf.{inds/nums/dspls} and sendcnt are all pre-allocated and only filled by dcsc_gespmv_threaded generic_gespmv_threaded_setbuffers<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs); } else { generic_gespmv<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, 
optbuf.dspls, rowneighs, indexisvalue); } DeleteAll(indacc,numacc); } else { if(A.spSeq->getnsplit() > 0) { // sendindbuf/sendnumbuf/sdispls are all allocated and filled by dcsc_gespmv_threaded int totalsent = generic_gespmv_threaded<SR> (*(A.spSeq), indacc, numacc, accnz, sendindbuf, sendnumbuf, sdispls, rowneighs, SPA); DeleteAll(indacc, numacc); for(int i=0; i<rowneighs-1; ++i) sendcnt[i] = sdispls[i+1] - sdispls[i]; sendcnt[rowneighs-1] = totalsent - sdispls[rowneighs-1]; } else { // default SpMSpV std::vector< int32_t > indy; std::vector< OVT > numy; generic_gespmv<SR>(*(A.spSeq), indacc, numacc, accnz, indy, numy, SPA); DeleteAll(indacc, numacc); int32_t bufsize = indy.size(); // as compact as possible sendindbuf = new int32_t[bufsize]; sendnumbuf = new OVT[bufsize]; int32_t perproc = A.getlocalrows() / rowneighs; int k = 0; // index to buffer for(int i=0; i<rowneighs; ++i) { int32_t end_this = (i==rowneighs-1) ? A.getlocalrows(): (i+1)*perproc; while(k < bufsize && indy[k] < end_this) { sendindbuf[k] = indy[k] - i*perproc; sendnumbuf[k] = numy[k]; ++sendcnt[i]; ++k; } } sdispls = new int[rowneighs](); std::partial_sum(sendcnt, sendcnt+rowneighs-1, sdispls+1); //#endif } } } // non threaded template <typename SR, typename IU, typename OVT> void MergeContributions(int* listSizes, std::vector<int32_t *> & indsvec, std::vector<OVT *> & numsvec, std::vector<IU>& mergedind, std::vector<OVT>& mergednum) { int nlists = indsvec.size(); // this condition is checked in the caller SpMV function. 
// I am still putting it here for completeness if(nlists == 1) { // simply copy data int veclen = listSizes[0]; mergedind.resize(veclen); mergednum.resize(veclen); for(int i=0; i<veclen; i++) { mergedind[i] = indsvec[0][i]; mergednum[i] = numsvec[0][i]; } return; } int32_t hsize = 0; int32_t inf = std::numeric_limits<int32_t>::min(); int32_t sup = std::numeric_limits<int32_t>::max(); KNHeap< int32_t, int32_t > sHeap(sup, inf); int * processed = new int[nlists](); for(int i=0; i<nlists; ++i) { if(listSizes[i] > 0) { // key, list_id sHeap.insert(indsvec[i][0], i); ++hsize; } } int32_t key, locv; if(hsize > 0) { sHeap.deleteMin(&key, &locv); mergedind.push_back( static_cast<IU>(key)); mergednum.push_back(numsvec[locv][0]); // nothing is processed yet if( (++(processed[locv])) < listSizes[locv] ) sHeap.insert(indsvec[locv][processed[locv]], locv); else --hsize; } while(hsize > 0) { sHeap.deleteMin(&key, &locv); if(mergedind.back() == static_cast<IU>(key)) { mergednum.back() = SR::add(mergednum.back(), numsvec[locv][processed[locv]]); // ABAB: Benchmark actually allows us to be non-deterministic in terms of parent selection // We can just skip this addition operator (if it's a max/min select) } else { mergedind.push_back(static_cast<IU>(key)); mergednum.push_back(numsvec[locv][processed[locv]]); } if( (++(processed[locv])) < listSizes[locv] ) sHeap.insert(indsvec[locv][processed[locv]], locv); else --hsize; } DeleteAll(processed); } template <typename SR, typename IU, typename OVT> void MergeContributions_threaded(int * & listSizes, std::vector<int32_t *> & indsvec, std::vector<OVT *> & numsvec, std::vector<IU> & mergedind, std::vector<OVT> & mergednum, IU maxindex) { int nlists = indsvec.size(); // this condition is checked in the caller SpMV function. 
// I am still putting it here for completeness if(nlists == 1) { // simply copy data int veclen = listSizes[0]; mergedind.resize(veclen); mergednum.resize(veclen); #ifdef THREADED #pragma omp parallel for #endif for(int i=0; i<veclen; i++) { mergedind[i] = indsvec[0][i]; mergednum[i] = numsvec[0][i]; } return; } int nthreads=1; #ifdef THREADED #pragma omp parallel { nthreads = omp_get_num_threads(); } #endif int nsplits = 4*nthreads; // oversplit for load balance nsplits = std::min(nsplits, (int)maxindex); std::vector< std::vector<int32_t> > splitters(nlists); for(int k=0; k< nlists; k++) { splitters[k].resize(nsplits+1); splitters[k][0] = static_cast<int32_t>(0); #pragma omp parallel for for(int i=1; i< nsplits; i++) { IU cur_idx = i * (maxindex/nsplits); auto it = std::lower_bound (indsvec[k], indsvec[k] + listSizes[k], cur_idx); splitters[k][i] = (int32_t) (it - indsvec[k]); } splitters[k][nsplits] = listSizes[k]; } // ------ perform merge in parallel ------ std::vector<std::vector<IU>> indsBuf(nsplits); std::vector<std::vector<OVT>> numsBuf(nsplits); //TODO: allocate these vectors here before calling MergeContributions #pragma omp parallel for schedule(dynamic) for(int i=0; i< nsplits; i++) { std::vector<int32_t *> tIndsVec(nlists); std::vector<OVT *> tNumsVec(nlists); std::vector<int> tLengths(nlists); for(int j=0; j< nlists; ++j) { tIndsVec[j] = indsvec[j] + splitters[j][i]; tNumsVec[j] = numsvec[j] + splitters[j][i]; tLengths[j]= splitters[j][i+1] - splitters[j][i]; } MergeContributions<SR>(tLengths.data(), tIndsVec, tNumsVec, indsBuf[i], numsBuf[i]); } // ------ concatenate merged tuples processed by threads ------ std::vector<IU> tdisp(nsplits+1); tdisp[0] = 0; for(int i=0; i<nsplits; ++i) { tdisp[i+1] = tdisp[i] + indsBuf[i].size(); } mergedind.resize(tdisp[nsplits]); mergednum.resize(tdisp[nsplits]); #pragma omp parallel for schedule(dynamic) for(int i=0; i< nsplits; i++) { std::copy(indsBuf[i].data() , indsBuf[i].data() + indsBuf[i].size(), 
mergedind.data() + tdisp[i]); std::copy(numsBuf[i].data() , numsBuf[i].data() + numsBuf[i].size(), mergednum.data() + tdisp[i]); } } /** * This version is the most flexible sparse matrix X sparse vector [Used in KDT] * It accepts different types for the matrix (NUM), the input vector (IVT) and the output vector (OVT) * without relying on automatic type promotion * Input (x) and output (y) vectors can be ALIASED because y is not written until the algorithm is done with x. */ template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER> void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, OptBuf<int32_t, OVT > & optbuf, PreAllocatedSPA<OVT> & SPA) { CheckSpMVCompliance(A,x); optbuf.MarkEmpty(); y.glen = A.getnrow(); // in case it is not set already MPI_Comm World = x.commGrid->GetWorld(); MPI_Comm ColWorld = x.commGrid->GetColWorld(); MPI_Comm RowWorld = x.commGrid->GetRowWorld(); int accnz; int32_t trxlocnz; IU lenuntil; int32_t *trxinds, *indacc; IVT *trxnums, *numacc; #ifdef TIMING double t0=MPI_Wtime(); #endif TransposeVector(World, x, trxlocnz, lenuntil, trxinds, trxnums, indexisvalue); #ifdef TIMING double t1=MPI_Wtime(); cblas_transvectime += (t1-t0); #endif if(x.commGrid->GetGridRows() > 1) { AllGatherVector(ColWorld, trxlocnz, lenuntil, trxinds, trxnums, indacc, numacc, accnz, indexisvalue); // trxindS/trxnums deallocated, indacc/numacc allocated, accnz set } else { accnz = trxlocnz; indacc = trxinds; // aliasing ptr numacc = trxnums; // aliasing ptr } int rowneighs; MPI_Comm_size(RowWorld, &rowneighs); int * sendcnt = new int[rowneighs](); int32_t * sendindbuf; OVT * sendnumbuf; int * sdispls; #ifdef TIMING double t2=MPI_Wtime(); #endif LocalSpMV<SR>(A, rowneighs, optbuf, indacc, numacc, sendindbuf, sendnumbuf, sdispls, sendcnt, accnz, indexisvalue, SPA); // indacc/numacc deallocated, sendindbuf/sendnumbuf/sdispls allocated #ifdef TIMING double 
t3=MPI_Wtime(); cblas_localspmvtime += (t3-t2); #endif if(x.commGrid->GetGridCols() == 1) { y.ind.resize(sendcnt[0]); y.num.resize(sendcnt[0]); if(optbuf.totmax > 0 ) // graph500 optimization enabled { #ifdef THREADED #pragma omp parallel for #endif for(int i=0; i<sendcnt[0]; i++) { y.ind[i] = optbuf.inds[i]; y.num[i] = optbuf.nums[i]; } } else { #ifdef THREADED #pragma omp parallel for #endif for(int i=0; i<sendcnt[0]; i++) { y.ind[i] = sendindbuf[i]; y.num[i] = sendnumbuf[i]; } DeleteAll(sendindbuf, sendnumbuf,sdispls); } delete [] sendcnt; return; } int * rdispls = new int[rowneighs]; int * recvcnt = new int[rowneighs]; MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld); // share the request counts // receive displacements are exact whereas send displacements have slack rdispls[0] = 0; for(int i=0; i<rowneighs-1; ++i) { rdispls[i+1] = rdispls[i] + recvcnt[i]; } int totrecv = std::accumulate(recvcnt,recvcnt+rowneighs,0); int32_t * recvindbuf = new int32_t[totrecv]; OVT * recvnumbuf = new OVT[totrecv]; #ifdef TIMING double t4=MPI_Wtime(); #endif if(optbuf.totmax > 0 ) // graph500 optimization enabled { MPI_Alltoallv(optbuf.inds, sendcnt, optbuf.dspls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld); MPI_Alltoallv(optbuf.nums, sendcnt, optbuf.dspls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld); delete [] sendcnt; } else { MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld); MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld); DeleteAll(sendindbuf, sendnumbuf, sendcnt, sdispls); } #ifdef TIMING double t5=MPI_Wtime(); cblas_alltoalltime += (t5-t4); #endif #ifdef TIMING double t6=MPI_Wtime(); #endif //MergeContributions<SR>(y,recvcnt, rdispls, recvindbuf, recvnumbuf, rowneighs); // free memory of y, in case it was aliased std::vector<IU>().swap(y.ind); 
std::vector<OVT>().swap(y.num); std::vector<int32_t *> indsvec(rowneighs); std::vector<OVT *> numsvec(rowneighs); #ifdef THREADED #pragma omp parallel for #endif for(int i=0; i<rowneighs; i++) { indsvec[i] = recvindbuf+rdispls[i]; numsvec[i] = recvnumbuf+rdispls[i]; } #ifdef THREADED MergeContributions_threaded<SR>(recvcnt, indsvec, numsvec, y.ind, y.num, y.MyLocLength()); #else MergeContributions<SR>(recvcnt, indsvec, numsvec, y.ind, y.num); #endif DeleteAll(recvcnt, rdispls,recvindbuf, recvnumbuf); #ifdef TIMING double t7=MPI_Wtime(); cblas_mergeconttime += (t7-t6); #endif } template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER> void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, PreAllocatedSPA<OVT> & SPA) { OptBuf< int32_t, OVT > optbuf = OptBuf< int32_t,OVT >(); SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA); } template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER> void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue) { OptBuf< int32_t, OVT > optbuf = OptBuf< int32_t,OVT >(); PreAllocatedSPA<OVT> SPA; SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA); } template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER> void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, OptBuf<int32_t, OVT > & optbuf) { PreAllocatedSPA<OVT> SPA; SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA); } /** * Automatic type promotion is ONLY done here, all the callee functions (in Friends.h and below) are initialized with the promoted type * If indexisvalues = true, then we do not need to transfer values for x (happens for BFS iterations with boolean matrices and integer rhs vectors) **/ template <typename SR, typename IU, typename NUM, typename UDER> FullyDistSpVec<IU,typename 
promote_trait<NUM,IU>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf) { typedef typename promote_trait<NUM,IU>::T_promote T_promote; FullyDistSpVec<IU, T_promote> y ( x.getcommgrid(), A.getnrow()); // identity doesn't matter for sparse vectors SpMV<SR>(A, x, y, indexisvalue, optbuf); return y; } /** * Parallel dense SpMV **/ template <typename SR, typename IU, typename NUM, typename NUV, typename UDER> FullyDistVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistVec<IU,NUV> & x ) { typedef typename promote_trait<NUM,NUV>::T_promote T_promote; CheckSpMVCompliance(A, x); MPI_Comm World = x.commGrid->GetWorld(); MPI_Comm ColWorld = x.commGrid->GetColWorld(); MPI_Comm RowWorld = x.commGrid->GetRowWorld(); int xsize = (int) x.LocArrSize(); int trxsize = 0; int diagneigh = x.commGrid->GetComplementRank(); MPI_Status status; MPI_Sendrecv(&xsize, 1, MPI_INT, diagneigh, TRX, &trxsize, 1, MPI_INT, diagneigh, TRX, World, &status); NUV * trxnums = new NUV[trxsize]; MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.arr)), xsize, MPIType<NUV>(), diagneigh, TRX, trxnums, trxsize, MPIType<NUV>(), diagneigh, TRX, World, &status); int colneighs, colrank; MPI_Comm_size(ColWorld, &colneighs); MPI_Comm_rank(ColWorld, &colrank); int * colsize = new int[colneighs]; colsize[colrank] = trxsize; MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colsize, 1, MPI_INT, ColWorld); int * dpls = new int[colneighs](); // displacements (zero initialized pid) std::partial_sum(colsize, colsize+colneighs-1, dpls+1); int accsize = std::accumulate(colsize, colsize+colneighs, 0); NUV * numacc = new NUV[accsize]; MPI_Allgatherv(trxnums, trxsize, MPIType<NUV>(), numacc, colsize, dpls, MPIType<NUV>(), ColWorld); delete [] trxnums; // serial SpMV with dense vector T_promote id = SR::id(); IU ysize = A.getlocalrows(); T_promote * localy = new 
T_promote[ysize]; std::fill_n(localy, ysize, id); #ifdef THREADED dcsc_gespmv_threaded<SR>(*(A.spSeq), numacc, localy); #else dcsc_gespmv<SR>(*(A.spSeq), numacc, localy); #endif DeleteAll(numacc,colsize, dpls); // FullyDistVec<IT,NT>(shared_ptr<CommGrid> grid, IT globallen, NT initval, NT id) FullyDistVec<IU, T_promote> y ( x.commGrid, A.getnrow(), id); int rowneighs; MPI_Comm_size(RowWorld, &rowneighs); IU begptr, endptr; for(int i=0; i< rowneighs; ++i) { begptr = y.RowLenUntil(i); if(i == rowneighs-1) { endptr = ysize; } else { endptr = y.RowLenUntil(i+1); } MPI_Reduce(localy+begptr, SpHelper::p2a(y.arr), endptr-begptr, MPIType<T_promote>(), SR::mpi_op(), i, RowWorld); } delete [] localy; return y; } /** * \TODO: Old version that is no longer considered optimal * Kept for legacy purposes * To be removed when other functionals are fully tested. **/ template <typename SR, typename IU, typename NUM, typename NUV, typename UDER> FullyDistSpVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,NUV> & x) { typedef typename promote_trait<NUM,NUV>::T_promote T_promote; CheckSpMVCompliance(A, x); MPI_Comm World = x.commGrid->GetWorld(); MPI_Comm ColWorld = x.commGrid->GetColWorld(); MPI_Comm RowWorld = x.commGrid->GetRowWorld(); int xlocnz = (int) x.getlocnnz(); int trxlocnz = 0; int roffst = x.RowLenUntil(); int offset; int diagneigh = x.commGrid->GetComplementRank(); MPI_Status status; MPI_Sendrecv(&xlocnz, 1, MPI_INT, diagneigh, TRX, &trxlocnz, 1, MPI_INT, diagneigh, TRX, World, &status); MPI_Sendrecv(&roffst, 1, MPI_INT, diagneigh, TROST, &offset, 1, MPI_INT, diagneigh, TROST, World, &status); IU * trxinds = new IU[trxlocnz]; NUV * trxnums = new NUV[trxlocnz]; MPI_Sendrecv(const_cast<IU*>(SpHelper::p2a(x.ind)), xlocnz, MPIType<IU>(), diagneigh, TRX, trxinds, trxlocnz, MPIType<IU>(), diagneigh, TRX, World, &status); MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NUV>(), diagneigh, TRX, 
trxnums, trxlocnz, MPIType<NUV>(), diagneigh, TRX, World, &status); std::transform(trxinds, trxinds+trxlocnz, trxinds, std::bind2nd(std::plus<IU>(), offset)); // fullydist indexing (n pieces) -> matrix indexing (sqrt(p) pieces) int colneighs, colrank; MPI_Comm_size(ColWorld, &colneighs); MPI_Comm_rank(ColWorld, &colrank); int * colnz = new int[colneighs]; colnz[colrank] = trxlocnz; MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld); int * dpls = new int[colneighs](); // displacements (zero initialized pid) std::partial_sum(colnz, colnz+colneighs-1, dpls+1); int accnz = std::accumulate(colnz, colnz+colneighs, 0); IU * indacc = new IU[accnz]; NUV * numacc = new NUV[accnz]; // ABAB: Future issues here, colnz is of type int (MPI limitation) // What if the aggregate vector size along the processor row/column is not 32-bit addressible? MPI_Allgatherv(trxinds, trxlocnz, MPIType<IU>(), indacc, colnz, dpls, MPIType<IU>(), ColWorld); MPI_Allgatherv(trxnums, trxlocnz, MPIType<NUV>(), numacc, colnz, dpls, MPIType<NUV>(), ColWorld); DeleteAll(trxinds, trxnums); // serial SpMV with sparse vector std::vector< int32_t > indy; std::vector< T_promote > numy; int32_t * tmpindacc = new int32_t[accnz]; for(int i=0; i< accnz; ++i) tmpindacc[i] = indacc[i]; delete [] indacc; dcsc_gespmv<SR>(*(A.spSeq), tmpindacc, numacc, accnz, indy, numy); // actual multiplication DeleteAll(tmpindacc, numacc); DeleteAll(colnz, dpls); FullyDistSpVec<IU, T_promote> y ( x.commGrid, A.getnrow()); // identity doesn't matter for sparse vectors IU yintlen = y.MyRowLength(); int rowneighs; MPI_Comm_size(RowWorld,&rowneighs); std::vector< std::vector<IU> > sendind(rowneighs); std::vector< std::vector<T_promote> > sendnum(rowneighs); typename std::vector<int32_t>::size_type outnz = indy.size(); for(typename std::vector<IU>::size_type i=0; i< outnz; ++i) { IU locind; int rown = y.OwnerWithinRow(yintlen, static_cast<IU>(indy[i]), locind); sendind[rown].push_back(locind); 
sendnum[rown].push_back(numy[i]); } IU * sendindbuf = new IU[outnz]; T_promote * sendnumbuf = new T_promote[outnz]; int * sendcnt = new int[rowneighs]; int * sdispls = new int[rowneighs]; for(int i=0; i<rowneighs; ++i) sendcnt[i] = sendind[i].size(); int * rdispls = new int[rowneighs]; int * recvcnt = new int[rowneighs]; MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld); // share the request counts sdispls[0] = 0; rdispls[0] = 0; for(int i=0; i<rowneighs-1; ++i) { sdispls[i+1] = sdispls[i] + sendcnt[i]; rdispls[i+1] = rdispls[i] + recvcnt[i]; } int totrecv = std::accumulate(recvcnt,recvcnt+rowneighs,0); IU * recvindbuf = new IU[totrecv]; T_promote * recvnumbuf = new T_promote[totrecv]; for(int i=0; i<rowneighs; ++i) { std::copy(sendind[i].begin(), sendind[i].end(), sendindbuf+sdispls[i]); std::vector<IU>().swap(sendind[i]); } for(int i=0; i<rowneighs; ++i) { std::copy(sendnum[i].begin(), sendnum[i].end(), sendnumbuf+sdispls[i]); std::vector<T_promote>().swap(sendnum[i]); } MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<IU>(), recvindbuf, recvcnt, rdispls, MPIType<IU>(), RowWorld); MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<T_promote>(), recvnumbuf, recvcnt, rdispls, MPIType<T_promote>(), RowWorld); DeleteAll(sendindbuf, sendnumbuf); DeleteAll(sendcnt, recvcnt, sdispls, rdispls); // define a SPA-like data structure IU ysize = y.MyLocLength(); T_promote * localy = new T_promote[ysize]; bool * isthere = new bool[ysize]; std::vector<IU> nzinds; // nonzero indices std::fill_n(isthere, ysize, false); for(int i=0; i< totrecv; ++i) { if(!isthere[recvindbuf[i]]) { localy[recvindbuf[i]] = recvnumbuf[i]; // initial assignment nzinds.push_back(recvindbuf[i]); isthere[recvindbuf[i]] = true; } else { localy[recvindbuf[i]] = SR::add(localy[recvindbuf[i]], recvnumbuf[i]); } } DeleteAll(isthere, recvindbuf, recvnumbuf); sort(nzinds.begin(), nzinds.end()); int nnzy = nzinds.size(); y.ind.resize(nnzy); y.num.resize(nnzy); for(int i=0; i< nnzy; ++i) { 
        // Tail of the preceding SpMV: copy the sorted nonzero indices and their
        // accumulated values out of the SPA-like buffer into the result vector.
        y.ind[i] = nzinds[i];
        y.num[i] = localy[nzinds[i]];
    }
    delete [] localy;
    return y;
}

/**
 * Elementwise multiplication of two distributed sparse matrices.
 * Both matrices must live on the same communication grid; otherwise the
 * whole MPI job is aborted with GRIDMISMATCH.
 * @param exclude forwarded to the sequential EWiseMult kernel
 * @return a new SpParMat holding the elementwise product (numeric and
 *         storage types are promoted via promote_trait)
 */
template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat<IU,typename promote_trait<NU1,NU2>::T_promote,typename promote_trait<UDERA,UDERB>::T_promote> EWiseMult (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B , bool exclude)
{
    typedef typename promote_trait<NU1,NU2>::T_promote N_promote;
    typedef typename promote_trait<UDERA,UDERB>::T_promote DER_promote;
    if(*(A.commGrid) == *(B.commGrid))
    {
        // Delegate to the sequential kernel on the local submatrices; the
        // result object takes ownership of the freshly allocated storage.
        DER_promote * result = new DER_promote( EWiseMult(*(A.spSeq),*(B.spSeq),exclude) );
        return SpParMat<IU, N_promote, DER_promote> (result, A.commGrid);
    }
    else
    {
        std::cout << "Grids are not comparable elementwise multiplication" << std::endl;
        MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
        return SpParMat< IU,N_promote,DER_promote >();
    }
}

/**
 * Elementwise apply on two distributed sparse matrices (extended-callback,
 * "notB" variant). __binary_op is applied to corresponding local entries;
 * defaultBVal stands in for missing B entries when notB is set (semantics of
 * notB are implemented in the sequential EWiseApply kernel).
 * Aborts with GRIDMISMATCH if the matrices live on different grids.
 */
template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation>
SpParMat<IU,RETT,RETDER> EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, bool notB, const NU2& defaultBVal)
{
    if(*(A.commGrid) == *(B.commGrid))
    {
        RETDER * result = new RETDER( EWiseApply<RETT>(*(A.spSeq),*(B.spSeq), __binary_op, notB, defaultBVal) );
        return SpParMat<IU, RETT, RETDER> (result, A.commGrid);
    }
    else
    {
        std::cout << "Grids are not comparable elementwise apply" << std::endl;
        MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
        return SpParMat< IU,RETT,RETDER >();
    }
}

/**
 * Extended elementwise apply on two distributed sparse matrices.
 * do_op acts as a filter predicate; allowANulls/allowBNulls with
 * ANullVal/BNullVal control union-style behavior for entries present in only
 * one matrix, and allowIntersect controls whether overlapping entries are
 * processed (all implemented by the sequential EWiseApply kernel).
 * The trailing bool (useExtendedBinOp) only disambiguates this overload from
 * the plain-callback adapter below; it is not forwarded.
 */
template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate>
SpParMat<IU,RETT,RETDER> EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect, const bool useExtendedBinOp)
{
    if(*(A.commGrid) == *(B.commGrid))
    {
        RETDER * result = new RETDER( EWiseApply<RETT>(*(A.spSeq),*(B.spSeq), __binary_op, do_op, allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect) );
        return SpParMat<IU, RETT, RETDER> (result, A.commGrid);
    }
    else
    {
        std::cout << "Grids are not comparable elementwise apply" << std::endl;
        MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
        return SpParMat< IU,RETT,RETDER >();
    }
}

// plain adapter: wraps ordinary two-argument callbacks into the extended
// four-argument form expected by the overload above.
template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate>
SpParMat<IU,RETT,RETDER> EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect = true)
{
    return EWiseApply<RETT, RETDER>(A, B, EWiseExtToPlainAdapter<RETT, NU1, NU2, _BinaryOperation>(__binary_op), EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(do_op), allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect, true);
}
// end adapter

/**
 * Elementwise multiplication of a distributed sparse vector with a
 * distributed dense vector.
 * If exclude is true, then we prune all entries W[i] != zero from V
 * (i.e. keep only the sparse entries whose dense counterpart equals zero).
 * If exclude is false, then we perform a proper elementwise multiplication,
 * keeping entries where W[i] != zero and multiplying the values.
 * Aborts with DIMMISMATCH on length mismatch, GRIDMISMATCH on grid mismatch.
 */
template <typename IU, typename NU1, typename NU2>
FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote> EWiseMult (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero)
{
    typedef typename promote_trait<NU1,NU2>::T_promote T_promote;
    if(*(V.commGrid) == *(W.commGrid))
    {
        FullyDistSpVec< IU, T_promote> Product(V.commGrid);
        if(V.glen != W.glen)
        {
            std::cerr << "Vector dimensions don't match for EWiseMult\n";
            MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
        }
        else
        {
            Product.glen = V.glen;
            IU size = V.getlocnnz();
            if(exclude)
            {
                #if defined(_OPENMP) && defined(CBLAS_EXPERIMENTAL)	// not faster than serial
                // Split the local nonzeros into actual_splits chunks, filter
                // each chunk into thread-local buffers, then stitch the
                // buffers together with a prefix sum of their sizes.
                int actual_splits = cblas_splits * 1;	// 1 is the parallel slackness
                std::vector <IU> tlosizes (actual_splits, 0);
                std::vector < std::vector<IU> > tlinds(actual_splits);
                std::vector < std::vector<T_promote> > tlnums(actual_splits);
                IU tlsize = size / actual_splits;
                #pragma omp parallel for //schedule(dynamic, 1)
                for(IU t = 0; t < actual_splits; ++t)
                {
                    IU tlbegin = t*tlsize;
                    // last chunk absorbs the remainder
                    IU tlend = (t==actual_splits-1)? size : (t+1)*tlsize;
                    for(IU i=tlbegin; i<tlend; ++i)
                    {
                        if(W.arr[V.ind[i]] == zero) 	// keep only those
                        {
                            tlinds[t].push_back(V.ind[i]);
                            tlnums[t].push_back(V.num[i]);
                            tlosizes[t]++;
                        }
                    }
                }
                std::vector<IU> prefix_sum(actual_splits+1,0);
                std::partial_sum(tlosizes.begin(), tlosizes.end(), prefix_sum.begin()+1);
                Product.ind.resize(prefix_sum[actual_splits]);
                Product.num.resize(prefix_sum[actual_splits]);
                #pragma omp parallel for //schedule(dynamic, 1)
                for(IU t=0; t< actual_splits; ++t)
                {
                    std::copy(tlinds[t].begin(), tlinds[t].end(), Product.ind.begin()+prefix_sum[t]);
                    std::copy(tlnums[t].begin(), tlnums[t].end(), Product.num.begin()+prefix_sum[t]);
                }
                #else
                // Serial path: keep sparse entries whose dense counterpart is zero.
                for(IU i=0; i<size; ++i)
                {
                    if(W.arr[V.ind[i]] == zero) 	// keep only those
                    {
                        Product.ind.push_back(V.ind[i]);
                        Product.num.push_back(V.num[i]);
                    }
                }
                #endif
            }
            else
            {
                // Proper elementwise product: keep entries where W is nonzero.
                for(IU i=0; i<size; ++i)
                {
                    if(W.arr[V.ind[i]] != zero) 	// keep only those
                    {
                        Product.ind.push_back(V.ind[i]);
                        Product.num.push_back(V.num[i] * W.arr[V.ind[i]]);
                    }
                }
            }
        }
        return Product;
    }
    else
    {
        std::cout << "Grids are not comparable elementwise multiplication" << std::endl;
        MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
        return FullyDistSpVec< IU,T_promote>();
    }
}

/** Threaded EWiseApply. Only called internally from EWiseApply.
 *  Splits the local index range across OpenMP threads; each thread filters
 *  and transforms into private buffers which are then concatenated using a
 *  prefix sum of per-thread result sizes.
**/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply_threaded (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp)
{
    typedef RET T_promote; //typedef typename promote_trait<NU1,NU2>::T_promote T_promote;
    if(*(V.commGrid) == *(W.commGrid))
    {
        FullyDistSpVec< IU, T_promote> Product(V.commGrid);
        if(V.TotalLength() != W.TotalLength())
        {
            std::ostringstream outs;
            outs << "Vector dimensions don't match (" << V.TotalLength() << " vs " << W.TotalLength() << ") for EWiseApply (short version)\n";
            SpParHelper::Print(outs.str());
            MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
        }
        else
        {
            // Discover the team size first so the per-thread buffers can be
            // allocated before entering the worker parallel region.
            int nthreads=1;
#ifdef _OPENMP
#pragma omp parallel
            {
                nthreads = omp_get_num_threads();
            }
#endif
            Product.glen = V.glen;
            IU size = W.LocArrSize();     // local length of the dense vector
            IU spsize = V.getlocnnz();    // local nonzeros of the sparse vector
            // temporary result vectors per thread
            std::vector<std::vector<IU>> tProductInd(nthreads);
            std::vector<std::vector<T_promote>> tProductVal(nthreads);
            IU perthread; //chunk of tProductInd or tProductVal allocated to each thread
            // Work is partitioned over dense indices (union mode) or over
            // sparse entries (intersection mode).
            if (allowVNulls) perthread = size/nthreads;
            else perthread = spsize/nthreads;
#ifdef _OPENMP
#pragma omp parallel
#endif
            {
                int curthread = 0;
#ifdef _OPENMP
                curthread = omp_get_thread_num();
#endif
                IU tStartIdx = perthread * curthread;
                IU tNextIdx = perthread * (curthread+1);
                if (allowVNulls)
                {
                    if(curthread == nthreads-1) tNextIdx = size;  // last thread takes the remainder
                    // get sparse part for the current thread
                    auto it = std::lower_bound (V.ind.begin(), V.ind.end(), tStartIdx);
                    IU tSpIdx = (IU) std::distance(V.ind.begin(), it);
                    // iterate over the dense vector, advancing the sparse
                    // cursor whenever its index matches the dense position
                    for(IU tIdx=tStartIdx; tIdx < tNextIdx; ++tIdx)
                    {
                        if(tSpIdx < spsize && V.ind[tSpIdx] < tNextIdx && V.ind[tSpIdx] == tIdx)
                        {
                            // V has an explicit entry at tIdx
                            if (_doOp(V.num[tSpIdx], W.arr[tIdx], false, false))
                            {
                                tProductInd[curthread].push_back(tIdx);
                                tProductVal[curthread].push_back (_binary_op(V.num[tSpIdx], W.arr[tIdx], false, false));
                            }
                            tSpIdx++;
                        }
                        else
                        {
                            // V is implicitly null here: substitute Vzero
                            if (_doOp(Vzero, W.arr[tIdx], true, false))
                            {
                                tProductInd[curthread].push_back(tIdx);
                                tProductVal[curthread].push_back (_binary_op(Vzero, W.arr[tIdx], true, false));
                            }
                        }
                    }
                }
                else // iterate over the sparse vector
                {
                    if(curthread == nthreads-1) tNextIdx = spsize;  // last thread takes the remainder
                    for(IU tSpIdx=tStartIdx; tSpIdx < tNextIdx; ++tSpIdx)
                    {
                        if (_doOp(V.num[tSpIdx], W.arr[V.ind[tSpIdx]], false, false))
                        {
                            tProductInd[curthread].push_back( V.ind[tSpIdx]);
                            tProductVal[curthread].push_back (_binary_op(V.num[tSpIdx], W.arr[V.ind[tSpIdx]], false, false));
                        }
                    }
                }
            }
            // Exclusive prefix sum of per-thread result counts gives each
            // thread's write offset into the final arrays.
            std::vector<IU> tdisp(nthreads+1);
            tdisp[0] = 0;
            for(int i=0; i<nthreads; ++i)
            {
                tdisp[i+1] = tdisp[i] + tProductInd[i].size();
            }
            // copy results from temporary vectors
            Product.ind.resize(tdisp[nthreads]);
            Product.num.resize(tdisp[nthreads]);
#ifdef _OPENMP
#pragma omp parallel
#endif
            {
                int curthread = 0;
#ifdef _OPENMP
                curthread = omp_get_thread_num();
#endif
                std::copy(tProductInd[curthread].begin(), tProductInd[curthread].end(), Product.ind.data() + tdisp[curthread]);
                std::copy(tProductVal[curthread].begin() , tProductVal[curthread].end(), Product.num.data() + tdisp[curthread]);
            }
        }
        return Product;
    }
    else
    {
        std::cout << "Grids are not comparable for EWiseApply" << std::endl;
        MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
        return FullyDistSpVec< IU,T_promote>();
    }
}

/**
 * Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret.
 * The binary operation is only performed if the binary predicate _doOp returns true for those elements. Otherwise the binary operation is not
 * performed and ret does not contain an element at that position.
 * More formally the operation is defined as:
 * if (_doOp(V[i], W[i]))
 *    ret[i] = _binary_op(V[i], W[i])
 * else
 *    // ret[i] is not set
 * Hence _doOp can be used to implement a filter on either of the vectors.
 *
 * The above is only defined if both V[i] and W[i] exist (i.e. an intersection). To allow a union operation (ex. when V[i] doesn't exist but W[i] does)
 * the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value.
 *
 * The type of each element of ret must not necessarily be related to the types of V or W, so the return type must be explicitly specified as a template parameter:
 * FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, retTrue, false, 0)
**/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp)
{
#ifdef _OPENMP
    // With OpenMP available, defer entirely to the threaded implementation.
    return EWiseApply_threaded<RET>(V, W, _binary_op, _doOp, allowVNulls, Vzero, useExtendedBinOp);
#else
    typedef RET T_promote; //typedef typename promote_trait<NU1,NU2>::T_promote T_promote;
    if(*(V.commGrid) == *(W.commGrid))
    {
        FullyDistSpVec< IU, T_promote> Product(V.commGrid);
        //FullyDistVec< IU, NU1> DV (V); // Ariful: I am not sure why it was there??
        if(V.TotalLength() != W.TotalLength())
        {
            std::ostringstream outs;
            outs << "Vector dimensions don't match (" << V.TotalLength() << " vs " << W.TotalLength() << ") for EWiseApply (short version)\n";
            SpParHelper::Print(outs.str());
            MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
        }
        else
        {
            Product.glen = V.glen;
            IU size = W.LocArrSize();     // local length of the dense vector
            IU spsize = V.getlocnnz();    // local nonzeros of the sparse vector
            IU sp_iter = 0;               // cursor into V's sparse entries
            if (allowVNulls)
            {
                // iterate over the dense vector, substituting Vzero where V
                // has no explicit entry
                for(IU i=0; i<size; ++i)
                {
                    if(sp_iter < spsize && V.ind[sp_iter] == i)
                    {
                        if (_doOp(V.num[sp_iter], W.arr[i], false, false))
                        {
                            Product.ind.push_back(i);
                            Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[i], false, false));
                        }
                        sp_iter++;
                    }
                    else
                    {
                        if (_doOp(Vzero, W.arr[i], true, false))
                        {
                            Product.ind.push_back(i);
                            Product.num.push_back(_binary_op(Vzero, W.arr[i], true, false));
                        }
                    }
                }
            }
            else
            {
                // iterate over the sparse vector only (intersection semantics)
                for(sp_iter = 0; sp_iter < spsize; ++sp_iter)
                {
                    if (_doOp(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false))
                    {
                        Product.ind.push_back(V.ind[sp_iter]);
                        Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false));
                    }
                }
            }
        }
        return Product;
    }
    else
    {
        std::cout << "Grids are not comparable for EWiseApply" << std::endl;
        MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
        return FullyDistSpVec< IU,T_promote>();
    }
#endif
}

/**
 * Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret.
 * The binary operation is only performed if the binary predicate _doOp returns true for those elements. Otherwise the binary operation is not
 * performed and ret does not contain an element at that position.
 * More formally the operation is defined as:
 * if (_doOp(V[i], W[i]))
 *    ret[i] = _binary_op(V[i], W[i])
 * else
 *    // ret[i] is not set
 * Hence _doOp can be used to implement a filter on either of the vectors.
 *
 * The above is only defined if both V[i] and W[i] exist (i.e. an intersection). To allow a union operation (ex.
 * when V[i] doesn't exist but W[i] does)
 * the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value.
 * !allowVNulls && !allowWNulls => intersection
 * !allowVNulls &&  allowWNulls => operate on all elements of V
 *  allowVNulls && !allowWNulls => operate on all elements of W
 *  allowVNulls &&  allowWNulls => union
 *
 * The type of each element of ret must not necessarily be related to the types of V or W, so the return type must be explicitly specified as a template parameter:
 * FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, ...)
 * For intersection, Vzero and Wzero are irrelevant
 * ABAB: \todo: Should allowIntersect be "false" for all SetDifference uses?
 **/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect, const bool useExtendedBinOp)
{
    typedef RET T_promote; // typename promote_trait<NU1,NU2>::T_promote T_promote;
    if(*(V.commGrid) == *(W.commGrid))
    {
        FullyDistSpVec< IU, T_promote> Product(V.commGrid);
        if(V.glen != W.glen)
        {
            std::ostringstream outs;
            outs << "Vector dimensions don't match (" << V.glen << " vs " << W.glen << ") for EWiseApply (full version)\n";
            SpParHelper::Print(outs.str());
            MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
        }
        else
        {
            Product.glen = V.glen;
            // Classic two-pointer merge over the two sorted index lists.
            typename std::vector< IU >::const_iterator indV = V.ind.begin();
            typename std::vector< NU1 >::const_iterator numV = V.num.begin();
            typename std::vector< IU >::const_iterator indW = W.ind.begin();
            typename std::vector< NU2 >::const_iterator numW = W.num.begin();
            while (indV < V.ind.end() && indW < W.ind.end())
            {
                if (*indV == *indW)
                {
                    // overlap: both vectors have an entry at this index
                    if (allowIntersect)
                    {
                        if (_doOp(*numV, *numW, false, false))
                        {
                            Product.ind.push_back(*indV);
                            Product.num.push_back(_binary_op(*numV, *numW, false, false));
                        }
                    }
                    indV++; numV++;
                    indW++; numW++;
                }
                else if (*indV < *indW)
                {
                    // V has value but W does not
                    if (allowWNulls)
                    {
                        if (_doOp(*numV, Wzero, false, true))
                        {
                            Product.ind.push_back(*indV);
                            Product.num.push_back(_binary_op(*numV, Wzero, false, true));
                        }
                    }
                    indV++; numV++;
                }
                else //(*indV > *indW)
                {
                    // W has value but V does not
                    if (allowVNulls)
                    {
                        if (_doOp(Vzero, *numW, true, false))
                        {
                            Product.ind.push_back(*indW);
                            Product.num.push_back(_binary_op(Vzero, *numW, true, false));
                        }
                    }
                    indW++; numW++;
                }
            }
            // clean up: drain whichever vector still has unvisited entries
            while (allowWNulls && indV < V.ind.end())
            {
                if (_doOp(*numV, Wzero, false, true))
                {
                    Product.ind.push_back(*indV);
                    Product.num.push_back(_binary_op(*numV, Wzero, false, true));
                }
                indV++; numV++;
            }
            while (allowVNulls && indW < W.ind.end())
            {
                if (_doOp(Vzero, *numW, true, false))
                {
                    Product.ind.push_back(*indW);
                    Product.num.push_back(_binary_op(Vzero, *numW, true, false));
                }
                indW++; numW++;
            }
        }
        return Product;
    }
    else
    {
        std::cout << "Grids are not comparable for EWiseApply" << std::endl;
        MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
        return FullyDistSpVec< IU,T_promote>();
    }
}

// plain callback versions: wrap ordinary two-argument callbacks into the
// extended four-argument form and forward to the overloads above.
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero)
{
    return EWiseApply<RET>(V, W, EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation>(_binary_op), EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(_doOp), allowVNulls, Vzero, true);
}

template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply (const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect = true)
{
    return EWiseApply<RET>(V, W, EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation>(_binary_op), EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(_doOp), allowVNulls, allowWNulls, Vzero, Wzero, allowIntersect, true);
}

////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////

// sampling-based nnz estimation via SpMV
// @OGUZ-NOTE This is not based on SUMMA, do not use. Estimates the number of
// nonzeros in the final output matrix.
// NROUNDS independent exponential samples are propagated through two SpMVs
// over a (min, select) semiring; see EstPerProcessNnzSpMV below.
#define NROUNDS 5
typedef std::array<float, NROUNDS> samparr_t;

// Make samparr_t usable wherever promote_trait is consulted for SpMV types.
template <typename NZT>
struct promote_trait<NZT, samparr_t>
{
    typedef samparr_t T_promote;
};

// Handler for FullyDistVec::ParallelWrite: prints the NROUNDS samples of an
// element separated by spaces (used by the commented-out debug dumps below).
class SamplesSaveHandler
{
public:
    template<typename c, typename t, typename V>
    void save(std::basic_ostream<c, t> &os, std::array<V, NROUNDS> &sample_vec, int64_t index)
    {
        for (auto it = sample_vec.begin(); it != sample_vec.end(); ++it)
            os << *it << " ";
    }
};

// Semiring for the sampling SpMV: "multiply" selects the sample vector of the
// source (ignoring the matrix value), "add" takes the componentwise minimum.
template<typename NZT>
struct SelectMinxSR
{
    // Identity of the componentwise-min reduction: all-max array.
    static samparr_t id()
    {
        samparr_t arr;
        for (auto it = arr.begin(); it != arr.end(); ++it)
            *it = std::numeric_limits<float>::max();
        return arr;
    }

    static bool returnedSAID() { return false; }

    // Componentwise minimum of two sample arrays.
    static samparr_t add (const samparr_t &arg1, const samparr_t &arg2)
    {
        samparr_t out;
        for (int i = 0; i < NROUNDS; ++i)
            out[i] = std::min(arg1[i], arg2[i]);
        return out;
    }

    // Matrix value is ignored; the sample array passes through unchanged.
    static samparr_t multiply (const NZT arg1, const samparr_t &arg2)
    {
        return arg2;
    }

    static void axpy (const NZT a, const samparr_t &x, samparr_t &y)
    {
        y = add(y, multiply(a, x));
    }

    // Lazily created commutative MPI reduction op wrapping MPI_func.
    // NOTE(review): the static exists/mpiop pair is not thread-safe and the
    // op is never freed — presumably acceptable here; confirm.
    static MPI_Op mpi_op()
    {
        static MPI_Op mpiop;
        static bool exists = false;
        if (exists)
            return mpiop;
        else
        {
            MPI_Op_create(MPI_func, true, &mpiop);
            exists = true;
            return mpiop;
        }
    }

    // MPI user-defined reduction: componentwise min of sample arrays.
    static void MPI_func(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
    {
        samparr_t *in = static_cast<samparr_t *>(invec);
        samparr_t *inout = static_cast<samparr_t *>(inoutvec);
        for (int i = 0; i < *len; ++i)
            inout[i] = add(inout[i], in[i]);
    }
};

/**
 * Sampling-based estimate of the total nnz of A*B (see @OGUZ-NOTE above: not
 * SUMMA-based, marked "do not use").
 * Each column starts with NROUNDS Exp(lambda) samples; two SpMVs over the
 * SelectMinxSR (componentwise-min) semiring propagate the minima through
 * A^T and B^T, and each output's nnz is estimated as (NROUNDS-1)/sum(minima)
 * — this looks like a Cohen-style minimum-based size estimator; confirm.
 * Both A and B are transposed in place and transposed back before returning.
 * @return the MPI_SUM-reduced estimate across all processes
 */
template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
int64_t EstPerProcessNnzSpMV( SpParMat<IU, NU1, UDERA> &A, SpParMat<IU, NU2, UDERB> &B )
{
    int myrank;
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    float lambda = 1.0f;    // rate of the exponential sampling distribution
    int nthds = 1;          // NOTE(review): set below but never used afterwards
#ifdef THREADED
#pragma omp parallel
#endif
    {
        nthds = omp_get_num_threads();
    }
    if (myrank == 0)
        std::cout << "taking transposes." << std::endl;
    A.Transpose();
    B.Transpose();
    if (myrank == 0)
        std::cout << "setting initial samples." << std::endl;
    samparr_t sa;
    FullyDistVec<IU, samparr_t> samples_init(A.getcommgrid(), A.getncol(), sa);
    // NOTE(review): a `parallel for` nested inside a `parallel` region relies
    // on nested parallelism; with nesting disabled every thread runs the full
    // loop with its own identically-seeded generator — confirm intent.
#ifdef THREADED
#pragma omp parallel
#endif
    {
        std::default_random_engine gen;
        std::exponential_distribution<float> exp_dist(lambda);
#ifdef THREADED
#pragma omp parallel for
#endif
        for (IU i = 0; i < samples_init.LocArrSize(); ++i)
        {
            samparr_t tmp;
            for (auto it = tmp.begin(); it != tmp.end(); ++it)
                *it = exp_dist(gen);
            samples_init.SetLocalElement(i, tmp);
        }
    }
    // std::string fname("samples_init");
    // samples_init.ParallelWrite(fname, 1, SamplesSaveHandler(), true);
    if (myrank == 0)
        std::cout << "computing mid samples." << std::endl;
    FullyDistVec<IU, samparr_t> samples_mid = SpMV<SelectMinxSR<NU1> > (A, samples_init);
    // fname = "samples_mid";
    // samples_mid.ParallelWrite(fname, 1, SamplesSaveHandler(), true);
    if (myrank == 0)
        std::cout << "computing final samples." << std::endl;
    FullyDistVec<IU, samparr_t> samples_final = SpMV<SelectMinxSR<NU2> > (B, samples_mid);
    // fname = "samples_final";
    // samples_final.ParallelWrite(fname, 1, SamplesSaveHandler(), true);
    if (myrank == 0)
        std::cout << "computing nnz estimation." << std::endl;
    float nnzest = 0.0f;
    // NOTE(review): debug print not guarded by rank — every process emits it.
    std::cout << myrank << "samples_final loc size: " << samples_final.LocArrSize() << std::endl;
    const samparr_t *lsamples = samples_final.GetLocArr();
#ifdef THREADED
#pragma omp parallel for reduction (+:nnzest)
#endif
    for (IU i = 0; i < samples_final.LocArrSize(); ++i)
    {
        float tmp = 0.0f;
        for (auto it = lsamples[i].begin(); it != lsamples[i].end(); ++it)
            tmp += *it;
        // unbiased estimator of the number of distinct contributors
        nnzest += static_cast<float>(NROUNDS - 1) / tmp;
    }
    if (myrank == 0)
        std::cout << "taking transposes again." << std::endl;
    int64_t nnzC_est = nnzest;   // NOTE(review): float -> int64_t truncation
    int64_t nnzC_tot = 0;
    MPI_Allreduce(&nnzC_est, &nnzC_tot, 1, MPIType<int64_t>(), MPI_SUM, (B.commGrid)->GetWorld());
    if (myrank == 0)
        std::cout << "sampling-based spmv est tot: " << nnzC_tot << std::endl;
    // revert back
    A.Transpose();
    B.Transpose();
    return nnzC_tot;
}

/**
 * 3D SUMMA sparse matrix-matrix multiplication: per-layer 2D SUMMA followed
 * by an all-to-all reduction along the fiber dimension and a multiway merge.
 * Aborts with DIMMISMATCH if ncol(A) != nrow(B).
 */
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDER1, typename UDER2>
SpParMat3D<IU,NUO,UDERO> Mult_AnXBn_SUMMA3D(SpParMat3D<IU,NU1,UDER1> & A, SpParMat3D<IU,NU2,UDER2> & B){
    int myrank;
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    typedef typename UDERO::LocalIT LIC;
    typedef typename UDER1::LocalIT LIA;
    typedef typename UDER2::LocalIT LIB;
#ifdef TIMING
    double t0, t1, t2, t3;
#endif
    /*
     * Check if A and B are multipliable
     * */
    if(A.getncol() != B.getnrow()){
        std::ostringstream outs;
        outs << "Can not multiply, dimensions does not match"<< std::endl;
        outs << A.getncol() << " != " << B.getnrow() << std::endl;
        SpParHelper::Print(outs.str());
        MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
    }
    /*
     * Calculate, across fibers, which process should get how many columns after redistribution
     * */
    vector<LIB> divisions3d;
    // Calculate split boundaries as if all contents of the layer is being re-distributed along fiber
    // These boundaries will be used later on
    B.CalculateColSplitDistributionOfLayer(divisions3d);
#ifdef TIMING
    t0 = MPI_Wtime();
#endif
    /*
     * SUMMA Starts
     * */
    int stages, dummy;  // last two parameters of ProductGrid
    // are ignored for this multiplication
    std::shared_ptr<CommGrid> GridC = ProductGrid((A.GetLayerMat()->getcommgrid()).get(), (B.GetLayerMat()->getcommgrid()).get(), stages, dummy, dummy);
    // Dimensions of this process's local piece of C within its layer.
    IU C_m = A.GetLayerMat()->seqptr()->getnrow();
    IU C_n = B.GetLayerMat()->seqptr()->getncol();
    IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERO::esscount, stages);
    IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERO::esscount, stages);
    // Exchange the "essential" size arrays of every local submatrix so each
    // stage knows how much to allocate before the broadcast arrives.
    SpParHelper::GetSetSizes( *(A.GetLayerMat()->seqptr()), ARecvSizes, (A.GetLayerMat()->getcommgrid())->GetRowWorld() );
    SpParHelper::GetSetSizes( *(B.GetLayerMat()->seqptr()), BRecvSizes, (B.GetLayerMat()->getcommgrid())->GetColWorld() );
    // Remotely fetched matrices are stored as pointers
    // NOTE(review): ARecv is declared UDERO* but assigned seqptr()/new UDER1()
    // below — this compiles only when UDERO and UDER1 coincide (or convert);
    // confirm the intended type.
    UDERO * ARecv;
    UDER2 * BRecv;
    std::vector< SpTuples<IU,NUO> *> tomerge;   // per-stage partial products
    int Aself = (A.GetLayerMat()->getcommgrid())->GetRankInProcRow();
    int Bself = (B.GetLayerMat()->getcommgrid())->GetRankInProcCol();
    double Abcast_time = 0;
    double Bbcast_time = 0;
    double Local_multiplication_time = 0;
    for(int i = 0; i < stages; ++i)
    {
        std::vector<IU> ess;
        // Stage i: rank i of the process row broadcasts its piece of A.
        if(i == Aself){
            ARecv = A.GetLayerMat()->seqptr();	// shallow-copy
        }
        else{
            ess.resize(UDER1::esscount);
            for(int j=0; j<UDER1::esscount; ++j)
            {
                ess[j] = ARecvSizes[j][i];	// essentials of the ith matrix in this row
            }
            ARecv = new UDER1();		// first, create the object
        }
#ifdef TIMING
        t2 = MPI_Wtime();
#endif
        if (Aself != i)
        {
            ARecv->Create(ess);		// allocate to the broadcast sizes
        }
        Arr<IU,NU1> Aarrinfo = ARecv->GetArrays();
        for(unsigned int idx = 0; idx < Aarrinfo.indarrs.size(); ++idx)
        {
            MPI_Bcast(Aarrinfo.indarrs[idx].addr, Aarrinfo.indarrs[idx].count, MPIType<IU>(), i, GridC->GetRowWorld());
        }
        for(unsigned int idx = 0; idx < Aarrinfo.numarrs.size(); ++idx)
        {
            MPI_Bcast(Aarrinfo.numarrs[idx].addr, Aarrinfo.numarrs[idx].count, MPIType<NU1>(), i, GridC->GetRowWorld());
        }
#ifdef TIMING
        t3 = MPI_Wtime();
        mcl3d_Abcasttime += (t3-t2);
        Abcast_time += (t3-t2);
#endif
        ess.clear();
        // Rank i of the process column broadcasts its piece of B.
        if(i == Bself){
            BRecv = B.GetLayerMat()->seqptr();	// shallow-copy
        }
        else{
            ess.resize(UDER2::esscount);
            for(int j=0; j<UDER2::esscount; ++j)
            {
                ess[j] = BRecvSizes[j][i];
            }
            BRecv = new UDER2();
        }
        MPI_Barrier(A.GetLayerMat()->getcommgrid()->GetWorld());
#ifdef TIMING
        t2 = MPI_Wtime();
#endif
        if (Bself != i)
        {
            BRecv->Create(ess);
        }
        Arr<IU,NU2> Barrinfo = BRecv->GetArrays();
        for(unsigned int idx = 0; idx < Barrinfo.indarrs.size(); ++idx)
        {
            MPI_Bcast(Barrinfo.indarrs[idx].addr, Barrinfo.indarrs[idx].count, MPIType<IU>(), i, GridC->GetColWorld());
        }
        for(unsigned int idx = 0; idx < Barrinfo.numarrs.size(); ++idx)
        {
            MPI_Bcast(Barrinfo.numarrs[idx].addr, Barrinfo.numarrs[idx].count, MPIType<NU2>(), i, GridC->GetColWorld());
        }
#ifdef TIMING
        t3 = MPI_Wtime();
        mcl3d_Bbcasttime += (t3-t2);
        Bbcast_time += (t3-t2);
#endif
#ifdef TIMING
        t2 = MPI_Wtime();
#endif
        // Hash-based local SpGEMM of the two received pieces; remotely
        // fetched operands are freed by the callee via the delete flags.
        SpTuples<IU,NUO> * C_cont = LocalSpGEMMHash<SR, NUO>
                        (*ARecv, *BRecv,	// parameters themselves
                        i != Aself,	// 'delete A' condition
                        i != Bself,	// 'delete B' condition
                        false);	// not to sort each column
#ifdef TIMING
        t3 = MPI_Wtime();
        mcl3d_localspgemmtime += (t3-t2);
        Local_multiplication_time += (t3-t2);
#endif
        if(!C_cont->isZero())
            tomerge.push_back(C_cont);
    }
    SpHelper::deallocate2D(ARecvSizes, UDER1::esscount);
    SpHelper::deallocate2D(BRecvSizes, UDER2::esscount);
#ifdef TIMING
    t2 = MPI_Wtime();
#endif
    // Merge the per-stage partial products of this layer into one tuple list.
    SpTuples<IU,NUO> * C_tuples = MultiwayMergeHash<SR>(tomerge, C_m, C_n, true, false);	// Delete input arrays and do not sort
#ifdef TIMING
    t3 = MPI_Wtime();
    mcl3d_SUMMAmergetime += (t3-t2);
#endif
#ifdef TIMING
    if(myrank == 0){
        fprintf(stderr, "[SUMMA3D]\tAbcast_time: %lf\n", Abcast_time);
        fprintf(stderr, "[SUMMA3D]\tBbcast_time: %lf\n", Bbcast_time);
        fprintf(stderr, "[SUMMA3D]\tLocal_multiplication_time: %lf\n", Local_multiplication_time);
        fprintf(stderr, "[SUMMA3D]\tSUMMA Merge time: %lf\n", (t3-t2));
    }
#endif
    /*
     * SUMMA Ends
     * */
#ifdef TIMING
    t1 = MPI_Wtime();
    mcl3d_SUMMAtime += (t1-t0);
    if(myrank == 0) fprintf(stderr, "[SUMMA3D]\tSUMMA time: %lf\n", (t1-t0));
#endif
    /*
     * 3d-reduction starts
     * */
#ifdef TIMING
    //MPI_Barrier(getcommgrid3D()->GetWorld());
    t0 = MPI_Wtime();
#endif
    MPI_Datatype MPI_tuple;
    MPI_Type_contiguous(sizeof(std::tuple<LIC,LIC,NUO>), MPI_CHAR, &MPI_tuple);
    MPI_Type_commit(&MPI_tuple);
    /*
     * Create a profile with information regarding data to be sent and received between layers
     * These memory allocation needs to be `int` specifically because some of these arrays would be used in communication
     * This requirement is for MPI as MPI_Alltoallv takes pointer to integer exclusively as count and displacement
     * */
    int * sendcnt = new int[A.getcommgrid3D()->GetGridLayers()];
    int * sendprfl = new int[A.getcommgrid3D()->GetGridLayers()*3];
    int * sdispls = new int[A.getcommgrid3D()->GetGridLayers()]();
    int * recvcnt = new int[A.getcommgrid3D()->GetGridLayers()];
    int * recvprfl = new int[A.getcommgrid3D()->GetGridLayers()*3];
    int * rdispls = new int[A.getcommgrid3D()->GetGridLayers()]();
    vector<IU> divisions3dPrefixSum(divisions3d.size());
    divisions3dPrefixSum[0] = 0;
    std::partial_sum(divisions3d.begin(), divisions3d.end()-1, divisions3dPrefixSum.begin()+1);
    ColLexiCompare<IU,NUO> comp;
    IU totsend = C_tuples->getnnz();
    // Tuples are column-sorted, so each layer's slice is found by binary
    // search on its column range.
#pragma omp parallel for
    for(int i=0; i < A.getcommgrid3D()->GetGridLayers(); ++i){
        IU start_col = divisions3dPrefixSum[i];
        IU end_col = divisions3dPrefixSum[i] + divisions3d[i];
        std::tuple<IU, IU, NUO> search_tuple_start(0, start_col, NUO());
        std::tuple<IU, IU, NUO> search_tuple_end(0, end_col, NUO());
        std::tuple<IU, IU, NUO>* start_it = std::lower_bound(C_tuples->tuples, C_tuples->tuples + C_tuples->getnnz(), search_tuple_start, comp);
        std::tuple<IU, IU, NUO>* end_it = std::lower_bound(C_tuples->tuples, C_tuples->tuples + C_tuples->getnnz(), search_tuple_end, comp);
        // This type casting is important from semantic point of view
        sendcnt[i] = (int)(end_it - start_it);
        sendprfl[i*3+0] = (int)(sendcnt[i]);	// Number of nonzeros in ith chunk
        sendprfl[i*3+1] = (int)(A.GetLayerMat()->seqptr()->getnrow());	// Number of rows in ith chunk
        sendprfl[i*3+2] = (int)(divisions3d[i]);	// Number of columns in ith chunk
    }
    // Exclusive prefix sum of counts -> send displacements.
    std::partial_sum(sendcnt, sendcnt+A.getcommgrid3D()->GetGridLayers()-1, sdispls+1);
    // Send profile ready. Now need to update the tuples to reflect correct column id after column split.
    for(int i=0; i < A.getcommgrid3D()->GetGridLayers(); ++i){
#pragma omp parallel for schedule(static)
        for(int j = 0; j < sendcnt[i]; j++){
            std::get<1>(C_tuples->tuples[sdispls[i]+j]) = std::get<1>(C_tuples->tuples[sdispls[i]+j]) - divisions3dPrefixSum[i];
        }
    }
    // Exchange the (nnz, nrow, ncol) profiles across the fiber.
    MPI_Alltoall(sendprfl, 3, MPI_INT, recvprfl, 3, MPI_INT, A.getcommgrid3D()->fiberWorld);
    for(int i = 0; i < A.getcommgrid3D()->GetGridLayers(); i++) recvcnt[i] = recvprfl[i*3];
    std::partial_sum(recvcnt, recvcnt+A.getcommgrid3D()->GetGridLayers()-1, rdispls+1);
    IU totrecv = std::accumulate(recvcnt,recvcnt+A.getcommgrid3D()->GetGridLayers(), static_cast<IU>(0));
    // Raw storage for incoming tuples (constructed in place by SpTuples below).
    // NOTE(review): sizeof over a runtime-length array type is a compiler
    // extension — confirm portability requirements.
    std::tuple<LIC,LIC,NUO>* recvTuples = static_cast<std::tuple<LIC,LIC,NUO>*> (::operator new (sizeof(std::tuple<LIC,LIC,NUO>[totrecv])));
#ifdef TIMING
    t2 = MPI_Wtime();
#endif
    MPI_Alltoallv(C_tuples->tuples, sendcnt, sdispls, MPI_tuple, recvTuples, recvcnt, rdispls, MPI_tuple, A.getcommgrid3D()->fiberWorld);
    delete C_tuples;
#ifdef TIMING
    t3 = MPI_Wtime();
    if(myrank == 0) fprintf(stderr, "[SUMMA3D]\tAlltoallv: %lf\n", (t3-t2));
#endif
    // Wrap each received chunk as an SpTuples view over recvTuples (no copy).
    vector<SpTuples<IU, NUO>*> recvChunks(A.getcommgrid3D()->GetGridLayers());
#pragma omp parallel for
    for (int i = 0; i < A.getcommgrid3D()->GetGridLayers(); i++){
        recvChunks[i] = new SpTuples<LIC, NUO>(recvcnt[i], recvprfl[i*3+1], recvprfl[i*3+2], recvTuples + rdispls[i], true, false);
    }
    // Free all memory except tempTuples; Because that memory is holding data of newly created local matrices after receiving.
    DeleteAll(sendcnt, sendprfl, sdispls);
    DeleteAll(recvcnt, recvprfl, rdispls);
    MPI_Type_free(&MPI_tuple);
    /*
     * 3d-reduction ends
     * */
#ifdef TIMING
    t1 = MPI_Wtime();
    mcl3d_reductiontime += (t1-t0);
    if(myrank == 0) fprintf(stderr, "[SUMMA3D]\tReduction time: %lf\n", (t1-t0));
#endif
#ifdef TIMING
    t0 = MPI_Wtime();
    t2 = MPI_Wtime();
#endif
    /*
     * 3d-merge starts
     * */
    // Merge the chunks received from all layers into the final local tuple
    // list; inputs are NOT deleted here (they alias recvTuples).
    SpTuples<IU, NUO> * merged_tuples = MultiwayMergeHash<SR, IU, NUO>(recvChunks, recvChunks[0]->getnrow(), recvChunks[0]->getncol(), false, false);	// Do not delete
#ifdef TIMING
    t3 = MPI_Wtime();
    if(myrank == 0) fprintf(stderr, "[SUMMA3D]\tMultiway Merge: %lf\n", (t3-t2));
    mcl3d_layer_nnzc += merged_tuples->getnnz();
#endif
    //Create SpDCCol and delete merged_tuples;
    UDERO * localResultant = new UDERO(*merged_tuples, false);
    // Do not delete elements of recvChunks, because that would give segmentation fault due to double free
    //delete [] recvTuples;
    ::operator delete(recvTuples);
    for(int i = 0; i < recvChunks.size(); i++){
        recvChunks[i]->tuples_deleted = true;	// Temporary patch to avoid memory leak and segfault
        delete recvChunks[i];
    }
    vector<SpTuples<IU,NUO>*>().swap(recvChunks);
    /*
     * 3d-merge ends
     * */
#ifdef TIMING
    t1 = MPI_Wtime();
    mcl3d_3dmergetime += (t1-t0);
    if(myrank == 0) fprintf(stderr, "[SUMMA3D]\t3D Merge time: %lf\n", (t1-t0));
#endif
    // Assemble the distributed 3D result on a fresh 3D grid matching A's.
    std::shared_ptr<CommGrid3D> grid3d;
    grid3d.reset(new CommGrid3D(A.getcommgrid3D()->GetWorld(), A.getcommgrid3D()->GetGridLayers(), A.getcommgrid3D()->GetGridRows(), A.getcommgrid3D()->GetGridCols(), A.isSpecial()));
    SpParMat3D<IU, NUO, UDERO> C(localResultant, grid3d, A.isColSplit(), A.isSpecial());
    return C;
}

/**
 * Memory-constrained 3D SpGEMM with optional column pruning/selection
 * (continues beyond this point).
 */
template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB>
SpParMat3D<IU, NUO, UDERO> MemEfficientSpGEMM3D(SpParMat3D<IU, NU1, UDERA> & A, SpParMat3D<IU, NU2, UDERB> & B, int phases, NUO hardThreshold, IU selectNum, IU recoverNum, NUO recoverPct, int kselectVersion, int64_t perProcessMemory){
// Body of MemEfficientSpGEMM3D: memory-constrained 3D sparse matrix-matrix
// multiply (SUMMA per layer, reduction along the fiber, multiway merge, then
// MCL-style prune/recover/select per phase). If perProcessMemory > 0 the
// number of phases is recalculated from a per-process memory estimate.
int myrank;
MPI_Comm_rank(MPI_COMM_WORLD,&myrank); // rank in the world communicator; used only to gate rank-0 diagnostics
// Local index types of the sequential (per-process) pieces of A, B and the output.
typedef typename UDERA::LocalIT LIA;
typedef typename UDERB::LocalIT LIB;
typedef typename UDERO::LocalIT LIC;
/*
 * Check if A and B are multipliable
 * */
if(A.getncol() != B.getnrow()){
    std::ostringstream outs;
    outs << "Can not multiply, dimensions does not match"<< std::endl;
    outs << A.getncol() << " != " << B.getnrow() << std::endl;
    SpParHelper::Print(outs.str());
    MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
}
/*
 * If provided number of phase is too low or too high then reset value of phase as 1
 * */
if(phases < 1 || phases >= B.getncol()){
    SpParHelper::Print("[MemEfficientSpGEMM3D]\tThe value of phases is too small or large. Resetting to 1.\n");
    phases = 1;
}
double t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; // To time different parts of the function (t4-t9 currently unused)
#ifdef TIMING
MPI_Barrier(B.getcommgrid3D()->GetWorld());
t0 = MPI_Wtime();
#endif
/*
 * If per process memory is provided then calculate number of phases
 * Otherwise, proceed to multiplication.
 * */
if(perProcessMemory > 0) {
    int p, calculatedPhases;
    MPI_Comm_size(A.getcommgrid3D()->GetLayerWorld(),&p); // p = processes per layer
    // Bytes needed per nonzero: two indices plus one value, for input and output value types.
    int64_t perNNZMem_in = sizeof(IU)*2 + sizeof(NU1);
    int64_t perNNZMem_out = sizeof(IU)*2 + sizeof(NUO);
    int64_t lannz = A.GetLayerMat()->getlocalnnz();
    int64_t gannz = 0;
    // Get maximum number of nnz owned by one process
    MPI_Allreduce(&lannz, &gannz, 1, MPIType<int64_t>(), MPI_MAX, A.getcommgrid3D()->GetWorld());
    //int64_t ginputMem = gannz * perNNZMem_in * 4; // Four pieces per process: one piece of own A and B, one piece of received A and B
    int64_t ginputMem = gannz * perNNZMem_in * 5; // One extra copy for safety
    // Estimate per layer nnz after multiplication. After this estimation each process would know an estimation of
    // how many nnz the corresponding layer will have after the layerwise operation.
    int64_t asquareNNZ = EstPerProcessNnzSUMMA(*(A.GetLayerMat()), *(B.GetLayerMat()), true);
    int64_t gasquareNNZ;
    MPI_Allreduce(&asquareNNZ, &gasquareNNZ, 1, MPIType<int64_t>(), MPI_MAX, A.getcommgrid3D()->GetFiberWorld());
    // Atmost two copies, one of a process's own, another received from fiber reduction
    int64_t gasquareMem = gasquareNNZ * perNNZMem_out * 2;
    // Calculate estimated average degree after multiplication
    int64_t d = ceil( ( ( gasquareNNZ / B.getcommgrid3D()->GetGridLayers() ) * sqrt(p) ) / B.GetLayerMat()->getlocalcols() );
    // Calculate per column nnz how left after k-select. Minimum of average degree and k-select parameters.
    int64_t k = std::min(int64_t(std::max(selectNum, recoverNum)), d );
    //estimate output memory
    int64_t postKselectOutputNNZ = ceil(( (B.GetLayerMat()->getlocalcols() / B.getcommgrid3D()->GetGridLayers() ) * k)/sqrt(p)); // If kselect is run
    int64_t postKselectOutputMem = postKselectOutputNNZ * perNNZMem_out * 2;
    double remainingMem = perProcessMemory*1000000000 - ginputMem - postKselectOutputMem; // NOTE: GB interpreted as 10^9 bytes
    int64_t kselectMem = B.GetLayerMat()->getlocalcols() * k * sizeof(NUO) * 3;
    //inputMem + outputMem + asquareMem/phases + kselectmem/phases < memory
    if(remainingMem > 0){
        calculatedPhases = ceil( (gasquareMem + kselectMem) / remainingMem ); // If kselect is run
    }
    else calculatedPhases = -1; // not enough memory even before phase-dependent terms
    int gCalculatedPhases;
    // Agree on the maximum required phase count across the fiber.
    MPI_Allreduce(&calculatedPhases, &gCalculatedPhases, 1, MPI_INT, MPI_MAX, A.getcommgrid3D()->GetFiberWorld());
    if(gCalculatedPhases > phases) phases = gCalculatedPhases; // never reduce a user-requested phase count
}
else{
    // Do nothing
}
#ifdef TIMING
MPI_Barrier(B.getcommgrid3D()->GetWorld());
t1 = MPI_Wtime();
mcl3d_symbolictime+=(t1-t0);
if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tSymbolic stage time: %lf\n", (t1-t0));
#endif
/*
 * Calculate, accross fibers, which process should get how many columns after redistribution
 * */
vector<LIB> divisions3d;
// Calculate split boundaries as if all contents of the layer is being re-distributed along fiber
// These boundaries will be used later on
B.CalculateColSplitDistributionOfLayer(divisions3d);
/*
 * Split B according to calculated number of phases
 * For better load balancing split B into nlayers*phases chunks
 * */
vector<UDERB*> PiecesOfB;
vector<UDERB*> tempPiecesOfB;
UDERB CopyB = *(B.GetLayerMat()->seqptr());
CopyB.ColSplit(divisions3d, tempPiecesOfB); // Split B into `nlayers` chunks at first
for(int i = 0; i < tempPiecesOfB.size(); i++){
    vector<UDERB*> temp;
    tempPiecesOfB[i]->ColSplit(phases, temp); // Split each chunk of B into `phases` chunks
    for(int j = 0; j < temp.size(); j++){
        PiecesOfB.push_back(temp[j]);
    }
}
vector<UDERO> toconcatenate; // per-phase layer results, column-concatenated at the end
if(myrank == 0){
    fprintf(stderr, "[MemEfficientSpGEMM3D]\tRunning with phase: %d\n", phases);
}
for(int p = 0; p < phases; p++){
    /*
     * At the start of each phase take appropriate pieces from previously created pieces of local B matrix
     * Appropriate means correct pieces so that 3D-merge can be properly load balanced.
     * */
    vector<LIB> lbDivisions3d; // load balance friendly division
    LIB totalLocalColumnInvolved = 0;
    vector<UDERB*> targetPiecesOfB; // Pieces of B involved in current phase
    for(int i = 0; i < PiecesOfB.size(); i++){
        if(i % phases == p){ // round-robin assignment of chunks to phases
            targetPiecesOfB.push_back(new UDERB(*(PiecesOfB[i])));
            lbDivisions3d.push_back(PiecesOfB[i]->getncol());
            totalLocalColumnInvolved += PiecesOfB[i]->getncol();
        }
    }
    /*
     * Create new local matrix by concatenating appropriately picked pieces
     * */
    UDERB * OnePieceOfB = new UDERB(0, (B.GetLayerMat())->seqptr()->getnrow(), totalLocalColumnInvolved, 0);
    OnePieceOfB->ColConcatenate(targetPiecesOfB);
    vector<UDERB*>().swap(targetPiecesOfB);
    /*
     * Create a new layer-wise distributed matrix with the newly created local matrix for this phase
     * This matrix is used in SUMMA multiplication of respective layer
     * */
    SpParMat<IU, NU2, UDERB> OnePieceOfBLayer(OnePieceOfB, A.getcommgrid3D()->layerWorld);
#ifdef TIMING
    t0 = MPI_Wtime();
#endif
    /*
     * SUMMA Starts
     * */
    int stages, dummy; // last two parameters of ProductGrid are ignored for this multiplication
    std::shared_ptr<CommGrid> GridC = ProductGrid((A.GetLayerMat()->getcommgrid()).get(), (OnePieceOfBLayer.getcommgrid()).get(), stages, dummy, dummy);
    IU C_m = A.GetLayerMat()->seqptr()->getnrow();
    IU C_n = OnePieceOfBLayer.seqptr()->getncol();
    // Essential-array sizes for every stage, gathered once up front.
    IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages);
    IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages);
    SpParHelper::GetSetSizes( *(A.GetLayerMat()->seqptr()), ARecvSizes, (A.GetLayerMat()->getcommgrid())->GetRowWorld() );
    SpParHelper::GetSetSizes( *(OnePieceOfBLayer.seqptr()), BRecvSizes, (OnePieceOfBLayer.getcommgrid())->GetColWorld() );
    // Remotely fetched matrices are stored as pointers
    UDERA * ARecv;
    UDERB * BRecv;
    std::vector< SpTuples<IU,NUO> *> tomerge;
    int Aself = (A.GetLayerMat()->getcommgrid())->GetRankInProcRow();
    int Bself = (OnePieceOfBLayer.getcommgrid())->GetRankInProcCol();
    double Abcast_time = 0;
    double Bbcast_time = 0;
    double Local_multiplication_time = 0;
    for(int i = 0; i < stages; ++i) {
        std::vector<IU> ess;
        if(i == Aself){
            ARecv = A.GetLayerMat()->seqptr(); // shallow-copy
        }
        else{
            ess.resize(UDERA::esscount);
            for(int j=0; j<UDERA::esscount; ++j) {
                ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row
            }
            ARecv = new UDERA(); // first, create the object
        }
#ifdef TIMING
        t2 = MPI_Wtime();
#endif
        if (Aself != i) {
            ARecv->Create(ess); // allocate receive buffers sized by the essentials
        }
        Arr<IU,NU1> Aarrinfo = ARecv->GetArrays();
        // Broadcast A's index and value arrays along the process row (root = stage owner i).
        for(unsigned int idx = 0; idx < Aarrinfo.indarrs.size(); ++idx) {
            MPI_Bcast(Aarrinfo.indarrs[idx].addr, Aarrinfo.indarrs[idx].count, MPIType<IU>(), i, GridC->GetRowWorld());
        }
        for(unsigned int idx = 0; idx < Aarrinfo.numarrs.size(); ++idx) {
            MPI_Bcast(Aarrinfo.numarrs[idx].addr, Aarrinfo.numarrs[idx].count, MPIType<NU1>(), i, GridC->GetRowWorld());
        }
#ifdef TIMING
        t3 = MPI_Wtime();
        mcl3d_Abcasttime += (t3-t2);
        Abcast_time += (t3-t2);
#endif
        ess.clear();
        if(i == Bself){
            BRecv = OnePieceOfBLayer.seqptr(); // shallow-copy
        }
        else{
            ess.resize(UDERB::esscount);
            for(int j=0; j<UDERB::esscount; ++j) {
                ess[j] = BRecvSizes[j][i];
            }
            BRecv = new UDERB();
        }
        MPI_Barrier(A.GetLayerMat()->getcommgrid()->GetWorld());
#ifdef TIMING
        t2 = MPI_Wtime();
#endif
        if (Bself != i) {
            BRecv->Create(ess);
        }
        Arr<IU,NU2> Barrinfo = BRecv->GetArrays();
        // Broadcast B's index and value arrays along the process column (root = stage owner i).
        for(unsigned int idx = 0; idx < Barrinfo.indarrs.size(); ++idx) {
            MPI_Bcast(Barrinfo.indarrs[idx].addr, Barrinfo.indarrs[idx].count, MPIType<IU>(), i, GridC->GetColWorld());
        }
        for(unsigned int idx = 0; idx < Barrinfo.numarrs.size(); ++idx) {
            MPI_Bcast(Barrinfo.numarrs[idx].addr, Barrinfo.numarrs[idx].count, MPIType<NU2>(), i, GridC->GetColWorld());
        }
#ifdef TIMING
        t3 = MPI_Wtime();
        mcl3d_Bbcasttime += (t3-t2);
        Bbcast_time += (t3-t2);
#endif
#ifdef TIMING
        t2 = MPI_Wtime();
#endif
        SpTuples<IU,NUO> * C_cont = LocalSpGEMMHash<SR, NUO>
            (*ARecv, *BRecv, // parameters themselves
             i != Aself,     // 'delete A' condition
             i != Bself,     // 'delete B' condition
             false);         // not to sort each column
#ifdef TIMING
        t3 = MPI_Wtime();
        mcl3d_localspgemmtime += (t3-t2);
        Local_multiplication_time += (t3-t2);
#endif
        if(!C_cont->isZero()) tomerge.push_back(C_cont);
    }
    SpHelper::deallocate2D(ARecvSizes, UDERA::esscount);
    SpHelper::deallocate2D(BRecvSizes, UDERB::esscount);
#ifdef TIMING
    t2 = MPI_Wtime();
#endif
    SpTuples<IU,NUO> * C_tuples = MultiwayMergeHash<SR>(tomerge, C_m, C_n, true, false); // Delete input arrays and do not sort
#ifdef TIMING
    t3 = MPI_Wtime();
    mcl3d_SUMMAmergetime += (t3-t2);
#endif
#ifdef TIMING
    if(myrank == 0){
        fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAbcast_time: %lf\n", p, Abcast_time);
        fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tBbcast_time: %lf\n", p, Bbcast_time);
        fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tLocal_multiplication_time: %lf\n", p, Local_multiplication_time);
        fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tSUMMA Merge time: %lf\n", p, (t3-t2));
    }
#endif
    /*
     * SUMMA Ends
     * */
#ifdef TIMING
    t1 = MPI_Wtime();
    mcl3d_SUMMAtime += (t1-t0);
    if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tSUMMA time: %lf\n", p, (t1-t0));
#endif
#ifdef TIMING
    mcl3d_proc_nnzc_pre_red += C_tuples->getnnz();
#endif
    /*
     * 3d-reduction starts
     * */
#ifdef TIMING
    t0 = MPI_Wtime();
    t2 = MPI_Wtime();
#endif
    // Treat one (row, col, value) tuple as an opaque byte blob for the all-to-all.
    MPI_Datatype MPI_tuple;
    MPI_Type_contiguous(sizeof(std::tuple<LIC,LIC,NUO>), MPI_CHAR, &MPI_tuple);
    MPI_Type_commit(&MPI_tuple);
    /*
     * Create a profile with information regarding data to be sent and received between layers
     * These memory allocations need to be `int` specifically because some of these arrays would be used in communication
     * This requirement is for MPI as MPI_Alltoallv takes pointer to integer exclusively as count and displacement
     * */
    int * sendcnt = new int[A.getcommgrid3D()->GetGridLayers()];
    int * sendprfl = new int[A.getcommgrid3D()->GetGridLayers()*3];
    int * sdispls = new int[A.getcommgrid3D()->GetGridLayers()](); // value-initialized to zero
    int * recvcnt = new int[A.getcommgrid3D()->GetGridLayers()];
    int * recvprfl = new int[A.getcommgrid3D()->GetGridLayers()*3];
    int * rdispls = new int[A.getcommgrid3D()->GetGridLayers()](); // value-initialized to zero
    vector<IU> lbDivisions3dPrefixSum(lbDivisions3d.size());
    lbDivisions3dPrefixSum[0] = 0;
    std::partial_sum(lbDivisions3d.begin(), lbDivisions3d.end()-1, lbDivisions3dPrefixSum.begin()+1);
    ColLexiCompare<IU,NUO> comp; // column-major tuple order; tuples are assumed sorted this way
    IU totsend = C_tuples->getnnz();
#ifdef TIMING
    t3 = MPI_Wtime();
    if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAllocation of alltoall information: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
    t2 = MPI_Wtime();
#endif
#pragma omp parallel for
    for(int i=0; i < A.getcommgrid3D()->GetGridLayers(); ++i){
        // Binary-search the column range destined for layer i.
        IU start_col = lbDivisions3dPrefixSum[i];
        IU end_col = lbDivisions3dPrefixSum[i] + lbDivisions3d[i];
        std::tuple<IU, IU, NUO> search_tuple_start(0, start_col, NUO());
        std::tuple<IU, IU, NUO> search_tuple_end(0, end_col, NUO());
        std::tuple<IU, IU, NUO>* start_it = std::lower_bound(C_tuples->tuples, C_tuples->tuples + C_tuples->getnnz(), search_tuple_start, comp);
        std::tuple<IU, IU, NUO>* end_it = std::lower_bound(C_tuples->tuples, C_tuples->tuples + C_tuples->getnnz(), search_tuple_end, comp);
        // This type casting is important from semantic point of view
        sendcnt[i] = (int)(end_it - start_it);
        sendprfl[i*3+0] = (int)(sendcnt[i]); // Number of nonzeros in ith chunk
        sendprfl[i*3+1] = (int)(A.GetLayerMat()->seqptr()->getnrow()); // Number of rows in ith chunk
        sendprfl[i*3+2] = (int)(lbDivisions3d[i]); // Number of columns in ith chunk
    }
    std::partial_sum(sendcnt, sendcnt+A.getcommgrid3D()->GetGridLayers()-1, sdispls+1);
#ifdef TIMING
    t3 = MPI_Wtime();
    if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tGetting Alltoall data ready: %lf\n", p, (t3-t2));
#endif
    // Send profile ready. Now need to update the tuples to reflect correct column id after column split.
#ifdef TIMING
    t2 = MPI_Wtime();
#endif
    for(int i=0; i < A.getcommgrid3D()->GetGridLayers(); ++i){
#pragma omp parallel for schedule(static)
        for(int j = 0; j < sendcnt[i]; j++){
            // Rebase column ids so each receiver sees columns starting at zero.
            std::get<1>(C_tuples->tuples[sdispls[i]+j]) = std::get<1>(C_tuples->tuples[sdispls[i]+j]) - lbDivisions3dPrefixSum[i];
        }
    }
#ifdef TIMING
    t3 = MPI_Wtime();
    if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tGetting Alltoallv data ready: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
    t2 = MPI_Wtime();
#endif
    // Exchange the (nnz, nrow, ncol) triplets along the fiber.
    MPI_Alltoall(sendprfl, 3, MPI_INT, recvprfl, 3, MPI_INT, A.getcommgrid3D()->fiberWorld);
#ifdef TIMING
    t3 = MPI_Wtime();
    if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAlltoall: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
    t2 = MPI_Wtime();
#endif
    for(int i = 0; i < A.getcommgrid3D()->GetGridLayers(); i++) recvcnt[i] = recvprfl[i*3];
    std::partial_sum(recvcnt, recvcnt+A.getcommgrid3D()->GetGridLayers()-1, rdispls+1);
    IU totrecv = std::accumulate(recvcnt,recvcnt+A.getcommgrid3D()->GetGridLayers(), static_cast<IU>(0));
    // Raw storage only: tuples are trivially constructed in place by the Alltoallv below.
    std::tuple<LIC,LIC,NUO>* recvTuples = static_cast<std::tuple<LIC,LIC,NUO>*> (::operator new (sizeof(std::tuple<LIC,LIC,NUO>[totrecv])));
#ifdef TIMING
    t3 = MPI_Wtime();
    if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAllocation of receive data: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
    t2 = MPI_Wtime();
#endif
    MPI_Alltoallv(C_tuples->tuples, sendcnt, sdispls, MPI_tuple, recvTuples, recvcnt, rdispls, MPI_tuple, A.getcommgrid3D()->fiberWorld);
    delete C_tuples; // send buffer no longer needed
#ifdef TIMING
    t3 = MPI_Wtime();
    if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tAlltoallv: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
    t2 = MPI_Wtime();
#endif
    // Wrap each received chunk as a SpTuples view over the shared recvTuples buffer (no copy).
    vector<SpTuples<IU, NUO>*> recvChunks(A.getcommgrid3D()->GetGridLayers());
#pragma omp parallel for
    for (int i = 0; i < A.getcommgrid3D()->GetGridLayers(); i++){
        recvChunks[i] = new SpTuples<LIC, NUO>(recvcnt[i], recvprfl[i*3+1], recvprfl[i*3+2], recvTuples + rdispls[i], true, false);
    }
#ifdef TIMING
    t3 = MPI_Wtime();
    if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\trecvChunks creation: %lf\n", p, (t3-t2));
#endif
#ifdef TIMING
    t2 = MPI_Wtime();
#endif
    // Free all memory except tempTuples; Because that is holding data of newly created local matrices after receiving.
    DeleteAll(sendcnt, sendprfl, sdispls);
    DeleteAll(recvcnt, recvprfl, rdispls);
    MPI_Type_free(&MPI_tuple);
#ifdef TIMING
    t3 = MPI_Wtime();
    if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tMemory freeing: %lf\n", p, (t3-t2));
#endif
    /*
     * 3d-reduction ends
     * */
#ifdef TIMING
    t1 = MPI_Wtime();
    mcl3d_reductiontime += (t1-t0);
    if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tReduction time: %lf\n", p, (t1-t0));
#endif
#ifdef TIMING
    t0 = MPI_Wtime();
#endif
    /*
     * 3d-merge starts
     * */
    SpTuples<IU, NUO> * merged_tuples = MultiwayMergeHash<SR, IU, NUO>(recvChunks, recvChunks[0]->getnrow(), recvChunks[0]->getncol(), false, false); // Do not delete
#ifdef TIMING
    t1 = MPI_Wtime();
    mcl3d_3dmergetime += (t1-t0);
    if(myrank == 0) fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\t3D Merge time: %lf\n", p, (t1-t0));
    mcl3d_proc_nnzc_post_red += merged_tuples->getnnz();
#endif
    /*
     * 3d-merge ends
     * */
    // Discard merged result if not needed anymore
    //delete merged_tuples;
#ifdef TIMING
    t0 = MPI_Wtime();
#endif
    // Do not delete elements of recvChunks, because that would give segmentation fault due to double free
    // (the chunks all alias slices of recvTuples, which is released once here).
    ::operator delete(recvTuples);
    for(int i = 0; i < recvChunks.size(); i++){
        recvChunks[i]->tuples_deleted = true; // Temporary patch to avoid memory leak and segfault
        delete recvChunks[i]; // As the patch is used, now delete each element of recvChunks
    }
    vector<SpTuples<IU,NUO>*>().swap(recvChunks); // As the patch is used, now delete recvChunks
    // This operation is not needed if result can be used and discarded right away
    // This operation is being done because it is needed by MCLPruneRecoverySelect
    // NOTE(review): merged_tuples itself does not appear to be freed here -- possible leak; confirm whether UDERO's ctor takes ownership.
    UDERO * phaseResultant = new UDERO(*merged_tuples, false);
    SpParMat<IU, NUO, UDERO> phaseResultantLayer(phaseResultant, A.getcommgrid3D()->layerWorld);
    MCLPruneRecoverySelect(phaseResultantLayer, hardThreshold, selectNum, recoverNum, recoverPct, kselectVersion);
#ifdef TIMING
    t1 = MPI_Wtime();
    mcl3d_kselecttime += (t1-t0);
    if(myrank == 0)
        fprintf(stderr, "[MemEfficientSpGEMM3D]\tPhase: %d\tMCLPruneRecoverySelect time: %lf\n",p, (t1-t0));
#endif
    toconcatenate.push_back(phaseResultantLayer.seq());
#ifdef TIMING
    if(myrank == 0) fprintf(stderr, "***\n");
#endif
}
for(int i = 0; i < PiecesOfB.size(); i++) delete PiecesOfB[i];
// Rebuild a 3D grid matching A's and hand the concatenated per-phase results to the output matrix.
std::shared_ptr<CommGrid3D> grid3d;
grid3d.reset(new CommGrid3D(A.getcommgrid3D()->GetWorld(), A.getcommgrid3D()->GetGridLayers(), A.getcommgrid3D()->GetGridRows(), A.getcommgrid3D()->GetGridCols(), A.isSpecial()));
UDERO * localResultant = new UDERO(0, A.GetLayerMat()->seqptr()->getnrow(), divisions3d[A.getcommgrid3D()->rankInFiber], 0);
localResultant->ColConcatenate(toconcatenate);
SpParMat3D<IU, NUO, UDERO> C3D(localResultant, grid3d, A.isColSplit(), A.isSpecial());
return C3D;
}
}
#endif
openmp_array2.c
///TAFFO_TEST_ARGS -Xvra -propagate-all -fopenmp
#include <omp.h>
#include <stdio.h>

#define MAX_N (100)

/* Computes one element of the result: the index scaled by the (thread-private)
 * multiplier. Kept as a separate function so the per-thread computation is
 * visible to the value-range analysis. */
float compute_thread_result(int index, float private_multiplier)
{
  return index * private_multiplier;
}

int main(int argc, char *argv[])
{
  float result_container[MAX_N] __attribute((annotate("target('array') scalar(range(0,100) final)")));
  float multipliers_container[MAX_N] __attribute__((annotate("target('multipliers_container') scalar(range(0,1000) final)")));
  float result __attribute__((annotate("target('result') scalar(range(0,10000) final)"))) = 0;
  int i = 0;
  float private_multiplier __attribute__((annotate("target('private_multiplier') scalar(range(0,25) final)")));

  /* BUG FIX: `#pragma omp parallel for` must be followed immediately by the
   * for statement (OpenMP canonical loop form). The original wrapped the loop
   * in a compound block `{ ... }`, which conforming compilers reject with
   * "for statement expected". The loop variable `i` is implicitly private. */
  #pragma omp parallel for private(private_multiplier) num_threads(4) schedule(static)
  for (i = 0; i < MAX_N; i++) {
    private_multiplier = 5.43;
    // Do computation on private variables
    private_multiplier *= omp_get_thread_num();
    multipliers_container[i] = private_multiplier; // Save in the shared variable accessed
    result_container[i] = compute_thread_result(i, multipliers_container[i]);
  }

  /* Sequential reduction of the per-index results. */
  for (i = 0; i < MAX_N; i++) {
    result += result_container[i];
  }
  printf("result: %f\n", result);
}
Parallel.c
#include <omp.h>
#include <stdio.h>
#include <time.h>

// threads
/* Approximates pi by midpoint-rule integration of 4/(1+x^2) over [0,1],
 * with the iteration space partitioned cyclically across OpenMP threads. */
int main()
{
  //time
  clock_t start = clock();
  clock_t diff;

  int num_steps = 100000;
  double step = 1.0 / num_steps;
  /* BUG FIX: `area` was uninitialized; the `area += sum * step` below then
   * read an indeterminate value (undefined behavior, garbage result). */
  double area = 0.0;

  #pragma omp parallel
  {
    int id = omp_get_thread_num();
    int nthreads = omp_get_num_threads();
    printf("we have %d threads\n", nthreads); /* printed once per thread */

    double sum = 0.0;
    int i;
    /* Cyclic distribution: thread `id` handles i = id, id+nthreads, ... */
    for (i = id; i < num_steps; i += nthreads){
      double x = (i + .5) * step;           /* midpoint of the i-th strip */
      double y = 4/ (1 + (x * x));
      sum += y;
    }
    /* Serialize the accumulation into the shared total. */
    #pragma omp critical
    {
      area += sum * step;
    }
  }
  printf("%lf\n", area);

  diff = clock() - start;
  int msec = diff * 1000 / CLOCKS_PER_SEC;
  printf("Time taken: %d seconds %d milliseconds", msec/1000, msec%1000);
}
jacobi-block-task.cuda.c
#include "hclib.h"
#ifdef __cplusplus
#include "hclib_cpp.h"
#include "hclib_system.h"
#ifdef __CUDACC__
#include "hclib_cuda.h"
#endif
#endif
# include "poisson.h"

/* #pragma omp task/taskwait version of SWEEP.
 *
 * Runs Jacobi iterations itold+1 .. itnew over the nx-by-ny domain, processing
 * the grid in block_size-by-block_size tiles. Each iteration first snapshots
 * the current estimate (copy_block) for every tile, then refines every tile
 * (compute_estimate). A block_size of 0 means "one tile covering the grid". */
void sweep (int nx, int ny, double dx, double dy, double *f_,
            int itold, int itnew, double *u_, double *unew_, int block_size)
{
    if (block_size == 0)
        block_size = nx;

    const int tiles_x = nx / block_size;
    const int tiles_y = ny / block_size;

    for (int iter = itold + 1; iter <= itnew; iter++) {
        /* Phase 1: save the current estimate, tile by tile. */
        for (int tx = 0; tx < tiles_x; tx++)
            for (int ty = 0; ty < tiles_y; ty++)
                copy_block(nx, ny, tx, ty, u_, unew_, block_size);

        /* Phase 2: compute the refined estimate for every tile. */
        for (int tx = 0; tx < tiles_x; tx++)
            for (int ty = 0; ty < tiles_y; ty++)
                compute_estimate(tx, ty, u_, unew_, f_, dx, dy,
                                 nx, ny, block_size);
    }
}
profile.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR OOO FFFFF IIIII L EEEEE % % P P R R O O F I L E % % PPPP RRRR O O FFF I L EEE % % P R R O O F I L E % % P R R OOO F IIIII LLLLL EEEEE % % % % % % MagickCore Image Profile Methods % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colorspace-private.h" #include "magick/configure.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/hashmap.h" #include "magick/image.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" #if defined(MAGICKCORE_LCMS_DELEGATE) #if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H) #include <wchar.h> #include <lcms/lcms2.h> #elif defined(MAGICKCORE_HAVE_LCMS2_H) #include <wchar.h> #include "lcms2.h" #elif defined(MAGICKCORE_HAVE_LCMS_LCMS_H) #include <lcms/lcms.h> #else #include "lcms.h" #endif #endif /* Define declarations. */ #if !defined(LCMS_VERSION) || (LCMS_VERSION < 2000) #define cmsSigCmykData icSigCmykData #define cmsSigGrayData icSigGrayData #define cmsSigLabData icSigLabData #define cmsSigLuvData icSigLuvData #define cmsSigRgbData icSigRgbData #define cmsSigXYZData icSigXYZData #define cmsSigYCbCrData icSigYCbCrData #define cmsSigLinkClass icSigLinkClass #define cmsColorSpaceSignature icColorSpaceSignature #define cmsUInt32Number DWORD #define cmsSetLogErrorHandler(handler) cmsSetErrorHandler(handler) #define cmsCreateTransformTHR(context,source_profile,source_type, \ target_profile,target_type,intent,flags) cmsCreateTransform(source_profile, \ source_type,target_profile,target_type,intent,flags); #define cmsOpenProfileFromMemTHR(context,profile,length) \ cmsOpenProfileFromMem(profile,length) #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e P r o f i l e s % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageProfiles() clones one or more image profiles.
%
%  The format of the CloneImageProfiles method is:
%
%      MagickBooleanType CloneImageProfiles(Image *image,
%        const Image *clone_image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickSignature);
  /*
    Shallow copy of the deprecated color/IPTC profile members: only the
    pointers are duplicated, the profile bytes are shared with clone_image.
  */
  image->color_profile.length=clone_image->color_profile.length;
  image->color_profile.info=clone_image->color_profile.info;
  image->iptc_profile.length=clone_image->iptc_profile.length;
  image->iptc_profile.info=clone_image->iptc_profile.info;
  /*
    Deep-clone the profile map: keys are duplicated with ConstantString and
    profile payloads with CloneStringInfo.
  */
  if (clone_image->profiles != (void *) NULL)
    image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
      (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e l e t e I m a g e P r o f i l e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeleteImageProfile() deletes a profile from the image by its name.
%
%  The format of the DeleteImageProfile method is:
%
%      MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  if (LocaleCompare(name,"icc") == 0)
    {
      /*
        Continue to support deprecated color profile for now.
      */
      image->color_profile.length=0;
      image->color_profile.info=(unsigned char *) NULL;
    }
  if (LocaleCompare(name,"iptc") == 0)
    {
      /*
        Continue to support deprecated IPTC profile for now.
      */
      image->iptc_profile.length=0;
      image->iptc_profile.info=(unsigned char *) NULL;
    }
  return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e P r o f i l e s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageProfiles() releases memory associated with an image profile
%  map.
%
%  The format of the DestroyProfiles method is:
%
%      void DestroyImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  if (image->profiles != (SplayTreeInfo *) NULL)
    image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageProfile() gets a profile associated with an image by name.
%
%  The format of the GetImageProfile method is:
%
%      const StringInfo *GetImageProfile(const Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
  const char *name)
{
  char
    key[MaxTextExtent];

  const StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /* Copy the name into a bounded buffer before the splay-tree lookup. */
  (void) CopyMagickString(key,name,MaxTextExtent);
  profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,key);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t N e x t I m a g e P r o f i l e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNextImageProfile() gets the next profile name for an image.
%
%  The format of the GetNextImageProfile method is:
%
%      char *GetNextImageProfile(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((char *) NULL);
  return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P r o f i l e I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
%  profile with / to / from an image.  If the profile is NULL, it is removed
%  from the image otherwise added or applied.  Use a name of '*' and a profile
%  of NULL to remove all profiles from the image.
%
%  ICC and ICM profiles are handled as follows: If the image does not have
%  an associated color profile, the one you provide is associated with the
%  image and the image pixels are not transformed.  Otherwise, the colorspace
%  transform defined by the existing and new profile are applied to the image
%  pixels and the new profile is associated with the image.
%
%  The format of the ProfileImage method is:
%
%      MagickBooleanType ProfileImage(Image *image,const char *name,
%        const void *datum,const size_t length,const MagickBooleanType clone)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
%    o datum: the profile data.
%
%    o length: the length of the profile.
%
%    o clone: should be MagickFalse.
%
*/

#if defined(MAGICKCORE_LCMS_DELEGATE)
/* Release the per-OpenMP-thread pixel staging buffers allocated by
   AcquirePixelThreadSet(); always returns NULL for convenient assignment. */
static unsigned short **DestroyPixelThreadSet(unsigned short **pixels)
{
  register ssize_t
    i;

  assert(pixels != (unsigned short **) NULL);
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    if (pixels[i] != (unsigned short *) NULL)
      pixels[i]=(unsigned short *) RelinquishMagickMemory(pixels[i]);
  pixels=(unsigned short **) RelinquishMagickMemory(pixels);
  return(pixels);
}

/* Allocate one pixel staging buffer (columns*channels shorts) per OpenMP
   thread; returns NULL on allocation failure after freeing partial work. */
static unsigned short **AcquirePixelThreadSet(const size_t columns,
  const size_t channels)
{
  register ssize_t
    i;

  unsigned short
    **pixels;

  size_t
    number_threads;

  number_threads=GetOpenMPMaximumThreads();
  pixels=(unsigned short **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (unsigned short **) NULL)
    return((unsigned short **) NULL);
  /* Zero the pointer table so a partial failure can be cleaned up safely. */
  (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(unsigned short *) AcquireQuantumMemory(columns,channels*
      sizeof(**pixels));
    if (pixels[i] == (unsigned short *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}

/* Release the per-thread LCMS transforms allocated by
   AcquireTransformThreadSet(); always returns NULL. */
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
  register ssize_t
    i;

  assert(transform !=
(cmsHTRANSFORM *) NULL); for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++) if (transform[i] != (cmsHTRANSFORM) NULL) cmsDeleteTransform(transform[i]); transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform); return(transform); } static cmsHTRANSFORM *AcquireTransformThreadSet(Image *image, const cmsHPROFILE source_profile,const cmsUInt32Number source_type, const cmsHPROFILE target_profile,const cmsUInt32Number target_type, const int intent,const cmsUInt32Number flags) { cmsHTRANSFORM *transform; register ssize_t i; size_t number_threads; number_threads=GetOpenMPMaximumThreads(); transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads, sizeof(*transform)); if (transform == (cmsHTRANSFORM *) NULL) return((cmsHTRANSFORM *) NULL); (void) ResetMagickMemory(transform,0,number_threads*sizeof(*transform)); for (i=0; i < (ssize_t) number_threads; i++) { transform[i]=cmsCreateTransformTHR(image,source_profile,source_type, target_profile,target_type,intent,flags); if (transform[i] == (cmsHTRANSFORM) NULL) return(DestroyTransformThreadSet(transform)); } return(transform); } #endif static MagickBooleanType SetAdobeRGB1998ImageProfile(Image *image) { static unsigned char AdobeRGB1998Profile[] = { 0x00, 0x00, 0x02, 0x30, 0x41, 0x44, 0x42, 0x45, 0x02, 0x10, 0x00, 0x00, 0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20, 0x07, 0xd0, 0x00, 0x08, 0x00, 0x0b, 0x00, 0x13, 0x00, 0x33, 0x00, 0x3b, 0x61, 0x63, 0x73, 0x70, 0x41, 0x50, 0x50, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x6f, 0x6e, 0x65, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x41, 0x44, 0x42, 0x45, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x0a, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x00, 0x00, 0x32, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x30, 0x00, 0x00, 0x00, 0x6b, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x01, 0x9c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x01, 0xb0, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x01, 0xc4, 0x00, 0x00, 0x00, 0x0e, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x01, 0xd4, 0x00, 0x00, 0x00, 0x0e, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x01, 0xe4, 0x00, 0x00, 0x00, 0x0e, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x01, 0xf4, 0x00, 0x00, 0x00, 0x14, 0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x02, 0x08, 0x00, 0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x02, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, 0x68, 0x74, 0x20, 0x32, 0x30, 0x30, 0x30, 0x20, 0x41, 0x64, 0x6f, 0x62, 0x65, 0x20, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x73, 0x20, 0x49, 0x6e, 0x63, 0x6f, 0x72, 0x70, 0x6f, 0x72, 0x61, 0x74, 0x65, 0x64, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x41, 0x64, 0x6f, 0x62, 0x65, 0x20, 0x52, 0x47, 0x42, 0x20, 0x28, 0x31, 0x39, 0x39, 0x38, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x01, 0x02, 0x33, 0x00, 0x00, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x33, 0x00, 0x00, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x33, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x18, 0x00, 0x00, 0x4f, 0xa5, 0x00, 0x00, 0x04, 0xfc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x8d, 0x00, 0x00, 0xa0, 0x2c, 0x00, 0x00, 0x0f, 0x95, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x31, 0x00, 0x00, 0x10, 0x2f, 0x00, 0x00, 0xbe, 0x9c }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (GetImageProfile(image,"icm") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(AdobeRGB1998Profile)); SetStringInfoDatum(profile,AdobeRGB1998Profile); status=SetImageProfile(image,"icm",profile); profile=DestroyStringInfo(profile); return(status); } static MagickBooleanType SetsRGBImageProfile(Image *image) { static unsigned char sRGBProfile[] = { 0x00, 0x00, 0xee, 0x20, 0x00, 0x00, 0x00, 0x00, 0x04, 0x20, 0x00, 0x00, 0x73, 0x70, 0x61, 0x63, 0x52, 0x47, 0x42, 0x20, 0x4c, 0x61, 0x62, 0x20, 0x07, 0xd7, 0x00, 0x07, 0x00, 0x19, 0x00, 0x00, 0x00, 0x05, 0x00, 0x25, 0x61, 0x63, 0x73, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x34, 0x56, 0x2a, 0xbf, 0x99, 0x4c, 0xcd, 0x06, 0x6d, 0x2c, 0x57, 0x21, 0xd0, 0xd6, 0x8c, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x76, 0x41, 0x32, 0x42, 0x30, 0x00, 
0x00, 0x01, 0x68, 0x00, 0x00, 0x74, 0x10, 0x41, 0x32, 0x42, 0x31, 0x00, 0x00, 0x75, 0x78, 0x00, 0x00, 0x01, 0xb4, 0x42, 0x32, 0x41, 0x30, 0x00, 0x00, 0x77, 0x2c, 0x00, 0x00, 0x74, 0x34, 0x42, 0x32, 0x41, 0x31, 0x00, 0x00, 0xeb, 0x60, 0x00, 0x00, 0x01, 0xfc, 0x72, 0x69, 0x67, 0x30, 0x00, 0x00, 0xed, 0x5c, 0x00, 0x00, 0x00, 0x0c, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0xed, 0x68, 0x00, 0x00, 0x00, 0x14, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0xed, 0x7c, 0x00, 0x00, 0x00, 0x76, 0x63, 0x68, 0x61, 0x64, 0x00, 0x00, 0xed, 0xf4, 0x00, 0x00, 0x00, 0x2c, 0x6d, 0x6c, 0x75, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0c, 0x65, 0x6e, 0x55, 0x53, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x73, 0x00, 0x52, 0x00, 0x47, 0x00, 0x42, 0x00, 0x20, 0x00, 0x76, 0x00, 0x34, 0x00, 0x20, 0x00, 0x49, 0x00, 0x43, 0x00, 0x43, 0x00, 0x20, 0x00, 0x70, 0x00, 0x72, 0x00, 0x65, 0x00, 0x66, 0x00, 0x65, 0x00, 0x72, 0x00, 0x65, 0x00, 0x6e, 0x00, 0x63, 0x00, 0x65, 0x00, 0x20, 0x00, 0x70, 0x00, 0x65, 0x00, 0x72, 0x00, 0x63, 0x00, 0x65, 0x00, 0x70, 0x00, 0x74, 0x00, 0x75, 0x00, 0x61, 0x00, 0x6c, 0x00, 0x20, 0x00, 0x69, 0x00, 0x6e, 0x00, 0x74, 0x00, 0x65, 0x00, 0x6e, 0x00, 0x74, 0x00, 0x20, 0x00, 0x62, 0x00, 0x65, 0x00, 0x74, 0x00, 0x61, 0x00, 0x00, 0x6d, 0x41, 0x42, 0x20, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0, 0x00, 0x00, 0x73, 0xec, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x07, 0xf7, 0x80, 0x80, 0x80, 0x80, 0x07, 0xb9, 0x84, 0x8b, 0x77, 0x79, 0x08, 0x42, 0x88, 0x52, 0x6e, 0xa3, 0x09, 0x61, 0x8c, 0x4a, 0x65, 0xcf, 0x0c, 0x7a, 0x90, 0x54, 0x5d, 0xda, 0x0e, 0x9b, 0x94, 0x6f, 0x56, 0x76, 0x11, 0x50, 0x98, 0x8f, 0x4f, 0x12, 0x15, 0x38, 0x9c, 0x52, 0x48, 0xda, 0x19, 0x01, 0x9f, 0xe2, 0x42, 0x8a, 0x1b, 0xc9, 0xa2, 0xab, 0x3d, 0xed, 0x1e, 0x44, 0xa4, 0xf7, 0x3a, 0x07, 0x20, 0xea, 0xa6, 0xf5, 0x36, 0x4e, 0x23, 0xb9, 0xa8, 0xc8, 0x32, 0x8e, 0x26, 0x3f, 0xaa, 0x95, 0x2e, 0xb6, 0x28, 0x93, 0xac, 0x8a, 0x2a, 0x00, 0x2c, 0x1a, 0xae, 0x50, 0x25, 0xb4, 0x2f, 0xd0, 0xb0, 0x03, 0x1f, 0xae, 0x09, 0x99, 0x78, 0x06, 0x86, 0x58, 0x0e, 0x30, 0x7a, 0x97, 0x7c, 0x67, 0x0f, 0xe9, 0x7e, 0xdc, 0x73, 0x23, 0x11, 0xc0, 0x83, 0x38, 0x6a, 0x65, 0x13, 0xf7, 0x87, 0x57, 0x61, 0xb0, 0x16, 0xab, 0x8c, 0x2a, 0x59, 0x90, 0x19, 0x4c, 0x90, 0xb3, 0x51, 0x93, 0x1c, 0x1d, 0x94, 0xdc, 0x4a, 0xc6, 0x1f, 0x61, 0x98, 0xb2, 0x44, 0x5d, 0x22, 0xf0, 0x9c, 0x26, 0x3e, 0xe5, 0x26, 0x09, 0x9e, 0xf0, 0x3a, 0xf2, 0x28, 0x93, 0xa1, 0x42, 0x37, 0x50, 0x2a, 0xfa, 0xa3, 0x41, 0x33, 0xc2, 0x2d, 0x94, 0xa5, 0x12, 0x30, 0x4e, 0x30, 0x7a, 0xa6, 0xe2, 0x2c, 0x40, 0x33, 0x7c, 0xa8, 0x97, 0x28, 0x53, 0x36, 0x46, 0xaa, 0x11, 0x24, 0xb9, 0x14, 0xa0, 0x6f, 0xfd, 0x8b, 0x7e, 0x16, 0x12, 0x72, 0x51, 0x82, 0x38, 0x17, 0x8e, 0x75, 0x89, 0x78, 0xb8, 0x19, 0x41, 0x79, 0x8d, 0x6f, 0x58, 0x1b, 0x5b, 0x7e, 0x44, 0x66, 0x03, 0x1d, 0x91, 0x83, 0x28, 0x5d, 0x2e, 0x20, 0x02, 0x88, 0x51, 0x54, 0xe0, 0x22, 0xee, 0x8c, 0xf9, 0x4d, 0x69, 0x25, 0xb5, 0x90, 0xfb, 0x46, 0xfd, 0x28, 
0xd7, 0x94, 0xde, 0x40, 0xde, 0x2b, 0xed, 0x98, 0x02, 0x3c, 0x7c, 0x2e, 0xeb, 0x9a, 0xef, 0x38, 0xa2, 0x31, 0x2d, 0x9d, 0x34, 0x34, 0xc0, 0x34, 0x81, 0xa0, 0x1c, 0x31, 0x6d, 0x37, 0x3c, 0xa2, 0x00, 0x2d, 0xdc, 0x3a, 0x18, 0xa3, 0xaa, 0x2a, 0x60, 0x3c, 0xea, 0xa5, 0x39, 0x26, 0xf8, 0x1e, 0x94, 0x67, 0x75, 0x91, 0x82, 0x1e, 0xa4, 0x69, 0x88, 0x88, 0x21, 0x1e, 0xe3, 0x6b, 0xe5, 0x7e, 0xb3, 0x20, 0x35, 0x70, 0x06, 0x74, 0xad, 0x21, 0xdf, 0x74, 0x1d, 0x6b, 0x22, 0x24, 0x66, 0x79, 0x12, 0x61, 0xdc, 0x27, 0x4c, 0x7f, 0x05, 0x59, 0x16, 0x29, 0x82, 0x84, 0x14, 0x50, 0xb9, 0x2c, 0x47, 0x88, 0x95, 0x49, 0xf7, 0x2e, 0xe9, 0x8c, 0xb6, 0x43, 0xd3, 0x31, 0xca, 0x90, 0x89, 0x3e, 0x96, 0x34, 0xa8, 0x93, 0xc3, 0x3a, 0x2d, 0x36, 0x9e, 0x96, 0x42, 0x35, 0xdd, 0x3a, 0x21, 0x99, 0xb5, 0x32, 0x62, 0x3d, 0xa8, 0x9d, 0x00, 0x2f, 0x16, 0x40, 0xa8, 0x9f, 0x35, 0x2b, 0xbe, 0x43, 0x60, 0xa0, 0xb3, 0x28, 0x71, 0x29, 0xe2, 0x5f, 0x52, 0x97, 0xba, 0x2a, 0x28, 0x60, 0x90, 0x8e, 0xff, 0x27, 0x5c, 0x61, 0x56, 0x84, 0xf9, 0x28, 0x0f, 0x65, 0x30, 0x7b, 0x76, 0x28, 0x8d, 0x69, 0x34, 0x70, 0xfd, 0x2b, 0xb8, 0x6e, 0xb3, 0x67, 0x7e, 0x2e, 0x03, 0x73, 0xfe, 0x5e, 0x2c, 0x30, 0xba, 0x7a, 0x2d, 0x55, 0x63, 0x33, 0x3e, 0x7f, 0xb7, 0x4d, 0x5a, 0x35, 0xcd, 0x84, 0x17, 0x46, 0xe6, 0x37, 0x97, 0x87, 0xee, 0x40, 0x51, 0x39, 0x92, 0x8b, 0x6d, 0x3b, 0x98, 0x3c, 0x0f, 0x8e, 0xd5, 0x37, 0x44, 0x40, 0x6e, 0x93, 0x15, 0x34, 0x12, 0x43, 0x82, 0x96, 0x65, 0x30, 0x77, 0x46, 0x54, 0x99, 0x05, 0x2d, 0x13, 0x49, 0x16, 0x9b, 0x48, 0x29, 0xb2, 0x36, 0x51, 0x56, 0x9d, 0x9f, 0x6b, 0x35, 0x31, 0x57, 0x96, 0x96, 0xa7, 0x33, 0xe1, 0x58, 0xbd, 0x8d, 0xba, 0x31, 0xa5, 0x5a, 0xed, 0x83, 0x91, 0x30, 0xc1, 0x5e, 0x2a, 0x78, 0xa8, 0x32, 0x72, 0x63, 0x12, 0x6d, 0xf8, 0x35, 0x37, 0x68, 0xbf, 0x64, 0x17, 0x38, 0x47, 0x6e, 0xfc, 0x5b, 0x0c, 0x3a, 0x54, 0x74, 0xd4, 0x52, 0x29, 0x3c, 0xd9, 0x7a, 0x56, 0x4a, 0xfa, 0x3e, 0x76, 0x7e, 0xd2, 0x44, 0x73, 0x41, 0xe3, 0x83, 0xf0, 0x3f, 0x20, 0x44, 0x04, 0x87, 0xff, 0x3a, 0x4a, 0x47, 0x70, 0x8c, 0x34, 
0x36, 0x59, 0x49, 0xfc, 0x8f, 0xc0, 0x32, 0x50, 0x4c, 0x75, 0x92, 0xd1, 0x2e, 0x96, 0x4f, 0x1c, 0x95, 0x83, 0x2b, 0x0e, 0x41, 0xa7, 0x4e, 0x2e, 0xa6, 0xe8, 0x41, 0x83, 0x4f, 0x0e, 0x9e, 0xac, 0x3f, 0xbb, 0x4f, 0xb4, 0x95, 0xf3, 0x3d, 0x72, 0x50, 0xa2, 0x8c, 0x7f, 0x39, 0xf4, 0x52, 0x60, 0x81, 0x9d, 0x3a, 0x93, 0x57, 0x0f, 0x76, 0x2f, 0x3c, 0xb3, 0x5d, 0x0e, 0x6a, 0xba, 0x3f, 0x2d, 0x63, 0x34, 0x60, 0xbf, 0x41, 0x19, 0x69, 0x5e, 0x57, 0x0f, 0x43, 0xdd, 0x6f, 0xc6, 0x4f, 0x24, 0x46, 0x36, 0x75, 0x5f, 0x48, 0x32, 0x49, 0x01, 0x7a, 0xd3, 0x42, 0x17, 0x4b, 0xe5, 0x80, 0x21, 0x3d, 0x3b, 0x4e, 0x64, 0x84, 0x86, 0x38, 0xab, 0x50, 0xd7, 0x88, 0x76, 0x34, 0x6a, 0x53, 0x26, 0x8b, 0xf6, 0x30, 0x4d, 0x55, 0xea, 0x8f, 0x33, 0x2c, 0x73, 0x4d, 0xf6, 0x46, 0x11, 0xaf, 0x9c, 0x4c, 0xfd, 0x46, 0x5d, 0xa7, 0x32, 0x4c, 0x05, 0x46, 0xc8, 0x9e, 0xd0, 0x49, 0xa4, 0x47, 0x4d, 0x95, 0xb6, 0x45, 0x8c, 0x47, 0x08, 0x8b, 0xd4, 0x45, 0x18, 0x4b, 0x30, 0x80, 0x86, 0x44, 0xb0, 0x50, 0x4e, 0x73, 0x74, 0x46, 0xb3, 0x56, 0xf7, 0x67, 0x96, 0x48, 0xcf, 0x5d, 0xbd, 0x5c, 0xef, 0x4b, 0x59, 0x64, 0x53, 0x54, 0x0b, 0x4d, 0xd4, 0x6a, 0xb8, 0x4c, 0x81, 0x50, 0x4b, 0x70, 0xf5, 0x45, 0xfc, 0x53, 0x2b, 0x76, 0x96, 0x3f, 0xd8, 0x56, 0x1c, 0x7b, 0xd3, 0x3b, 0x5e, 0x58, 0x72, 0x80, 0x77, 0x36, 0xe7, 0x5a, 0x86, 0x84, 0x38, 0x32, 0x8c, 0x5c, 0xd7, 0x87, 0xc1, 0x2e, 0x56, 0x5b, 0xdb, 0x3e, 0xa8, 0xb6, 0xa9, 0x5c, 0x3f, 0x3f, 0xc7, 0xae, 0xc5, 0x59, 0xbd, 0x3f, 0x54, 0xa7, 0x01, 0x56, 0xf5, 0x3e, 0xa9, 0x9f, 0x11, 0x52, 0x65, 0x3d, 0x76, 0x95, 0x7f, 0x50, 0x92, 0x3f, 0xfc, 0x8a, 0x5d, 0x4e, 0xec, 0x43, 0x42, 0x7d, 0xfe, 0x4f, 0x22, 0x48, 0xdf, 0x70, 0xe6, 0x50, 0x52, 0x51, 0x18, 0x63, 0xbb, 0x53, 0x2a, 0x58, 0x87, 0x59, 0xe2, 0x55, 0xb7, 0x5f, 0x4a, 0x51, 0x37, 0x58, 0x53, 0x65, 0xdb, 0x4a, 0x4b, 0x5a, 0xc6, 0x6c, 0x22, 0x43, 0xc0, 0x5d, 0xfa, 0x72, 0x39, 0x3e, 0x52, 0x60, 0x4f, 0x77, 0x45, 0x39, 0x9d, 0x62, 0x64, 0x7b, 0xf7, 0x35, 0x07, 0x64, 0x4f, 0x80, 0x50, 0x30, 0x6f, 0x69, 0x29, 0x39, 0x52, 0xbc, 0x17, 0x68, 
0x89, 0x39, 0x1d, 0xb5, 0x2f, 0x67, 0xa8, 0x38, 0xcc, 0xae, 0x48, 0x65, 0x09, 0x37, 0x42, 0xa7, 0x28, 0x61, 0x6e, 0x35, 0x0f, 0x9f, 0xc8, 0x5d, 0xc2, 0x35, 0x36, 0x95, 0x38, 0x5b, 0x74, 0x36, 0xf2, 0x89, 0xd0, 0x5a, 0x24, 0x3a, 0xa1, 0x7c, 0x7b, 0x5a, 0x18, 0x42, 0xc8, 0x6e, 0x14, 0x5c, 0x08, 0x4c, 0x7e, 0x60, 0xba, 0x5e, 0x2a, 0x53, 0xe5, 0x57, 0x4b, 0x61, 0x04, 0x5a, 0x9a, 0x4f, 0x60, 0x62, 0xa5, 0x61, 0x02, 0x48, 0x4e, 0x65, 0xb2, 0x67, 0x84, 0x42, 0x4a, 0x68, 0x73, 0x6d, 0xaf, 0x3c, 0xd7, 0x6a, 0x8c, 0x72, 0xe6, 0x37, 0xd2, 0x6c, 0x5c, 0x77, 0x8c, 0x32, 0xdf, 0x76, 0x98, 0x35, 0xc7, 0xc0, 0x60, 0x75, 0xb6, 0x35, 0x08, 0xb9, 0x92, 0x74, 0xa1, 0x34, 0x23, 0xb3, 0x81, 0x72, 0x89, 0x32, 0x62, 0xad, 0x41, 0x6f, 0x12, 0x2e, 0xab, 0xa6, 0xfb, 0x6d, 0x9e, 0x2c, 0xd6, 0xa0, 0x87, 0x68, 0x70, 0x27, 0xbd, 0x96, 0xf7, 0x66, 0x36, 0x2e, 0x4e, 0x88, 0xb0, 0x64, 0x9f, 0x33, 0x3a, 0x7a, 0x39, 0x65, 0xee, 0x3d, 0x4e, 0x6b, 0x73, 0x67, 0x3d, 0x46, 0xc9, 0x5e, 0xaa, 0x69, 0x7e, 0x4e, 0xf6, 0x55, 0x1c, 0x6a, 0xdb, 0x55, 0x5c, 0x4d, 0x23, 0x6d, 0x41, 0x5c, 0x55, 0x46, 0xd5, 0x6f, 0xd8, 0x62, 0xda, 0x40, 0xc7, 0x72, 0xa3, 0x69, 0x2e, 0x3b, 0x63, 0x74, 0xc8, 0x6e, 0xc5, 0x35, 0xe2, 0x82, 0x4d, 0x33, 0x40, 0xc2, 0x49, 0x82, 0x11, 0x32, 0xad, 0xbc, 0x78, 0x81, 0x6d, 0x31, 0x69, 0xb7, 0x34, 0x80, 0x6a, 0x2f, 0xaa, 0xb2, 0x18, 0x7d, 0xd6, 0x2d, 0x34, 0xac, 0x2c, 0x7a, 0xcf, 0x2a, 0xdd, 0xa5, 0x6a, 0x77, 0xd9, 0x28, 0x05, 0x9e, 0x7f, 0x74, 0x2b, 0x25, 0x74, 0x94, 0x3c, 0x71, 0xa5, 0x27, 0x0a, 0x87, 0x7e, 0x70, 0xdb, 0x2f, 0x37, 0x78, 0x00, 0x71, 0x2b, 0x38, 0x14, 0x68, 0xb5, 0x72, 0x5c, 0x42, 0x16, 0x5c, 0x86, 0x73, 0xe1, 0x49, 0xfb, 0x52, 0xd4, 0x76, 0x24, 0x51, 0xba, 0x4b, 0xcd, 0x78, 0x6b, 0x58, 0xc3, 0x45, 0xb3, 0x7a, 0xcc, 0x5f, 0x68, 0x3f, 0xce, 0x7c, 0xbe, 0x65, 0x5c, 0x39, 0xcb, 0x8c, 0x5f, 0x31, 0xaa, 0xc3, 0xa1, 0x8c, 0x4f, 0x31, 0x69, 0xbe, 0x08, 0x8b, 0xc6, 0x30, 0x09, 0xb9, 0x36, 0x89, 0x9e, 0x2c, 0x91, 0xb4, 0xbb, 0x89, 0x65, 0x2b, 0xf4, 0xaf, 0xe0, 0x86, 0x7a, 0x29, 0x70, 
0xa9, 0x9b, 0x82, 0xa8, 0x25, 0x8f, 0xa3, 0x61, 0x80, 0x8d, 0x24, 0xe3, 0x9b, 0x42, 0x7d, 0x4b, 0x23, 0x46, 0x91, 0x15, 0x7b, 0x7f, 0x25, 0x1b, 0x84, 0x29, 0x78, 0xfa, 0x26, 0xbc, 0x74, 0xd7, 0x7a, 0x0d, 0x32, 0x6c, 0x65, 0x6c, 0x79, 0x72, 0x3b, 0xc2, 0x58, 0x72, 0x7c, 0xbd, 0x45, 0xc7, 0x50, 0x81, 0x7e, 0xe0, 0x4d, 0x48, 0x49, 0xab, 0x82, 0xc7, 0x55, 0x66, 0x44, 0x65, 0x84, 0xde, 0x5c, 0x08, 0x3d, 0xeb, 0x94, 0xfd, 0x30, 0x41, 0xc4, 0xc4, 0x95, 0x28, 0x30, 0x45, 0xbf, 0x49, 0x94, 0xeb, 0x2f, 0x4e, 0xba, 0xba, 0x94, 0x83, 0x2e, 0x1b, 0xb6, 0x43, 0x93, 0xeb, 0x2c, 0xad, 0xb1, 0xcb, 0x92, 0x90, 0x2b, 0x21, 0xac, 0x81, 0x90, 0x64, 0x29, 0x1f, 0xa6, 0x8f, 0x8e, 0x89, 0x27, 0xe7, 0xa0, 0x73, 0x8b, 0x2a, 0x25, 0xcb, 0x97, 0x97, 0x87, 0x3b, 0x23, 0x09, 0x8d, 0xa5, 0x86, 0xde, 0x26, 0x61, 0x81, 0x16, 0x85, 0xb1, 0x28, 0x7c, 0x72, 0xa5, 0x84, 0xc3, 0x31, 0xfd, 0x63, 0xe5, 0x87, 0x22, 0x3d, 0x36, 0x59, 0x39, 0x89, 0x4b, 0x45, 0xcf, 0x51, 0x22, 0x8b, 0x83, 0x4c, 0xd7, 0x4a, 0x3f, 0x8d, 0x55, 0x53, 0x38, 0x43, 0x82, 0x9c, 0xec, 0x2e, 0x8f, 0xc6, 0x17, 0x9d, 0x65, 0x2e, 0xf3, 0xc0, 0x8e, 0x9d, 0x5c, 0x2e, 0x63, 0xbc, 0x0d, 0x9d, 0x1f, 0x2d, 0x95, 0xb7, 0xaf, 0x9c, 0xcf, 0x2c, 0xaa, 0xb3, 0x4f, 0x9c, 0x29, 0x2b, 0x97, 0xae, 0xad, 0x9a, 0x6a, 0x2a, 0x22, 0xa8, 0xef, 0x98, 0x9c, 0x28, 0x95, 0xa3, 0x35, 0x96, 0xae, 0x27, 0x3d, 0x9c, 0x92, 0x94, 0xbe, 0x26, 0x82, 0x94, 0x0d, 0x93, 0x20, 0x26, 0xd8, 0x89, 0xfa, 0x92, 0x49, 0x28, 0xed, 0x7e, 0x0f, 0x91, 0x2b, 0x2b, 0xc1, 0x70, 0x7d, 0x91, 0x66, 0x35, 0x27, 0x63, 0xeb, 0x92, 0xcc, 0x3e, 0x0c, 0x59, 0x67, 0x93, 0xa1, 0x45, 0x06, 0x50, 0xca, 0x95, 0x88, 0x4b, 0x97, 0x49, 0x27, 0xa4, 0xaa, 0x2c, 0x45, 0xc7, 0x7c, 0xa5, 0x3c, 0x2c, 0xde, 0xc1, 0xe9, 0xa5, 0x4a, 0x2c, 0x51, 0xbd, 0x54, 0xa5, 0x21, 0x2b, 0x86, 0xb8, 0xfc, 0xa4, 0xe4, 0x2a, 0x94, 0xb4, 0xb2, 0xa4, 0x9c, 0x29, 0xa0, 0xb0, 0x62, 0xa3, 0xa7, 0x28, 0xd5, 0xab, 0x19, 0xa2, 0x6b, 0x27, 0xf4, 0xa5, 0xaf, 0xa0, 0xdf, 0x26, 0xfc, 0xa0, 0x39, 0x9f, 0x3a, 0x26, 0xcf, 0x98, 0xa1, 0x9d, 
0x86, 0x26, 0x2e, 0x90, 0xc1, 0x9c, 0x76, 0x27, 0xb9, 0x86, 0x76, 0x9b, 0xe6, 0x2a, 0x97, 0x7b, 0x2d, 0x9b, 0x77, 0x2f, 0x5d, 0x6f, 0x0d, 0x9b, 0xb5, 0x36, 0xb3, 0x63, 0x1a, 0x9c, 0x83, 0x3e, 0x53, 0x58, 0xdd, 0x9d, 0x4f, 0x44, 0x68, 0x50, 0x24, 0xad, 0x53, 0x29, 0xae, 0xc8, 0xbb, 0xad, 0xaa, 0x2a, 0x17, 0xc3, 0x3e, 0xad, 0x8c, 0x29, 0x8a, 0xbe, 0x8b, 0xad, 0x85, 0x28, 0x3f, 0xba, 0x5a, 0xad, 0x69, 0x27, 0x20, 0xb6, 0x02, 0xad, 0x69, 0x25, 0xb6, 0xb1, 0xcf, 0xad, 0x3f, 0x24, 0xbd, 0xad, 0x01, 0xad, 0x2e, 0x23, 0xa6, 0xa7, 0xd1, 0xad, 0x0a, 0x22, 0x8a, 0xa2, 0xba, 0xac, 0x5d, 0x22, 0x20, 0x9c, 0xac, 0xaa, 0xe1, 0x22, 0x87, 0x95, 0x1b, 0xa8, 0x9e, 0x23, 0x62, 0x8d, 0x14, 0xa6, 0x68, 0x25, 0xe6, 0x83, 0x24, 0xa5, 0x34, 0x2a, 0x3e, 0x78, 0xb7, 0xa5, 0x30, 0x2f, 0x4f, 0x6e, 0x51, 0xa5, 0x2b, 0x36, 0x11, 0x62, 0xcc, 0xa5, 0x35, 0x3d, 0x7b, 0x57, 0xfb, 0x08, 0x6b, 0x87, 0x9b, 0x85, 0x31, 0x0b, 0xb6, 0x8a, 0xea, 0x7b, 0x53, 0x0d, 0x77, 0x8d, 0xea, 0x72, 0xc3, 0x0f, 0xc9, 0x91, 0x52, 0x6a, 0x49, 0x11, 0xce, 0x94, 0xba, 0x61, 0xe9, 0x14, 0x20, 0x98, 0xae, 0x5a, 0x3f, 0x16, 0xe0, 0x9c, 0x78, 0x52, 0x6c, 0x1a, 0x29, 0xa0, 0x27, 0x4b, 0xac, 0x1c, 0xfe, 0xa3, 0x54, 0x45, 0xd4, 0x1f, 0xae, 0xa6, 0x0c, 0x40, 0x50, 0x22, 0x31, 0xa8, 0x42, 0x3c, 0x4e, 0x24, 0xfa, 0xaa, 0x15, 0x38, 0x6f, 0x27, 0xc8, 0xab, 0xca, 0x34, 0xac, 0x2a, 0xc8, 0xad, 0x4c, 0x31, 0x11, 0x2d, 0xd2, 0xaf, 0x01, 0x2c, 0xc5, 0x30, 0xf0, 0xb0, 0xa7, 0x28, 0x25, 0x33, 0xbf, 0xb1, 0xee, 0x24, 0xa8, 0x12, 0xcb, 0x7e, 0xef, 0x89, 0xc4, 0x16, 0x0d, 0x80, 0x80, 0x80, 0x80, 0x16, 0xc7, 0x84, 0x95, 0x77, 0x6f, 0x18, 0x23, 0x88, 0x43, 0x6e, 0x78, 0x1a, 0x45, 0x8c, 0x52, 0x65, 0x8d, 0x1c, 0xab, 0x90, 0x90, 0x5c, 0xfe, 0x1e, 0x84, 0x94, 0xac, 0x55, 0x04, 0x21, 0x1b, 0x98, 0x92, 0x4d, 0x9e, 0x24, 0x72, 0x9c, 0x49, 0x47, 0x4f, 0x28, 0x02, 0x9f, 0xe1, 0x40, 0xca, 0x2a, 0x4f, 0xa2, 0x45, 0x3c, 0xdf, 0x2c, 0xb1, 0xa4, 0x5d, 0x39, 0x3e, 0x2f, 0x36, 0xa6, 0x3e, 0x35, 0xb9, 0x31, 0xf4, 0xa7, 0xf3, 0x32, 0x47, 0x34, 0xe0, 0xa9, 0x8f, 
0x2e, 0xa6, 0x37, 0xcb, 0xab, 0x0d, 0x2a, 0xea, 0x3a, 0xa5, 0xac, 0x5e, 0x27, 0x75, 0x1b, 0x59, 0x77, 0x11, 0x8e, 0x94, 0x1d, 0x20, 0x78, 0xe1, 0x85, 0xa4, 0x1e, 0xf2, 0x7b, 0x36, 0x7c, 0xb7, 0x20, 0xc7, 0x7f, 0x0a, 0x73, 0x5f, 0x22, 0x53, 0x83, 0x4e, 0x6a, 0x1f, 0x24, 0x10, 0x87, 0x8b, 0x60, 0xf1, 0x26, 0x4b, 0x8c, 0x52, 0x58, 0x95, 0x28, 0x8e, 0x90, 0x97, 0x50, 0x66, 0x2b, 0x30, 0x94, 0x8e, 0x4a, 0x1c, 0x2e, 0x22, 0x98, 0x68, 0x43, 0x98, 0x31, 0x30, 0x9b, 0xbe, 0x3e, 0x41, 0x34, 0x0c, 0x9e, 0x87, 0x3a, 0x63, 0x36, 0x99, 0xa0, 0xf2, 0x36, 0xcd, 0x39, 0x1a, 0xa2, 0xfa, 0x33, 0x59, 0x3b, 0xc6, 0xa4, 0xcf, 0x2f, 0xe8, 0x3e, 0x94, 0xa6, 0x48, 0x2c, 0x91, 0x41, 0x41, 0xa7, 0x95, 0x29, 0x49, 0x25, 0x6d, 0x6f, 0x0e, 0x94, 0x00, 0x26, 0xc3, 0x70, 0xbf, 0x8b, 0x1e, 0x27, 0xc4, 0x72, 0xae, 0x82, 0x5b, 0x28, 0xd2, 0x75, 0xc0, 0x78, 0xfc, 0x2a, 0x2d, 0x79, 0x6a, 0x6f, 0x84, 0x2c, 0x48, 0x7e, 0x54, 0x66, 0x0b, 0x2e, 0x10, 0x83, 0x32, 0x5c, 0xec, 0x2f, 0xd7, 0x87, 0xe9, 0x54, 0x72, 0x32, 0x5d, 0x8c, 0x38, 0x4d, 0x36, 0x35, 0x3b, 0x90, 0x79, 0x47, 0x1c, 0x37, 0xe1, 0x94, 0x68, 0x40, 0x8c, 0x3a, 0x8b, 0x97, 0x9c, 0x3c, 0x38, 0x3d, 0x33, 0x9a, 0xa8, 0x38, 0x46, 0x3f, 0xf0, 0x9d, 0xb9, 0x34, 0x88, 0x42, 0xa6, 0xa0, 0x36, 0x31, 0x00, 0x45, 0x2f, 0xa1, 0xac, 0x2d, 0xba, 0x47, 0xbd, 0xa3, 0x0b, 0x2a, 0x6d, 0x2f, 0xb6, 0x66, 0xef, 0x9a, 0x3a, 0x31, 0x11, 0x67, 0xf0, 0x91, 0xa9, 0x31, 0x74, 0x69, 0xde, 0x88, 0xb3, 0x32, 0x40, 0x6c, 0x12, 0x7f, 0xba, 0x33, 0x73, 0x70, 0x0a, 0x75, 0x59, 0x34, 0x65, 0x74, 0x0d, 0x6b, 0xd4, 0x36, 0x1b, 0x78, 0xd3, 0x62, 0x72, 0x38, 0x58, 0x7e, 0x58, 0x59, 0x92, 0x3a, 0x34, 0x83, 0x33, 0x51, 0x32, 0x3c, 0x84, 0x87, 0xb8, 0x4a, 0x9a, 0x3f, 0x36, 0x8c, 0x52, 0x44, 0x0a, 0x41, 0xee, 0x90, 0x63, 0x3e, 0x79, 0x44, 0x21, 0x93, 0x89, 0x3a, 0x64, 0x46, 0x77, 0x96, 0x8b, 0x36, 0x7a, 0x48, 0xe7, 0x99, 0x66, 0x32, 0xae, 0x4b, 0x71, 0x9b, 0xf4, 0x2f, 0x10, 0x4e, 0x27, 0x9e, 0x12, 0x2b, 0xb8, 0x3c, 0xb2, 0x5e, 0x60, 0xa1, 0x0c, 0x3d, 0x96, 0x5f, 0x9b, 0x98, 0xc8, 0x3e, 
0x74, 0x60, 0xd3, 0x90, 0xc3, 0x3d, 0x4d, 0x62, 0xb8, 0x87, 0x2e, 0x3c, 0xca, 0x65, 0x41, 0x7d, 0x52, 0x3d, 0x67, 0x69, 0x8b, 0x72, 0x65, 0x3f, 0x35, 0x6e, 0x91, 0x68, 0xa8, 0x40, 0x8a, 0x73, 0x65, 0x5f, 0x4a, 0x42, 0x59, 0x79, 0x03, 0x56, 0x9a, 0x44, 0xba, 0x7e, 0x3c, 0x4e, 0xc2, 0x46, 0xc6, 0x83, 0x1c, 0x48, 0x75, 0x48, 0xe9, 0x87, 0xae, 0x41, 0xde, 0x4b, 0x43, 0x8b, 0xc7, 0x3c, 0xf7, 0x4d, 0x97, 0x8f, 0x80, 0x38, 0xbe, 0x4f, 0xac, 0x92, 0xb3, 0x34, 0xae, 0x51, 0xeb, 0x95, 0x7e, 0x30, 0xbb, 0x54, 0x7c, 0x97, 0xdd, 0x2d, 0x2d, 0x47, 0xa3, 0x55, 0xf3, 0xa8, 0x3b, 0x49, 0x26, 0x57, 0x0d, 0x9f, 0xf1, 0x48, 0xd0, 0x58, 0x0c, 0x97, 0xb4, 0x48, 0xd5, 0x59, 0x29, 0x8f, 0x77, 0x47, 0xdf, 0x5b, 0x8c, 0x85, 0x43, 0x48, 0x10, 0x5f, 0x49, 0x7a, 0x65, 0x48, 0x39, 0x63, 0xea, 0x6f, 0x37, 0x49, 0x40, 0x69, 0x00, 0x65, 0x80, 0x4b, 0x4c, 0x6e, 0x94, 0x5c, 0xac, 0x4c, 0xce, 0x74, 0x17, 0x54, 0x0c, 0x4e, 0xc0, 0x79, 0x5f, 0x4c, 0x82, 0x51, 0x09, 0x7e, 0xbe, 0x46, 0x17, 0x52, 0xef, 0x83, 0x6b, 0x3f, 0xce, 0x54, 0xfd, 0x87, 0x6b, 0x3b, 0x44, 0x57, 0x20, 0x8b, 0x13, 0x36, 0xf2, 0x59, 0x51, 0x8e, 0x6f, 0x32, 0xc9, 0x5b, 0x8f, 0x91, 0x67, 0x2e, 0xc2, 0x53, 0xe9, 0x4d, 0xcf, 0xaf, 0x9b, 0x55, 0x00, 0x4e, 0xf5, 0xa7, 0x28, 0x56, 0x47, 0x50, 0x4b, 0x9e, 0xd9, 0x54, 0xeb, 0x51, 0x3e, 0x96, 0x81, 0x53, 0xc8, 0x52, 0x5e, 0x8d, 0xe7, 0x52, 0x71, 0x54, 0xcc, 0x83, 0x41, 0x52, 0x32, 0x58, 0xfd, 0x77, 0x9b, 0x53, 0x48, 0x5e, 0xe2, 0x6c, 0x04, 0x53, 0x9d, 0x63, 0xe2, 0x62, 0x10, 0x55, 0x50, 0x69, 0x88, 0x59, 0x96, 0x57, 0x73, 0x6f, 0x6f, 0x51, 0xb0, 0x59, 0x28, 0x74, 0xcb, 0x4a, 0x80, 0x5b, 0x1e, 0x7a, 0x05, 0x43, 0xd3, 0x5d, 0x55, 0x7f, 0x0b, 0x3e, 0x36, 0x5f, 0x21, 0x83, 0x20, 0x39, 0x9d, 0x60, 0xf1, 0x86, 0xbd, 0x35, 0x22, 0x62, 0xe1, 0x8a, 0x24, 0x30, 0x9f, 0x60, 0xa9, 0x46, 0x1b, 0xb6, 0xcb, 0x61, 0xf9, 0x47, 0x64, 0xae, 0xaa, 0x61, 0x99, 0x47, 0xef, 0xa6, 0xd9, 0x61, 0x77, 0x48, 0xb7, 0x9e, 0xd4, 0x60, 0x48, 0x4a, 0x10, 0x95, 0xc2, 0x5f, 0x7e, 0x4b, 0xfc, 0x8c, 0x42, 0x5f, 0x45, 0x4f, 0x3b, 
0x81, 0x88, 0x5d, 0x55, 0x53, 0xa6, 0x74, 0xf3, 0x5d, 0x47, 0x59, 0x6f, 0x69, 0x08, 0x5e, 0xec, 0x5f, 0xd7, 0x5e, 0xf4, 0x60, 0x02, 0x64, 0xe2, 0x56, 0xc1, 0x61, 0xaa, 0x6a, 0x52, 0x4f, 0x1d, 0x63, 0xcb, 0x70, 0x52, 0x48, 0x8e, 0x65, 0x68, 0x75, 0x63, 0x41, 0xbd, 0x67, 0x49, 0x7a, 0x40, 0x3c, 0x75, 0x69, 0x1e, 0x7e, 0xd4, 0x37, 0xaa, 0x6a, 0xc5, 0x82, 0xb0, 0x32, 0xd2, 0x6d, 0xf2, 0x40, 0x9d, 0xbc, 0x14, 0x6f, 0x09, 0x41, 0x75, 0xb4, 0xc1, 0x6f, 0x94, 0x42, 0x05, 0xad, 0x89, 0x6e, 0xf8, 0x42, 0x1f, 0xa6, 0x63, 0x6e, 0x37, 0x42, 0x50, 0x9f, 0x02, 0x6c, 0x02, 0x43, 0x37, 0x95, 0x8a, 0x6a, 0x04, 0x44, 0xbe, 0x8b, 0x3d, 0x68, 0x81, 0x47, 0x5e, 0x7f, 0xaf, 0x68, 0x32, 0x4e, 0x12, 0x72, 0x7e, 0x67, 0xf8, 0x54, 0x37, 0x66, 0x40, 0x68, 0xfa, 0x5a, 0x16, 0x5c, 0x7c, 0x6a, 0xc2, 0x60, 0x06, 0x54, 0x73, 0x6c, 0x3e, 0x65, 0x8b, 0x4d, 0x3b, 0x6e, 0x12, 0x6b, 0x73, 0x46, 0xaa, 0x70, 0x06, 0x70, 0xfd, 0x40, 0x00, 0x71, 0x83, 0x75, 0xc6, 0x3a, 0xc0, 0x73, 0x14, 0x7a, 0x56, 0x35, 0x7a, 0x7b, 0x0c, 0x3c, 0x47, 0xc0, 0x26, 0x7b, 0x68, 0x3c, 0x88, 0xb9, 0x73, 0x7b, 0xe4, 0x3c, 0xe8, 0xb2, 0xf5, 0x7b, 0xa7, 0x3d, 0x0b, 0xac, 0x37, 0x7a, 0x9e, 0x3c, 0xb9, 0xa5, 0x64, 0x79, 0x4b, 0x3c, 0x78, 0x9e, 0x18, 0x77, 0x1d, 0x3d, 0x0a, 0x94, 0xba, 0x75, 0x61, 0x3e, 0xf6, 0x89, 0xe3, 0x73, 0x91, 0x42, 0x24, 0x7d, 0x45, 0x72, 0x8f, 0x48, 0x1f, 0x6f, 0xd2, 0x73, 0x7b, 0x4f, 0x67, 0x63, 0xab, 0x73, 0xcc, 0x55, 0x0f, 0x5a, 0x35, 0x74, 0xe8, 0x5a, 0xce, 0x52, 0x22, 0x76, 0xc0, 0x61, 0x2f, 0x4b, 0x82, 0x78, 0x38, 0x66, 0xef, 0x45, 0x08, 0x79, 0xea, 0x6c, 0x5e, 0x3e, 0xae, 0x7b, 0xa5, 0x71, 0x8f, 0x38, 0xdb, 0x86, 0x2b, 0x39, 0x21, 0xc2, 0xa4, 0x86, 0x76, 0x39, 0x38, 0xbc, 0x70, 0x86, 0x73, 0x38, 0xe6, 0xb6, 0xbe, 0x86, 0x5c, 0x38, 0x75, 0xb1, 0x20, 0x85, 0x46, 0x37, 0xdd, 0xaa, 0xbc, 0x83, 0xe3, 0x37, 0x23, 0xa4, 0x1d, 0x82, 0x5b, 0x36, 0xaf, 0x9c, 0x8f, 0x80, 0xb2, 0x37, 0x11, 0x93, 0x37, 0x7f, 0x72, 0x39, 0x2c, 0x87, 0xf2, 0x7e, 0xaf, 0x3d, 0x67, 0x7b, 0x27, 0x7d, 0x63, 0x43, 0x34, 0x6d, 0x46, 0x7d, 
0xcb, 0x4a, 0x68, 0x60, 0xa0, 0x7e, 0x7a, 0x50, 0xf7, 0x57, 0xde, 0x7e, 0xd8, 0x56, 0x57, 0x4f, 0xdc, 0x80, 0xd8, 0x5d, 0x22, 0x49, 0xc3, 0x82, 0x78, 0x63, 0x11, 0x43, 0x79, 0x84, 0x08, 0x68, 0x8a, 0x3c, 0xf4, 0x90, 0x19, 0x36, 0xf1, 0xc4, 0x54, 0x90, 0x87, 0x37, 0x1d, 0xbe, 0x4f, 0x90, 0x85, 0x36, 0x8f, 0xb9, 0x2a, 0x90, 0x70, 0x35, 0xde, 0xb4, 0x1e, 0x90, 0x16, 0x35, 0x0c, 0xae, 0xfb, 0x8e, 0x85, 0x34, 0x55, 0xa8, 0x99, 0x8c, 0xc9, 0x33, 0x83, 0xa2, 0x27, 0x8b, 0x03, 0x33, 0x46, 0x9a, 0x1b, 0x89, 0x41, 0x33, 0x7a, 0x91, 0x0c, 0x88, 0x6c, 0x35, 0xdf, 0x85, 0x1b, 0x87, 0xd1, 0x39, 0xea, 0x78, 0x1d, 0x87, 0x95, 0x40, 0x6a, 0x6a, 0xd2, 0x87, 0x94, 0x47, 0x21, 0x5e, 0xb7, 0x88, 0x9b, 0x4e, 0x1e, 0x56, 0x4b, 0x89, 0x4f, 0x53, 0xc5, 0x4e, 0xe2, 0x8a, 0xe9, 0x59, 0xe2, 0x48, 0x5c, 0x8c, 0x81, 0x5f, 0xb4, 0x41, 0x94, 0x98, 0x52, 0x35, 0x14, 0xc5, 0xaa, 0x98, 0xe9, 0x35, 0x6c, 0xbf, 0xae, 0x98, 0xef, 0x34, 0xfb, 0xba, 0xd7, 0x98, 0xea, 0x34, 0x83, 0xb6, 0x09, 0x98, 0xd7, 0x33, 0xed, 0xb1, 0x53, 0x97, 0xe9, 0x33, 0x45, 0xab, 0xb0, 0x96, 0x83, 0x32, 0x7a, 0xa5, 0x9e, 0x94, 0xf8, 0x31, 0x96, 0x9f, 0x65, 0x93, 0x7a, 0x31, 0x82, 0x97, 0x14, 0x92, 0x0b, 0x31, 0xbb, 0x8e, 0x2c, 0x91, 0xc4, 0x34, 0x57, 0x82, 0x27, 0x91, 0x61, 0x38, 0x70, 0x75, 0x5f, 0x91, 0x7b, 0x3e, 0xe3, 0x68, 0xe7, 0x91, 0xa2, 0x45, 0x6e, 0x5d, 0x9e, 0x92, 0x7d, 0x4c, 0x17, 0x55, 0x45, 0x93, 0x7b, 0x51, 0xc4, 0x4d, 0xf8, 0x94, 0xd7, 0x57, 0x52, 0x46, 0xd6, 0xa0, 0x73, 0x33, 0x63, 0xc6, 0xe4, 0xa1, 0x08, 0x33, 0xc5, 0xc0, 0xff, 0xa1, 0x29, 0x33, 0x86, 0xbc, 0x3d, 0xa1, 0x31, 0x33, 0x33, 0xb7, 0xa5, 0xa1, 0x31, 0x32, 0xd3, 0xb3, 0x1a, 0xa0, 0xe5, 0x32, 0x71, 0xae, 0x3d, 0x9f, 0xd6, 0x31, 0xf7, 0xa8, 0x74, 0x9e, 0x76, 0x31, 0x36, 0xa2, 0x98, 0x9d, 0x0b, 0x30, 0xc2, 0x9b, 0xb6, 0x9b, 0xaf, 0x30, 0xb7, 0x93, 0xbc, 0x9a, 0xba, 0x31, 0xb2, 0x8a, 0x4f, 0x9a, 0x69, 0x34, 0x15, 0x7f, 0x42, 0x9a, 0x6a, 0x38, 0x1b, 0x73, 0x1b, 0x9a, 0xad, 0x3e, 0x7c, 0x67, 0x2f, 0x9a, 0xe3, 0x44, 0xcb, 0x5c, 0x85, 0x9b, 0xad, 0x4a, 0x99, 
0x54, 0x40, 0x9c, 0xe4, 0x50, 0x0a, 0x4c, 0xbc, 0xa8, 0x03, 0x31, 0xa6, 0xc7, 0xc7, 0xa8, 0x86, 0x32, 0x08, 0xc2, 0x29, 0xa8, 0xa8, 0x31, 0xe4, 0xbd, 0x4c, 0xa8, 0x92, 0x31, 0x72, 0xb8, 0xcf, 0xa8, 0x71, 0x30, 0xf6, 0xb4, 0x5c, 0xa8, 0x46, 0x30, 0x6f, 0xaf, 0xf8, 0xa7, 0x81, 0x30, 0x01, 0xaa, 0x9d, 0xa6, 0xb0, 0x2f, 0x62, 0xa5, 0x36, 0xa5, 0xca, 0x2e, 0xb9, 0x9f, 0xba, 0xa4, 0xcc, 0x2e, 0xef, 0x98, 0x32, 0xa3, 0xb8, 0x2f, 0x30, 0x90, 0x98, 0xa3, 0x1b, 0x31, 0x0e, 0x87, 0x01, 0xa2, 0xd1, 0x33, 0x86, 0x7c, 0x99, 0xa2, 0xfe, 0x37, 0x61, 0x71, 0x32, 0xa3, 0x24, 0x3d, 0x98, 0x65, 0xc4, 0xa3, 0x78, 0x43, 0xa8, 0x5b, 0x70, 0xa4, 0x44, 0x49, 0x14, 0x52, 0xd8, 0xaf, 0xb1, 0x2f, 0xef, 0xc8, 0xb5, 0xb0, 0x3c, 0x30, 0x76, 0xc3, 0x16, 0xb0, 0x6a, 0x30, 0x65, 0xbe, 0x36, 0xb0, 0x35, 0x2f, 0xc4, 0xb9, 0xcb, 0xb0, 0x00, 0x2e, 0xd6, 0xb5, 0x80, 0xaf, 0xd8, 0x2d, 0xd7, 0xb1, 0x39, 0xaf, 0x97, 0x2d, 0x19, 0xac, 0x6e, 0xaf, 0x58, 0x2c, 0x64, 0xa7, 0x6b, 0xaf, 0x23, 0x2b, 0x96, 0xa2, 0x5f, 0xae, 0x92, 0x2b, 0x5a, 0x9c, 0x51, 0xad, 0xb6, 0x2b, 0xbd, 0x94, 0xee, 0xac, 0x6d, 0x2c, 0xa4, 0x8d, 0x44, 0xab, 0x1d, 0x2f, 0x4e, 0x84, 0x31, 0xaa, 0xbc, 0x32, 0x18, 0x7a, 0x63, 0xaa, 0xf3, 0x36, 0x01, 0x6f, 0xa8, 0xab, 0x03, 0x3c, 0x03, 0x64, 0xa6, 0xab, 0x8e, 0x42, 0x18, 0x5a, 0x35, 0x0e, 0xed, 0x8d, 0xe7, 0x88, 0x64, 0x11, 0x77, 0x90, 0x3a, 0x7f, 0xa1, 0x13, 0x65, 0x93, 0x22, 0x77, 0x12, 0x15, 0x32, 0x95, 0xfb, 0x6e, 0xc0, 0x17, 0x0b, 0x99, 0x55, 0x66, 0x24, 0x18, 0xee, 0x9c, 0xb7, 0x5d, 0xa2, 0x1b, 0x9c, 0xa0, 0x6d, 0x55, 0xb2, 0x1e, 0x0f, 0xa3, 0xd3, 0x4e, 0xb8, 0x20, 0x82, 0xa7, 0x08, 0x49, 0x21, 0x23, 0x42, 0xa9, 0x96, 0x43, 0x20, 0x25, 0xfb, 0xab, 0xab, 0x3e, 0x3c, 0x28, 0xe8, 0xad, 0x57, 0x3a, 0x52, 0x2b, 0xd1, 0xae, 0xf4, 0x36, 0x93, 0x2e, 0xfb, 0xb0, 0x43, 0x33, 0x12, 0x31, 0xcc, 0xb1, 0x80, 0x2f, 0x72, 0x34, 0x8e, 0xb2, 0xe4, 0x2b, 0x58, 0x37, 0x47, 0xb4, 0x17, 0x27, 0x9f, 0x18, 0x2d, 0x85, 0xc9, 0x8c, 0xc9, 0x1a, 0xb9, 0x87, 0x8a, 0x84, 0x15, 0x1c, 0x91, 0x8a, 0x3f, 0x7b, 0x3f, 0x1e, 
0x2a, 0x8d, 0x72, 0x72, 0x43, 0x20, 0x27, 0x91, 0x24, 0x69, 0x6c, 0x21, 0xbc, 0x94, 0x8e, 0x60, 0xa4, 0x23, 0xa1, 0x98, 0x7b, 0x58, 0x93, 0x25, 0xfa, 0x9c, 0x2f, 0x50, 0x65, 0x29, 0x4e, 0x9f, 0xe5, 0x49, 0xe2, 0x2b, 0xdc, 0xa3, 0x13, 0x43, 0xb9, 0x2e, 0x45, 0xa5, 0x9d, 0x3e, 0x9d, 0x30, 0xc6, 0xa7, 0x8c, 0x3b, 0x05, 0x33, 0x7e, 0xa9, 0x43, 0x37, 0x8f, 0x36, 0x5c, 0xaa, 0xc8, 0x34, 0x2c, 0x39, 0x53, 0xac, 0x1d, 0x30, 0xcc, 0x3c, 0x3c, 0xad, 0x65, 0x2d, 0x50, 0x3f, 0x11, 0xae, 0x92, 0x2a, 0x01, 0x20, 0x9f, 0x7e, 0x62, 0x91, 0x63, 0x23, 0x1c, 0x7f, 0x9e, 0x88, 0xc8, 0x25, 0xd0, 0x80, 0x80, 0x80, 0x80, 0x27, 0x01, 0x84, 0x7d, 0x77, 0x3d, 0x28, 0x5b, 0x88, 0x22, 0x6e, 0x10, 0x2a, 0x20, 0x8c, 0x4b, 0x64, 0xca, 0x2c, 0x23, 0x90, 0x6e, 0x5c, 0x0d, 0x2d, 0xe7, 0x94, 0x44, 0x53, 0xe8, 0x30, 0x43, 0x98, 0x16, 0x4c, 0xb8, 0x33, 0x37, 0x9b, 0xde, 0x46, 0x31, 0x36, 0x3c, 0x9f, 0x5a, 0x3f, 0xba, 0x38, 0x8d, 0xa1, 0xbb, 0x3c, 0x23, 0x3a, 0xfb, 0xa3, 0xd4, 0x38, 0xa3, 0x3d, 0x97, 0xa5, 0xb4, 0x35, 0x3b, 0x40, 0x51, 0xa7, 0x56, 0x31, 0xdc, 0x42, 0xee, 0xa8, 0xb1, 0x2e, 0x84, 0x45, 0x79, 0xa9, 0xe9, 0x2b, 0x34, 0x2a, 0x39, 0x76, 0x83, 0x96, 0xde, 0x2c, 0x65, 0x77, 0xa4, 0x8e, 0x2a, 0x2e, 0x41, 0x79, 0x1e, 0x85, 0xa8, 0x30, 0x15, 0x7b, 0x23, 0x7c, 0xdf, 0x31, 0x72, 0x7e, 0xce, 0x73, 0x63, 0x32, 0xd2, 0x83, 0x18, 0x6a, 0x01, 0x34, 0x5c, 0x87, 0x3f, 0x60, 0xad, 0x36, 0x25, 0x8b, 0xb5, 0x58, 0x48, 0x38, 0x45, 0x8f, 0xc1, 0x50, 0x1d, 0x3a, 0x9b, 0x93, 0xe1, 0x49, 0xbb, 0x3d, 0x16, 0x97, 0xc3, 0x42, 0xe2, 0x3f, 0x92, 0x9b, 0x0d, 0x3d, 0xd6, 0x42, 0x35, 0x9d, 0xf2, 0x3a, 0x10, 0x44, 0xcf, 0xa0, 0x75, 0x36, 0x7f, 0x47, 0x33, 0xa2, 0x54, 0x33, 0x15, 0x49, 0x94, 0xa3, 0xf9, 0x2f, 0xa3, 0x4c, 0x0f, 0xa5, 0x56, 0x2c, 0x57, 0x34, 0xb9, 0x6e, 0x4e, 0x9c, 0xa9, 0x36, 0xf7, 0x6f, 0x38, 0x94, 0x18, 0x38, 0xaf, 0x70, 0xc0, 0x8b, 0x77, 0x3a, 0x12, 0x72, 0xa6, 0x82, 0xdf, 0x3a, 0xf7, 0x75, 0x88, 0x79, 0x76, 0x3b, 0xe2, 0x79, 0x0b, 0x6f, 0xcb, 0x3d, 0x76, 0x7d, 0xb1, 0x66, 0x68, 0x3f, 0x1f, 0x82, 0x47, 
0x5d, 0x67, 0x40, 0xa1, 0x86, 0xd1, 0x55, 0x11, 0x42, 0x8e, 0x8b, 0x21, 0x4d, 0x97, 0x45, 0x0e, 0x8f, 0x94, 0x47, 0x23, 0x47, 0x3c, 0x93, 0x5f, 0x40, 0xac, 0x49, 0x66, 0x96, 0x86, 0x3c, 0x71, 0x4b, 0xb0, 0x99, 0x75, 0x38, 0x88, 0x4e, 0x0d, 0x9c, 0x2d, 0x34, 0xc3, 0x50, 0x75, 0x9e, 0xae, 0x31, 0x0c, 0x53, 0x07, 0xa0, 0x8a, 0x2d, 0x9b, 0x40, 0xfe, 0x65, 0xd6, 0xa3, 0x0d, 0x42, 0x8e, 0x66, 0xda, 0x9a, 0xd2, 0x43, 0xf1, 0x67, 0xf1, 0x92, 0xaa, 0x44, 0x94, 0x69, 0xbf, 0x89, 0xb1, 0x45, 0x67, 0x6b, 0xf4, 0x80, 0x8b, 0x46, 0x43, 0x6f, 0xd8, 0x76, 0x43, 0x46, 0xdf, 0x73, 0xae, 0x6c, 0xa5, 0x48, 0x0e, 0x78, 0x1e, 0x63, 0x73, 0x49, 0xb5, 0x7c, 0xf6, 0x5a, 0xb7, 0x4b, 0x7f, 0x81, 0xbb, 0x52, 0x6e, 0x4d, 0x06, 0x86, 0x5c, 0x4b, 0x79, 0x4e, 0xf4, 0x8a, 0xd1, 0x45, 0x05, 0x51, 0x30, 0x8e, 0xd6, 0x3f, 0x32, 0x53, 0x39, 0x92, 0x38, 0x3a, 0xf5, 0x55, 0x4a, 0x95, 0x2c, 0x36, 0xe2, 0x57, 0x6f, 0x97, 0xd5, 0x32, 0xf3, 0x59, 0xc2, 0x9a, 0x44, 0x2f, 0x18, 0x4c, 0x7c, 0x5d, 0x48, 0xa9, 0x9e, 0x4f, 0x5d, 0x5e, 0x88, 0xa1, 0x2c, 0x50, 0x6c, 0x5f, 0xb6, 0x99, 0x25, 0x51, 0x29, 0x60, 0xe6, 0x91, 0x35, 0x50, 0xbf, 0x63, 0x1d, 0x87, 0xab, 0x50, 0xae, 0x65, 0xe0, 0x7d, 0xd0, 0x50, 0xf1, 0x69, 0xe2, 0x73, 0x44, 0x51, 0xe5, 0x6e, 0x69, 0x69, 0xc8, 0x53, 0x0b, 0x72, 0xe7, 0x60, 0xde, 0x54, 0x34, 0x77, 0xf2, 0x58, 0x39, 0x55, 0xf6, 0x7c, 0xb4, 0x50, 0x03, 0x57, 0xbb, 0x81, 0xcb, 0x49, 0x7c, 0x59, 0x5e, 0x86, 0x49, 0x42, 0xdf, 0x5b, 0x42, 0x8a, 0x37, 0x3d, 0xab, 0x5d, 0x3e, 0x8d, 0xbd, 0x39, 0x57, 0x5f, 0x36, 0x90, 0xe4, 0x35, 0x18, 0x61, 0x27, 0x93, 0xb5, 0x30, 0xc0, 0x58, 0x38, 0x54, 0xf1, 0xb0, 0x83, 0x5a, 0x05, 0x56, 0x2d, 0xa8, 0x55, 0x5c, 0x2e, 0x57, 0x6d, 0xa0, 0x52, 0x5c, 0x11, 0x58, 0xc8, 0x97, 0xf4, 0x5c, 0x2d, 0x5a, 0x39, 0x8f, 0x92, 0x5b, 0xd2, 0x5d, 0x03, 0x85, 0x34, 0x5b, 0xc2, 0x60, 0xbc, 0x7a, 0x65, 0x5b, 0xa9, 0x64, 0xa2, 0x70, 0x01, 0x5c, 0x4d, 0x69, 0x4f, 0x66, 0xab, 0x5d, 0x93, 0x6d, 0xe9, 0x5e, 0x1b, 0x5e, 0xeb, 0x73, 0x31, 0x55, 0xec, 0x60, 0x6e, 0x78, 0x14, 0x4e, 0x22, 0x62, 
0x27, 0x7d, 0x27, 0x47, 0x78, 0x63, 0xdc, 0x81, 0xdd, 0x40, 0xe2, 0x65, 0x68, 0x85, 0xbf, 0x3c, 0x05, 0x67, 0x1e, 0x89, 0x4f, 0x37, 0x63, 0x68, 0xf7, 0x8c, 0xa6, 0x32, 0xb6, 0x64, 0xeb, 0x4d, 0x30, 0xb6, 0xf1, 0x67, 0x5b, 0x4e, 0xbb, 0xae, 0x4a, 0x68, 0xb1, 0x50, 0x02, 0xa6, 0x7e, 0x69, 0xc4, 0x51, 0x47, 0x9e, 0xdd, 0x68, 0xa8, 0x52, 0xa4, 0x96, 0x6b, 0x67, 0xd9, 0x54, 0x42, 0x8d, 0xa2, 0x66, 0xfe, 0x56, 0xcf, 0x83, 0x4d, 0x66, 0x38, 0x5a, 0xee, 0x77, 0xdb, 0x66, 0x65, 0x5f, 0xec, 0x6c, 0xc4, 0x66, 0xea, 0x64, 0x70, 0x63, 0x88, 0x67, 0xf7, 0x69, 0x2f, 0x5b, 0x3c, 0x69, 0x7c, 0x6e, 0x6b, 0x53, 0x7f, 0x6a, 0xee, 0x73, 0x84, 0x4c, 0x3b, 0x6c, 0x68, 0x78, 0x71, 0x45, 0x7d, 0x6e, 0x01, 0x7d, 0x2b, 0x3f, 0x2c, 0x6f, 0x8a, 0x81, 0x6f, 0x3a, 0x30, 0x71, 0x0d, 0x85, 0x1f, 0x35, 0x24, 0x71, 0x59, 0x47, 0x16, 0xbc, 0xe0, 0x73, 0x34, 0x48, 0x3c, 0xb4, 0xda, 0x74, 0x67, 0x49, 0x28, 0xad, 0x2b, 0x74, 0xbf, 0x49, 0xe3, 0xa5, 0xdf, 0x74, 0xf8, 0x4a, 0xce, 0x9e, 0x48, 0x74, 0x03, 0x4c, 0x3b, 0x95, 0x72, 0x73, 0x31, 0x4e, 0x0e, 0x8c, 0x19, 0x72, 0x54, 0x50, 0x6f, 0x81, 0xb9, 0x71, 0x12, 0x55, 0x51, 0x75, 0x9c, 0x70, 0xe3, 0x5a, 0x64, 0x6a, 0x5c, 0x71, 0x89, 0x5f, 0x4f, 0x60, 0xa0, 0x72, 0x98, 0x64, 0x66, 0x58, 0xc2, 0x73, 0xdd, 0x69, 0x68, 0x51, 0x18, 0x75, 0x65, 0x6e, 0xff, 0x4a, 0x66, 0x76, 0xb8, 0x73, 0xde, 0x43, 0xb9, 0x78, 0x16, 0x78, 0x78, 0x3d, 0x92, 0x79, 0x93, 0x7c, 0xf9, 0x37, 0xfd, 0x7f, 0x0c, 0x42, 0x1f, 0xc0, 0xc3, 0x7f, 0xd6, 0x42, 0xf4, 0xb9, 0xd0, 0x80, 0xa2, 0x43, 0xd2, 0xb2, 0xd5, 0x80, 0xc9, 0x44, 0x5d, 0xab, 0xcf, 0x80, 0x84, 0x44, 0xb2, 0xa4, 0xdc, 0x7f, 0xfb, 0x45, 0x3d, 0x9d, 0x59, 0x7e, 0x91, 0x46, 0x32, 0x94, 0x87, 0x7d, 0x56, 0x47, 0xdd, 0x8a, 0xaf, 0x7c, 0x51, 0x4a, 0x1d, 0x7f, 0xf8, 0x7b, 0xf2, 0x50, 0x13, 0x73, 0x55, 0x7b, 0xc8, 0x55, 0x73, 0x67, 0xcb, 0x7c, 0x3a, 0x5a, 0x5e, 0x5e, 0x37, 0x7d, 0x14, 0x5f, 0xc7, 0x56, 0x59, 0x7e, 0x1e, 0x64, 0xd9, 0x4e, 0xfe, 0x7f, 0x82, 0x6a, 0x64, 0x48, 0xa6, 0x80, 0xfb, 0x6f, 0x75, 0x42, 0x21, 0x82, 0x45, 0x74, 0x42, 
0x3b, 0xc1, 0x8a, 0x0f, 0x3e, 0x66, 0xc3, 0x42, 0x8a, 0xfe, 0x3e, 0xf3, 0xbc, 0xa8, 0x8b, 0x91, 0x3f, 0x62, 0xb6, 0x98, 0x8c, 0x4a, 0x40, 0x07, 0xb0, 0x65, 0x8b, 0x98, 0x40, 0x0f, 0xa9, 0xd0, 0x8a, 0xd2, 0x40, 0x10, 0xa3, 0x32, 0x89, 0xb5, 0x40, 0x6b, 0x9b, 0x9d, 0x88, 0x43, 0x41, 0x23, 0x93, 0x0b, 0x87, 0x50, 0x43, 0x06, 0x88, 0xa8, 0x86, 0x85, 0x45, 0xe3, 0x7d, 0x3d, 0x85, 0xfb, 0x4b, 0x21, 0x70, 0xb3, 0x86, 0x85, 0x51, 0x5b, 0x64, 0x99, 0x86, 0xa7, 0x56, 0x7f, 0x5b, 0xd2, 0x87, 0x2a, 0x5b, 0xa7, 0x54, 0x50, 0x88, 0x62, 0x61, 0x05, 0x4d, 0x4e, 0x89, 0xb0, 0x66, 0x74, 0x46, 0xc2, 0x8b, 0x01, 0x6b, 0x94, 0x3f, 0xe7, 0x93, 0x45, 0x3b, 0xb3, 0xc5, 0x53, 0x94, 0x37, 0x3c, 0x35, 0xbe, 0xb3, 0x94, 0x82, 0x3c, 0x35, 0xb9, 0x42, 0x94, 0xd0, 0x3c, 0x3e, 0xb3, 0xcb, 0x94, 0xc9, 0x3c, 0x36, 0xae, 0x1c, 0x93, 0xf0, 0x3c, 0x08, 0xa7, 0xbf, 0x93, 0x0c, 0x3b, 0xce, 0xa1, 0x5f, 0x91, 0xfe, 0x3c, 0x26, 0x99, 0x86, 0x90, 0xdc, 0x3c, 0xb6, 0x91, 0x32, 0x90, 0xd9, 0x3f, 0x28, 0x86, 0x1e, 0x90, 0x79, 0x42, 0xb3, 0x7a, 0x3b, 0x8f, 0xee, 0x47, 0xc5, 0x6e, 0x13, 0x90, 0x75, 0x4d, 0xc7, 0x61, 0x50, 0x90, 0xd1, 0x53, 0x79, 0x59, 0xca, 0x91, 0x47, 0x58, 0x82, 0x52, 0xc1, 0x92, 0x64, 0x5d, 0xb4, 0x4b, 0xa9, 0x93, 0xc0, 0x63, 0x06, 0x44, 0x74, 0x9b, 0x72, 0x39, 0x82, 0xc6, 0xcb, 0x9c, 0x99, 0x3a, 0x16, 0xc0, 0x36, 0x9c, 0xd7, 0x39, 0xfb, 0xbb, 0x29, 0x9d, 0x0f, 0x39, 0xe3, 0xb6, 0x21, 0x9d, 0x42, 0x39, 0xc5, 0xb1, 0x23, 0x9c, 0xb1, 0x39, 0x8e, 0xab, 0x55, 0x9b, 0xd0, 0x39, 0x4a, 0xa5, 0x38, 0x9a, 0xe0, 0x39, 0x0f, 0x9e, 0xdf, 0x99, 0xe1, 0x39, 0x66, 0x96, 0xf0, 0x98, 0xf1, 0x3a, 0x06, 0x8e, 0xa2, 0x99, 0x01, 0x3c, 0xec, 0x83, 0x4c, 0x99, 0x38, 0x40, 0xec, 0x77, 0x59, 0x98, 0xef, 0x45, 0xeb, 0x6b, 0xb4, 0x99, 0x3d, 0x4b, 0xb4, 0x5f, 0xb7, 0x99, 0xf6, 0x51, 0x3e, 0x58, 0x0d, 0x9a, 0x9d, 0x56, 0x0a, 0x51, 0x21, 0x9c, 0x03, 0x5b, 0x03, 0x49, 0x9f, 0xa3, 0x4c, 0x37, 0x76, 0xc8, 0x1d, 0xa4, 0x37, 0x37, 0xfb, 0xc1, 0xd3, 0xa4, 0x88, 0x37, 0xee, 0xbc, 0xb9, 0xa4, 0xad, 0x37, 0xc3, 0xb7, 0xea, 0xa4, 
0xcb, 0x37, 0x8f, 0xb3, 0x26, 0xa4, 0xa8, 0x37, 0x5a, 0xae, 0x15, 0xa3, 0xfc, 0x37, 0x23, 0xa8, 0x4a, 0xa3, 0x43, 0x36, 0xea, 0xa2, 0x78, 0xa2, 0x84, 0x36, 0xfc, 0x9b, 0xb3, 0xa1, 0xc3, 0x37, 0x58, 0x94, 0x1e, 0xa1, 0x3d, 0x38, 0x99, 0x8b, 0x48, 0xa1, 0x2b, 0x3b, 0x51, 0x80, 0x8a, 0xa1, 0xb6, 0x3f, 0x3b, 0x74, 0xe8, 0xa1, 0xa7, 0x44, 0x74, 0x69, 0xb4, 0xa1, 0xe2, 0x4a, 0x0e, 0x5e, 0x8b, 0xa2, 0x9d, 0x4f, 0x26, 0x56, 0x89, 0xa3, 0x87, 0x53, 0xe4, 0x4f, 0x22, 0xaa, 0xdd, 0x35, 0x8b, 0xc9, 0x11, 0xab, 0x91, 0x36, 0x06, 0xc3, 0x33, 0xab, 0xef, 0x36, 0x12, 0xbd, 0xff, 0xab, 0xfc, 0x35, 0xcd, 0xb9, 0x50, 0xab, 0xfe, 0x35, 0x79, 0xb4, 0xb1, 0xab, 0xfa, 0x35, 0x20, 0xb0, 0x19, 0xab, 0x77, 0x34, 0xef, 0xaa, 0xab, 0xaa, 0xe6, 0x34, 0xb9, 0xa5, 0x35, 0xaa, 0x48, 0x34, 0x7d, 0x9f, 0xaf, 0xa9, 0xb1, 0x34, 0xd6, 0x98, 0x5d, 0xa9, 0x16, 0x35, 0x33, 0x91, 0x0b, 0xa8, 0xc0, 0x36, 0xda, 0x87, 0xf9, 0xa8, 0xa1, 0x39, 0x25, 0x7e, 0x25, 0xa8, 0xeb, 0x3d, 0x4b, 0x73, 0x1a, 0xa9, 0x15, 0x42, 0x8c, 0x68, 0x1d, 0xa9, 0x84, 0x47, 0xf7, 0x5d, 0x83, 0xaa, 0x71, 0x4c, 0xfe, 0x54, 0xc6, 0xb3, 0x22, 0x33, 0x68, 0xca, 0x2b, 0xb3, 0x47, 0x34, 0x19, 0xc4, 0x82, 0xb3, 0xad, 0x34, 0x5f, 0xbf, 0x3d, 0xb3, 0xa4, 0x34, 0x02, 0xba, 0xaa, 0xb3, 0x89, 0x33, 0x97, 0xb6, 0x26, 0xb3, 0x71, 0x33, 0x1d, 0xb1, 0xad, 0xb3, 0x21, 0x32, 0xce, 0xac, 0xc8, 0xb2, 0xc5, 0x32, 0x93, 0xa7, 0xa6, 0xb2, 0x5b, 0x32, 0x58, 0xa2, 0x80, 0xb1, 0xd1, 0x32, 0x66, 0x9c, 0x6e, 0xb1, 0x73, 0x32, 0xba, 0x95, 0x55, 0xb0, 0xe9, 0x33, 0x59, 0x8d, 0xfc, 0xb0, 0x93, 0x35, 0x03, 0x85, 0x4f, 0xb0, 0x5d, 0x37, 0x70, 0x7b, 0xfb, 0xb0, 0x90, 0x3b, 0x8e, 0x71, 0x7f, 0xb0, 0xa9, 0x40, 0xb5, 0x66, 0xc4, 0xb1, 0x41, 0x45, 0xf7, 0x5c, 0x48, 0x15, 0x13, 0x94, 0x44, 0x8c, 0x12, 0x17, 0x79, 0x96, 0x25, 0x83, 0xc9, 0x19, 0x58, 0x98, 0x89, 0x7b, 0x84, 0x1a, 0xdf, 0x9b, 0x20, 0x73, 0x51, 0x1c, 0x5a, 0x9e, 0x00, 0x6a, 0xa6, 0x1d, 0x9c, 0xa0, 0xde, 0x61, 0x3c, 0x20, 0x01, 0xa4, 0x77, 0x59, 0xe3, 0x22, 0x07, 0xa7, 0xde, 0x52, 0x5a, 0x24, 0x76, 0xaa, 0xc9, 
0x4c, 0x22, 0x27, 0x02, 0xad, 0x41, 0x46, 0x03, 0x29, 0xbe, 0xaf, 0x35, 0x3f, 0xf6, 0x2c, 0xbd, 0xb0, 0xb8, 0x3c, 0x17, 0x2f, 0x24, 0xb2, 0x61, 0x38, 0x75, 0x32, 0x19, 0xb3, 0x7d, 0x34, 0xff, 0x35, 0x5a, 0xb4, 0x3d, 0x31, 0x8f, 0x38, 0x33, 0xb5, 0x49, 0x2d, 0xeb, 0x3b, 0x0c, 0xb6, 0x61, 0x2a, 0x6a, 0x1d, 0x9e, 0x8c, 0x70, 0x90, 0x49, 0x20, 0x0b, 0x8e, 0x49, 0x87, 0x7f, 0x22, 0x35, 0x90, 0x53, 0x7e, 0xdc, 0x23, 0xd9, 0x93, 0x03, 0x76, 0x77, 0x25, 0x5a, 0x95, 0xcd, 0x6e, 0x07, 0x26, 0xe1, 0x99, 0x09, 0x64, 0xf2, 0x28, 0xa1, 0x9c, 0x68, 0x5c, 0x27, 0x2b, 0x01, 0xa0, 0x02, 0x53, 0xc0, 0x2d, 0x4d, 0xa3, 0x6c, 0x4c, 0xe8, 0x2f, 0xc4, 0xa6, 0x78, 0x46, 0xa7, 0x32, 0x31, 0xa8, 0xfa, 0x40, 0x78, 0x34, 0xe3, 0xaa, 0xbe, 0x3c, 0xd4, 0x37, 0xc3, 0xac, 0x46, 0x39, 0x5e, 0x3a, 0xbf, 0xad, 0x96, 0x36, 0x06, 0x3d, 0xca, 0xae, 0xb3, 0x32, 0xbb, 0x40, 0xb8, 0xaf, 0xb4, 0x2f, 0x70, 0x43, 0x1c, 0xb0, 0xf8, 0x2c, 0x02, 0x25, 0xd7, 0x85, 0x55, 0x94, 0xea, 0x28, 0x87, 0x86, 0x72, 0x8c, 0x39, 0x2a, 0xf0, 0x87, 0xd1, 0x83, 0xcd, 0x2c, 0xa5, 0x8a, 0x62, 0x7a, 0xee, 0x2d, 0xf2, 0x8d, 0x7b, 0x71, 0xc7, 0x2f, 0xa5, 0x91, 0x21, 0x68, 0xa5, 0x31, 0x4f, 0x94, 0x62, 0x5f, 0xb5, 0x33, 0x2e, 0x98, 0x1e, 0x57, 0x97, 0x35, 0x4b, 0x9b, 0xa9, 0x4f, 0x73, 0x38, 0x1e, 0x9f, 0x5f, 0x48, 0xd8, 0x3a, 0x72, 0xa2, 0x6d, 0x42, 0x5b, 0x3c, 0xc9, 0xa4, 0xc1, 0x3d, 0xf0, 0x3f, 0x59, 0xa6, 0xb6, 0x3a, 0x73, 0x42, 0x04, 0xa8, 0x5e, 0x37, 0x1d, 0x44, 0xa8, 0xa9, 0xcf, 0x33, 0xca, 0x47, 0x33, 0xab, 0x17, 0x30, 0x61, 0x49, 0xb1, 0xac, 0x4b, 0x2d, 0x10, 0x2e, 0x6b, 0x7e, 0x09, 0x99, 0xd8, 0x30, 0xf1, 0x7e, 0xf5, 0x90, 0xf9, 0x33, 0xa6, 0x7f, 0xd0, 0x88, 0xae, 0x36, 0x71, 0x80, 0x80, 0x80, 0x80, 0x37, 0x81, 0x84, 0x63, 0x77, 0x1d, 0x38, 0xb7, 0x87, 0xee, 0x6d, 0xd9, 0x3a, 0x4a, 0x8b, 0xd8, 0x64, 0x9c, 0x3c, 0x12, 0x8f, 0xbe, 0x5b, 0xe4, 0x3d, 0xb7, 0x93, 0x84, 0x53, 0xb8, 0x3f, 0xc0, 0x97, 0x5a, 0x4c, 0x68, 0x42, 0x23, 0x9b, 0x00, 0x45, 0xda, 0x44, 0x9c, 0x9e, 0x2c, 0x3f, 0xab, 0x47, 0x16, 0xa0, 0xc9, 0x3c, 0x05, 0x49, 
0x6b, 0xa2, 0xdd, 0x38, 0x8b, 0x4b, 0xbe, 0xa4, 0xac, 0x35, 0x16, 0x4e, 0x05, 0xa6, 0x44, 0x31, 0x9b, 0x50, 0x68, 0xa7, 0xae, 0x2e, 0x22, 0x38, 0xda, 0x75, 0xbb, 0x9f, 0x72, 0x3b, 0x57, 0x76, 0x8a, 0x96, 0xe4, 0x3d, 0xc2, 0x77, 0x6e, 0x8e, 0x86, 0x3f, 0xbc, 0x78, 0xf8, 0x86, 0x02, 0x41, 0x83, 0x7b, 0x00, 0x7d, 0x22, 0x42, 0x8f, 0x7e, 0x79, 0x73, 0x86, 0x43, 0xd3, 0x82, 0x77, 0x6a, 0x44, 0x45, 0x2c, 0x86, 0x54, 0x61, 0x38, 0x46, 0xa4, 0x8a, 0x9d, 0x58, 0xda, 0x48, 0x60, 0x8e, 0x86, 0x50, 0xb9, 0x4a, 0x66, 0x92, 0xac, 0x4a, 0x3e, 0x4c, 0x7c, 0x96, 0x53, 0x43, 0xc0, 0x4e, 0xa4, 0x99, 0x79, 0x3e, 0x76, 0x50, 0xe4, 0x9c, 0x51, 0x3a, 0x8a, 0x53, 0x3f, 0x9e, 0xdf, 0x36, 0xc5, 0x55, 0x6f, 0xa0, 0xf9, 0x33, 0x20, 0x57, 0xa1, 0xa2, 0xb9, 0x2f, 0x6c, 0x44, 0x29, 0x6d, 0x4a, 0xa5, 0x3b, 0x46, 0xb5, 0x6d, 0xf9, 0x9c, 0xe0, 0x48, 0xc0, 0x6e, 0xff, 0x94, 0x85, 0x4a, 0x7f, 0x70, 0x93, 0x8b, 0xde, 0x4b, 0xe1, 0x72, 0x9f, 0x83, 0x32, 0x4c, 0xc5, 0x75, 0x69, 0x79, 0xe1, 0x4d, 0x88, 0x78, 0xb6, 0x70, 0x4a, 0x4e, 0xd1, 0x7c, 0xdc, 0x67, 0x4a, 0x50, 0x5f, 0x80, 0xd3, 0x5e, 0x8b, 0x51, 0x8f, 0x85, 0x65, 0x56, 0x3f, 0x53, 0x15, 0x89, 0x96, 0x4e, 0x95, 0x54, 0xf1, 0x8d, 0xea, 0x48, 0x3a, 0x56, 0xf6, 0x91, 0xc0, 0x41, 0xb8, 0x58, 0xd5, 0x94, 0xfa, 0x3d, 0x10, 0x5a, 0xcd, 0x97, 0xd0, 0x39, 0x02, 0x5c, 0xd3, 0x9a, 0x5b, 0x35, 0x11, 0x5e, 0xf4, 0x9c, 0xb3, 0x31, 0x16, 0x4f, 0xce, 0x64, 0xbb, 0xab, 0x81, 0x52, 0xcf, 0x65, 0xb0, 0xa3, 0x3d, 0x54, 0xa0, 0x66, 0xc4, 0x9b, 0x19, 0x55, 0xd8, 0x67, 0xfc, 0x92, 0xd6, 0x56, 0x92, 0x6a, 0x01, 0x89, 0xc3, 0x57, 0x78, 0x6c, 0x63, 0x80, 0x90, 0x58, 0x16, 0x6f, 0xec, 0x76, 0xbc, 0x58, 0xab, 0x73, 0x77, 0x6d, 0x72, 0x59, 0xa0, 0x77, 0x8a, 0x64, 0x9d, 0x5a, 0xeb, 0x7b, 0xe0, 0x5c, 0x14, 0x5c, 0xa2, 0x80, 0x6d, 0x53, 0xfd, 0x5d, 0xfc, 0x84, 0xde, 0x4c, 0xb7, 0x5f, 0x8d, 0x89, 0x3b, 0x46, 0x3c, 0x61, 0x60, 0x8d, 0x1b, 0x3f, 0xf7, 0x63, 0x30, 0x90, 0x7c, 0x3b, 0x8e, 0x64, 0xda, 0x93, 0x70, 0x37, 0x29, 0x66, 0xb6, 0x96, 0x30, 0x32, 0xa9, 0x5b, 0x61, 0x5b, 0xe5, 
0xb1, 0xf8, 0x5e, 0x05, 0x5d, 0x0c, 0xa9, 0xc4, 0x60, 0xeb, 0x5e, 0x40, 0xa1, 0xc0, 0x61, 0xe6, 0x5f, 0xc5, 0x99, 0x57, 0x62, 0x65, 0x61, 0x64, 0x90, 0xf0, 0x62, 0x58, 0x63, 0xf8, 0x87, 0x41, 0x62, 0x95, 0x66, 0xcd, 0x7d, 0x96, 0x62, 0xd2, 0x6a, 0x6f, 0x73, 0xb1, 0x63, 0xa1, 0x6e, 0x65, 0x6a, 0x98, 0x64, 0x95, 0x72, 0x52, 0x62, 0x01, 0x65, 0xab, 0x77, 0x01, 0x59, 0xb6, 0x67, 0x2e, 0x7b, 0x72, 0x51, 0xc9, 0x68, 0xcb, 0x80, 0x2f, 0x4a, 0xe7, 0x6a, 0x0e, 0x84, 0x95, 0x44, 0x47, 0x6b, 0x92, 0x88, 0x75, 0x3e, 0x6e, 0x6d, 0x31, 0x8b, 0xf5, 0x39, 0xac, 0x6e, 0xfb, 0x8f, 0x49, 0x34, 0xba, 0x68, 0x34, 0x54, 0x33, 0xb8, 0x06, 0x6b, 0x14, 0x55, 0xbc, 0xaf, 0x5a, 0x6c, 0xa7, 0x56, 0xdd, 0xa7, 0xb9, 0x6e, 0x6f, 0x57, 0xf6, 0xa0, 0x12, 0x6e, 0x11, 0x59, 0x7d, 0x97, 0x97, 0x6d, 0xf0, 0x5b, 0x2c, 0x8e, 0xef, 0x6d, 0x91, 0x5d, 0xec, 0x84, 0xd3, 0x6d, 0x6b, 0x61, 0x76, 0x7a, 0x71, 0x6d, 0x7f, 0x65, 0x33, 0x70, 0x6b, 0x6e, 0x30, 0x69, 0x58, 0x67, 0x90, 0x6f, 0x46, 0x6d, 0x5e, 0x5f, 0x2d, 0x70, 0x7e, 0x72, 0x4f, 0x57, 0x61, 0x71, 0xc1, 0x76, 0xa6, 0x4f, 0xb1, 0x73, 0x15, 0x7b, 0x6c, 0x49, 0x01, 0x74, 0x83, 0x7f, 0xf5, 0x42, 0x5f, 0x75, 0xc9, 0x83, 0xf1, 0x3c, 0xab, 0x77, 0x44, 0x87, 0x9e, 0x37, 0x50, 0x74, 0x76, 0x4d, 0x3b, 0xbd, 0xdd, 0x77, 0x17, 0x4e, 0x95, 0xb5, 0x1c, 0x78, 0xf1, 0x4f, 0xd6, 0xac, 0xec, 0x79, 0xbb, 0x50, 0xf9, 0xa5, 0xba, 0x7a, 0x50, 0x52, 0x26, 0x9e, 0x4c, 0x79, 0xae, 0x53, 0xa1, 0x95, 0xf5, 0x79, 0x25, 0x55, 0x50, 0x8d, 0x31, 0x78, 0x80, 0x57, 0xaa, 0x83, 0x36, 0x78, 0x12, 0x5b, 0xd4, 0x78, 0x0f, 0x78, 0x32, 0x60, 0x4f, 0x6d, 0x56, 0x78, 0xd9, 0x64, 0x5a, 0x64, 0xb0, 0x79, 0xc6, 0x68, 0xbc, 0x5c, 0x9f, 0x7a, 0xf7, 0x6d, 0x95, 0x55, 0x12, 0x7c, 0x39, 0x72, 0x32, 0x4d, 0xdb, 0x7d, 0x5b, 0x76, 0xc8, 0x47, 0x3a, 0x7e, 0x93, 0x7b, 0x1c, 0x40, 0x98, 0x7f, 0xff, 0x7f, 0x95, 0x3a, 0x8f, 0x81, 0x7f, 0x47, 0xd0, 0xc2, 0x1a, 0x82, 0xb1, 0x48, 0xd3, 0xba, 0x86, 0x83, 0xfa, 0x49, 0xd8, 0xb2, 0xfc, 0x84, 0x9b, 0x4a, 0xac, 0xab, 0xb1, 0x84, 0xde, 0x4b, 0x6d, 0xa4, 0x98, 0x84, 
0xde, 0x4c, 0x71, 0x9c, 0xf8, 0x84, 0x45, 0x4d, 0xd8, 0x94, 0x73, 0x83, 0xc2, 0x4f, 0x8d, 0x8b, 0x3e, 0x83, 0x1f, 0x51, 0x6e, 0x81, 0x74, 0x82, 0xbb, 0x56, 0x63, 0x75, 0xd2, 0x82, 0xe1, 0x5b, 0x38, 0x6b, 0x00, 0x83, 0xa6, 0x5f, 0x84, 0x61, 0xd9, 0x84, 0x56, 0x64, 0x3d, 0x5a, 0x2d, 0x85, 0x62, 0x68, 0xd7, 0x52, 0xce, 0x86, 0xbf, 0x6d, 0xc0, 0x4b, 0xf9, 0x87, 0xd0, 0x72, 0x9b, 0x45, 0x37, 0x88, 0xdc, 0x77, 0x6f, 0x3e, 0x30, 0x8c, 0xfb, 0x43, 0x7b, 0xc4, 0xd2, 0x8e, 0x1a, 0x44, 0x60, 0xbd, 0x8e, 0x8e, 0xd7, 0x45, 0x08, 0xb7, 0x28, 0x8f, 0xa6, 0x45, 0xc1, 0xb0, 0xab, 0x8f, 0x74, 0x46, 0x29, 0xa9, 0xed, 0x8f, 0x3a, 0x46, 0x97, 0xa3, 0x21, 0x8e, 0xaf, 0x47, 0x61, 0x9b, 0x80, 0x8d, 0xea, 0x48, 0x88, 0x93, 0x20, 0x8d, 0x71, 0x4a, 0x4f, 0x89, 0x64, 0x8d, 0x0c, 0x4c, 0x87, 0x7e, 0xf7, 0x8d, 0x39, 0x51, 0xa1, 0x73, 0x3c, 0x8d, 0x62, 0x56, 0xd2, 0x68, 0x42, 0x8e, 0x24, 0x5b, 0x67, 0x5f, 0x1e, 0x8e, 0xc3, 0x60, 0x3d, 0x57, 0xf4, 0x8f, 0xbb, 0x64, 0x92, 0x50, 0xdd, 0x90, 0xf9, 0x69, 0xb0, 0x49, 0xff, 0x92, 0x50, 0x6e, 0xfe, 0x42, 0x51, 0x95, 0xfc, 0x3f, 0xfa, 0xc8, 0x09, 0x97, 0xb8, 0x40, 0xf7, 0xbf, 0x62, 0x98, 0x30, 0x41, 0x53, 0xb9, 0xb2, 0x98, 0xae, 0x41, 0xbb, 0xb3, 0xfa, 0x98, 0xee, 0x42, 0x10, 0xae, 0x0f, 0x98, 0x8e, 0x42, 0x38, 0xa7, 0xa8, 0x98, 0x33, 0x42, 0x64, 0xa1, 0x3f, 0x97, 0x86, 0x43, 0x0c, 0x99, 0x96, 0x96, 0xce, 0x43, 0xe5, 0x91, 0x8f, 0x96, 0x86, 0x46, 0x52, 0x87, 0x18, 0x96, 0x5d, 0x49, 0x77, 0x7c, 0x14, 0x96, 0xb4, 0x4e, 0x01, 0x70, 0xba, 0x97, 0x15, 0x53, 0x6a, 0x65, 0x2b, 0x97, 0xa1, 0x58, 0x3e, 0x5c, 0xe9, 0x98, 0x5c, 0x5c, 0xe5, 0x55, 0xcb, 0x99, 0x81, 0x61, 0x35, 0x4e, 0x6a, 0x9b, 0x29, 0x66, 0xc5, 0x46, 0x50, 0x9e, 0x5b, 0x3d, 0xa8, 0xc8, 0x5d, 0xa0, 0x13, 0x3e, 0x55, 0xc1, 0x1d, 0xa0, 0x93, 0x3e, 0x7a, 0xbb, 0xa6, 0xa0, 0xec, 0x3e, 0x95, 0xb6, 0x6e, 0xa1, 0x47, 0x3e, 0xb1, 0xb1, 0x3a, 0xa1, 0x16, 0x3e, 0xc1, 0xab, 0x5c, 0xa0, 0xb9, 0x3e, 0xc9, 0xa5, 0x49, 0xa0, 0x58, 0x3e, 0xdb, 0x9f, 0x09, 0x9f, 0xd5, 0x3f, 0x50, 0x97, 0x87, 0x9f, 0x41, 0x3f, 0xd0, 
0x8f, 0xfc, 0x9f, 0x31, 0x43, 0x6e, 0x84, 0x88, 0x9f, 0x59, 0x47, 0x30, 0x79, 0x53, 0x9f, 0xc8, 0x4b, 0x89, 0x6e, 0x37, 0xa0, 0x47, 0x50, 0xcf, 0x61, 0xe8, 0xa0, 0xc2, 0x55, 0xa6, 0x5a, 0xf0, 0xa1, 0x93, 0x5a, 0x1c, 0x53, 0xd1, 0xa3, 0x11, 0x5e, 0x9a, 0x4b, 0x9e, 0xa5, 0xf4, 0x3b, 0x52, 0xc9, 0xb3, 0xa7, 0x1c, 0x3b, 0xd5, 0xc3, 0x25, 0xa7, 0xc9, 0x3c, 0x0c, 0xbd, 0x7b, 0xa8, 0x16, 0x3c, 0x08, 0xb8, 0x7d, 0xa8, 0x5f, 0x3c, 0x04, 0xb3, 0x86, 0xa8, 0x7a, 0x3c, 0x03, 0xae, 0x52, 0xa8, 0x14, 0x3c, 0x0d, 0xa8, 0x71, 0xa7, 0xae, 0x3c, 0x14, 0xa2, 0x8f, 0xa7, 0x47, 0x3c, 0x58, 0x9b, 0xe5, 0xa6, 0xe5, 0x3c, 0xd9, 0x94, 0x92, 0xa6, 0xa1, 0x3e, 0x05, 0x8c, 0x46, 0xa6, 0xab, 0x40, 0x88, 0x81, 0xf8, 0xa6, 0xfa, 0x44, 0x93, 0x77, 0x0f, 0xa7, 0x59, 0x49, 0x19, 0x6c, 0x29, 0xa7, 0xc1, 0x4e, 0x58, 0x60, 0x69, 0xa8, 0xac, 0x53, 0x15, 0x58, 0xdd, 0xaa, 0x05, 0x58, 0x30, 0x50, 0x15, 0xad, 0x8d, 0x39, 0x3c, 0xca, 0xa6, 0xae, 0x6a, 0x39, 0xb0, 0xc4, 0x96, 0xaf, 0x19, 0x39, 0xf4, 0xbe, 0xf0, 0xaf, 0x53, 0x39, 0xd8, 0xba, 0x1b, 0xaf, 0x87, 0x39, 0xb5, 0xb5, 0x51, 0xaf, 0xb5, 0x39, 0x90, 0xb0, 0x8a, 0xaf, 0x6f, 0x39, 0x8f, 0xab, 0x13, 0xaf, 0x17, 0x39, 0x95, 0xa5, 0x7b, 0xae, 0xba, 0x39, 0x9b, 0x9f, 0xdd, 0xae, 0x74, 0x3a, 0x18, 0x98, 0xc0, 0xae, 0x31, 0x3a, 0x9e, 0x91, 0xa7, 0xae, 0x08, 0x3c, 0x15, 0x89, 0x0b, 0xad, 0xff, 0x3d, 0xf1, 0x7f, 0xb4, 0xae, 0x5e, 0x42, 0x5c, 0x75, 0x13, 0xae, 0xd7, 0x46, 0xfd, 0x6a, 0x6d, 0xaf, 0x4f, 0x4c, 0x16, 0x5f, 0x77, 0xb0, 0x83, 0x50, 0xc8, 0x56, 0x51, 0xb5, 0xb4, 0x36, 0xe1, 0xcb, 0xef, 0xb6, 0x47, 0x37, 0x76, 0xc6, 0x48, 0xb6, 0xf8, 0x38, 0x16, 0xc0, 0xac, 0xb7, 0x27, 0x37, 0xf3, 0xbb, 0xe5, 0xb7, 0x4a, 0x37, 0xba, 0xb7, 0x39, 0xb7, 0x68, 0x37, 0x78, 0xb2, 0x93, 0xb7, 0x4f, 0x37, 0x50, 0xad, 0xa2, 0xb7, 0x05, 0x37, 0x48, 0xa8, 0x49, 0xb6, 0xb5, 0x37, 0x3e, 0xa2, 0xed, 0xb6, 0x64, 0x37, 0x64, 0x9c, 0xf2, 0xb6, 0x30, 0x37, 0xc5, 0x96, 0x0c, 0xb5, 0xef, 0x38, 0x45, 0x8f, 0x19, 0xb5, 0xdd, 0x39, 0xfe, 0x86, 0x76, 0xb5, 0xe8, 0x3c, 0x1f, 0x7d, 0x6e, 0xb6, 
0x03, 0x3f, 0xd9, 0x73, 0x3d, 0xb6, 0x59, 0x44, 0x7a, 0x68, 0xa6, 0xb7, 0x1b, 0x49, 0x8a, 0x5d, 0xc8, 0x1b, 0xc4, 0x9a, 0x3f, 0x90, 0x63, 0x1d, 0xe5, 0x9c, 0x39, 0x88, 0x2b, 0x1f, 0xaf, 0x9e, 0x45, 0x80, 0x1e, 0x20, 0xef, 0xa0, 0xa2, 0x77, 0xf1, 0x22, 0x0e, 0xa2, 0xf9, 0x6f, 0xdf, 0x23, 0x3f, 0xa5, 0xfa, 0x66, 0xb9, 0x24, 0x65, 0xa9, 0x3f, 0x5e, 0x35, 0x26, 0x52, 0xac, 0x63, 0x56, 0x80, 0x28, 0x58, 0xae, 0xeb, 0x4e, 0xf0, 0x2b, 0x00, 0xb0, 0xf5, 0x48, 0xd6, 0x2d, 0x2f, 0xb2, 0xe5, 0x42, 0x86, 0x30, 0x11, 0xb4, 0x34, 0x3e, 0x20, 0x31, 0x77, 0xb6, 0x66, 0x3a, 0x75, 0x34, 0xba, 0xb7, 0x29, 0x36, 0xe6, 0x38, 0xf8, 0xb7, 0x21, 0x33, 0x85, 0x3b, 0xeb, 0xb7, 0xca, 0x30, 0x15, 0x3e, 0xdf, 0xb8, 0xc5, 0x2c, 0xb3, 0x23, 0x72, 0x93, 0x30, 0x94, 0x5b, 0x26, 0x17, 0x94, 0x90, 0x8b, 0xc0, 0x28, 0x2a, 0x96, 0x49, 0x83, 0x80, 0x29, 0xce, 0x98, 0x7d, 0x7b, 0x27, 0x2b, 0x07, 0x9b, 0x12, 0x72, 0xc0, 0x2c, 0x4f, 0x9d, 0xe0, 0x69, 0x96, 0x2d, 0x90, 0xa0, 0x84, 0x5f, 0xd7, 0x2f, 0xaa, 0xa4, 0x19, 0x58, 0x0b, 0x31, 0x66, 0xa7, 0x39, 0x4f, 0xec, 0x33, 0xe9, 0xa9, 0xfc, 0x49, 0xd0, 0x36, 0x50, 0xac, 0x48, 0x43, 0x5d, 0x39, 0x00, 0xae, 0x03, 0x3e, 0x8c, 0x3c, 0x0b, 0xaf, 0x53, 0x3b, 0x1f, 0x3f, 0x0b, 0xb0, 0x72, 0x37, 0xd1, 0x41, 0xbb, 0xb1, 0x85, 0x34, 0x8f, 0x44, 0x3c, 0xb2, 0x7e, 0x31, 0x35, 0x46, 0xa3, 0xb3, 0x93, 0x2d, 0xc8, 0x2b, 0x25, 0x8c, 0x3c, 0x98, 0xe3, 0x2e, 0x18, 0x8d, 0x16, 0x8f, 0xb5, 0x30, 0x2f, 0x8e, 0xbf, 0x87, 0x2c, 0x32, 0x1d, 0x90, 0x9f, 0x7e, 0x99, 0x33, 0xb6, 0x93, 0x26, 0x76, 0x04, 0x35, 0x31, 0x95, 0xed, 0x6d, 0x5e, 0x36, 0xb3, 0x98, 0xf7, 0x64, 0x2a, 0x38, 0x5c, 0x9c, 0x3e, 0x5b, 0x61, 0x3a, 0x57, 0x9f, 0x95, 0x52, 0xcf, 0x3c, 0x85, 0xa2, 0xce, 0x4b, 0xf3, 0x3e, 0xc3, 0xa5, 0x98, 0x45, 0xa1, 0x41, 0x0c, 0xa7, 0xe2, 0x3f, 0xb2, 0x43, 0xbd, 0xa9, 0xa6, 0x3c, 0x58, 0x46, 0x64, 0xab, 0x2d, 0x38, 0xfd, 0x48, 0xfb, 0xac, 0x82, 0x35, 0xa2, 0x4b, 0x7a, 0xad, 0xad, 0x32, 0x3e, 0x4d, 0xf7, 0xae, 0xcd, 0x2e, 0xba, 0x33, 0x6c, 0x85, 0x0f, 0x9d, 0xa4, 0x36, 0x57, 0x85, 0xb6, 
0x94, 0xb2, 0x39, 0x16, 0x86, 0x8b, 0x8c, 0x28, 0x3b, 0x6d, 0x87, 0xe9, 0x83, 0xa6, 0x3d, 0x01, 0x8a, 0x70, 0x7a, 0xa6, 0x3e, 0x47, 0x8d, 0x53, 0x71, 0x77, 0x3f, 0xde, 0x90, 0xbd, 0x68, 0x6d, 0x41, 0x6c, 0x93, 0xe0, 0x5f, 0xad, 0x43, 0x0f, 0x97, 0x88, 0x57, 0x85, 0x44, 0xe1, 0x9a, 0xdf, 0x4f, 0x6d, 0x47, 0x3b, 0x9e, 0x58, 0x48, 0xf5, 0x49, 0x89, 0xa1, 0x39, 0x42, 0x81, 0x4b, 0xbd, 0xa3, 0x8e, 0x3e, 0x0c, 0x4e, 0x04, 0xa5, 0x8c, 0x3a, 0x84, 0x50, 0x48, 0xa7, 0x43, 0x37, 0x05, 0x52, 0x88, 0xa8, 0xc5, 0x33, 0x89, 0x54, 0xdb, 0xaa, 0x2f, 0x2f, 0xda, 0x3c, 0x15, 0x7d, 0x97, 0xa2, 0x3c, 0x3f, 0x19, 0x7e, 0x0d, 0x99, 0xad, 0x42, 0x10, 0x7e, 0x80, 0x91, 0x62, 0x44, 0xa7, 0x7f, 0x90, 0x88, 0xec, 0x47, 0x47, 0x80, 0x80, 0x80, 0x80, 0x48, 0x5c, 0x84, 0x2b, 0x77, 0x28, 0x49, 0x8d, 0x87, 0x72, 0x6d, 0xfe, 0x4a, 0xe0, 0x8b, 0x0c, 0x65, 0x00, 0x4c, 0x4e, 0x8e, 0xb7, 0x5c, 0x5c, 0x4d, 0xe9, 0x92, 0x6a, 0x54, 0x6b, 0x4f, 0xaf, 0x96, 0x0c, 0x4d, 0x3b, 0x51, 0xc0, 0x99, 0x8b, 0x46, 0xe2, 0x53, 0xf2, 0x9c, 0x90, 0x40, 0x77, 0x56, 0x29, 0x9f, 0x47, 0x3c, 0x78, 0x58, 0x36, 0xa1, 0x80, 0x38, 0xc7, 0x5a, 0x2d, 0xa3, 0x5f, 0x35, 0x27, 0x5c, 0x54, 0xa5, 0x2e, 0x31, 0x28, 0x47, 0x10, 0x75, 0x2b, 0xa7, 0xe4, 0x4a, 0x6a, 0x75, 0x77, 0x9f, 0x78, 0x4c, 0xdf, 0x76, 0x52, 0x97, 0x25, 0x4f, 0x47, 0x77, 0x43, 0x8e, 0xc3, 0x51, 0x16, 0x78, 0xf2, 0x86, 0x20, 0x52, 0xaf, 0x7a, 0xfa, 0x7d, 0x44, 0x53, 0x99, 0x7e, 0x3d, 0x73, 0xd9, 0x54, 0xca, 0x81, 0xd4, 0x6a, 0xe3, 0x55, 0xfd, 0x85, 0x5b, 0x62, 0x2a, 0x57, 0x52, 0x89, 0x60, 0x59, 0xe6, 0x58, 0xf5, 0x8d, 0x29, 0x51, 0xf3, 0x5a, 0xb6, 0x91, 0x2c, 0x4b, 0x48, 0x5c, 0x78, 0x94, 0xd6, 0x44, 0xe2, 0x5e, 0x51, 0x97, 0xfc, 0x3f, 0x12, 0x60, 0x32, 0x9a, 0xae, 0x3b, 0x08, 0x62, 0x25, 0x9d, 0x1c, 0x37, 0x06, 0x64, 0x42, 0x9f, 0x69, 0x32, 0xd8, 0x52, 0x72, 0x6c, 0x79, 0xad, 0xce, 0x55, 0xb0, 0x6d, 0x06, 0xa5, 0x70, 0x58, 0x70, 0x6d, 0xc7, 0x9d, 0x25, 0x5a, 0x40, 0x6e, 0xfd, 0x94, 0x8e, 0x5b, 0xd8, 0x70, 0xb5, 0x8b, 0xcb, 0x5d, 0x2b, 0x72, 0xc7, 0x83, 0x31, 0x5e, 
0x05, 0x75, 0x83, 0x7a, 0x22, 0x5e, 0xcb, 0x78, 0xad, 0x70, 0xec, 0x5f, 0xd6, 0x7c, 0x67, 0x68, 0x32, 0x61, 0x29, 0x80, 0x05, 0x5f, 0xa6, 0x62, 0x76, 0x84, 0x45, 0x57, 0xa2, 0x64, 0x00, 0x88, 0x1c, 0x4f, 0xcf, 0x65, 0x7e, 0x8c, 0x53, 0x49, 0x5b, 0x67, 0x46, 0x90, 0x0e, 0x43, 0x04, 0x68, 0xc8, 0x93, 0x4b, 0x3d, 0xb3, 0x6a, 0x61, 0x96, 0x2a, 0x39, 0x33, 0x6c, 0x42, 0x98, 0xf2, 0x34, 0x5d, 0x5e, 0x11, 0x63, 0x3f, 0xb3, 0xce, 0x61, 0x34, 0x64, 0x5a, 0xab, 0x80, 0x64, 0x4f, 0x65, 0x74, 0xa3, 0x69, 0x66, 0x07, 0x66, 0xc6, 0x9b, 0x14, 0x67, 0x0d, 0x68, 0x47, 0x92, 0x90, 0x67, 0xc8, 0x6a, 0x6f, 0x89, 0x83, 0x68, 0xaa, 0x6c, 0xba, 0x80, 0x84, 0x69, 0x45, 0x70, 0x2f, 0x77, 0x01, 0x69, 0xe8, 0x73, 0x86, 0x6e, 0x00, 0x6a, 0xe0, 0x77, 0x2d, 0x65, 0x84, 0x6c, 0x28, 0x7b, 0x0d, 0x5d, 0x47, 0x6d, 0xa9, 0x7f, 0x3a, 0x55, 0x8a, 0x6e, 0xee, 0x83, 0x42, 0x4e, 0x14, 0x70, 0x21, 0x87, 0x8a, 0x47, 0x79, 0x71, 0xa0, 0x8b, 0x4a, 0x41, 0x13, 0x73, 0x38, 0x8e, 0xb6, 0x3b, 0xf1, 0x74, 0xd1, 0x91, 0xec, 0x36, 0xa2, 0x6a, 0x6d, 0x5b, 0x38, 0xba, 0x78, 0x6e, 0x23, 0x5c, 0x9e, 0xb0, 0xd7, 0x70, 0x4a, 0x5d, 0x9b, 0xa8, 0xf3, 0x72, 0x50, 0x5e, 0x8b, 0xa1, 0x18, 0x72, 0xc5, 0x60, 0x1d, 0x98, 0xa9, 0x73, 0x33, 0x61, 0xd9, 0x90, 0x54, 0x73, 0x49, 0x64, 0x71, 0x86, 0xfe, 0x73, 0x9f, 0x67, 0x31, 0x7d, 0xaa, 0x74, 0x08, 0x6a, 0xd3, 0x73, 0xf0, 0x74, 0xe6, 0x6e, 0x91, 0x6b, 0x23, 0x75, 0xdb, 0x72, 0x30, 0x62, 0xdf, 0x76, 0xe6, 0x76, 0x4c, 0x5a, 0xed, 0x78, 0x36, 0x7a, 0x49, 0x53, 0x55, 0x79, 0x97, 0x7e, 0x69, 0x4c, 0x58, 0x7a, 0xab, 0x82, 0xbb, 0x45, 0xa6, 0x7b, 0xeb, 0x86, 0x94, 0x3f, 0x48, 0x7d, 0x72, 0x8a, 0x59, 0x39, 0x6e, 0x77, 0x08, 0x53, 0x9d, 0xbf, 0x75, 0x79, 0xd3, 0x55, 0x2a, 0xb6, 0xb0, 0x7c, 0x23, 0x56, 0x80, 0xae, 0x6d, 0x7d, 0x53, 0x57, 0x92, 0xa7, 0x01, 0x7e, 0x89, 0x58, 0xa6, 0x9f, 0x76, 0x7e, 0x71, 0x5a, 0x41, 0x97, 0x1b, 0x7e, 0x88, 0x5c, 0x07, 0x8e, 0x87, 0x7e, 0x6d, 0x5e, 0xb8, 0x84, 0xae, 0x7e, 0x79, 0x62, 0x0e, 0x7a, 0xa3, 0x7e, 0xbe, 0x65, 0xa7, 0x70, 0xbe, 0x7f, 0x96, 0x69, 0x7e, 
0x68, 0x5d, 0x80, 0xa0, 0x6d, 0x3b, 0x60, 0x4d, 0x81, 0xaf, 0x71, 0xaf, 0x58, 0xbb, 0x82, 0xe7, 0x75, 0x86, 0x51, 0x3b, 0x83, 0xf9, 0x79, 0xed, 0x4a, 0x6d, 0x85, 0x0d, 0x7e, 0x47, 0x43, 0xb6, 0x86, 0x4c, 0x82, 0xca, 0x3c, 0xc1, 0x83, 0x8d, 0x4d, 0x13, 0xc4, 0x8b, 0x85, 0x6e, 0x4e, 0x66, 0xbb, 0x79, 0x87, 0x33, 0x4f, 0x88, 0xb3, 0x6b, 0x88, 0x54, 0x50, 0xb0, 0xab, 0xf4, 0x89, 0x05, 0x51, 0xd5, 0xa4, 0xe3, 0x89, 0x79, 0x53, 0x18, 0x9d, 0x69, 0x89, 0x4a, 0x54, 0xa1, 0x95, 0x40, 0x89, 0x29, 0x56, 0x63, 0x8c, 0x88, 0x88, 0xf5, 0x58, 0xb8, 0x82, 0xc8, 0x88, 0xf8, 0x5c, 0xc4, 0x78, 0x00, 0x89, 0x44, 0x60, 0xe0, 0x6d, 0xba, 0x8a, 0x36, 0x64, 0xbe, 0x65, 0x74, 0x8b, 0x3c, 0x68, 0xc1, 0x5d, 0xa7, 0x8c, 0x77, 0x6c, 0xf8, 0x56, 0x75, 0x8e, 0x07, 0x70, 0xd8, 0x4f, 0x51, 0x8e, 0x8c, 0x75, 0xc2, 0x48, 0x5c, 0x8f, 0x63, 0x7b, 0x01, 0x40, 0x4e, 0x8e, 0xde, 0x48, 0x93, 0xc7, 0xe9, 0x90, 0xaa, 0x49, 0x9b, 0xbe, 0xc1, 0x91, 0x9a, 0x4a, 0x61, 0xb8, 0x02, 0x92, 0xa8, 0x4b, 0x40, 0xb1, 0x27, 0x93, 0x09, 0x4b, 0xfd, 0xaa, 0x3c, 0x93, 0x67, 0x4c, 0xca, 0xa3, 0x45, 0x93, 0x80, 0x4d, 0xe2, 0x9b, 0xa0, 0x93, 0x60, 0x4f, 0x48, 0x93, 0x64, 0x93, 0x31, 0x51, 0x08, 0x8a, 0x3e, 0x92, 0xf2, 0x53, 0x1c, 0x80, 0x81, 0x93, 0x2c, 0x57, 0xd4, 0x75, 0x97, 0x93, 0x9e, 0x5c, 0x4c, 0x6b, 0x58, 0x94, 0xa5, 0x60, 0x5f, 0x62, 0x90, 0x95, 0x79, 0x64, 0x96, 0x5b, 0x35, 0x96, 0xb4, 0x68, 0x9b, 0x54, 0x12, 0x98, 0x42, 0x6c, 0xc7, 0x4c, 0xe0, 0x99, 0x80, 0x72, 0x1d, 0x44, 0xbd, 0x97, 0xcc, 0x44, 0x79, 0xcb, 0xb3, 0x9a, 0x37, 0x45, 0xde, 0xc1, 0x0d, 0x9b, 0x0c, 0x46, 0x68, 0xba, 0xe6, 0x9b, 0xd2, 0x46, 0xf8, 0xb4, 0xdd, 0x9c, 0x7d, 0x47, 0x86, 0xae, 0xb7, 0x9c, 0x8d, 0x47, 0xfc, 0xa8, 0x1c, 0x9c, 0xb5, 0x48, 0x7f, 0xa1, 0x7b, 0x9c, 0x89, 0x49, 0x64, 0x99, 0xd6, 0x9c, 0x58, 0x4a, 0x75, 0x91, 0xdf, 0x9c, 0x48, 0x4c, 0xa1, 0x88, 0x02, 0x9c, 0x5b, 0x4f, 0x33, 0x7d, 0xb7, 0x9c, 0xd1, 0x53, 0xbc, 0x73, 0x0a, 0x9d, 0x53, 0x58, 0x87, 0x68, 0xd0, 0x9e, 0x4a, 0x5c, 0xa7, 0x5f, 0xe6, 0x9f, 0x43, 0x60, 0xed, 0x58, 0xb3, 0xa0, 
0x92, 0x64, 0xd1, 0x51, 0x87, 0xa2, 0x91, 0x6a, 0x2f, 0x48, 0x85, 0xa0, 0x6f, 0x41, 0x6a, 0xcc, 0x1b, 0xa2, 0x96, 0x42, 0x82, 0xc3, 0x03, 0xa3, 0x8b, 0x42, 0xfd, 0xbc, 0xdd, 0xa4, 0x16, 0x43, 0x50, 0xb7, 0x62, 0xa4, 0xa7, 0x43, 0xa8, 0xb1, 0xe4, 0xa4, 0xcb, 0x44, 0x00, 0xab, 0xdf, 0xa4, 0xbb, 0x44, 0x57, 0xa5, 0x94, 0xa4, 0xb2, 0x44, 0xc1, 0x9f, 0x20, 0xa4, 0x87, 0x45, 0x95, 0x97, 0x98, 0xa4, 0x68, 0x46, 0x7b, 0x90, 0x02, 0xa4, 0x7a, 0x49, 0x28, 0x85, 0xa2, 0xa4, 0xc0, 0x4c, 0x3f, 0x7b, 0x30, 0xa5, 0x5d, 0x4f, 0xef, 0x70, 0x89, 0xa5, 0xda, 0x55, 0x41, 0x66, 0x31, 0xa6, 0xc9, 0x59, 0xae, 0x5d, 0xbb, 0xa7, 0xf1, 0x5d, 0xfa, 0x56, 0x20, 0xa9, 0xeb, 0x62, 0x8f, 0x4d, 0x34, 0xa8, 0x7d, 0x3e, 0xff, 0xcb, 0x7e, 0xa9, 0xdb, 0x3f, 0x7d, 0xc4, 0xb2, 0xaa, 0xea, 0x3f, 0xd8, 0xbe, 0x80, 0xab, 0x67, 0x3f, 0xff, 0xb9, 0x5c, 0xab, 0xe9, 0x40, 0x2c, 0xb4, 0x3e, 0xac, 0x53, 0x40, 0x64, 0xae, 0xfa, 0xac, 0x21, 0x40, 0xb1, 0xa8, 0xef, 0xab, 0xf7, 0x41, 0x05, 0xa2, 0xde, 0xab, 0xdc, 0x41, 0x93, 0x9c, 0x39, 0xab, 0xd0, 0x42, 0x5f, 0x95, 0x03, 0xab, 0xda, 0x43, 0xa6, 0x8d, 0x0d, 0xac, 0x06, 0x45, 0xf3, 0x83, 0x3f, 0xac, 0x73, 0x49, 0x6a, 0x78, 0xf0, 0xad, 0x09, 0x4d, 0x3e, 0x6e, 0x61, 0xad, 0x99, 0x52, 0x73, 0x63, 0xb7, 0xae, 0xd3, 0x56, 0xf9, 0x5b, 0x9b, 0xb0, 0xc5, 0x5c, 0x89, 0x50, 0xf6, 0xb0, 0x31, 0x3c, 0xcd, 0xcc, 0x6e, 0xb1, 0x39, 0x3d, 0x2f, 0xc6, 0x51, 0xb2, 0x52, 0x3d, 0xa1, 0xc0, 0x59, 0xb2, 0xcb, 0x3d, 0xb3, 0xbb, 0x5f, 0xb3, 0x40, 0x3d, 0xc4, 0xb6, 0x70, 0xb3, 0xb4, 0x3d, 0xd4, 0xb1, 0x7f, 0xb3, 0xb5, 0x3d, 0xfc, 0xab, 0xf5, 0xb3, 0x86, 0x3e, 0x30, 0xa6, 0x22, 0xb3, 0x53, 0x3e, 0x69, 0xa0, 0x47, 0xb3, 0x42, 0x3e, 0xf4, 0x99, 0x68, 0xb3, 0x39, 0x3f, 0x8a, 0x92, 0x7e, 0xb3, 0x48, 0x40, 0xed, 0x8a, 0x3a, 0xb3, 0x81, 0x42, 0xf0, 0x81, 0x04, 0xb4, 0x03, 0x46, 0xd4, 0x76, 0xdf, 0xb4, 0x8f, 0x4a, 0xd4, 0x6c, 0x77, 0xb5, 0x0e, 0x4f, 0xd3, 0x61, 0x61, 0xb6, 0xc7, 0x54, 0x81, 0x58, 0x1e, 0xb8, 0x62, 0x3a, 0x23, 0xcd, 0xc5, 0xb9, 0x2f, 0x3a, 0xc4, 0xc8, 0x1f, 0xba, 0x32, 0x3b, 0x81, 
0xc2, 0x90, 0xba, 0xc4, 0x3b, 0xc3, 0xbd, 0x75, 0xbb, 0x18, 0x3b, 0xba, 0xb8, 0x99, 0xbb, 0x6b, 0x3b, 0xac, 0xb3, 0xbf, 0xbb, 0xa2, 0x3b, 0xa4, 0xae, 0xc2, 0xbb, 0x82, 0x3b, 0xc5, 0xa9, 0x2c, 0xbb, 0x6d, 0x3b, 0xe4, 0xa3, 0x97, 0xbb, 0x49, 0x3c, 0x20, 0x9d, 0xa1, 0xbb, 0x3c, 0x3c, 0x85, 0x96, 0xea, 0xbb, 0x2a, 0x3c, 0xf4, 0x90, 0x3c, 0xbb, 0x4a, 0x3e, 0x95, 0x87, 0xbc, 0xbb, 0x84, 0x40, 0x70, 0x7e, 0xe5, 0xbc, 0x0d, 0x43, 0xcf, 0x75, 0x60, 0xbc, 0x1c, 0x48, 0x3d, 0x6a, 0xa8, 0xbc, 0xe8, 0x4d, 0x55, 0x5f, 0x36, 0x22, 0xb7, 0xa0, 0xa5, 0x95, 0x0b, 0x24, 0xed, 0xa2, 0x10, 0x8c, 0xce, 0x26, 0x6d, 0xa3, 0xdb, 0x84, 0xe2, 0x27, 0x9d, 0xa5, 0xe4, 0x7c, 0xd7, 0x28, 0x7b, 0xa8, 0x61, 0x74, 0x9a, 0x29, 0x5c, 0xab, 0x07, 0x6c, 0x08, 0x2a, 0x3e, 0xad, 0xe7, 0x63, 0x23, 0x2b, 0xb5, 0xb0, 0x88, 0x5a, 0xd4, 0x2c, 0x8b, 0xb3, 0x0f, 0x52, 0x73, 0x2e, 0x72, 0xb5, 0x1b, 0x4c, 0x0e, 0x30, 0x2d, 0xb7, 0x24, 0x46, 0x2a, 0x33, 0x95, 0xb7, 0xd4, 0x40, 0x72, 0x34, 0x9e, 0xba, 0x19, 0x3c, 0x91, 0x39, 0x44, 0xb9, 0xc9, 0x38, 0xf3, 0x3c, 0xa9, 0xba, 0x29, 0x35, 0x72, 0x3f, 0xa6, 0xba, 0xa7, 0x32, 0x09, 0x42, 0x58, 0xbb, 0x5f, 0x2e, 0x84, 0x2a, 0x24, 0x99, 0xc0, 0x99, 0x27, 0x2c, 0xdc, 0x9a, 0xb0, 0x90, 0x45, 0x2e, 0xa8, 0x9c, 0x45, 0x88, 0x0d, 0x30, 0x4e, 0x9d, 0xcb, 0x7f, 0xd3, 0x31, 0x4d, 0xa0, 0x84, 0x77, 0x59, 0x32, 0x3a, 0xa3, 0x2c, 0x6e, 0xf2, 0x33, 0x74, 0xa5, 0xc3, 0x65, 0x8e, 0x34, 0xda, 0xa8, 0x98, 0x5c, 0xc7, 0x36, 0x57, 0xab, 0x67, 0x54, 0x3f, 0x38, 0x49, 0xad, 0xd3, 0x4c, 0xe5, 0x3a, 0xbf, 0xaf, 0xd2, 0x46, 0x93, 0x3c, 0xfc, 0xb1, 0x72, 0x40, 0x76, 0x3f, 0xdb, 0xb2, 0x9f, 0x3c, 0xf5, 0x42, 0x99, 0xb3, 0xb0, 0x39, 0xb4, 0x45, 0x3c, 0xb4, 0x9e, 0x36, 0x67, 0x47, 0xb9, 0xb5, 0x73, 0x33, 0x04, 0x4a, 0x1e, 0xb6, 0x39, 0x2f, 0x86, 0x30, 0xe9, 0x93, 0x0b, 0x9d, 0xfb, 0x34, 0x1b, 0x93, 0xbd, 0x94, 0x22, 0x36, 0x90, 0x94, 0xe6, 0x8b, 0x86, 0x38, 0x7a, 0x96, 0x93, 0x83, 0x21, 0x39, 0xfc, 0x98, 0xd5, 0x7a, 0xa4, 0x3b, 0x14, 0x9b, 0x73, 0x72, 0x30, 0x3c, 0x43, 0x9e, 0x2d, 0x68, 0xed, 0x3d, 
0x7b, 0xa0, 0xc1, 0x5f, 0x58, 0x3f, 0x64, 0xa3, 0xda, 0x57, 0x3b, 0x41, 0x17, 0xa6, 0x8c, 0x4f, 0x2d, 0x43, 0x65, 0xa9, 0x19, 0x49, 0x07, 0x45, 0xa0, 0xab, 0x38, 0x42, 0xa4, 0x48, 0x26, 0xac, 0xe5, 0x3e, 0x2d, 0x4a, 0xc8, 0xae, 0x47, 0x3a, 0xca, 0x4d, 0x54, 0xaf, 0x78, 0x37, 0x69, 0x4f, 0xa8, 0xb0, 0x81, 0x34, 0x05, 0x51, 0xde, 0xb1, 0x85, 0x30, 0x84, 0x38, 0x5b, 0x8c, 0x48, 0xa1, 0x69, 0x3b, 0x93, 0x8c, 0xb3, 0x98, 0x6e, 0x3e, 0xc1, 0x8d, 0x16, 0x8f, 0xbc, 0x40, 0xa7, 0x8e, 0xd6, 0x86, 0xef, 0x42, 0x75, 0x90, 0xcf, 0x7e, 0x2d, 0x44, 0x19, 0x93, 0x36, 0x75, 0xa8, 0x45, 0x87, 0x95, 0xdf, 0x6d, 0x1c, 0x46, 0xd7, 0x98, 0xc4, 0x64, 0x0d, 0x48, 0x4f, 0x9b, 0xdd, 0x5b, 0x5f, 0x4a, 0x19, 0x9e, 0xf4, 0x53, 0x0b, 0x4c, 0x29, 0xa1, 0xee, 0x4c, 0x33, 0x4e, 0x4a, 0xa4, 0x85, 0x46, 0x0f, 0x50, 0x66, 0xa6, 0xb0, 0x3f, 0xf6, 0x52, 0xa3, 0xa8, 0x91, 0x3c, 0x70, 0x54, 0xdd, 0xaa, 0x2c, 0x38, 0xef, 0x57, 0x0b, 0xab, 0x99, 0x35, 0x70, 0x59, 0x51, 0xac, 0xf6, 0x31, 0xab, 0x40, 0x47, 0x85, 0x27, 0xa5, 0xc6, 0x44, 0x09, 0x85, 0x4e, 0x9d, 0x1c, 0x47, 0x4b, 0x85, 0xa7, 0x94, 0xb7, 0x4a, 0x25, 0x86, 0x75, 0x8c, 0x3d, 0x4c, 0x63, 0x87, 0xea, 0x83, 0xa7, 0x4d, 0xf1, 0x8a, 0x4b, 0x7a, 0xb6, 0x4f, 0x33, 0x8c, 0xe4, 0x71, 0x9f, 0x50, 0x72, 0x90, 0x0f, 0x68, 0xa0, 0x51, 0xa4, 0x93, 0x19, 0x5f, 0xf3, 0x53, 0x49, 0x96, 0xa0, 0x58, 0x29, 0x55, 0x02, 0x99, 0xce, 0x50, 0x3e, 0x57, 0x09, 0x9d, 0x20, 0x49, 0xdd, 0x59, 0x2f, 0x9f, 0xfe, 0x43, 0x5d, 0x5b, 0x11, 0xa2, 0x54, 0x3e, 0x6f, 0x5c, 0xf8, 0xa4, 0x59, 0x3a, 0xc6, 0x5e, 0xdb, 0xa6, 0x21, 0x37, 0x1f, 0x60, 0xf5, 0xa7, 0xe1, 0x33, 0x01, 0x49, 0xd9, 0x7d, 0x6b, 0xaa, 0xda, 0x4d, 0xa2, 0x7d, 0x85, 0xa2, 0x48, 0x50, 0xac, 0x7d, 0xf3, 0x99, 0xe8, 0x53, 0x9a, 0x7e, 0x68, 0x91, 0x79, 0x55, 0xf0, 0x7f, 0x82, 0x88, 0xf2, 0x58, 0x17, 0x80, 0x80, 0x80, 0x80, 0x59, 0x3b, 0x83, 0xfa, 0x77, 0x65, 0x5a, 0x59, 0x87, 0x0a, 0x6e, 0x74, 0x5b, 0x87, 0x8a, 0x47, 0x65, 0xc9, 0x5c, 0xdf, 0x8d, 0x86, 0x5d, 0x60, 0x5e, 0x8f, 0x91, 0x2d, 0x55, 0x92, 0x60, 0x31, 0x94, 0xbb, 
0x4e, 0x3f, 0x61, 0xda, 0x98, 0x39, 0x47, 0xe1, 0x63, 0xa8, 0x9b, 0x34, 0x41, 0x63, 0x65, 0x7f, 0x9d, 0xc3, 0x3c, 0xf0, 0x67, 0x66, 0xa0, 0x14, 0x38, 0xd7, 0x69, 0x2a, 0xa2, 0x13, 0x34, 0xb0, 0x55, 0x4a, 0x74, 0x96, 0xb0, 0x6d, 0x58, 0xc4, 0x74, 0xef, 0xa7, 0xf5, 0x5c, 0x36, 0x75, 0x4d, 0x9f, 0xbf, 0x5e, 0x8b, 0x76, 0x46, 0x97, 0x2f, 0x60, 0xc7, 0x77, 0x4e, 0x8e, 0x99, 0x62, 0x35, 0x79, 0x03, 0x86, 0x12, 0x63, 0x76, 0x7b, 0x04, 0x7d, 0x64, 0x64, 0x76, 0x7e, 0x1d, 0x74, 0x5b, 0x65, 0xa5, 0x81, 0x6f, 0x6b, 0xa7, 0x66, 0xc8, 0x84, 0xc1, 0x63, 0x2b, 0x68, 0x12, 0x88, 0x5b, 0x5b, 0x0f, 0x69, 0xaa, 0x8b, 0xf4, 0x53, 0x3c, 0x6b, 0x51, 0x8f, 0xa7, 0x4c, 0x4c, 0x6c, 0xb6, 0x93, 0x3e, 0x46, 0x05, 0x6e, 0x3a, 0x96, 0x4d, 0x3f, 0xe5, 0x6f, 0xc7, 0x99, 0x13, 0x3b, 0x4a, 0x71, 0x8d, 0x9b, 0x9f, 0x36, 0x7f, 0x60, 0x91, 0x6b, 0x72, 0xb6, 0x0f, 0x64, 0x1e, 0x6c, 0x36, 0xad, 0x7c, 0x67, 0x5a, 0x6c, 0xe4, 0xa5, 0x4e, 0x69, 0xdf, 0x6d, 0xcb, 0x9c, 0xf5, 0x6b, 0x82, 0x6f, 0x23, 0x94, 0x5e, 0x6c, 0xf1, 0x70, 0xde, 0x8b, 0xbf, 0x6d, 0xfc, 0x72, 0xdd, 0x83, 0x35, 0x6e, 0xbc, 0x75, 0x98, 0x7a, 0x51, 0x6f, 0x9f, 0x78, 0xb3, 0x71, 0x61, 0x70, 0xd1, 0x7c, 0x1b, 0x68, 0xf6, 0x72, 0x56, 0x7f, 0x7a, 0x60, 0xb1, 0x73, 0x6a, 0x83, 0x47, 0x58, 0xe4, 0x74, 0xb9, 0x86, 0xc8, 0x51, 0x23, 0x75, 0xf5, 0x8a, 0xc4, 0x4a, 0x65, 0x77, 0x77, 0x8e, 0x5f, 0x44, 0x18, 0x78, 0xf8, 0x91, 0x95, 0x3e, 0x47, 0x7a, 0x5f, 0x94, 0x96, 0x38, 0xd8, 0x6d, 0x12, 0x62, 0xbe, 0xbb, 0xec, 0x70, 0xaf, 0x63, 0xe3, 0xb2, 0xff, 0x73, 0x4f, 0x64, 0xce, 0xaa, 0xce, 0x75, 0x9d, 0x65, 0xaf, 0xa2, 0xd4, 0x76, 0xe9, 0x67, 0x0a, 0x9a, 0x96, 0x77, 0xe9, 0x68, 0x96, 0x92, 0x31, 0x78, 0x9b, 0x6a, 0xc0, 0x89, 0x56, 0x79, 0x66, 0x6d, 0x04, 0x80, 0x8f, 0x7a, 0x1b, 0x70, 0x6c, 0x77, 0x3f, 0x7a, 0xcf, 0x73, 0xaa, 0x6e, 0x6f, 0x7b, 0xc4, 0x77, 0x1b, 0x66, 0x43, 0x7d, 0x00, 0x7a, 0x99, 0x5e, 0x40, 0x7e, 0x65, 0x7e, 0x40, 0x56, 0xcd, 0x7f, 0xcd, 0x81, 0x99, 0x4f, 0x70, 0x80, 0x9d, 0x85, 0xde, 0x48, 0xa3, 0x81, 0xdc, 0x89, 0x9e, 0x42, 0x2c, 0x83, 
0x61, 0x8d, 0x3a, 0x3c, 0x04, 0x79, 0x4b, 0x5a, 0x87, 0xc1, 0x69, 0x7c, 0x3a, 0x5b, 0xd8, 0xb8, 0xa4, 0x7f, 0x23, 0x5d, 0x1a, 0xb0, 0x33, 0x80, 0xc7, 0x5e, 0x1d, 0xa8, 0x71, 0x82, 0x7b, 0x5f, 0x24, 0xa0, 0x8e, 0x82, 0xff, 0x60, 0xd0, 0x98, 0x52, 0x83, 0xa3, 0x62, 0x7e, 0x90, 0x22, 0x83, 0xe1, 0x65, 0x06, 0x86, 0xe9, 0x84, 0x53, 0x67, 0xb2, 0x7d, 0xc3, 0x84, 0xe3, 0x6b, 0x28, 0x74, 0x43, 0x85, 0xda, 0x6e, 0xb8, 0x6b, 0xb8, 0x86, 0xd1, 0x72, 0x36, 0x63, 0xb8, 0x87, 0xe7, 0x75, 0xd4, 0x5c, 0x01, 0x89, 0x3f, 0x79, 0x79, 0x54, 0xa5, 0x8a, 0x8c, 0x7d, 0x52, 0x4d, 0x8a, 0x8b, 0x4f, 0x81, 0xa2, 0x46, 0xb3, 0x8c, 0x31, 0x85, 0xb8, 0x3f, 0xa7, 0x85, 0xa1, 0x52, 0xfb, 0xc6, 0x4a, 0x87, 0xfd, 0x54, 0xaa, 0xbd, 0x14, 0x8a, 0x06, 0x56, 0x03, 0xb5, 0x32, 0x8b, 0xb7, 0x57, 0x42, 0xad, 0x96, 0x8c, 0xc3, 0x58, 0x5e, 0xa6, 0x25, 0x8d, 0xba, 0x59, 0x90, 0x9e, 0x76, 0x8e, 0x16, 0x5b, 0x20, 0x96, 0x62, 0x8e, 0x90, 0x5c, 0xe0, 0x8e, 0x02, 0x8e, 0xcb, 0x5f, 0x94, 0x84, 0x50, 0x8f, 0x00, 0x62, 0xb5, 0x7a, 0xac, 0x8f, 0x65, 0x66, 0x1c, 0x71, 0x15, 0x90, 0x71, 0x69, 0xd3, 0x68, 0xe4, 0x91, 0xa9, 0x6d, 0x6a, 0x61, 0x04, 0x92, 0xff, 0x71, 0x21, 0x59, 0xc2, 0x94, 0x5b, 0x74, 0xa7, 0x52, 0x72, 0x95, 0x3d, 0x79, 0x07, 0x4b, 0x3c, 0x96, 0x1f, 0x7d, 0xbe, 0x43, 0x99, 0x90, 0xf7, 0x4d, 0x98, 0xc9, 0xc4, 0x93, 0x0d, 0x4e, 0x8e, 0xc0, 0x61, 0x94, 0x5d, 0x4f, 0x79, 0xb9, 0x3c, 0x95, 0xd6, 0x50, 0x9d, 0xb2, 0x0f, 0x96, 0xcc, 0x51, 0xce, 0xab, 0x0c, 0x97, 0x98, 0x52, 0xef, 0xa3, 0xf4, 0x98, 0x1c, 0x54, 0x35, 0x9c, 0x67, 0x98, 0x4f, 0x55, 0xae, 0x94, 0x6e, 0x98, 0x84, 0x57, 0x88, 0x8b, 0xc2, 0x98, 0xae, 0x5a, 0x01, 0x82, 0x32, 0x99, 0x12, 0x5d, 0xca, 0x77, 0xea, 0x99, 0x9c, 0x61, 0x85, 0x6e, 0x2b, 0x9a, 0xa1, 0x65, 0x40, 0x66, 0x23, 0x9b, 0xdb, 0x68, 0xdb, 0x5e, 0x7c, 0x9d, 0x6e, 0x6c, 0x82, 0x57, 0x4d, 0x9f, 0x8e, 0x6f, 0xa2, 0x50, 0x02, 0xa0, 0x3c, 0x74, 0xfc, 0x47, 0xf5, 0x9a, 0x39, 0x49, 0x1d, 0xcd, 0x4e, 0x9c, 0x9f, 0x4a, 0x59, 0xc3, 0x75, 0x9e, 0x0c, 0x4b, 0x2d, 0xbc, 0x79, 0x9f, 0x21, 0x4b, 0xeb, 
0xb6, 0x1d, 0xa0, 0x3e, 0x4c, 0xb8, 0xaf, 0xac, 0xa0, 0xb6, 0x4d, 0x71, 0xa8, 0xd3, 0xa1, 0x4e, 0x4e, 0x3c, 0xa1, 0xe7, 0xa1, 0x9a, 0x4f, 0x50, 0x9a, 0x3a, 0xa1, 0xd0, 0x50, 0x92, 0x92, 0x4c, 0xa1, 0xdc, 0x52, 0xa2, 0x89, 0x37, 0xa1, 0xfe, 0x55, 0x15, 0x7f, 0xb8, 0xa2, 0x7e, 0x59, 0x4f, 0x75, 0x79, 0xa3, 0x3c, 0x5d, 0x5e, 0x6b, 0xbc, 0xa4, 0x52, 0x61, 0x24, 0x63, 0x52, 0xa5, 0x72, 0x65, 0x13, 0x5b, 0xcd, 0xa7, 0x22, 0x68, 0xec, 0x54, 0x5c, 0xa9, 0x9c, 0x6d, 0x0e, 0x4c, 0x13, 0xa3, 0x09, 0x45, 0xdf, 0xcd, 0xa8, 0xa4, 0xfb, 0x46, 0xd3, 0xc5, 0x8c, 0xa6, 0x7b, 0x47, 0x90, 0xbe, 0xa4, 0xa7, 0x45, 0x48, 0x11, 0xb8, 0xd0, 0xa8, 0x1f, 0x48, 0x9f, 0xb2, 0xf1, 0xa8, 0xac, 0x49, 0x32, 0xac, 0xb6, 0xa8, 0xf0, 0x49, 0xc8, 0xa6, 0x28, 0xa9, 0x47, 0x4a, 0x6f, 0x9f, 0x7a, 0xa9, 0x77, 0x4b, 0x72, 0x97, 0xf5, 0xa9, 0xbf, 0x4c, 0x86, 0x90, 0x65, 0xa9, 0xf7, 0x4e, 0x8e, 0x86, 0xc1, 0xaa, 0x4a, 0x51, 0x1b, 0x7c, 0xef, 0xaa, 0xdb, 0x55, 0x25, 0x72, 0xf3, 0xab, 0xb4, 0x59, 0xb6, 0x69, 0x6b, 0xad, 0x0c, 0x5d, 0x9d, 0x60, 0xc6, 0xae, 0x71, 0x61, 0xd5, 0x58, 0xde, 0xb0, 0x7f, 0x66, 0x39, 0x50, 0x69, 0xab, 0x0f, 0x43, 0x04, 0xce, 0x0a, 0xac, 0x9f, 0x43, 0xb1, 0xc6, 0xff, 0xae, 0x16, 0x44, 0x46, 0xc0, 0x5b, 0xae, 0xcf, 0x44, 0xa0, 0xba, 0xe5, 0xaf, 0x8b, 0x44, 0xff, 0xb5, 0x77, 0xb0, 0x54, 0x45, 0x6a, 0xb0, 0x01, 0xb0, 0x6b, 0x45, 0xeb, 0xa9, 0xc0, 0xb0, 0x90, 0x46, 0x72, 0xa3, 0x72, 0xb0, 0xc6, 0x47, 0x26, 0x9c, 0xbe, 0xb1, 0x0b, 0x48, 0x11, 0x95, 0x90, 0xb1, 0x5a, 0x49, 0x3f, 0x8d, 0xe8, 0xb1, 0x9e, 0x4b, 0x37, 0x84, 0x7d, 0xb2, 0x16, 0x4d, 0xfe, 0x7a, 0xa2, 0xb2, 0xc1, 0x51, 0x31, 0x70, 0x84, 0xb3, 0x9f, 0x56, 0x84, 0x67, 0x08, 0xb5, 0x07, 0x5a, 0xb2, 0x5e, 0x3e, 0xb7, 0x07, 0x5f, 0x4a, 0x54, 0xaf, 0xb2, 0xc1, 0x40, 0x1a, 0xce, 0x71, 0xb4, 0x0c, 0x40, 0xa1, 0xc8, 0x5a, 0xb5, 0x7d, 0x41, 0x54, 0xc2, 0x7c, 0xb6, 0x74, 0x41, 0xc5, 0xbd, 0x1a, 0xb7, 0x1b, 0x42, 0x0b, 0xb7, 0xf2, 0xb7, 0xc4, 0x42, 0x56, 0xb2, 0xbf, 0xb8, 0x26, 0x42, 0xad, 0xad, 0x25, 0xb8, 0x3e, 0x43, 0x15, 0xa7, 0x19, 0xb8, 
0x5f, 0x43, 0x82, 0xa1, 0x06, 0xb8, 0x85, 0x44, 0x27, 0x9a, 0x47, 0xb8, 0xb5, 0x44, 0xde, 0x93, 0x5b, 0xb8, 0xed, 0x46, 0x43, 0x8b, 0x5f, 0xb9, 0x29, 0x48, 0x45, 0x82, 0x50, 0xb9, 0xa0, 0x4b, 0x4a, 0x78, 0x98, 0xba, 0x4b, 0x4e, 0xa0, 0x6e, 0x7a, 0xbb, 0x33, 0x53, 0x9b, 0x64, 0x9c, 0xbc, 0xf4, 0x58, 0x04, 0x5a, 0xf5, 0xba, 0xe1, 0x3d, 0x58, 0xcf, 0x7d, 0xbb, 0xe7, 0x3e, 0x07, 0xc9, 0xeb, 0xbd, 0x2b, 0x3e, 0xb8, 0xc4, 0x90, 0xbe, 0x5f, 0x3f, 0x5c, 0xbf, 0x68, 0xbe, 0xe6, 0x3f, 0x7c, 0xba, 0x5b, 0xbf, 0x6e, 0x3f, 0xa1, 0xb5, 0x47, 0xbf, 0xf4, 0x3f, 0xc7, 0xb0, 0x31, 0xc0, 0x25, 0x40, 0x01, 0xaa, 0x68, 0xc0, 0x5c, 0x40, 0x3e, 0xa4, 0x97, 0xc0, 0x6f, 0x40, 0x9e, 0x9e, 0x8c, 0xc0, 0x83, 0x41, 0x16, 0x97, 0xf1, 0xc0, 0x8b, 0x41, 0xa4, 0x91, 0x45, 0xc0, 0xe1, 0x43, 0x41, 0x89, 0x1b, 0xc1, 0x25, 0x45, 0x36, 0x80, 0x6e, 0xc2, 0x2d, 0x47, 0xb3, 0x77, 0x9e, 0xc2, 0x00, 0x4b, 0xf9, 0x6c, 0xae, 0xc2, 0xc0, 0x50, 0xc9, 0x61, 0xd7, 0x2a, 0x65, 0xa6, 0xe7, 0x9a, 0x77, 0x2c, 0x7c, 0xa8, 0x07, 0x91, 0xee, 0x2d, 0xbe, 0xa9, 0xac, 0x89, 0xc5, 0x2e, 0xb0, 0xab, 0x74, 0x81, 0xab, 0x2f, 0x5e, 0xad, 0xd1, 0x79, 0x67, 0x2f, 0xd9, 0xb0, 0x3d, 0x71, 0x27, 0x30, 0x87, 0xb2, 0x9d, 0x68, 0x85, 0x30, 0x64, 0xb5, 0x72, 0x5f, 0xd8, 0x31, 0x30, 0xb7, 0xab, 0x57, 0x84, 0x32, 0x2b, 0xb9, 0x88, 0x4f, 0x88, 0x34, 0x0a, 0xbb, 0x4b, 0x49, 0xcf, 0x37, 0x88, 0xbb, 0x9d, 0x43, 0x9e, 0x38, 0xc9, 0xbd, 0x5b, 0x3e, 0x9a, 0x3d, 0x28, 0xbd, 0x06, 0x3a, 0xdd, 0x40, 0x56, 0xbd, 0x5d, 0x37, 0x50, 0x43, 0x1b, 0xbd, 0xc2, 0x33, 0xe0, 0x45, 0xc7, 0xbe, 0x16, 0x30, 0x52, 0x31, 0x2e, 0xa0, 0x4c, 0x9e, 0x03, 0x33, 0x9f, 0xa1, 0x3a, 0x95, 0x24, 0x35, 0xae, 0xa2, 0x6a, 0x8c, 0xae, 0x37, 0x22, 0xa4, 0x11, 0x84, 0x77, 0x38, 0x3c, 0xa6, 0x2c, 0x7c, 0x3a, 0x39, 0x06, 0xa8, 0xb2, 0x74, 0x01, 0x39, 0xea, 0xab, 0x33, 0x6b, 0x3b, 0x3a, 0xd3, 0xad, 0xb0, 0x61, 0xf0, 0x3b, 0xf4, 0xb0, 0x0b, 0x59, 0x00, 0x3c, 0xc6, 0xb1, 0xf7, 0x50, 0x53, 0x3e, 0xfa, 0xb3, 0xaa, 0x4a, 0x39, 0x41, 0x1b, 0xb5, 0x19, 0x44, 0x06, 0x43, 0x82, 0xb6, 0x34, 
0x3f, 0x04, 0x46, 0x45, 0xb7, 0x12, 0x3b, 0xb7, 0x48, 0xe4, 0xb7, 0xd5, 0x38, 0x54, 0x4b, 0x5c, 0xb8, 0x7e, 0x34, 0xe4, 0x4d, 0xb8, 0xb9, 0x11, 0x31, 0x55, 0x37, 0x35, 0x99, 0xce, 0xa2, 0xb5, 0x3a, 0x86, 0x9a, 0x6f, 0x98, 0xba, 0x3d, 0x68, 0x9b, 0x0f, 0x90, 0x09, 0x3f, 0x17, 0x9c, 0xdf, 0x87, 0x8c, 0x40, 0x96, 0x9e, 0xda, 0x7f, 0x0c, 0x41, 0x87, 0xa1, 0x53, 0x76, 0xcb, 0x42, 0x7a, 0xa3, 0xb8, 0x6e, 0x75, 0x43, 0xa0, 0xa6, 0x11, 0x64, 0xfd, 0x44, 0xee, 0xa8, 0x91, 0x5c, 0x24, 0x46, 0x45, 0xaa, 0xee, 0x53, 0x89, 0x48, 0x21, 0xad, 0x1f, 0x4c, 0x6a, 0x4a, 0x5d, 0xae, 0xfa, 0x46, 0x22, 0x4c, 0x88, 0xb0, 0x76, 0x3f, 0xf8, 0x4e, 0xf9, 0xb1, 0x98, 0x3c, 0xb5, 0x51, 0x47, 0xb2, 0x9f, 0x39, 0x5f, 0x53, 0x77, 0xb3, 0x99, 0x35, 0xfd, 0x55, 0xaf, 0xb4, 0x85, 0x32, 0x5b, 0x3d, 0xef, 0x93, 0x72, 0xa5, 0x90, 0x41, 0x5b, 0x93, 0xb0, 0x9c, 0x79, 0x44, 0xbc, 0x94, 0x05, 0x93, 0xd1, 0x47, 0x45, 0x95, 0x29, 0x8b, 0x3b, 0x49, 0x0c, 0x96, 0xfa, 0x82, 0xae, 0x4a, 0x66, 0x99, 0x3c, 0x7a, 0x33, 0x4b, 0x68, 0x9b, 0xad, 0x71, 0xe2, 0x4c, 0x6a, 0x9e, 0x38, 0x68, 0xb5, 0x4d, 0x78, 0xa0, 0x97, 0x5f, 0x3b, 0x4f, 0x5a, 0xa3, 0x77, 0x57, 0x71, 0x50, 0xfe, 0xa5, 0xf8, 0x4f, 0x94, 0x53, 0x14, 0xa8, 0x52, 0x49, 0x73, 0x55, 0x1f, 0xaa, 0x48, 0x43, 0x1c, 0x57, 0x38, 0xab, 0xf5, 0x3e, 0x53, 0x59, 0x62, 0xad, 0x6c, 0x3a, 0xcf, 0x5b, 0x7f, 0xae, 0xbe, 0x37, 0x4b, 0x5d, 0xcd, 0xb0, 0x16, 0x33, 0x41, 0x45, 0x66, 0x8c, 0x8f, 0xa9, 0xe1, 0x49, 0x48, 0x8c, 0x9b, 0xa0, 0xc6, 0x4c, 0xaf, 0x8c, 0xe5, 0x98, 0x2f, 0x4f, 0xf7, 0x8d, 0x45, 0x8f, 0xa0, 0x51, 0xd9, 0x8e, 0xe4, 0x86, 0xf1, 0x53, 0x7f, 0x90, 0xb3, 0x7e, 0x3c, 0x54, 0xd9, 0x92, 0xf8, 0x75, 0xab, 0x56, 0x08, 0x95, 0x7b, 0x6d, 0x32, 0x57, 0x30, 0x98, 0x33, 0x64, 0x5b, 0x58, 0x9c, 0x9b, 0x1f, 0x5b, 0xef, 0x5a, 0x54, 0x9e, 0x26, 0x53, 0xf2, 0x5c, 0x29, 0xa1, 0x04, 0x4c, 0xe3, 0x5e, 0x01, 0xa3, 0x8d, 0x46, 0xc5, 0x5f, 0xd6, 0xa5, 0xb1, 0x40, 0x8c, 0x61, 0xa8, 0xa7, 0x87, 0x3c, 0xbc, 0x63, 0x84, 0xa9, 0x30, 0x38, 0xfc, 0x65, 0x96, 0xaa, 0xd7, 0x34, 0xaf, 0x4e, 
0x5f, 0x85, 0x1a, 0xae, 0x5d, 0x52, 0x2f, 0x85, 0x28, 0xa5, 0xad, 0x55, 0xbe, 0x85, 0x46, 0x9d, 0x32, 0x58, 0xf1, 0x85, 0x8a, 0x94, 0xb3, 0x5b, 0x93, 0x86, 0x4c, 0x8c, 0x2d, 0x5d, 0x5f, 0x87, 0xbe, 0x83, 0x99, 0x5e, 0xb3, 0x8a, 0x11, 0x7a, 0xc9, 0x5f, 0xc6, 0x8c, 0xaa, 0x71, 0xe8, 0x60, 0xf4, 0x8f, 0x6b, 0x69, 0x4a, 0x62, 0x39, 0x92, 0x11, 0x60, 0xed, 0x63, 0xce, 0x95, 0x8e, 0x59, 0x25, 0x65, 0x6e, 0x98, 0xc0, 0x51, 0x48, 0x67, 0x0e, 0x9b, 0xee, 0x4a, 0xc3, 0x68, 0xd1, 0x9e, 0xaa, 0x44, 0x78, 0x6a, 0x8c, 0xa0, 0xf4, 0x3e, 0xfe, 0x6c, 0x2c, 0xa3, 0x09, 0x3a, 0xe8, 0x6d, 0xf1, 0xa5, 0x03, 0x36, 0x71, 0x58, 0x3f, 0x7d, 0x1d, 0xb3, 0x80, 0x5c, 0x01, 0x7d, 0x50, 0xaa, 0xbe, 0x5f, 0x96, 0x7d, 0x81, 0xa2, 0x85, 0x62, 0xac, 0x7d, 0xcf, 0x99, 0xfb, 0x65, 0x79, 0x7e, 0x3c, 0x91, 0x62, 0x67, 0x18, 0x7f, 0x6e, 0x88, 0xe8, 0x68, 0x85, 0x80, 0x80, 0x80, 0x80, 0x69, 0xc0, 0x83, 0xd1, 0x77, 0xb6, 0x6a, 0xf0, 0x86, 0xb1, 0x6f, 0x11, 0x6c, 0x20, 0x89, 0xbf, 0x66, 0xa0, 0x6d, 0x60, 0x8c, 0xad, 0x5e, 0x50, 0x6f, 0x11, 0x90, 0x11, 0x56, 0x89, 0x70, 0xa7, 0x93, 0x69, 0x4f, 0x13, 0x71, 0xf6, 0x96, 0xc7, 0x48, 0xe2, 0x73, 0x7b, 0x99, 0x98, 0x42, 0xbb, 0x75, 0x17, 0x9c, 0x20, 0x3d, 0x6c, 0x76, 0xd7, 0x9e, 0x85, 0x38, 0x78, 0x63, 0x7c, 0x73, 0xe4, 0xb8, 0xb1, 0x67, 0x57, 0x74, 0x54, 0xaf, 0xf4, 0x6a, 0xbd, 0x74, 0xb4, 0xa7, 0xb4, 0x6d, 0xe4, 0x75, 0x25, 0x9f, 0x64, 0x70, 0x07, 0x76, 0x39, 0x96, 0xf6, 0x71, 0xf6, 0x77, 0x5d, 0x8e, 0x8a, 0x72, 0xe9, 0x79, 0x0d, 0x86, 0x0f, 0x73, 0xd7, 0x7b, 0x0a, 0x7d, 0x7c, 0x74, 0xee, 0x7e, 0x07, 0x74, 0xb8, 0x76, 0x3f, 0x81, 0x26, 0x6c, 0x43, 0x77, 0x82, 0x84, 0x5c, 0x63, 0xfd, 0x78, 0xbd, 0x87, 0x97, 0x5c, 0x01, 0x7a, 0x1d, 0x8a, 0xe2, 0x54, 0x3b, 0x7b, 0x87, 0x8e, 0x4f, 0x4d, 0x0c, 0x7c, 0xe9, 0x91, 0xbb, 0x46, 0xe2, 0x7e, 0x56, 0x94, 0xa4, 0x40, 0xe1, 0x7f, 0xc3, 0x97, 0x76, 0x3b, 0x2d, 0x6f, 0x9b, 0x6a, 0xfd, 0xbe, 0x7a, 0x73, 0x24, 0x6b, 0xae, 0xb5, 0x7f, 0x76, 0x38, 0x6c, 0x4e, 0xad, 0x0d, 0x78, 0xd4, 0x6c, 0xf8, 0xa4, 0xdb, 0x7a, 0xee, 0x6d, 0xeb, 
0x9c, 0x87, 0x7c, 0x79, 0x6f, 0x3c, 0x94, 0x09, 0x7d, 0xbd, 0x71, 0x02, 0x8b, 0x8c, 0x7e, 0x95, 0x73, 0x13, 0x83, 0x36, 0x7f, 0x4c, 0x75, 0xc6, 0x7a, 0x7d, 0x80, 0x36, 0x78, 0xcf, 0x71, 0xb2, 0x81, 0x55, 0x7c, 0x0e, 0x69, 0x80, 0x82, 0xc1, 0x7f, 0x39, 0x61, 0x7a, 0x84, 0x04, 0x82, 0x8a, 0x59, 0xe8, 0x85, 0x57, 0x85, 0xbf, 0x52, 0x4b, 0x86, 0x65, 0x89, 0x74, 0x4b, 0x53, 0x87, 0x93, 0x8d, 0x0d, 0x44, 0xf4, 0x89, 0x0b, 0x90, 0x7b, 0x3e, 0x99, 0x7b, 0x50, 0x61, 0xa0, 0xc3, 0x98, 0x7e, 0xab, 0x62, 0xf7, 0xba, 0xc6, 0x81, 0xc7, 0x64, 0x2a, 0xb2, 0x71, 0x84, 0x12, 0x65, 0x34, 0xaa, 0x5e, 0x86, 0x0f, 0x66, 0x27, 0xa2, 0x4e, 0x87, 0x44, 0x67, 0x83, 0x9a, 0x21, 0x88, 0x58, 0x68, 0xf4, 0x91, 0xe7, 0x89, 0x13, 0x6b, 0x1c, 0x89, 0x2f, 0x89, 0xe2, 0x6d, 0x61, 0x80, 0x93, 0x8a, 0xaa, 0x70, 0x99, 0x77, 0x83, 0x8b, 0x66, 0x73, 0xb7, 0x6e, 0xda, 0x8c, 0x70, 0x76, 0xff, 0x66, 0xf0, 0x8d, 0xc6, 0x7a, 0x20, 0x5f, 0x2c, 0x8f, 0x3a, 0x7d, 0xa4, 0x57, 0xe8, 0x90, 0xc9, 0x81, 0x05, 0x50, 0x8c, 0x91, 0x4c, 0x85, 0x08, 0x49, 0x80, 0x92, 0x2c, 0x88, 0xe0, 0x42, 0x86, 0x87, 0x97, 0x59, 0xf9, 0xc8, 0xbb, 0x8a, 0x93, 0x5b, 0x53, 0xbe, 0xfb, 0x8c, 0xeb, 0x5c, 0x93, 0xb7, 0x25, 0x8f, 0x30, 0x5d, 0xcb, 0xaf, 0x77, 0x90, 0x95, 0x5e, 0xdc, 0xa7, 0x8e, 0x91, 0xff, 0x5f, 0xf1, 0x9f, 0x7c, 0x92, 0xc0, 0x61, 0x85, 0x97, 0xac, 0x93, 0x9d, 0x63, 0x0a, 0x8f, 0xc9, 0x93, 0xf6, 0x65, 0x91, 0x86, 0xb3, 0x94, 0x7e, 0x68, 0x30, 0x7d, 0xbe, 0x95, 0x2f, 0x6b, 0x77, 0x74, 0x86, 0x96, 0x49, 0x6e, 0xce, 0x6c, 0x2c, 0x97, 0x6f, 0x72, 0x15, 0x64, 0x6c, 0x98, 0xb7, 0x75, 0x5a, 0x5c, 0xdc, 0x9a, 0x39, 0x78, 0xd4, 0x55, 0x7c, 0x9b, 0xad, 0x7c, 0x8d, 0x4e, 0x23, 0x9c, 0x9d, 0x80, 0xea, 0x46, 0x8f, 0x93, 0x00, 0x53, 0x1f, 0xcc, 0x91, 0x95, 0x86, 0x54, 0x7e, 0xc2, 0xea, 0x97, 0x6c, 0x55, 0xc0, 0xbb, 0x48, 0x99, 0x3a, 0x57, 0x02, 0xb4, 0x01, 0x9a, 0xb1, 0x58, 0x2a, 0xac, 0xac, 0x9b, 0xbd, 0x59, 0x3e, 0xa5, 0x35, 0x9c, 0xa9, 0x5a, 0x70, 0x9d, 0x88, 0x9d, 0x42, 0x5b, 0xe4, 0x95, 0xa9, 0x9d, 0xec, 0x5d, 0xa3, 0x8d, 0x6a, 0x9e, 
0x6b, 0x60, 0x3f, 0x84, 0x1a, 0x9e, 0xc7, 0x63, 0x4a, 0x7a, 0xc9, 0x9f, 0x70, 0x66, 0x98, 0x71, 0x77, 0xa0, 0x86, 0x6a, 0x0d, 0x69, 0x7e, 0xa1, 0xe3, 0x6d, 0x4d, 0x61, 0xe4, 0xa3, 0xac, 0x70, 0xac, 0x5a, 0x72, 0xa5, 0x44, 0x74, 0x3c, 0x52, 0xc7, 0xa6, 0x75, 0x78, 0xa2, 0x4a, 0xc6, 0x9c, 0xb5, 0x4d, 0x48, 0xcf, 0xb6, 0x9f, 0x1d, 0x4e, 0x79, 0xc6, 0x53, 0xa1, 0x04, 0x4f, 0x9f, 0xbe, 0x77, 0xa2, 0x62, 0x50, 0xbf, 0xb7, 0xbc, 0xa3, 0xd9, 0x52, 0x10, 0xb0, 0xe9, 0xa4, 0xb5, 0x53, 0x23, 0xa9, 0xe5, 0xa5, 0x88, 0x54, 0x2d, 0xa2, 0xc3, 0xa6, 0x15, 0x55, 0x6b, 0x9b, 0x3d, 0xa6, 0x89, 0x56, 0xcc, 0x93, 0x83, 0xa6, 0xf1, 0x58, 0xb8, 0x8a, 0xe8, 0xa7, 0x63, 0x5b, 0x1d, 0x81, 0xad, 0xa8, 0x1a, 0x5e, 0x9f, 0x77, 0xd6, 0xa8, 0xe5, 0x62, 0x2c, 0x6e, 0x8e, 0xaa, 0x03, 0x65, 0xc8, 0x66, 0xb6, 0xab, 0x74, 0x69, 0x3b, 0x5f, 0x0c, 0xad, 0xad, 0x6c, 0xf9, 0x57, 0x62, 0xb0, 0x9a, 0x70, 0x89, 0x4f, 0x43, 0xa5, 0x67, 0x49, 0xef, 0xd0, 0xc3, 0xa7, 0x9c, 0x4b, 0x06, 0xc8, 0x35, 0xa9, 0x89, 0x4b, 0xdc, 0xc0, 0xe2, 0xaa, 0xb3, 0x4c, 0x8f, 0xba, 0x96, 0xab, 0xda, 0x4d, 0x51, 0xb4, 0x52, 0xac, 0xdd, 0x4e, 0x19, 0xad, 0xd2, 0xad, 0x7a, 0x4e, 0xd6, 0xa7, 0x05, 0xae, 0x32, 0x4f, 0x9e, 0xa0, 0x25, 0xae, 0xa4, 0x50, 0xd4, 0x98, 0xbc, 0xaf, 0x19, 0x52, 0x2a, 0x91, 0x53, 0xaf, 0x65, 0x54, 0x3e, 0x88, 0x55, 0xaf, 0xd4, 0x56, 0x9a, 0x7f, 0x22, 0xb0, 0x96, 0x5a, 0x58, 0x75, 0x5e, 0xb1, 0xb5, 0x5e, 0x35, 0x6c, 0x11, 0xb3, 0x0e, 0x61, 0xdd, 0x63, 0xd2, 0xb4, 0x7b, 0x65, 0xc7, 0x5b, 0xe9, 0xb6, 0x8f, 0x69, 0xf8, 0x53, 0x58, 0xad, 0xa8, 0x47, 0x1f, 0xd1, 0x2a, 0xaf, 0x99, 0x47, 0xee, 0xc9, 0x83, 0xb1, 0x46, 0x48, 0x89, 0xc2, 0xe9, 0xb2, 0x89, 0x49, 0x21, 0xbc, 0xdf, 0xb3, 0x7b, 0x49, 0xb4, 0xb7, 0x20, 0xb4, 0x73, 0x4a, 0x4e, 0xb1, 0x4d, 0xb4, 0xfb, 0x4a, 0xec, 0xaa, 0xfd, 0xb5, 0x71, 0x4b, 0x90, 0xa4, 0x83, 0xb5, 0xee, 0x4c, 0x4d, 0x9d, 0xcb, 0xb6, 0x5f, 0x4d, 0x3d, 0x96, 0x95, 0xb6, 0xdc, 0x4e, 0x4d, 0x8f, 0x2a, 0xb7, 0x1f, 0x50, 0x4a, 0x85, 0xed, 0xb7, 0x84, 0x52, 0xff, 0x7c, 0xad, 0xb8, 0x63, 0x56, 0x8c, 
0x73, 0x14, 0xb9, 0x93, 0x5a, 0xbf, 0x69, 0xc9, 0xbb, 0x36, 0x5e, 0x7b, 0x60, 0xf0, 0xbd, 0x43, 0x62, 0xfe, 0x57, 0x88, 0xb5, 0xb8, 0x44, 0x17, 0xd0, 0xe6, 0xb7, 0x50, 0x44, 0xd3, 0xca, 0xc1, 0xb8, 0xe2, 0x45, 0x8a, 0xc4, 0xe0, 0xba, 0x5a, 0x46, 0x40, 0xbf, 0x2c, 0xbb, 0x1b, 0x46, 0xa6, 0xb9, 0xb4, 0xbb, 0xe2, 0x47, 0x12, 0xb4, 0x32, 0xbc, 0x90, 0x47, 0x83, 0xae, 0x82, 0xbc, 0xec, 0x48, 0x07, 0xa8, 0x50, 0xbd, 0x54, 0x48, 0x90, 0xa2, 0x19, 0xbd, 0xb0, 0x49, 0x39, 0x9b, 0x64, 0xbe, 0x0d, 0x49, 0xf8, 0x94, 0x68, 0xbe, 0x64, 0x4b, 0x32, 0x8c, 0xb5, 0xbe, 0x9c, 0x4d, 0x36, 0x83, 0xc9, 0xbf, 0x1b, 0x4f, 0xc5, 0x7a, 0x5a, 0xc0, 0x09, 0x52, 0xfa, 0x70, 0xcc, 0xc1, 0x45, 0x57, 0x9c, 0x67, 0x5c, 0xc3, 0x10, 0x5b, 0xcc, 0x5d, 0x8b, 0xbc, 0xfd, 0x40, 0x8b, 0xd1, 0x59, 0xbe, 0xb4, 0x41, 0x88, 0xcb, 0xc6, 0xc0, 0x53, 0x42, 0x6b, 0xc6, 0x90, 0xc2, 0x0d, 0x42, 0xfe, 0xc1, 0x99, 0xc3, 0x04, 0x43, 0x28, 0xbc, 0x7a, 0xc3, 0xaf, 0x43, 0x53, 0xb7, 0x36, 0xc4, 0x5a, 0x43, 0x84, 0xb1, 0xe8, 0xc4, 0xd7, 0x43, 0xd0, 0xac, 0x2f, 0xc5, 0x7c, 0x44, 0x07, 0xa6, 0x59, 0xc5, 0x97, 0x44, 0xa2, 0xa0, 0x30, 0xc5, 0xe9, 0x45, 0x39, 0x99, 0x6d, 0xc6, 0x3c, 0x45, 0xe2, 0x92, 0x91, 0xc6, 0xef, 0x47, 0x07, 0x8b, 0x00, 0xc7, 0x3d, 0x48, 0xf4, 0x82, 0x7c, 0xc8, 0x04, 0x4b, 0xad, 0x79, 0x82, 0xc8, 0x04, 0x4f, 0x9a, 0x6e, 0x9a, 0xc8, 0xe0, 0x54, 0xa2, 0x64, 0x07, 0x33, 0x1d, 0xac, 0xac, 0x9f, 0x79, 0x34, 0x89, 0xae, 0x09, 0x96, 0xf6, 0x35, 0xa3, 0xaf, 0x88, 0x8e, 0xa2, 0x36, 0x4b, 0xb1, 0x53, 0x86, 0x87, 0x36, 0x99, 0xb3, 0x3d, 0x7e, 0x6b, 0x36, 0xed, 0xb5, 0x62, 0x76, 0x58, 0x37, 0x2e, 0xb7, 0x79, 0x6e, 0x24, 0x37, 0x69, 0xb9, 0xd6, 0x65, 0x8b, 0x37, 0x24, 0xbc, 0x5c, 0x5d, 0x06, 0x37, 0xfe, 0xbd, 0xb2, 0x54, 0x84, 0x39, 0x42, 0xbf, 0x04, 0x4d, 0x46, 0x3b, 0xc6, 0xbf, 0xa8, 0x47, 0x13, 0x3d, 0xd8, 0xc0, 0x46, 0x40, 0x91, 0x40, 0x67, 0xc0, 0xd7, 0x3c, 0xbc, 0x43, 0xcf, 0xc0, 0xce, 0x39, 0x54, 0x46, 0x95, 0xc1, 0x01, 0x35, 0xd3, 0x49, 0x26, 0xc1, 0x38, 0x32, 0x2b, 0x38, 0xe6, 0xa6, 0x9b, 0xa2, 0xde, 0x3b, 
0x4f, 0xa7, 0x66, 0x9a, 0x0b, 0x3d, 0x52, 0xa8, 0x66, 0x91, 0x9d, 0x3e, 0x9e, 0xaa, 0x0c, 0x89, 0x51, 0x3f, 0x8b, 0xab, 0xdc, 0x81, 0x08, 0x40, 0x46, 0xae, 0x42, 0x78, 0xda, 0x40, 0xcf, 0xb0, 0xac, 0x70, 0xb9, 0x41, 0x51, 0xb2, 0xd3, 0x67, 0xaa, 0x41, 0xc2, 0xb4, 0xf2, 0x5e, 0xd5, 0x42, 0x5a, 0xb6, 0x96, 0x56, 0x1b, 0x43, 0x65, 0xb8, 0x09, 0x4e, 0x44, 0x45, 0x8c, 0xb9, 0x20, 0x48, 0x47, 0x47, 0x94, 0xb9, 0xf3, 0x41, 0xf4, 0x4a, 0x21, 0xba, 0x99, 0x3d, 0xe3, 0x4c, 0xba, 0xbb, 0x21, 0x3a, 0x6b, 0x4f, 0x2a, 0xbb, 0x9d, 0x36, 0xe3, 0x51, 0x71, 0xbc, 0x24, 0x33, 0x40, 0x3e, 0x1b, 0xa0, 0xd1, 0xa6, 0x63, 0x41, 0x38, 0xa1, 0x1b, 0x9d, 0x0b, 0x44, 0x05, 0xa1, 0xb6, 0x94, 0x7d, 0x46, 0x2a, 0xa2, 0xea, 0x8c, 0x0a, 0x47, 0x7d, 0xa4, 0xc5, 0x83, 0xa6, 0x48, 0x74, 0xa6, 0xec, 0x7b, 0x64, 0x49, 0x28, 0xa9, 0x5d, 0x73, 0x47, 0x49, 0xf8, 0xab, 0xbc, 0x6a, 0x73, 0x4a, 0xda, 0xad, 0xf9, 0x61, 0x3c, 0x4b, 0xee, 0xaf, 0xfc, 0x58, 0x86, 0x4c, 0xdf, 0xb1, 0x9b, 0x50, 0x23, 0x4e, 0xf9, 0xb3, 0x00, 0x4a, 0x27, 0x50, 0xf6, 0xb4, 0x1c, 0x43, 0xfe, 0x52, 0xf6, 0xb5, 0x14, 0x3e, 0xff, 0x55, 0x30, 0xb6, 0x00, 0x3b, 0x9a, 0x57, 0x54, 0xb6, 0xe0, 0x38, 0x1a, 0x59, 0x83, 0xb7, 0xa7, 0x34, 0x63, 0x44, 0x7d, 0x9a, 0x6d, 0xaa, 0x39, 0x48, 0x13, 0x9a, 0x67, 0xa0, 0x9c, 0x4b, 0x2d, 0x9a, 0xd2, 0x97, 0xfa, 0x4e, 0x26, 0x9b, 0x59, 0x8f, 0x6e, 0x4f, 0x97, 0x9d, 0x5b, 0x86, 0xd4, 0x50, 0xbc, 0x9f, 0x82, 0x7e, 0x4a, 0x51, 0xa5, 0xa1, 0xb6, 0x76, 0x3c, 0x52, 0x83, 0xa3, 0xf6, 0x6d, 0xf6, 0x53, 0x99, 0xa6, 0x11, 0x64, 0xb2, 0x54, 0xe5, 0xa8, 0x5d, 0x5c, 0x2e, 0x56, 0x37, 0xaa, 0xa1, 0x54, 0x02, 0x57, 0xd4, 0xac, 0x9d, 0x4c, 0xd7, 0x59, 0xd0, 0xae, 0x4b, 0x46, 0x95, 0x5b, 0xc3, 0xaf, 0xaf, 0x40, 0x4b, 0x5d, 0xc2, 0xb0, 0xe7, 0x3c, 0xcf, 0x5f, 0xa9, 0xb2, 0x01, 0x39, 0x59, 0x61, 0xb9, 0xb3, 0x11, 0x35, 0x61, 0x4b, 0x40, 0x93, 0xd1, 0xae, 0x07, 0x4f, 0x0f, 0x93, 0xce, 0xa4, 0xbb, 0x52, 0xac, 0x93, 0xd3, 0x9b, 0xf8, 0x55, 0xfc, 0x94, 0x0a, 0x93, 0x72, 0x58, 0x3f, 0x95, 0x26, 0x8a, 0xe8, 0x59, 0xaa, 0x96, 0xe6, 
0x82, 0x55, 0x5a, 0xc4, 0x99, 0x13, 0x79, 0xef, 0x5b, 0xa4, 0x9b, 0x6a, 0x71, 0xc3, 0x5c, 0xb8, 0x9d, 0xc4, 0x68, 0xe6, 0x5d, 0xe2, 0x9f, 0xf0, 0x5f, 0xaf, 0x5f, 0x9b, 0xa2, 0xc8, 0x58, 0x1a, 0x61, 0x03, 0xa5, 0x42, 0x50, 0x4d, 0x62, 0xbf, 0xa7, 0x7f, 0x4a, 0x2b, 0x64, 0x7b, 0xa9, 0x58, 0x44, 0x0a, 0x66, 0x3a, 0xaa, 0xf1, 0x3e, 0xd2, 0x68, 0x0e, 0xac, 0x7a, 0x3a, 0xf4, 0x6a, 0x10, 0xad, 0xfb, 0x36, 0x96, 0x53, 0x37, 0x8c, 0xcf, 0xb2, 0x20, 0x57, 0x44, 0x8c, 0x94, 0xa9, 0x33, 0x5b, 0x16, 0x8c, 0x65, 0xa0, 0x9d, 0x5e, 0x67, 0x8c, 0x92, 0x98, 0x12, 0x61, 0x75, 0x8c, 0xe1, 0x8f, 0x83, 0x62, 0xd3, 0x8e, 0x8d, 0x86, 0xc9, 0x64, 0x10, 0x90, 0x71, 0x7e, 0x0c, 0x65, 0x39, 0x92, 0xaf, 0x75, 0xbd, 0x66, 0x52, 0x95, 0x08, 0x6d, 0x83, 0x67, 0x87, 0x97, 0x85, 0x64, 0xff, 0x68, 0xdc, 0x9a, 0x41, 0x5c, 0xa9, 0x6a, 0x75, 0x9d, 0x43, 0x54, 0xc1, 0x6c, 0x11, 0xa0, 0x0a, 0x4d, 0x85, 0x6d, 0xa5, 0xa2, 0x61, 0x47, 0xb5, 0x6f, 0x37, 0xa4, 0x5a, 0x41, 0xc9, 0x70, 0xcc, 0xa6, 0x40, 0x3d, 0x27, 0x72, 0x91, 0xa8, 0x2e, 0x38, 0x74, 0x5c, 0xaf, 0x85, 0x0d, 0xb6, 0xf5, 0x60, 0xa3, 0x85, 0x01, 0xad, 0xf7, 0x64, 0x59, 0x84, 0xdd, 0xa5, 0x92, 0x67, 0xbf, 0x84, 0xe0, 0x9d, 0x17, 0x6a, 0x94, 0x85, 0x41, 0x94, 0x8f, 0x6c, 0x99, 0x86, 0x30, 0x8c, 0x11, 0x6d, 0xcd, 0x87, 0xb7, 0x83, 0x86, 0x6e, 0xfb, 0x89, 0xfc, 0x7a, 0xf1, 0x70, 0x20, 0x8c, 0x62, 0x72, 0x74, 0x71, 0x49, 0x8e, 0xf4, 0x6a, 0x03, 0x72, 0x78, 0x91, 0x7c, 0x61, 0xa9, 0x73, 0xfb, 0x94, 0xa9, 0x59, 0xe6, 0x75, 0x81, 0x97, 0xb6, 0x52, 0x24, 0x76, 0xf1, 0x9a, 0xaa, 0x4b, 0xa9, 0x78, 0x85, 0x9d, 0x32, 0x45, 0xa5, 0x7a, 0x3c, 0x9f, 0x5a, 0x3f, 0xc0, 0x7b, 0xcf, 0xa1, 0xaa, 0x3a, 0xa3, 0x66, 0xe1, 0x7c, 0xb4, 0xbb, 0xde, 0x6a, 0xdc, 0x7c, 0xc0, 0xb2, 0xf9, 0x6e, 0x7d, 0x7c, 0xc7, 0xaa, 0x84, 0x71, 0xce, 0x7c, 0xe7, 0xa2, 0x12, 0x74, 0x68, 0x7d, 0x7c, 0x99, 0xa9, 0x76, 0xc3, 0x7e, 0x2c, 0x91, 0x45, 0x77, 0xd4, 0x7f, 0x6a, 0x88, 0xdd, 0x78, 0xb9, 0x80, 0x80, 0x80, 0x80, 0x7a, 0x0e, 0x83, 0xb4, 0x77, 0xeb, 0x7b, 0x55, 0x86, 0x74, 0x6f, 0x76, 0x7c, 
0x9f, 0x89, 0x63, 0x67, 0x3c, 0x7d, 0xef, 0x8c, 0x15, 0x5f, 0x0e, 0x7f, 0x57, 0x8f, 0x2f, 0x57, 0x39, 0x80, 0xab, 0x92, 0x35, 0x4f, 0xa1, 0x81, 0xec, 0x95, 0x6c, 0x49, 0xb3, 0x83, 0x4e, 0x98, 0x33, 0x43, 0xc8, 0x84, 0xda, 0x9a, 0xe1, 0x3d, 0xb0, 0x72, 0x72, 0x73, 0x7e, 0xc0, 0xf9, 0x76, 0x23, 0x73, 0xd4, 0xb8, 0x33, 0x79, 0x9a, 0x74, 0x0e, 0xaf, 0xba, 0x7c, 0x87, 0x74, 0x89, 0xa7, 0x65, 0x7f, 0x45, 0x75, 0x13, 0x9f, 0x07, 0x81, 0x14, 0x76, 0x31, 0x96, 0xab, 0x82, 0xaa, 0x77, 0x68, 0x8e, 0x52, 0x83, 0x64, 0x79, 0x27, 0x86, 0x00, 0x84, 0x25, 0x7b, 0x21, 0x7d, 0x90, 0x85, 0x3c, 0x7e, 0x0e, 0x74, 0xee, 0x86, 0x88, 0x81, 0x06, 0x6c, 0xab, 0x87, 0xdb, 0x83, 0xfa, 0x64, 0xb3, 0x89, 0x2a, 0x86, 0xe5, 0x5c, 0xe0, 0x8a, 0xa2, 0x89, 0xf5, 0x55, 0x37, 0x8c, 0x16, 0x8d, 0x17, 0x4d, 0xe4, 0x8d, 0x23, 0x90, 0x8c, 0x47, 0xa7, 0x8e, 0x19, 0x93, 0xf7, 0x41, 0x3f, 0x7d, 0xfc, 0x6a, 0x6b, 0xc6, 0x07, 0x81, 0x80, 0x6b, 0x45, 0xbd, 0x3d, 0x84, 0xab, 0x6b, 0xde, 0xb4, 0xe5, 0x87, 0x6b, 0x6c, 0x7f, 0xac, 0xa0, 0x89, 0xb1, 0x6d, 0x33, 0xa4, 0x5a, 0x8b, 0x81, 0x6e, 0x2c, 0x9c, 0x0c, 0x8c, 0xf5, 0x6f, 0x62, 0x93, 0xc0, 0x8e, 0x1c, 0x71, 0x27, 0x8b, 0x6c, 0x8e, 0xe6, 0x73, 0x3f, 0x83, 0x34, 0x8f, 0x9c, 0x75, 0xde, 0x7a, 0xa8, 0x90, 0x83, 0x78, 0xcf, 0x72, 0x0d, 0x91, 0xb9, 0x7b, 0xd4, 0x6a, 0x19, 0x93, 0x3c, 0x7e, 0xb2, 0x62, 0x64, 0x94, 0x9b, 0x81, 0xed, 0x5a, 0xdc, 0x95, 0xfe, 0x85, 0x24, 0x53, 0x4e, 0x97, 0x17, 0x88, 0x91, 0x4c, 0x20, 0x98, 0x22, 0x8c, 0x2a, 0x45, 0x2d, 0x89, 0xd2, 0x61, 0x28, 0xca, 0xd1, 0x8d, 0x5a, 0x62, 0x6d, 0xc1, 0x4d, 0x90, 0x0e, 0x63, 0xad, 0xb9, 0x68, 0x92, 0x88, 0x64, 0xbe, 0xb1, 0xa7, 0x94, 0x58, 0x65, 0xb9, 0xa9, 0x9e, 0x95, 0xff, 0x66, 0xac, 0xa1, 0x85, 0x97, 0x1c, 0x68, 0x03, 0x99, 0x8a, 0x98, 0x2f, 0x69, 0x65, 0x91, 0x87, 0x98, 0xe3, 0x6b, 0x8c, 0x88, 0xf2, 0x99, 0xa8, 0x6d, 0xc1, 0x80, 0x7c, 0x9a, 0x93, 0x70, 0xce, 0x77, 0xa9, 0x9b, 0x81, 0x73, 0xbb, 0x6f, 0x38, 0x9c, 0xb4, 0x76, 0xcb, 0x67, 0x90, 0x9e, 0x30, 0x79, 0xaf, 0x5f, 0xed, 0x9f, 0xd3, 0x7d, 0x24, 
0x58, 0x81, 0xa1, 0x6a, 0x80, 0x9a, 0x51, 0x07, 0xa2, 0x4b, 0x84, 0x6d, 0x49, 0x5c, 0x95, 0x72, 0x59, 0xf1, 0xcf, 0x09, 0x98, 0x39, 0x5b, 0x09, 0xc5, 0xd6, 0x9a, 0xb5, 0x5c, 0x17, 0xbd, 0xb6, 0x9c, 0xd5, 0x5d, 0x35, 0xb6, 0x2c, 0x9e, 0xd6, 0x5e, 0x47, 0xae, 0x78, 0xa0, 0x1a, 0x5f, 0x55, 0xa6, 0xa6, 0xa1, 0x3e, 0x60, 0x80, 0x9e, 0xc5, 0xa1, 0xff, 0x62, 0x0e, 0x97, 0x15, 0xa2, 0xc5, 0x63, 0xa4, 0x8f, 0x3a, 0xa3, 0x34, 0x66, 0x1b, 0x86, 0x69, 0xa3, 0xcf, 0x68, 0xa9, 0x7d, 0xac, 0xa4, 0xbc, 0x6b, 0xd3, 0x74, 0xbb, 0xa6, 0x08, 0x6e, 0xf0, 0x6c, 0x98, 0xa7, 0x54, 0x72, 0x09, 0x65, 0x04, 0xa8, 0xc9, 0x75, 0x36, 0x5d, 0x5a, 0xaa, 0x65, 0x78, 0xc5, 0x55, 0x9f, 0xab, 0xed, 0x7c, 0x94, 0x4d, 0xa7, 0x9f, 0x73, 0x51, 0xe8, 0xd2, 0x24, 0xa1, 0xe6, 0x54, 0x17, 0xc9, 0x42, 0xa4, 0x45, 0x55, 0xb5, 0xc1, 0x10, 0xa5, 0xfb, 0x56, 0xf3, 0xb9, 0xe1, 0xa7, 0x9a, 0x58, 0x19, 0xb2, 0xbe, 0xa8, 0xd4, 0x59, 0x23, 0xab, 0x77, 0xa9, 0xdf, 0x5a, 0x25, 0xa4, 0x0e, 0xaa, 0xc2, 0x5b, 0x51, 0x9c, 0x7b, 0xab, 0x7a, 0x5c, 0xb2, 0x94, 0xc6, 0xac, 0x35, 0x5e, 0x6f, 0x8c, 0x86, 0xac, 0xe4, 0x60, 0xc3, 0x83, 0x9b, 0xad, 0x6f, 0x63, 0xd0, 0x7a, 0xa6, 0xae, 0x53, 0x67, 0x1f, 0x71, 0xbd, 0xaf, 0xa8, 0x6a, 0x7a, 0x69, 0xe5, 0xb1, 0x43, 0x6d, 0xa8, 0x62, 0x48, 0xb3, 0x4e, 0x71, 0x1e, 0x5a, 0x6a, 0xb4, 0xe3, 0x74, 0xf9, 0x51, 0xf8, 0xa8, 0x15, 0x4e, 0x25, 0xd3, 0x3b, 0xaa, 0x6d, 0x4f, 0x26, 0xcb, 0x08, 0xac, 0x96, 0x4f, 0xf8, 0xc3, 0xaa, 0xae, 0x6f, 0x51, 0x0f, 0xbc, 0xc5, 0xaf, 0xd8, 0x52, 0x4b, 0xb6, 0x23, 0xb1, 0x25, 0x53, 0x7a, 0xaf, 0x6e, 0xb1, 0xeb, 0x54, 0x7b, 0xa8, 0x7e, 0xb2, 0xc3, 0x55, 0x79, 0xa1, 0x7c, 0xb3, 0x64, 0x56, 0xb7, 0x9a, 0x24, 0xb3, 0xfe, 0x58, 0x09, 0x92, 0xae, 0xb4, 0x84, 0x59, 0xf5, 0x8a, 0x31, 0xb5, 0x1c, 0x5c, 0x2f, 0x81, 0x46, 0xb6, 0x22, 0x5f, 0x74, 0x77, 0xca, 0xb7, 0x29, 0x62, 0xd8, 0x6e, 0xf3, 0xb8, 0x86, 0x66, 0x64, 0x67, 0x02, 0xba, 0x3f, 0x69, 0xb9, 0x5f, 0x26, 0xbc, 0x4f, 0x6d, 0xd6, 0x56, 0x6e, 0xb0, 0x77, 0x4b, 0x36, 0xd3, 0xcc, 0xb2, 0xbc, 0x4c, 0x17, 0xcc, 0x29, 0xb4, 
0x9a, 0x4c, 0xc1, 0xc5, 0x88, 0xb6, 0x58, 0x4d, 0x72, 0xbf, 0x1b, 0xb7, 0x6b, 0x4e, 0x27, 0xb9, 0x07, 0xb8, 0x89, 0x4e, 0xe4, 0xb2, 0xde, 0xb9, 0x6a, 0x4f, 0xa0, 0xac, 0x76, 0xba, 0x22, 0x50, 0x68, 0xa5, 0xdd, 0xba, 0xe2, 0x51, 0x5a, 0x9f, 0x2a, 0xbb, 0x6e, 0x52, 0x83, 0x97, 0xeb, 0xbc, 0x03, 0x53, 0xb3, 0x90, 0xa3, 0xbc, 0x58, 0x55, 0xe5, 0x87, 0xe2, 0xbc, 0xd9, 0x58, 0x42, 0x7e, 0xf8, 0xbe, 0x06, 0x5b, 0x8e, 0x75, 0x8a, 0xbf, 0x87, 0x5f, 0x0a, 0x6c, 0x51, 0xc1, 0x0e, 0x62, 0xae, 0x63, 0xe1, 0xc3, 0x07, 0x67, 0x3f, 0x5a, 0x11, 0xb8, 0xb3, 0x47, 0xf9, 0xd3, 0xba, 0xba, 0xae, 0x48, 0xef, 0xcd, 0x1a, 0xbc, 0x5f, 0x49, 0xae, 0xc7, 0x27, 0xbe, 0x12, 0x4a, 0x73, 0xc1, 0x52, 0xbf, 0x1b, 0x4b, 0x02, 0xbb, 0x96, 0xbf, 0xfd, 0x4b, 0x88, 0xb5, 0xd5, 0xc0, 0xef, 0x4b, 0xf6, 0xb0, 0x1e, 0xc1, 0x8d, 0x4c, 0x7f, 0xa9, 0xdf, 0xc2, 0x34, 0x4d, 0x10, 0xa3, 0x93, 0xc2, 0xcf, 0x4d, 0xbf, 0x9c, 0xe7, 0xc3, 0x58, 0x4e, 0x95, 0x95, 0xbd, 0xc3, 0xe0, 0x4f, 0xb7, 0x8e, 0x31, 0xc4, 0x26, 0x51, 0xf9, 0x85, 0x8d, 0xc4, 0xbb, 0x54, 0x9f, 0x7c, 0xa3, 0xc5, 0xd5, 0x57, 0xea, 0x73, 0x2e, 0xc7, 0x2e, 0x5b, 0xd9, 0x69, 0xaf, 0xc9, 0x07, 0x5f, 0xeb, 0x5f, 0xcc, 0xc0, 0x6e, 0x44, 0x88, 0xd4, 0x0f, 0xc2, 0x74, 0x45, 0x90, 0xce, 0x0a, 0xc4, 0x36, 0x46, 0x1d, 0xc8, 0xe6, 0xc5, 0xe6, 0x46, 0x77, 0xc3, 0xe6, 0xc7, 0x58, 0x46, 0xad, 0xbe, 0xdf, 0xc8, 0x18, 0x46, 0xfc, 0xb9, 0x60, 0xc8, 0xdb, 0x47, 0x50, 0xb3, 0xd7, 0xc9, 0x8c, 0x47, 0xb7, 0xae, 0x1a, 0xca, 0x1c, 0x48, 0x3a, 0xa7, 0xfc, 0xca, 0xab, 0x48, 0xcd, 0xa1, 0xd1, 0xcb, 0x28, 0x49, 0x8c, 0x9b, 0x04, 0xcb, 0x8a, 0x4a, 0x78, 0x93, 0xd6, 0xcc, 0x09, 0x4b, 0xd7, 0x8c, 0x23, 0xcc, 0x7a, 0x4d, 0xd4, 0x83, 0xc5, 0xcd, 0x03, 0x50, 0xb0, 0x7a, 0x8a, 0xcd, 0xd1, 0x54, 0x2b, 0x70, 0xef, 0xce, 0xff, 0x58, 0xd2, 0x66, 0x43, 0x3b, 0x3b, 0xb3, 0x06, 0xa4, 0xd7, 0x3d, 0x01, 0xb3, 0xed, 0x9c, 0x2b, 0x3d, 0xba, 0xb5, 0x5a, 0x93, 0xf2, 0x3e, 0x22, 0xb6, 0xf8, 0x8b, 0xb9, 0x3e, 0x36, 0xb8, 0xb5, 0x83, 0x76, 0x3e, 0x41, 0xba, 0x98, 0x7b, 0x4a, 0x3e, 0x35, 0xbc, 0x85, 
0x73, 0x29, 0x3e, 0x0c, 0xbe, 0xc3, 0x6a, 0xcc, 0x3e, 0x1c, 0xc0, 0xe9, 0x62, 0xac, 0x3d, 0xff, 0xc2, 0x46, 0x5a, 0x04, 0x3d, 0x80, 0xc3, 0x64, 0x50, 0xd9, 0x3f, 0xa6, 0xc3, 0xbd, 0x4a, 0x88, 0x41, 0x08, 0xc4, 0x73, 0x44, 0x54, 0x44, 0x39, 0xc4, 0x24, 0x3f, 0x3d, 0x47, 0x23, 0xc4, 0x47, 0x3b, 0xb3, 0x49, 0xdd, 0xc4, 0x63, 0x38, 0x09, 0x4c, 0x8d, 0xc4, 0x63, 0x34, 0x38, 0x41, 0x00, 0xad, 0x10, 0xa7, 0xea, 0x43, 0xa4, 0xad, 0x64, 0x9e, 0xb4, 0x45, 0x10, 0xae, 0x8b, 0x96, 0x4b, 0x46, 0x34, 0xaf, 0xec, 0x8d, 0xe4, 0x46, 0xbc, 0xb1, 0xb2, 0x85, 0x95, 0x47, 0x21, 0xb3, 0x9c, 0x7d, 0x5c, 0x47, 0x64, 0xb5, 0xca, 0x75, 0x56, 0x47, 0xa8, 0xb7, 0xd7, 0x6d, 0x13, 0x48, 0x11, 0xb9, 0xec, 0x64, 0x95, 0x48, 0x7a, 0xbb, 0xaa, 0x5c, 0x2b, 0x48, 0xe5, 0xbc, 0xe7, 0x53, 0x8b, 0x4a, 0x53, 0xbd, 0xa9, 0x4c, 0x8f, 0x4c, 0x56, 0xbe, 0x0a, 0x46, 0x52, 0x4e, 0x47, 0xbe, 0x40, 0x40, 0x51, 0x50, 0xbc, 0xbe, 0xa1, 0x3c, 0xb6, 0x52, 0xfc, 0xbf, 0x18, 0x39, 0x18, 0x55, 0x3f, 0xbf, 0x7e, 0x35, 0x57, 0x46, 0x01, 0xa7, 0x35, 0xab, 0x64, 0x49, 0x2b, 0xa7, 0x41, 0xa1, 0xb9, 0x4b, 0x77, 0xa7, 0xf7, 0x99, 0x1d, 0x4d, 0x87, 0xa8, 0xd7, 0x90, 0xa4, 0x4e, 0x9b, 0xaa, 0xa9, 0x88, 0x23, 0x4f, 0x74, 0xac, 0x8b, 0x7f, 0x97, 0x4f, 0xea, 0xaf, 0x1f, 0x77, 0x7f, 0x50, 0x3e, 0xb1, 0x7c, 0x6f, 0x7d, 0x51, 0x0e, 0xb3, 0x45, 0x66, 0xca, 0x51, 0xc9, 0xb5, 0x05, 0x5e, 0x62, 0x52, 0x9b, 0xb6, 0x64, 0x56, 0x18, 0x53, 0xac, 0xb7, 0x88, 0x4e, 0x81, 0x55, 0x79, 0xb8, 0x51, 0x48, 0x8f, 0x57, 0x1d, 0xb8, 0xf6, 0x42, 0x5a, 0x59, 0x16, 0xb9, 0x9f, 0x3e, 0x15, 0x5b, 0x32, 0xba, 0x59, 0x3a, 0x72, 0x5d, 0x87, 0xbb, 0x01, 0x36, 0x49, 0x4b, 0x0a, 0xa1, 0x5e, 0xae, 0xe7, 0x4e, 0x9d, 0xa1, 0x4b, 0xa4, 0xe9, 0x51, 0xc9, 0xa1, 0x6b, 0x9b, 0xf8, 0x54, 0x95, 0xa1, 0xdf, 0x93, 0x8e, 0x56, 0x67, 0xa3, 0x2e, 0x8b, 0x22, 0x57, 0x78, 0xa5, 0x02, 0x82, 0xa6, 0x58, 0x4c, 0xa7, 0x2a, 0x7a, 0x7e, 0x58, 0xe8, 0xa9, 0x95, 0x72, 0x7e, 0x59, 0xc2, 0xab, 0xcb, 0x69, 0xde, 0x5a, 0xb3, 0xad, 0xd7, 0x61, 0x20, 0x5b, 0xb3, 0xaf, 0xc9, 0x58, 0xc7, 0x5c, 
0x97, 0xb1, 0x52, 0x50, 0xa5, 0x5e, 0x64, 0xb2, 0x8a, 0x4a, 0x9a, 0x60, 0x1e, 0xb3, 0x8b, 0x44, 0xa5, 0x61, 0xc1, 0xb4, 0x5d, 0x3f, 0x56, 0x63, 0x9a, 0xb5, 0x49, 0x3b, 0xb4, 0x65, 0xa1, 0xb6, 0x2b, 0x37, 0x7f, 0x51, 0x81, 0x9b, 0x08, 0xb2, 0x44, 0x55, 0x7c, 0x9a, 0x9f, 0xa8, 0xd3, 0x59, 0x14, 0x9a, 0x54, 0x9f, 0xca, 0x5c, 0x25, 0x9a, 0xad, 0x97, 0x53, 0x5e, 0xe9, 0x9b, 0x43, 0x8e, 0xdf, 0x5f, 0xea, 0x9d, 0x19, 0x86, 0x4d, 0x60, 0xbe, 0x9f, 0x19, 0x7d, 0xdf, 0x61, 0x94, 0xa1, 0x56, 0x75, 0xe6, 0x62, 0x7b, 0xa3, 0x8a, 0x6d, 0xbd, 0x63, 0xa6, 0xa5, 0x93, 0x64, 0xe1, 0x64, 0xd8, 0xa7, 0xcd, 0x5c, 0x9e, 0x66, 0x0a, 0xaa, 0x01, 0x54, 0xbd, 0x67, 0x5e, 0xab, 0xe4, 0x4d, 0x99, 0x69, 0x0c, 0xad, 0x6e, 0x47, 0x99, 0x6a, 0xb0, 0xae, 0xb5, 0x41, 0x82, 0x6c, 0x72, 0xb0, 0x05, 0x3d, 0x1c, 0x6e, 0x4c, 0xb1, 0x3f, 0x38, 0x9e, 0x59, 0x38, 0x94, 0x20, 0xb6, 0x45, 0x5d, 0x39, 0x93, 0xa8, 0xac, 0xf4, 0x60, 0xd1, 0x93, 0x5a, 0xa4, 0x33, 0x64, 0x25, 0x93, 0x4e, 0x9b, 0x9d, 0x67, 0x03, 0x93, 0xaa, 0x93, 0x1c, 0x68, 0xc8, 0x94, 0xf0, 0x8a, 0x91, 0x69, 0xf7, 0x96, 0xb1, 0x82, 0x05, 0x6a, 0xfb, 0x98, 0xc4, 0x79, 0xd5, 0x6b, 0xd7, 0x9a, 0xef, 0x71, 0xe0, 0x6c, 0xd7, 0x9d, 0x27, 0x69, 0x3e, 0x6d, 0xce, 0x9f, 0x39, 0x60, 0x26, 0x6f, 0x64, 0xa2, 0x02, 0x58, 0x95, 0x70, 0xc2, 0xa4, 0x6a, 0x51, 0x05, 0x72, 0x3e, 0xa6, 0x74, 0x4b, 0x1b, 0x73, 0xc2, 0xa8, 0x31, 0x45, 0x54, 0x75, 0x46, 0xa9, 0xc1, 0x3f, 0xaf, 0x77, 0x16, 0xab, 0xb7, 0x3a, 0x9d, 0x61, 0xa6, 0x8c, 0xc4, 0xba, 0xc3, 0x65, 0xf7, 0x8c, 0x3d, 0xb1, 0x74, 0x69, 0xb6, 0x8b, 0xe5, 0xa8, 0xdf, 0x6d, 0x4f, 0x8b, 0x9d, 0xa0, 0x64, 0x6f, 0xe8, 0x8c, 0x17, 0x97, 0xdc, 0x72, 0x1e, 0x8c, 0xc8, 0x8f, 0x4d, 0x73, 0x2f, 0x8e, 0x7e, 0x86, 0xa6, 0x74, 0x47, 0x90, 0x5b, 0x7e, 0x0f, 0x75, 0x77, 0x92, 0x5e, 0x75, 0xf8, 0x76, 0x8d, 0x94, 0x89, 0x6d, 0xec, 0x77, 0xa3, 0x96, 0xee, 0x65, 0x91, 0x78, 0xc2, 0x99, 0x74, 0x5d, 0x5b, 0x7a, 0x34, 0x9c, 0x51, 0x55, 0xa3, 0x7b, 0xb2, 0x9e, 0xee, 0x4e, 0x6d, 0x7d, 0x45, 0xa1, 0x1e, 0x48, 0xae, 0x7e, 0xcb, 0xa2, 0xff, 
0x43, 0x0d, 0x80, 0x72, 0xa5, 0x43, 0x3c, 0xeb, 0x6b, 0x5f, 0x84, 0xbe, 0xbf, 0x41, 0x6f, 0x76, 0x84, 0x77, 0xb6, 0x4d, 0x73, 0x4b, 0x84, 0x2f, 0xad, 0xb8, 0x76, 0x9f, 0x84, 0x21, 0xa5, 0x34, 0x79, 0x83, 0x84, 0x5b, 0x9c, 0xb8, 0x7b, 0xc7, 0x84, 0xfe, 0x94, 0x53, 0x7d, 0x4d, 0x86, 0x09, 0x8b, 0xed, 0x7e, 0x28, 0x87, 0x83, 0x83, 0x7e, 0x7f, 0x42, 0x89, 0xb3, 0x7b, 0x0d, 0x80, 0x71, 0x8c, 0x15, 0x72, 0xb0, 0x81, 0xaa, 0x8e, 0x8a, 0x6a, 0x75, 0x82, 0xe7, 0x90, 0xea, 0x62, 0x4f, 0x84, 0x1a, 0x93, 0xc6, 0x5a, 0xa3, 0x85, 0x71, 0x96, 0x9f, 0x53, 0x1e, 0x86, 0xb7, 0x99, 0x76, 0x4c, 0xa1, 0x88, 0x0c, 0x9c, 0x19, 0x46, 0xbc, 0x89, 0xc2, 0x9e, 0xbb, 0x3f, 0xfa, 0x75, 0xcb, 0x7c, 0x75, 0xc3, 0xf1, 0x79, 0xc8, 0x7c, 0x58, 0xbb, 0x47, 0x7d, 0x68, 0x7c, 0x42, 0xb2, 0xd8, 0x80, 0x9c, 0x7c, 0x62, 0xaa, 0x61, 0x83, 0x83, 0x7c, 0x9a, 0xa1, 0xd4, 0x85, 0x97, 0x7d, 0x4e, 0x99, 0x6c, 0x87, 0x6c, 0x7e, 0x1c, 0x91, 0x0c, 0x88, 0x32, 0x7f, 0x68, 0x88, 0xc0, 0x88, 0xdc, 0x80, 0x80, 0x80, 0x80, 0x8a, 0x2f, 0x83, 0x99, 0x78, 0x19, 0x8b, 0x62, 0x86, 0x42, 0x6f, 0xcc, 0x8c, 0xb0, 0x88, 0xee, 0x67, 0xd6, 0x8d, 0xfd, 0x8b, 0x54, 0x5f, 0xd2, 0x8f, 0xa7, 0x8e, 0x35, 0x58, 0x20, 0x91, 0x75, 0x90, 0xe9, 0x50, 0x89, 0x92, 0x13, 0x94, 0x51, 0x4a, 0x93, 0x93, 0x43, 0x97, 0xed, 0x43, 0x30, 0x81, 0x24, 0x73, 0x4f, 0xc8, 0xca, 0x84, 0xf2, 0x73, 0xb4, 0xc0, 0x1f, 0x88, 0x37, 0x73, 0xd1, 0xb7, 0xba, 0x8b, 0x51, 0x73, 0xf6, 0xaf, 0x54, 0x8d, 0xcf, 0x74, 0x7c, 0xa6, 0xf3, 0x90, 0x0c, 0x75, 0x1d, 0x9e, 0x91, 0x91, 0x88, 0x76, 0x3c, 0x96, 0x53, 0x92, 0xcc, 0x77, 0x81, 0x8e, 0x16, 0x93, 0x75, 0x79, 0x3e, 0x85, 0xe7, 0x94, 0x2e, 0x7b, 0x2f, 0x7d, 0xa4, 0x95, 0x4b, 0x7d, 0xff, 0x75, 0x39, 0x96, 0xa4, 0x80, 0xca, 0x6d, 0x2b, 0x98, 0x01, 0x83, 0x94, 0x65, 0x69, 0x99, 0x53, 0x86, 0x63, 0x5d, 0xad, 0x9a, 0xda, 0x89, 0x6e, 0x56, 0x31, 0x9c, 0x7e, 0x8c, 0x50, 0x4e, 0xdb, 0x9d, 0xbd, 0x8f, 0xba, 0x47, 0xd2, 0x8d, 0x0c, 0x6a, 0x32, 0xcd, 0x92, 0x90, 0x74, 0x6a, 0xf6, 0xc4, 0xa4, 0x93, 0x53, 0x6b, 0x89, 0xbc, 0x53, 0x95, 
0xf1, 0x6c, 0x13, 0xb4, 0x1f, 0x98, 0x27, 0x6c, 0xb4, 0xab, 0xe0, 0x99, 0xfd, 0x6d, 0x75, 0xa3, 0xaa, 0x9b, 0x77, 0x6e, 0x84, 0x9b, 0x81, 0x9c, 0xb6, 0x6f, 0xcc, 0x93, 0x54, 0x9d, 0xab, 0x71, 0x99, 0x8b, 0x2a, 0x9e, 0x5d, 0x73, 0x8f, 0x83, 0x12, 0x9f, 0x37, 0x76, 0x08, 0x7a, 0xbb, 0xa0, 0x57, 0x78, 0xcb, 0x72, 0x65, 0xa1, 0xa9, 0x7b, 0xa5, 0x6a, 0x97, 0xa3, 0x41, 0x7e, 0x6f, 0x62, 0xef, 0xa4, 0xac, 0x81, 0x9c, 0x5b, 0x56, 0xa5, 0xde, 0x84, 0xdf, 0x53, 0xcc, 0xa7, 0x3a, 0x88, 0x4f, 0x4c, 0x15, 0x98, 0x1d, 0x61, 0x06, 0xd1, 0xd6, 0x9b, 0x36, 0x62, 0x01, 0xc8, 0xfb, 0x9e, 0x2a, 0x62, 0xe0, 0xc0, 0xb7, 0xa0, 0x86, 0x63, 0xfc, 0xb8, 0xb8, 0xa2, 0xa5, 0x64, 0xff, 0xb0, 0xa1, 0xa4, 0x03, 0x66, 0x0c, 0xa8, 0xb9, 0xa5, 0x5f, 0x67, 0x0a, 0xa0, 0xd0, 0xa6, 0x4a, 0x68, 0x74, 0x98, 0xef, 0xa7, 0x32, 0x69, 0xdf, 0x90, 0xf8, 0xa7, 0xe4, 0x6c, 0x01, 0x88, 0x8e, 0xa8, 0xaa, 0x6e, 0x16, 0x80, 0x47, 0xa9, 0xca, 0x71, 0x0a, 0x77, 0xc0, 0xaa, 0xe0, 0x73, 0xdc, 0x6f, 0x8c, 0xac, 0x33, 0x76, 0xd7, 0x67, 0xf2, 0xad, 0xca, 0x79, 0xac, 0x60, 0x50, 0xaf, 0x54, 0x7d, 0x2b, 0x58, 0x89, 0xb0, 0xad, 0x80, 0xc9, 0x50, 0x6d, 0xa2, 0x22, 0x59, 0x6e, 0xd5, 0xba, 0xa5, 0x25, 0x5b, 0x27, 0xcc, 0x00, 0xa7, 0xb4, 0x5c, 0x27, 0xc3, 0xfa, 0xa9, 0xdf, 0x5d, 0x06, 0xbc, 0x5c, 0xab, 0x9f, 0x5d, 0xfa, 0xb4, 0xe6, 0xad, 0x2b, 0x5e, 0xe3, 0xad, 0x5a, 0xae, 0x7b, 0x5f, 0xdb, 0xa5, 0xaf, 0xaf, 0xa4, 0x61, 0x10, 0x9e, 0x02, 0xb0, 0x55, 0x62, 0x9b, 0x96, 0x53, 0xb0, 0xfe, 0x64, 0x49, 0x8e, 0x6e, 0xb1, 0x91, 0x66, 0x99, 0x85, 0xf2, 0xb2, 0x4c, 0x69, 0x16, 0x7d, 0x7f, 0xb3, 0x79, 0x6c, 0x2f, 0x74, 0xf1, 0xb5, 0x04, 0x6f, 0x2e, 0x6c, 0xf2, 0xb6, 0x79, 0x72, 0x3c, 0x65, 0x43, 0xb7, 0xfc, 0x75, 0x6c, 0x5d, 0x60, 0xb9, 0x9b, 0x79, 0x48, 0x54, 0xcc, 0xaa, 0xf9, 0x53, 0xf3, 0xd6, 0xf7, 0xad, 0xfa, 0x55, 0x91, 0xcd, 0xaa, 0xb0, 0x5a, 0x56, 0x7d, 0xc6, 0x52, 0xb2, 0x80, 0x57, 0x49, 0xbf, 0x3a, 0xb3, 0xe8, 0x58, 0x56, 0xb8, 0x5c, 0xb5, 0x4d, 0x59, 0x46, 0xb1, 0x72, 0xb6, 0x63, 0x5a, 0x38, 0xaa, 0x5a, 0xb7, 0x74, 0x5b, 0x2e, 
0xa3, 0x28, 0xb8, 0x4e, 0x5c, 0x56, 0x9b, 0xc7, 0xb8, 0xfe, 0x5d, 0xab, 0x94, 0x44, 0xb9, 0xad, 0x5f, 0x63, 0x8c, 0x26, 0xba, 0x47, 0x61, 0xab, 0x83, 0x72, 0xbb, 0x1e, 0x64, 0x8e, 0x7a, 0xc1, 0xbc, 0x62, 0x67, 0xa8, 0x72, 0x31, 0xbe, 0x05, 0x6a, 0xe0, 0x6a, 0x31, 0xc0, 0x06, 0x6d, 0xf3, 0x62, 0x60, 0xc1, 0xc0, 0x71, 0xfa, 0x59, 0x44, 0xb3, 0x50, 0x4f, 0x65, 0xd7, 0x82, 0xb6, 0x0f, 0x50, 0x53, 0xce, 0xd5, 0xb8, 0x2d, 0x51, 0x58, 0xc8, 0x1d, 0xba, 0x32, 0x52, 0x52, 0xc1, 0x8a, 0xbb, 0x9a, 0x53, 0x4c, 0xbb, 0x2b, 0xbc, 0xd4, 0x54, 0x3c, 0xb4, 0xcd, 0xbd, 0xf4, 0x55, 0x1d, 0xae, 0x53, 0xbe, 0xdc, 0x56, 0x06, 0xa7, 0x97, 0xbf, 0xce, 0x56, 0xee, 0xa0, 0xcd, 0xc0, 0x79, 0x58, 0x0e, 0x99, 0x91, 0xc1, 0x1b, 0x59, 0x45, 0x92, 0x3a, 0xc1, 0xa6, 0x5b, 0x33, 0x89, 0xe0, 0xc2, 0x46, 0x5d, 0x59, 0x81, 0x21, 0xc3, 0x92, 0x60, 0x65, 0x77, 0xea, 0xc4, 0xdb, 0x63, 0xaa, 0x6f, 0x3e, 0xc6, 0x5e, 0x67, 0x4a, 0x66, 0xde, 0xc8, 0xd1, 0x6b, 0xf9, 0x5b, 0xc6, 0xbc, 0x05, 0x4c, 0x35, 0xd6, 0xb6, 0xbe, 0x4c, 0x4d, 0x1b, 0xcf, 0x7f, 0xc0, 0x11, 0x4d, 0xd3, 0xc9, 0x72, 0xc1, 0xec, 0x4e, 0x5d, 0xc3, 0xb4, 0xc3, 0x6e, 0x4e, 0xd4, 0xbd, 0xf2, 0xc4, 0x78, 0x4f, 0x5a, 0xb8, 0x10, 0xc5, 0x83, 0x4f, 0xe5, 0xb2, 0x21, 0xc6, 0x66, 0x50, 0xa0, 0xab, 0xe0, 0xc7, 0x38, 0x51, 0x84, 0xa5, 0x68, 0xc8, 0x02, 0x52, 0x76, 0x9e, 0xbb, 0xc8, 0x96, 0x53, 0xa8, 0x97, 0x61, 0xc9, 0x2d, 0x54, 0xe8, 0x8f, 0xfd, 0xc9, 0xa4, 0x57, 0x20, 0x87, 0x8a, 0xca, 0x42, 0x59, 0x69, 0x7e, 0xfa, 0xcb, 0x76, 0x5c, 0xb4, 0x75, 0x86, 0xcc, 0xee, 0x60, 0x47, 0x6c, 0x22, 0xce, 0x95, 0x64, 0xa4, 0x62, 0x1b, 0xc4, 0x94, 0x48, 0xa8, 0xd7, 0x83, 0xc6, 0xc3, 0x49, 0xbe, 0xd0, 0xe5, 0xc8, 0x87, 0x4a, 0x2e, 0xcb, 0x87, 0xca, 0x27, 0x4a, 0x6f, 0xc6, 0x56, 0xcb, 0xc0, 0x4a, 0x86, 0xc1, 0x3d, 0xcc, 0xb4, 0x4a, 0xdf, 0xbb, 0xaa, 0xcd, 0x8e, 0x4b, 0x4c, 0xb5, 0xf0, 0xce, 0x5d, 0x4b, 0xc3, 0xb0, 0x28, 0xcf, 0x13, 0x4c, 0x6c, 0xa9, 0xe8, 0xcf, 0xc8, 0x4d, 0x26, 0xa3, 0x95, 0xd0, 0x6f, 0x4e, 0x0b, 0x9c, 0xcb, 0xd0, 0xd6, 0x4f, 0x3e, 0x95, 0x4c, 0xd1, 
0x59, 0x50, 0xc5, 0x8d, 0x9b, 0xd1, 0xf5, 0x52, 0xd3, 0x85, 0x8c, 0xd2, 0x9e, 0x55, 0x5f, 0x7c, 0xe9, 0xd3, 0xa7, 0x58, 0xc4, 0x73, 0x45, 0xd5, 0x80, 0x5c, 0xd9, 0x68, 0x96, 0x43, 0x5d, 0xb9, 0x17, 0xaa, 0x49, 0x44, 0xdb, 0xb9, 0xd8, 0xa1, 0x82, 0x45, 0x72, 0xbb, 0x3a, 0x99, 0x17, 0x45, 0xa4, 0xbc, 0xd6, 0x90, 0xb8, 0x45, 0xab, 0xbe, 0x6c, 0x88, 0x4e, 0x45, 0x88, 0xbf, 0xfe, 0x7f, 0xd8, 0x45, 0x5b, 0xc1, 0xae, 0x77, 0xea, 0x45, 0x01, 0xc3, 0x36, 0x70, 0x11, 0x44, 0xef, 0xc5, 0x09, 0x68, 0x2a, 0x45, 0x16, 0xc6, 0x87, 0x60, 0x5e, 0x44, 0x8b, 0xc7, 0x5e, 0x57, 0x98, 0x44, 0x73, 0xc7, 0xee, 0x4f, 0x23, 0x44, 0x8c, 0xc8, 0xf9, 0x48, 0xed, 0x48, 0x29, 0xc7, 0xf4, 0x43, 0x4e, 0x4a, 0xb2, 0xc7, 0xc1, 0x3e, 0x6d, 0x4d, 0x70, 0xc7, 0xb2, 0x3a, 0xa4, 0x4f, 0xf1, 0xc7, 0xb3, 0x36, 0xa0, 0x48, 0xb8, 0xb3, 0x42, 0xac, 0xd9, 0x4b, 0x01, 0xb3, 0xa1, 0xa3, 0xdb, 0x4c, 0x7d, 0xb4, 0x88, 0x9b, 0x4d, 0x4d, 0x4f, 0xb5, 0xe2, 0x92, 0xe2, 0x4d, 0xd1, 0xb7, 0x72, 0x8a, 0x6d, 0x4e, 0x2d, 0xb9, 0x06, 0x81, 0xe9, 0x4e, 0x58, 0xba, 0xfc, 0x79, 0xd1, 0x4e, 0x60, 0xbc, 0xef, 0x71, 0xcd, 0x4e, 0xb5, 0xbe, 0xe5, 0x69, 0xac, 0x4f, 0x03, 0xc0, 0xae, 0x61, 0xd2, 0x4f, 0x6d, 0xc1, 0x8e, 0x59, 0xcc, 0x4f, 0xdc, 0xc2, 0x36, 0x51, 0xcc, 0x51, 0x3c, 0xc2, 0x59, 0x4b, 0x54, 0x52, 0xb6, 0xc2, 0x5f, 0x45, 0x04, 0x54, 0x51, 0xc2, 0x6d, 0x3f, 0x8c, 0x56, 0x85, 0xc2, 0xbe, 0x3b, 0xca, 0x58, 0xb8, 0xc3, 0x05, 0x37, 0xd1, 0x4d, 0xb9, 0xad, 0xa4, 0xaf, 0xb7, 0x50, 0x8e, 0xad, 0xbc, 0xa6, 0x80, 0x52, 0xf1, 0xae, 0x0a, 0x9d, 0x9f, 0x54, 0x76, 0xaf, 0x1b, 0x95, 0x12, 0x55, 0x97, 0xb0, 0x75, 0x8c, 0x87, 0x56, 0x3f, 0xb2, 0x19, 0x84, 0x08, 0x56, 0xb0, 0xb4, 0x16, 0x7b, 0xe0, 0x56, 0xe2, 0xb6, 0x53, 0x73, 0xfe, 0x57, 0x43, 0xb8, 0x58, 0x6b, 0xec, 0x57, 0xd1, 0xba, 0x40, 0x63, 0xeb, 0x58, 0x66, 0xbb, 0xb6, 0x5b, 0xe9, 0x59, 0x04, 0xbc, 0xa3, 0x53, 0xa9, 0x5a, 0x2e, 0xbd, 0x29, 0x4c, 0xdf, 0x5b, 0xa7, 0xbd, 0x73, 0x46, 0xff, 0x5d, 0x03, 0xbd, 0xa3, 0x41, 0x1c, 0x5f, 0x06, 0xbe, 0x27, 0x3d, 0x09, 0x61, 0x2e, 0xbe, 0x76, 
0x38, 0xde, 0x52, 0xf1, 0xa7, 0xca, 0xb3, 0x1b, 0x56, 0x1d, 0xa7, 0x96, 0xa9, 0x94, 0x59, 0x06, 0xa7, 0x77, 0xa0, 0x86, 0x5b, 0x49, 0xa8, 0x29, 0x97, 0xfe, 0x5d, 0x5f, 0xa8, 0xfd, 0x8f, 0x78, 0x5e, 0x34, 0xaa, 0xc1, 0x86, 0xe9, 0x5e, 0xf4, 0xac, 0xa4, 0x7e, 0x6f, 0x5f, 0x70, 0xaf, 0x26, 0x76, 0x7a, 0x5f, 0xdd, 0xb1, 0x72, 0x6e, 0x8d, 0x60, 0xa6, 0xb3, 0x44, 0x66, 0x5c, 0x61, 0x59, 0xb5, 0x04, 0x5e, 0x6d, 0x62, 0x24, 0xb6, 0x33, 0x56, 0x70, 0x63, 0x0e, 0xb7, 0x2e, 0x4e, 0xec, 0x64, 0x7f, 0xb7, 0xca, 0x49, 0x1e, 0x65, 0xd3, 0xb8, 0x3c, 0x43, 0x37, 0x67, 0x5f, 0xb8, 0xc1, 0x3e, 0x52, 0x69, 0x52, 0xb9, 0x6a, 0x3a, 0x03, 0x58, 0xa4, 0xa1, 0xc9, 0xb6, 0xa8, 0x5c, 0x3a, 0xa1, 0x47, 0xac, 0xd8, 0x5f, 0x5d, 0xa1, 0x04, 0xa3, 0xb9, 0x62, 0x47, 0xa1, 0x28, 0x9b, 0x18, 0x64, 0xe8, 0xa1, 0xae, 0x92, 0xbb, 0x66, 0x58, 0xa3, 0x0e, 0x8a, 0x59, 0x67, 0x46, 0xa4, 0xc6, 0x81, 0xec, 0x68, 0x04, 0xa6, 0xe1, 0x79, 0xec, 0x68, 0x8e, 0xa9, 0x30, 0x72, 0x09, 0x69, 0x61, 0xab, 0x4d, 0x69, 0xaa, 0x6a, 0x43, 0xad, 0x50, 0x61, 0x56, 0x6b, 0x38, 0xaf, 0x2b, 0x59, 0x61, 0x6c, 0x0c, 0xb0, 0xb5, 0x51, 0x6c, 0x6d, 0x84, 0xb1, 0xce, 0x4b, 0x69, 0x6e, 0xf2, 0xb2, 0xa7, 0x45, 0xa5, 0x70, 0x48, 0xb3, 0x5b, 0x3f, 0xf4, 0x72, 0x30, 0xb4, 0x7f, 0x3b, 0x3b, 0x60, 0x1b, 0x9b, 0x0b, 0xba, 0x9c, 0x63, 0xd3, 0x9a, 0x5c, 0xb0, 0xda, 0x67, 0x24, 0x99, 0xf7, 0xa7, 0xf8, 0x6a, 0x58, 0x99, 0xab, 0x9f, 0x3b, 0x6c, 0xd1, 0x9a, 0x39, 0x96, 0xc5, 0x6e, 0xda, 0x9b, 0x1a, 0x8e, 0x4f, 0x6f, 0xee, 0x9c, 0xe5, 0x85, 0xd4, 0x70, 0xe5, 0x9e, 0xc7, 0x7d, 0x92, 0x71, 0xa4, 0xa0, 0xd9, 0x75, 0xcc, 0x72, 0x5d, 0xa2, 0xea, 0x6d, 0xcf, 0x73, 0x3f, 0xa4, 0xe6, 0x65, 0x22, 0x74, 0x46, 0xa7, 0x08, 0x5d, 0x23, 0x75, 0x7c, 0xa9, 0x27, 0x55, 0xab, 0x76, 0xb1, 0xaa, 0xf4, 0x4e, 0xa8, 0x78, 0x2b, 0xac, 0x74, 0x48, 0xec, 0x79, 0xa2, 0xad, 0xc6, 0x43, 0x25, 0x7b, 0x5d, 0xaf, 0x72, 0x3d, 0x48, 0x67, 0xb7, 0x93, 0xfd, 0xbe, 0x98, 0x6b, 0xc4, 0x93, 0x4d, 0xb5, 0x35, 0x6f, 0x86, 0x92, 0xc2, 0xac, 0x5c, 0x72, 0xe4, 0x92, 0x62, 0xa3, 0xc3, 0x75, 
0x8c, 0x92, 0x97, 0x9b, 0x2f, 0x77, 0xa8, 0x93, 0x47, 0x92, 0xaf, 0x79, 0x10, 0x94, 0xa4, 0x8a, 0x3b, 0x7a, 0x28, 0x96, 0x4a, 0x81, 0xd1, 0x7b, 0x27, 0x98, 0x44, 0x79, 0xcc, 0x7b, 0xf7, 0x9a, 0x61, 0x71, 0xfb, 0x7c, 0xc3, 0x9c, 0x86, 0x69, 0xa4, 0x7d, 0x66, 0x9e, 0x7d, 0x60, 0xf0, 0x7e, 0xc6, 0xa1, 0x18, 0x59, 0x6a, 0x80, 0x2b, 0xa3, 0x6e, 0x52, 0x30, 0x81, 0x94, 0xa5, 0x63, 0x4c, 0x36, 0x83, 0x03, 0xa7, 0x2b, 0x46, 0x86, 0x84, 0x97, 0xa8, 0xf8, 0x40, 0x29, 0x70, 0x99, 0x8c, 0x67, 0xc2, 0xd0, 0x74, 0xde, 0x8b, 0xab, 0xb9, 0xc4, 0x78, 0xbc, 0x8b, 0x24, 0xb1, 0x14, 0x7c, 0x0e, 0x8a, 0xe5, 0xa8, 0x7a, 0x7f, 0x29, 0x8a, 0xc0, 0x9f, 0xd7, 0x81, 0x18, 0x8b, 0x89, 0x97, 0x6b, 0x82, 0xc4, 0x8c, 0x76, 0x8f, 0x00, 0x83, 0x8e, 0x8e, 0x13, 0x86, 0x89, 0x84, 0x6a, 0x8f, 0xe2, 0x7e, 0x18, 0x85, 0x80, 0x91, 0xf0, 0x76, 0x17, 0x86, 0x77, 0x94, 0x18, 0x6e, 0x2f, 0x87, 0x7f, 0x96, 0x5a, 0x66, 0x1d, 0x88, 0x83, 0x98, 0x9f, 0x5e, 0x26, 0x89, 0xda, 0x9b, 0x4d, 0x56, 0xcf, 0x8b, 0x2d, 0x9d, 0xce, 0x4f, 0xac, 0x8c, 0x86, 0xa0, 0x41, 0x49, 0xcf, 0x8e, 0x0c, 0xa2, 0xa3, 0x43, 0x52, 0x7a, 0x9e, 0x84, 0x7b, 0xc7, 0x76, 0x7e, 0xc7, 0x83, 0xfc, 0xbe, 0x89, 0x82, 0x40, 0x83, 0xb0, 0xb6, 0x09, 0x85, 0x81, 0x83, 0x7a, 0xad, 0x75, 0x88, 0x56, 0x83, 0x90, 0xa4, 0xd4, 0x8a, 0xa4, 0x83, 0xf9, 0x9c, 0x55, 0x8c, 0x5d, 0x84, 0xbe, 0x93, 0xfb, 0x8d, 0x85, 0x85, 0xdd, 0x8b, 0xb3, 0x8e, 0x3e, 0x87, 0x52, 0x83, 0x75, 0x8f, 0x36, 0x89, 0x78, 0x7b, 0x32, 0x90, 0x36, 0x8b, 0xcf, 0x72, 0xff, 0x91, 0x57, 0x8e, 0x1a, 0x6a, 0xf3, 0x92, 0x91, 0x90, 0x4b, 0x62, 0xe5, 0x93, 0xf3, 0x92, 0xf3, 0x5b, 0x71, 0x95, 0x69, 0x95, 0xaa, 0x54, 0x44, 0x96, 0x95, 0x98, 0x70, 0x4d, 0x8c, 0x97, 0xd9, 0x9b, 0x89, 0x46, 0x7b, 0x84, 0xf9, 0x7c, 0x39, 0xcc, 0x23, 0x88, 0xe1, 0x7c, 0x03, 0xc3, 0x6d, 0x8c, 0x65, 0x7b, 0xcf, 0xba, 0xf3, 0x8f, 0xb5, 0x7b, 0x9e, 0xb2, 0x6b, 0x92, 0x52, 0x7b, 0xd4, 0xa9, 0xd4, 0x94, 0x8e, 0x7c, 0x42, 0xa1, 0x4f, 0x96, 0x10, 0x7d, 0x26, 0x98, 0xfa, 0x97, 0x67, 0x7e, 0x14, 0x90, 0xaa, 0x98, 0x09, 0x7f, 0x66, 
0x88, 0x8d, 0x98, 0xa1, 0x80, 0x80, 0x80, 0x80, 0x99, 0xfc, 0x83, 0x74, 0x78, 0x57, 0x9b, 0x32, 0x86, 0x06, 0x70, 0x44, 0x9c, 0x80, 0x88, 0xa6, 0x68, 0x73, 0x9d, 0xbb, 0x8b, 0x17, 0x60, 0x8a, 0x9f, 0x60, 0x8d, 0xe9, 0x59, 0x1f, 0xa1, 0x20, 0x90, 0x88, 0x51, 0xcf, 0xa2, 0x33, 0x93, 0xc7, 0x4a, 0xa9, 0x90, 0xa9, 0x73, 0x31, 0xd0, 0x7a, 0x94, 0x00, 0x73, 0x5b, 0xc7, 0xe3, 0x97, 0x23, 0x73, 0x77, 0xbf, 0x87, 0x99, 0xee, 0x73, 0x93, 0xb6, 0xf2, 0x9c, 0x7c, 0x73, 0xca, 0xae, 0x63, 0x9e, 0x67, 0x74, 0x73, 0xa6, 0x29, 0xa0, 0x13, 0x75, 0x46, 0x9d, 0xfa, 0xa1, 0x38, 0x76, 0x78, 0x95, 0xdb, 0xa2, 0x31, 0x77, 0xca, 0x8d, 0xc0, 0xa2, 0xd3, 0x79, 0x6c, 0x85, 0xbc, 0xa3, 0x98, 0x7b, 0x45, 0x7d, 0xa8, 0xa4, 0xd5, 0x7d, 0xf4, 0x75, 0x7c, 0xa6, 0x3a, 0x80, 0xa1, 0x6d, 0x8f, 0xa7, 0xa3, 0x83, 0x5e, 0x65, 0xe3, 0xa8, 0xeb, 0x86, 0x18, 0x5e, 0x33, 0xaa, 0x24, 0x89, 0x41, 0x56, 0xb0, 0xab, 0xa5, 0x8c, 0x6e, 0x4e, 0xdc, 0x9b, 0xd1, 0x69, 0xfe, 0xd4, 0xb5, 0x9e, 0xee, 0x6a, 0x7a, 0xcc, 0x0e, 0xa1, 0xc1, 0x6b, 0x0c, 0xc3, 0xc3, 0xa4, 0x38, 0x6b, 0xa2, 0xbb, 0x71, 0xa6, 0x65, 0x6c, 0x2c, 0xb3, 0x19, 0xa8, 0x1e, 0x6c, 0xdd, 0xaa, 0xff, 0xa9, 0x9a, 0x6d, 0xa4, 0xa3, 0x05, 0xaa, 0xbf, 0x6e, 0xc7, 0x9a, 0xf8, 0xab, 0xb4, 0x70, 0x20, 0x92, 0xce, 0xac, 0x86, 0x71, 0xee, 0x8a, 0xc9, 0xad, 0x45, 0x73, 0xc4, 0x82, 0xd9, 0xae, 0x42, 0x76, 0x31, 0x7a, 0xbd, 0xaf, 0x6f, 0x78, 0xea, 0x72, 0xa1, 0xb0, 0xda, 0x7b, 0xb1, 0x6a, 0xde, 0xb2, 0x94, 0x7e, 0x62, 0x63, 0x31, 0xb4, 0x08, 0x81, 0x80, 0x5b, 0x6c, 0xb5, 0x39, 0x84, 0xf8, 0x53, 0x43, 0xa5, 0xa1, 0x61, 0x30, 0xd8, 0x9c, 0xa8, 0xf2, 0x62, 0xc9, 0xce, 0xd0, 0xab, 0x97, 0x63, 0x67, 0xc6, 0xe1, 0xae, 0x13, 0x63, 0xd4, 0xbf, 0x26, 0xaf, 0xd7, 0x64, 0xb7, 0xb7, 0x61, 0xb1, 0x7a, 0x65, 0x7d, 0xaf, 0x9b, 0xb2, 0xc1, 0x66, 0x82, 0xa7, 0xe9, 0xb3, 0xff, 0x67, 0x79, 0xa0, 0x37, 0xb4, 0xb9, 0x68, 0xee, 0x98, 0x67, 0xb5, 0x78, 0x6a, 0x5a, 0x90, 0x87, 0xb6, 0x37, 0x6c, 0x6b, 0x88, 0x46, 0xb7, 0x0b, 0x6e, 0x65, 0x80, 0x2c, 0xb8, 0x60, 0x71, 0x3f, 0x77, 0xf3, 0xb9, 
0x8f, 0x73, 0xf7, 0x6f, 0xe6, 0xbb, 0x10, 0x76, 0xe9, 0x68, 0x24, 0xbc, 0xcd, 0x79, 0xbb, 0x60, 0x5a, 0xbe, 0x9c, 0x7d, 0x67, 0x57, 0xd8, 0xae, 0xf5, 0x5b, 0x70, 0xd9, 0xd8, 0xb2, 0x2c, 0x5c, 0xa2, 0xd0, 0x86, 0xb4, 0x7d, 0x5d, 0x19, 0xc9, 0x39, 0xb6, 0xb1, 0x5d, 0x7b, 0xc2, 0x06, 0xb8, 0x5a, 0x5e, 0x23, 0xba, 0xe0, 0xb9, 0xcc, 0x5e, 0xd6, 0xb3, 0xaf, 0xbb, 0x1f, 0x5f, 0xa0, 0xac, 0x69, 0xbc, 0x56, 0x60, 0xaa, 0xa5, 0x0f, 0xbd, 0x4d, 0x61, 0xef, 0x9d, 0xa8, 0xbd, 0xdf, 0x63, 0x7b, 0x96, 0x21, 0xbe, 0x78, 0x65, 0x1e, 0x8e, 0x5d, 0xbf, 0x20, 0x67, 0x52, 0x85, 0xea, 0xc0, 0x0d, 0x69, 0xaa, 0x7d, 0x96, 0xc1, 0x94, 0x6c, 0x95, 0x75, 0x40, 0xc3, 0x59, 0x6f, 0x89, 0x6d, 0x1c, 0xc4, 0xc4, 0x72, 0xbc, 0x64, 0xfd, 0xc6, 0x66, 0x76, 0x6d, 0x5b, 0xeb, 0xb7, 0x82, 0x56, 0x1f, 0xda, 0xc2, 0xba, 0x73, 0x57, 0x22, 0xd1, 0xf1, 0xbc, 0x9d, 0x57, 0xd5, 0xca, 0xfa, 0xbe, 0x9a, 0x58, 0x73, 0xc4, 0x45, 0xc0, 0x4c, 0x59, 0x0f, 0xbd, 0xad, 0xc1, 0x9a, 0x59, 0xb3, 0xb7, 0x24, 0xc2, 0xda, 0x5a, 0x4e, 0xb0, 0x88, 0xc3, 0xe4, 0x5b, 0x2f, 0xa9, 0xa8, 0xc4, 0xeb, 0x5c, 0x1c, 0xa2, 0xae, 0xc5, 0xb1, 0x5d, 0x48, 0x9b, 0x63, 0xc6, 0x55, 0x5e, 0xa3, 0x93, 0xe8, 0xc6, 0xfb, 0x60, 0x67, 0x8b, 0xe0, 0xc7, 0x92, 0x62, 0xa0, 0x83, 0x73, 0xc8, 0x9d, 0x65, 0x64, 0x7a, 0xdd, 0xca, 0x19, 0x68, 0x76, 0x72, 0x3f, 0xcb, 0xba, 0x6b, 0xe0, 0x69, 0xb3, 0xcd, 0xdf, 0x70, 0x14, 0x5f, 0x31, 0xbf, 0xdc, 0x51, 0x0f, 0xd9, 0xe6, 0xc2, 0x9f, 0x51, 0xfd, 0xd2, 0xf3, 0xc4, 0xd3, 0x52, 0xc5, 0xcc, 0xc1, 0xc6, 0xa9, 0x53, 0x53, 0xc6, 0xc5, 0xc8, 0x5c, 0x53, 0xbc, 0xc0, 0xc3, 0xc9, 0x83, 0x54, 0x5b, 0xba, 0xa2, 0xca, 0x91, 0x54, 0xfd, 0xb4, 0x6f, 0xcb, 0x88, 0x55, 0xb2, 0xae, 0x10, 0xcc, 0x60, 0x56, 0xa7, 0xa7, 0x60, 0xcd, 0x36, 0x57, 0xa2, 0xa0, 0x9b, 0xcd, 0xd2, 0x58, 0xee, 0x99, 0x2e, 0xce, 0x6d, 0x5a, 0x4e, 0x91, 0xaa, 0xcf, 0x08, 0x5c, 0x43, 0x89, 0x84, 0xcf, 0xb1, 0x5e, 0x53, 0x81, 0x33, 0xd0, 0xf0, 0x61, 0x75, 0x78, 0x0a, 0xd2, 0x69, 0x64, 0xb2, 0x6f, 0x18, 0xd4, 0x43, 0x68, 0xab, 0x65, 0x53, 0xc9, 0xa1, 0x4c, 0xea, 
0xdb, 0x1a, 0xcb, 0xc5, 0x4d, 0xdb, 0xd4, 0xb6, 0xcd, 0xaf, 0x4e, 0x8f, 0xce, 0xd4, 0xcf, 0x24, 0x4e, 0xc8, 0xc9, 0x44, 0xd0, 0xac, 0x4e, 0xd2, 0xc3, 0xc5, 0xd1, 0xd8, 0x4f, 0x1b, 0xbe, 0x1c, 0xd2, 0xcc, 0x4f, 0xa4, 0xb8, 0x34, 0xd3, 0xb3, 0x50, 0x3d, 0xb2, 0x39, 0xd4, 0x84, 0x51, 0x14, 0xab, 0xeb, 0xd5, 0x3f, 0x52, 0x04, 0xa5, 0x65, 0xd5, 0xf7, 0x52, 0xfc, 0x9e, 0xb0, 0xd6, 0x79, 0x54, 0x52, 0x97, 0x45, 0xd7, 0x09, 0x55, 0xb2, 0x8f, 0xd4, 0xd7, 0xaa, 0x57, 0xb9, 0x87, 0xa9, 0xd8, 0x5e, 0x59, 0xff, 0x7f, 0x3a, 0xd9, 0x8d, 0x5d, 0x5d, 0x75, 0x75, 0xdb, 0x4b, 0x60, 0xf9, 0x6b, 0x4c, 0x4b, 0xdf, 0xbe, 0x93, 0xaf, 0x32, 0x4c, 0xd1, 0xbf, 0x93, 0xa6, 0xb4, 0x4d, 0x59, 0xc0, 0xcb, 0x9e, 0x6c, 0x4d, 0x1d, 0xc2, 0x7a, 0x95, 0xd1, 0x4c, 0xc1, 0xc4, 0x1a, 0x8d, 0x3f, 0x4c, 0xca, 0xc5, 0x52, 0x84, 0xcc, 0x4c, 0x9e, 0xc6, 0xa8, 0x7c, 0xa5, 0x4c, 0x67, 0xc7, 0xec, 0x74, 0xef, 0x4c, 0x3b, 0xc9, 0x26, 0x6d, 0x4c, 0x4c, 0x2a, 0xca, 0x71, 0x65, 0xc7, 0x4b, 0xfa, 0xcb, 0x80, 0x5e, 0x46, 0x4b, 0x9a, 0xcc, 0x09, 0x56, 0x6c, 0x4a, 0x65, 0xcc, 0xfb, 0x4e, 0x91, 0x4a, 0xfb, 0xcd, 0x2c, 0x48, 0x59, 0x4d, 0x79, 0xcc, 0x3d, 0x42, 0x4e, 0x51, 0x14, 0xcb, 0x3e, 0x3d, 0xac, 0x53, 0x64, 0xcb, 0x4f, 0x39, 0x81, 0x50, 0xdc, 0xb8, 0xf7, 0xb1, 0xb7, 0x52, 0x92, 0xb9, 0x7b, 0xa8, 0xee, 0x53, 0xf6, 0xba, 0x31, 0xa0, 0x6f, 0x54, 0x8c, 0xbb, 0x94, 0x97, 0xd0, 0x54, 0xe3, 0xbd, 0x18, 0x8f, 0x2c, 0x55, 0x2d, 0xbe, 0x98, 0x86, 0x92, 0x55, 0x51, 0xc0, 0x30, 0x7e, 0x1d, 0x55, 0x35, 0xc1, 0xec, 0x76, 0x4d, 0x55, 0x2e, 0xc3, 0x7a, 0x6e, 0x96, 0x55, 0x6b, 0xc4, 0xee, 0x67, 0x2d, 0x55, 0x90, 0xc6, 0x48, 0x5f, 0xf0, 0x55, 0xed, 0xc6, 0x9e, 0x58, 0x44, 0x56, 0x44, 0xc6, 0xfc, 0x50, 0xa6, 0x57, 0x6f, 0xc6, 0xcc, 0x4a, 0x7c, 0x58, 0x9f, 0xc6, 0x89, 0x44, 0x58, 0x5a, 0x21, 0xc6, 0x72, 0x3e, 0xf8, 0x5c, 0x3c, 0xc6, 0x9d, 0x3a, 0xc0, 0x55, 0x91, 0xb3, 0xa3, 0xb4, 0x4e, 0x57, 0xe3, 0xb3, 0xba, 0xab, 0x1e, 0x59, 0xe4, 0xb4, 0x10, 0xa2, 0x78, 0x5b, 0x50, 0xb4, 0xf3, 0x99, 0xde, 0x5c, 0x70, 0xb6, 0x10, 0x91, 0x3a, 0x5d, 
0x10, 0xb7, 0x94, 0x88, 0xaa, 0x5d, 0x8f, 0xb9, 0x24, 0x80, 0x0e, 0x5d, 0x9a, 0xbb, 0x6a, 0x78, 0x32, 0x5d, 0x73, 0xbd, 0xad, 0x70, 0x43, 0x5d, 0xe1, 0xbf, 0xa0, 0x68, 0xaa, 0x5e, 0x4d, 0xc1, 0x34, 0x61, 0x7b, 0x5e, 0xd5, 0xc1, 0xc4, 0x59, 0xb8, 0x5f, 0x61, 0xc2, 0x1b, 0x51, 0xc1, 0x60, 0x70, 0xc2, 0x1b, 0x4b, 0xa4, 0x61, 0x89, 0xc1, 0xfb, 0x45, 0xd1, 0x62, 0xa0, 0xc1, 0xd5, 0x40, 0x0c, 0x64, 0xb4, 0xc1, 0xfc, 0x3b, 0x81, 0x5a, 0x82, 0xae, 0x4b, 0xb7, 0x64, 0x5d, 0x52, 0xad, 0xf7, 0xad, 0xa2, 0x5f, 0xb9, 0xae, 0x07, 0xa4, 0xee, 0x61, 0xe8, 0xae, 0x5b, 0x9c, 0x4d, 0x63, 0xb1, 0xaf, 0x32, 0x93, 0xa4, 0x64, 0xd6, 0xb0, 0x82, 0x8b, 0x15, 0x65, 0x8f, 0xb2, 0x0b, 0x82, 0xa2, 0x65, 0xf8, 0xb4, 0x17, 0x7a, 0xb0, 0x66, 0x22, 0xb6, 0x5a, 0x72, 0xf3, 0x66, 0x89, 0xb8, 0x60, 0x6b, 0x38, 0x67, 0x06, 0xba, 0x4f, 0x63, 0xba, 0x67, 0x8f, 0xbb, 0xac, 0x5c, 0x1d, 0x68, 0x35, 0xbc, 0x62, 0x54, 0x25, 0x69, 0x14, 0xbc, 0xb6, 0x4d, 0x53, 0x6a, 0x27, 0xbc, 0xac, 0x47, 0x82, 0x6b, 0x23, 0xbc, 0x9c, 0x41, 0x8d, 0x6c, 0xe5, 0xbc, 0xe7, 0x3c, 0xaa, 0x60, 0x70, 0xa8, 0x40, 0xbb, 0x24, 0x63, 0x9a, 0xa7, 0x86, 0xb1, 0x08, 0x66, 0x51, 0xa7, 0x51, 0xa8, 0x2a, 0x68, 0xfa, 0xa7, 0x29, 0x9f, 0x6c, 0x6b, 0x1b, 0xa7, 0xe1, 0x96, 0xe8, 0x6c, 0xe6, 0xa8, 0xd6, 0x8e, 0x65, 0x6d, 0xc1, 0xaa, 0x82, 0x86, 0x00, 0x6e, 0x6d, 0xac, 0x5a, 0x7d, 0xc1, 0x6e, 0xb9, 0xae, 0xb6, 0x75, 0xdf, 0x6f, 0x14, 0xb0, 0xef, 0x6e, 0x02, 0x6f, 0xe0, 0xb2, 0xc1, 0x66, 0x43, 0x70, 0x85, 0xb4, 0x84, 0x5e, 0xc5, 0x71, 0x47, 0xb5, 0xa9, 0x57, 0x1c, 0x72, 0x0a, 0xb6, 0x9f, 0x4f, 0xa2, 0x73, 0x40, 0xb7, 0x13, 0x49, 0xe6, 0x74, 0x73, 0xb7, 0x7e, 0x44, 0x20, 0x76, 0x1d, 0xb8, 0x21, 0x3e, 0x10, 0x66, 0xd7, 0xa1, 0xe9, 0xbe, 0xaf, 0x6a, 0x2e, 0xa1, 0x29, 0xb4, 0xeb, 0x6d, 0x60, 0xa0, 0x95, 0xab, 0xb7, 0x70, 0x4d, 0xa0, 0x37, 0xa2, 0xcb, 0x72, 0xae, 0xa0, 0x8f, 0x9a, 0x45, 0x74, 0xc1, 0xa1, 0x3f, 0x91, 0xeb, 0x76, 0x17, 0xa2, 0xaf, 0x89, 0xa4, 0x77, 0x23, 0xa4, 0x4c, 0x81, 0x61, 0x77, 0xbe, 0xa6, 0x55, 0x79, 0x9b, 0x78, 0x1a, 0xa8, 0x86, 
0x71, 0xeb, 0x78, 0xad, 0xaa, 0x91, 0x69, 0xd8, 0x79, 0x4c, 0xac, 0x8d, 0x61, 0xe1, 0x7a, 0x40, 0xae, 0x56, 0x5a, 0x65, 0x7b, 0x36, 0xaf, 0xd9, 0x52, 0xe5, 0x7c, 0x77, 0xb1, 0x09, 0x4c, 0xb4, 0x7d, 0xda, 0xb2, 0x0a, 0x47, 0x0c, 0x7f, 0x5f, 0xb3, 0x03, 0x40, 0xbb, 0x6d, 0xfa, 0x9b, 0x3e, 0xc2, 0x88, 0x71, 0xfa, 0x9a, 0x33, 0xb9, 0x07, 0x75, 0xc5, 0x99, 0x53, 0xaf, 0xfb, 0x78, 0xc8, 0x98, 0xf7, 0xa7, 0x36, 0x7b, 0x7d, 0x98, 0xd8, 0x9e, 0x81, 0x7d, 0x63, 0x99, 0x9b, 0x96, 0x1b, 0x7e, 0xfe, 0x9a, 0x9f, 0x8d, 0xc0, 0x80, 0x0d, 0x9c, 0x3a, 0x85, 0x76, 0x80, 0xf2, 0x9e, 0x0a, 0x7d, 0x61, 0x81, 0x7e, 0xa0, 0x39, 0x75, 0xb8, 0x81, 0xfb, 0xa2, 0x4c, 0x6d, 0xf6, 0x82, 0x8d, 0xa4, 0x2e, 0x65, 0xac, 0x83, 0x51, 0xa6, 0x31, 0x5d, 0xec, 0x84, 0x8d, 0xa8, 0x3d, 0x56, 0xd1, 0x85, 0xae, 0xa9, 0xfd, 0x4f, 0xc5, 0x87, 0x19, 0xab, 0x9d, 0x49, 0xf7, 0x88, 0x9e, 0xad, 0x30, 0x43, 0xbf, 0x76, 0xbe, 0x93, 0xaa, 0xc6, 0xaf, 0x7a, 0xed, 0x92, 0x91, 0xbd, 0x3b, 0x7e, 0x81, 0x91, 0xf0, 0xb4, 0x77, 0x81, 0xa7, 0x91, 0x84, 0xab, 0xbf, 0x84, 0x71, 0x91, 0x57, 0xa2, 0xff, 0x86, 0x7a, 0x91, 0xeb, 0x9a, 0x88, 0x88, 0x1c, 0x92, 0xcb, 0x92, 0x37, 0x89, 0x29, 0x94, 0x3d, 0x89, 0xec, 0x8a, 0x04, 0x95, 0xe1, 0x81, 0xaa, 0x8a, 0xcb, 0x97, 0xe6, 0x79, 0xc5, 0x8b, 0x63, 0x9a, 0x0f, 0x72, 0x12, 0x8c, 0x24, 0x9c, 0x13, 0x6a, 0x07, 0x8c, 0xe1, 0x9d, 0xd6, 0x61, 0xc2, 0x8e, 0x13, 0xa0, 0x32, 0x5a, 0x6d, 0x8f, 0x54, 0xa2, 0x71, 0x53, 0x95, 0x90, 0x96, 0xa4, 0x85, 0x4d, 0x3f, 0x92, 0x1d, 0xa6, 0xb0, 0x46, 0xaa, 0x80, 0x4d, 0x8b, 0xec, 0xcb, 0x19, 0x84, 0x4e, 0x8b, 0x02, 0xc1, 0xeb, 0x87, 0xba, 0x8a, 0x75, 0xb9, 0x38, 0x8a, 0xfb, 0x89, 0xfc, 0xb0, 0x83, 0x8d, 0xa3, 0x8a, 0x0d, 0xa7, 0xde, 0x90, 0x08, 0x8a, 0x4f, 0x9f, 0x48, 0x91, 0x7f, 0x8b, 0x2b, 0x96, 0xf9, 0x92, 0xc2, 0x8c, 0x25, 0x8e, 0xb1, 0x93, 0x73, 0x8d, 0xbf, 0x86, 0x75, 0x94, 0x30, 0x8f, 0x87, 0x7e, 0x3d, 0x95, 0x0d, 0x91, 0x9b, 0x76, 0x55, 0x95, 0xd7, 0x93, 0xbe, 0x6e, 0x97, 0x96, 0xe5, 0x95, 0xe7, 0x66, 0xbc, 0x97, 0xef, 0x97, 0xfc, 0x5e, 0xee, 0x99, 
0x56, 0x9a, 0x83, 0x57, 0xd1, 0x9a, 0xab, 0x9c, 0xe0, 0x50, 0xae, 0x9c, 0x14, 0x9f, 0x89, 0x49, 0xa8, 0x89, 0xb9, 0x84, 0x52, 0xcf, 0x98, 0x8d, 0x98, 0x83, 0xb8, 0xc6, 0xc5, 0x91, 0x3d, 0x83, 0x2f, 0xbe, 0x39, 0x94, 0x7a, 0x82, 0xd8, 0xb5, 0x67, 0x97, 0x43, 0x82, 0xc1, 0xac, 0xad, 0x99, 0x69, 0x83, 0x1d, 0xa4, 0x2d, 0x9b, 0x11, 0x83, 0xbd, 0x9b, 0xd1, 0x9c, 0x49, 0x84, 0x93, 0x93, 0x8f, 0x9d, 0x29, 0x85, 0xbb, 0x8b, 0x74, 0x9d, 0xcc, 0x87, 0x2a, 0x83, 0x72, 0x9e, 0xc2, 0x89, 0x3b, 0x7b, 0x6d, 0x9f, 0xd6, 0x8b, 0x87, 0x73, 0x77, 0xa0, 0xe8, 0x8d, 0xdb, 0x6b, 0x90, 0xa2, 0x01, 0x90, 0x2a, 0x63, 0x9c, 0xa3, 0x47, 0x92, 0xa9, 0x5c, 0x40, 0xa4, 0xb2, 0x95, 0x4a, 0x55, 0x1e, 0xa6, 0x19, 0x98, 0x14, 0x4d, 0x9f, 0x94, 0x46, 0x7c, 0x4b, 0xd3, 0xe0, 0x98, 0x10, 0x7b, 0xdc, 0xcb, 0x10, 0x9b, 0x76, 0x7b, 0x77, 0xc2, 0xb4, 0x9e, 0x82, 0x7b, 0x30, 0xba, 0x06, 0xa1, 0x46, 0x7b, 0x03, 0xb1, 0x26, 0xa3, 0x1b, 0x7b, 0x8a, 0xa8, 0xe0, 0xa4, 0xbc, 0x7c, 0x1f, 0xa0, 0xa9, 0xa5, 0xc3, 0x7d, 0x26, 0x98, 0x7f, 0xa6, 0xb0, 0x7e, 0x2d, 0x90, 0x53, 0xa7, 0x56, 0x7f, 0x74, 0x88, 0x62, 0xa8, 0x05, 0x80, 0x80, 0x80, 0x80, 0xa9, 0x47, 0x83, 0x66, 0x78, 0x86, 0xaa, 0x65, 0x85, 0xf3, 0x70, 0x9d, 0xab, 0xba, 0x88, 0x78, 0x68, 0xe8, 0xac, 0xfe, 0x8a, 0xc7, 0x61, 0x2b, 0xae, 0x41, 0x8d, 0xb5, 0x59, 0xa9, 0xaf, 0xba, 0x90, 0xd6, 0x51, 0xaa, 0x9f, 0xf5, 0x72, 0xfe, 0xd7, 0xfc, 0xa3, 0x12, 0x73, 0x37, 0xcf, 0x12, 0xa5, 0xe0, 0x73, 0x5a, 0xc6, 0xcc, 0xa8, 0x8d, 0x73, 0x71, 0xbe, 0x7b, 0xaa, 0xd5, 0x73, 0x8f, 0xb5, 0xf0, 0xac, 0xd1, 0x73, 0xd1, 0xad, 0x98, 0xae, 0x43, 0x74, 0x78, 0xa5, 0x97, 0xaf, 0x7a, 0x75, 0x51, 0x9d, 0x8d, 0xb0, 0x45, 0x76, 0xa5, 0x95, 0x74, 0xb1, 0x04, 0x78, 0x0d, 0x8d, 0x6e, 0xb1, 0xc4, 0x79, 0x92, 0x85, 0x90, 0xb2, 0xa5, 0x7b, 0x59, 0x7d, 0xa7, 0xb3, 0xd7, 0x7d, 0xf8, 0x75, 0xa2, 0xb5, 0x2f, 0x80, 0x90, 0x6d, 0xc3, 0xb6, 0xc3, 0x83, 0x3c, 0x66, 0x19, 0xb8, 0x3c, 0x85, 0xe6, 0x5e, 0x5d, 0xb9, 0x9d, 0x89, 0x2b, 0x56, 0x38, 0xa9, 0xe1, 0x6a, 0xa5, 0xdb, 0xd4, 0xad, 0x30, 0x6b, 0x67, 
0xd2, 0x54, 0xaf, 0xdd, 0x6b, 0xbf, 0xca, 0x2b, 0xb2, 0x48, 0x6b, 0xdb, 0xc2, 0x2f, 0xb4, 0x3b, 0x6c, 0x26, 0xba, 0x22, 0xb5, 0xf6, 0x6c, 0x7e, 0xb2, 0x16, 0xb7, 0x51, 0x6d, 0x2c, 0xaa, 0x3d, 0xb8, 0x83, 0x6d, 0xf0, 0xa2, 0x72, 0xb9, 0x6b, 0x6f, 0x17, 0x9a, 0x89, 0xba, 0x36, 0x70, 0x65, 0x92, 0x8f, 0xba, 0xf5, 0x72, 0x30, 0x8a, 0xac, 0xbb, 0xb4, 0x73, 0xfb, 0x82, 0xd7, 0xbc, 0xc4, 0x76, 0x50, 0x7a, 0xe1, 0xbd, 0xfb, 0x78, 0xe8, 0x72, 0xe3, 0xbf, 0x86, 0x7b, 0x9f, 0x6b, 0x03, 0xc1, 0x55, 0x7e, 0x6c, 0x63, 0x21, 0xc3, 0x1a, 0x81, 0xb4, 0x5a, 0xd1, 0xb3, 0xa1, 0x63, 0x5a, 0xdd, 0x0b, 0xb6, 0x8e, 0x64, 0x1e, 0xd4, 0x6c, 0xb9, 0x01, 0x64, 0x72, 0xcc, 0x99, 0xbb, 0x30, 0x64, 0xa0, 0xc5, 0x16, 0xbd, 0x1b, 0x64, 0xef, 0xbd, 0x9e, 0xbe, 0x91, 0x65, 0x88, 0xb6, 0x27, 0xbf, 0xf0, 0x66, 0x27, 0xae, 0xb4, 0xc1, 0x06, 0x67, 0x26, 0xa7, 0x46, 0xc2, 0x0b, 0x68, 0x1e, 0x9f, 0xc9, 0xc2, 0xac, 0x69, 0x8d, 0x98, 0x20, 0xc3, 0x56, 0x6a, 0xea, 0x90, 0x67, 0xc4, 0x23, 0x6c, 0xe2, 0x88, 0x3d, 0xc5, 0x04, 0x6e, 0xc1, 0x80, 0x35, 0xc6, 0x7e, 0x71, 0x8e, 0x78, 0x05, 0xc7, 0xc1, 0x74, 0x3f, 0x6f, 0xf6, 0xc9, 0x1c, 0x77, 0x71, 0x67, 0xc9, 0xca, 0xdc, 0x7a, 0xf5, 0x5e, 0xb9, 0xbc, 0xc4, 0x5d, 0x86, 0xdd, 0xf0, 0xbf, 0x52, 0x5d, 0xec, 0xd5, 0xe0, 0xc1, 0x95, 0x5e, 0x16, 0xce, 0x93, 0xc3, 0x93, 0x5e, 0x4f, 0xc7, 0xb3, 0xc5, 0x6f, 0x5e, 0x79, 0xc0, 0xd9, 0xc6, 0xc0, 0x5e, 0xf8, 0xb9, 0xf0, 0xc7, 0xf3, 0x5f, 0x80, 0xb2, 0xf4, 0xc9, 0x07, 0x60, 0x59, 0xab, 0xdd, 0xc9, 0xfb, 0x61, 0x8a, 0xa4, 0xb2, 0xca, 0xc5, 0x62, 0xd9, 0x9d, 0x5f, 0xcb, 0x5a, 0x64, 0x65, 0x95, 0xce, 0xcb, 0xf8, 0x66, 0x07, 0x8e, 0x0c, 0xcc, 0xb1, 0x68, 0x0f, 0x85, 0xdc, 0xcd, 0xb4, 0x6a, 0x4a, 0x7d, 0x9d, 0xcf, 0x6a, 0x6d, 0x41, 0x75, 0x11, 0xd1, 0x2f, 0x70, 0x60, 0x6c, 0x7c, 0xd2, 0xa9, 0x74, 0x82, 0x62, 0x0c, 0xc5, 0xbc, 0x57, 0xbc, 0xde, 0x0c, 0xc8, 0x1e, 0x58, 0x4f, 0xd7, 0x30, 0xca, 0x48, 0x58, 0xe7, 0xd0, 0x9a, 0xcb, 0xfb, 0x59, 0x31, 0xca, 0x32, 0xcd, 0x8b, 0x59, 0x5f, 0xc3, 0xd1, 0xce, 0xd7, 0x59, 0xaf, 0xbd, 0x68, 0xcf, 
0xd4, 0x5a, 0x3d, 0xb6, 0xe0, 0xd0, 0xd0, 0x5a, 0xcc, 0xb0, 0x4a, 0xd1, 0xac, 0x5b, 0xcb, 0xa9, 0x69, 0xd2, 0x81, 0x5c, 0xd0, 0xa2, 0x71, 0xd3, 0x37, 0x5e, 0x10, 0x9b, 0x17, 0xd3, 0xe7, 0x5f, 0x71, 0x93, 0x91, 0xd4, 0x8f, 0x61, 0x4e, 0x8b, 0xa4, 0xd5, 0x3b, 0x63, 0x7d, 0x83, 0x6d, 0xd6, 0x57, 0x66, 0x2b, 0x7a, 0xc3, 0xd7, 0xf1, 0x69, 0x3f, 0x71, 0xdc, 0xd9, 0xec, 0x6c, 0xe4, 0x68, 0x4f, 0xcf, 0x76, 0x51, 0xcf, 0xde, 0x40, 0xd1, 0x92, 0x52, 0x95, 0xd8, 0x6a, 0xd3, 0xae, 0x53, 0x06, 0xd2, 0xbe, 0xd5, 0x44, 0x53, 0x64, 0xcc, 0xe8, 0xd6, 0xa2, 0x53, 0xaf, 0xc7, 0x02, 0xd7, 0xae, 0x54, 0x40, 0xc0, 0xf8, 0xd8, 0xae, 0x54, 0xd7, 0xba, 0xd1, 0xd9, 0x6a, 0x55, 0x91, 0xb4, 0x7b, 0xda, 0x19, 0x56, 0x56, 0xad, 0xfd, 0xda, 0xd5, 0x57, 0x2b, 0xa7, 0x55, 0xdb, 0x86, 0x58, 0x06, 0xa0, 0x98, 0xdc, 0x2f, 0x59, 0x41, 0x99, 0x4d, 0xdc, 0xe6, 0x5a, 0x82, 0x91, 0xf7, 0xdd, 0x71, 0x5c, 0xa1, 0x89, 0xb6, 0xde, 0x54, 0x5e, 0xfe, 0x81, 0x3d, 0xdf, 0x47, 0x62, 0x12, 0x77, 0xdb, 0xe0, 0xd7, 0x65, 0x62, 0x6e, 0x2b, 0x53, 0x47, 0xc3, 0xc7, 0xb3, 0xac, 0x54, 0x42, 0xc4, 0xcc, 0xab, 0x4e, 0x54, 0x8a, 0xc6, 0x40, 0xa3, 0x25, 0x54, 0x73, 0xc7, 0xc3, 0x9a, 0xb0, 0x54, 0x14, 0xc9, 0x3b, 0x92, 0x09, 0x53, 0xe4, 0xca, 0x81, 0x89, 0x8e, 0x53, 0xae, 0xcb, 0xb9, 0x81, 0x19, 0x53, 0x8a, 0xcc, 0xd4, 0x79, 0x6b, 0x53, 0x61, 0xcd, 0xcd, 0x71, 0xd7, 0x53, 0x21, 0xce, 0xed, 0x6a, 0xa8, 0x52, 0xfb, 0xcf, 0xe4, 0x63, 0xa7, 0x52, 0xcc, 0xd0, 0x6b, 0x5c, 0xae, 0x52, 0x1a, 0xd0, 0xdd, 0x55, 0x92, 0x52, 0xab, 0xd0, 0xbb, 0x4e, 0xb7, 0x54, 0x3e, 0xcf, 0xd4, 0x48, 0x55, 0x54, 0xb8, 0xcf, 0x6c, 0x41, 0xcb, 0x56, 0xf8, 0xce, 0xfe, 0x3c, 0xc2, 0x58, 0x9d, 0xbe, 0x5e, 0xb6, 0x1c, 0x5a, 0x7e, 0xbe, 0xb3, 0xad, 0x54, 0x5b, 0x63, 0xbf, 0xc4, 0xa5, 0x05, 0x5b, 0xde, 0xc1, 0x07, 0x9c, 0x94, 0x5c, 0x00, 0xc2, 0x6b, 0x93, 0xd5, 0x5c, 0x1d, 0xc3, 0xc8, 0x8b, 0x37, 0x5c, 0x36, 0xc5, 0x1a, 0x82, 0xac, 0x5c, 0x1e, 0xc6, 0xa6, 0x7a, 0xc3, 0x5b, 0xf8, 0xc8, 0x25, 0x73, 0x1e, 0x5c, 0x05, 0xc9, 0x7f, 0x6b, 0xe8, 0x5c, 0x1d, 0xca, 0xc7, 
0x65, 0x0d, 0x5c, 0x2e, 0xcb, 0xc5, 0x5e, 0x2f, 0x5c, 0x64, 0xcb, 0xc2, 0x56, 0xb4, 0x5c, 0xa5, 0xcb, 0xb5, 0x4f, 0x77, 0x5d, 0xa3, 0xcb, 0x1c, 0x49, 0x89, 0x5e, 0xa5, 0xca, 0x8b, 0x43, 0x92, 0x60, 0x13, 0xca, 0x40, 0x3e, 0x2a, 0x5d, 0x53, 0xb9, 0x16, 0xb8, 0xd8, 0x5f, 0xb4, 0xb9, 0x0e, 0xaf, 0x86, 0x61, 0x24, 0xb9, 0xbe, 0xa7, 0x18, 0x62, 0x72, 0xba, 0x85, 0x9e, 0xa2, 0x63, 0x52, 0xbb, 0xa8, 0x95, 0xdd, 0x63, 0xf3, 0xbc, 0xf5, 0x8d, 0x28, 0x64, 0x4c, 0xbe, 0x7c, 0x84, 0x7a, 0x64, 0x60, 0xc0, 0x57, 0x7c, 0x4c, 0x64, 0x40, 0xc2, 0x3a, 0x74, 0xbe, 0x64, 0x4d, 0xc3, 0xe2, 0x6d, 0x6b, 0x64, 0x94, 0xc5, 0x5a, 0x66, 0x8f, 0x64, 0xbb, 0xc6, 0xcb, 0x5f, 0xe4, 0x65, 0x0e, 0xc6, 0xd1, 0x58, 0x32, 0x65, 0x5b, 0xc6, 0xf3, 0x50, 0x81, 0x66, 0x3f, 0xc6, 0x72, 0x4a, 0x9f, 0x67, 0x27, 0xc5, 0xf6, 0x44, 0xad, 0x68, 0x51, 0xc5, 0x95, 0x3e, 0xe1, 0x62, 0x1b, 0xb4, 0x0b, 0xbc, 0x2b, 0x64, 0xc3, 0xb3, 0xa5, 0xb2, 0x22, 0x66, 0xde, 0xb3, 0xc3, 0xa9, 0x5e, 0x68, 0xd9, 0xb3, 0xff, 0xa0, 0xe2, 0x6a, 0x5c, 0xb4, 0xd9, 0x98, 0x38, 0x6b, 0xac, 0xb5, 0xd6, 0x8f, 0x8e, 0x6c, 0x36, 0xb7, 0x5c, 0x87, 0x15, 0x6c, 0x9a, 0xb9, 0x03, 0x7e, 0xb3, 0x6c, 0xa3, 0xbb, 0x43, 0x77, 0x11, 0x6c, 0x93, 0xbd, 0x80, 0x6f, 0x7d, 0x6c, 0xe7, 0xbf, 0x77, 0x68, 0x5d, 0x6d, 0x32, 0xc1, 0x19, 0x61, 0x98, 0x6d, 0xa1, 0xc1, 0xa9, 0x5a, 0x13, 0x6e, 0x17, 0xc1, 0xfb, 0x52, 0x32, 0x6e, 0xcb, 0xc1, 0xa1, 0x4b, 0xde, 0x6f, 0x8c, 0xc1, 0x1c, 0x45, 0xe0, 0x70, 0x55, 0xc0, 0xa4, 0x3f, 0xd9, 0x66, 0xe1, 0xaf, 0x17, 0xc0, 0xc3, 0x6a, 0x41, 0xae, 0x17, 0xb5, 0x7d, 0x6c, 0xf9, 0xad, 0x93, 0xac, 0x2b, 0x6f, 0x67, 0xad, 0x7a, 0xa3, 0xa2, 0x71, 0x7c, 0xad, 0xea, 0x9b, 0x09, 0x73, 0x41, 0xae, 0xc7, 0x92, 0x56, 0x74, 0x35, 0xb0, 0x2b, 0x89, 0xf1, 0x74, 0xdc, 0xb1, 0xa4, 0x81, 0xb5, 0x75, 0x20, 0xb3, 0xab, 0x79, 0xf8, 0x75, 0x36, 0xb5, 0xd4, 0x72, 0x6d, 0x75, 0x8e, 0xb7, 0xca, 0x6b, 0x07, 0x75, 0xe6, 0xb9, 0xb7, 0x63, 0xe3, 0x76, 0x4c, 0xbb, 0x1f, 0x5c, 0xa2, 0x76, 0xe0, 0xbb, 0xcd, 0x54, 0xf8, 0x77, 0x9f, 0xbc, 0x29, 0x4e, 0x12, 0x78, 
0xbd, 0xbc, 0x3b, 0x48, 0x44, 0x7a, 0x14, 0xbc, 0x5a, 0x41, 0x9e, 0x6d, 0x76, 0xa8, 0xd5, 0xc3, 0x8c, 0x70, 0xec, 0xa7, 0xb7, 0xb9, 0x44, 0x74, 0x42, 0xa6, 0xc6, 0xaf, 0xba, 0x76, 0xd9, 0xa6, 0x8e, 0xa7, 0x02, 0x79, 0x31, 0xa6, 0x89, 0x9e, 0x5a, 0x7b, 0x03, 0xa7, 0x4f, 0x95, 0xdf, 0x7c, 0x80, 0xa8, 0x58, 0x8d, 0x78, 0x7d, 0x59, 0xa9, 0xe8, 0x85, 0x4a, 0x7d, 0xea, 0xab, 0xb0, 0x7d, 0x53, 0x7e, 0x04, 0xad, 0xee, 0x75, 0xb9, 0x7e, 0x1a, 0xb0, 0x22, 0x6e, 0x18, 0x7e, 0xa5, 0xb1, 0xf3, 0x66, 0xa3, 0x7f, 0x0e, 0xb3, 0xc6, 0x5f, 0x68, 0x7f, 0xec, 0xb4, 0xda, 0x58, 0x2f, 0x80, 0xae, 0xb5, 0xce, 0x50, 0xde, 0x81, 0xef, 0xb6, 0x92, 0x4a, 0xf3, 0x83, 0x61, 0xb7, 0x62, 0x44, 0x73, 0x74, 0xb0, 0xa2, 0x23, 0xc6, 0xe8, 0x78, 0xbd, 0xa0, 0xca, 0xbc, 0xa9, 0x7b, 0xfc, 0xa0, 0x02, 0xb3, 0x91, 0x7e, 0xc3, 0x9f, 0x8f, 0xaa, 0xaa, 0x81, 0x38, 0x9f, 0x50, 0xa1, 0xd7, 0x83, 0x0d, 0x9f, 0xe9, 0x99, 0x71, 0x84, 0xb4, 0xa0, 0xb2, 0x91, 0x31, 0x85, 0xd1, 0xa2, 0x30, 0x89, 0x13, 0x86, 0xb5, 0xa3, 0xc5, 0x80, 0xf4, 0x87, 0x1b, 0xa5, 0xda, 0x79, 0x64, 0x87, 0x4e, 0xa8, 0x05, 0x71, 0xea, 0x87, 0xb1, 0xa9, 0xf8, 0x6a, 0x26, 0x88, 0x16, 0xab, 0xda, 0x62, 0x74, 0x88, 0xe9, 0xad, 0x90, 0x5b, 0x37, 0x89, 0xdc, 0xae, 0xf9, 0x54, 0x08, 0x8b, 0x03, 0xb0, 0x48, 0x4d, 0x71, 0x8c, 0x89, 0xb1, 0x9e, 0x47, 0x36, 0x7d, 0x4d, 0x9a, 0xb7, 0xca, 0xcc, 0x81, 0xba, 0x99, 0x36, 0xc0, 0xd0, 0x84, 0xca, 0x98, 0x97, 0xb7, 0xff, 0x87, 0xb4, 0x98, 0x11, 0xaf, 0x33, 0x8a, 0x2c, 0x97, 0xfe, 0xa6, 0x7f, 0x8c, 0x51, 0x98, 0x38, 0x9d, 0xeb, 0x8d, 0xba, 0x99, 0x1b, 0x95, 0x9c, 0x8e, 0xdb, 0x9a, 0x46, 0x8d, 0x55, 0x8f, 0x94, 0x9b, 0xf9, 0x85, 0x25, 0x90, 0x28, 0x9d, 0xdb, 0x7d, 0x2d, 0x90, 0x79, 0xa0, 0x0d, 0x75, 0xa5, 0x90, 0xe3, 0xa2, 0x0f, 0x6e, 0x20, 0x91, 0x98, 0xa3, 0xad, 0x66, 0x27, 0x92, 0x54, 0xa5, 0x73, 0x5e, 0x96, 0x93, 0x82, 0xa7, 0x68, 0x57, 0xbc, 0x94, 0xa4, 0xa9, 0x1f, 0x50, 0xcd, 0x96, 0x1b, 0xaa, 0xf8, 0x4a, 0x2d, 0x85, 0xd4, 0x93, 0x5f, 0xcf, 0x44, 0x89, 0xe5, 0x92, 0x1c, 0xc5, 0x81, 0x8d, 0x5d, 0x91, 0x28, 
0xbc, 0x82, 0x90, 0x5f, 0x90, 0x90, 0xb3, 0x98, 0x93, 0x04, 0x90, 0x8b, 0xaa, 0xee, 0x95, 0x4c, 0x90, 0xd1, 0xa2, 0x66, 0x96, 0xd5, 0x91, 0x89, 0x9a, 0x16, 0x98, 0x07, 0x92, 0x66, 0x91, 0xd2, 0x98, 0xd7, 0x93, 0xe2, 0x89, 0xb4, 0x99, 0x83, 0x95, 0x85, 0x81, 0xaa, 0x9a, 0x1e, 0x97, 0x8c, 0x79, 0xf5, 0x9a, 0x80, 0x99, 0xb9, 0x72, 0x79, 0x9b, 0x30, 0x9b, 0xb0, 0x6a, 0xb2, 0x9c, 0x0e, 0x9d, 0x57, 0x62, 0xa3, 0x9d, 0x3d, 0x9f, 0x70, 0x5b, 0x42, 0x9e, 0x99, 0xa1, 0xa2, 0x54, 0x5d, 0xa0, 0x19, 0xa3, 0xda, 0x4d, 0x2d, 0x8f, 0x02, 0x8b, 0xe9, 0xd3, 0x3f, 0x93, 0x25, 0x8a, 0xea, 0xca, 0x0e, 0x96, 0xd2, 0x8a, 0x17, 0xc1, 0x4c, 0x99, 0xed, 0x89, 0xa6, 0xb8, 0x82, 0x9c, 0xd4, 0x89, 0x49, 0xaf, 0xbb, 0x9e, 0xce, 0x89, 0xa4, 0xa7, 0x48, 0xa0, 0x87, 0x8a, 0x15, 0x9e, 0xe0, 0xa1, 0x7e, 0x8a, 0xf3, 0x96, 0x9f, 0xa2, 0x53, 0x8b, 0xf5, 0x8e, 0x6b, 0xa2, 0xe2, 0x8d, 0x81, 0x86, 0x6f, 0xa3, 0x7e, 0x8f, 0x39, 0x7e, 0x73, 0xa4, 0x59, 0x91, 0x53, 0x76, 0xad, 0xa5, 0x08, 0x93, 0x70, 0x6f, 0x1a, 0xa6, 0x08, 0x95, 0x97, 0x67, 0x64, 0xa7, 0x01, 0x97, 0x92, 0x5f, 0xa9, 0xa8, 0x60, 0x9a, 0x17, 0x58, 0x75, 0xa9, 0xca, 0x9c, 0x8c, 0x50, 0xbd, 0x99, 0xa7, 0x84, 0x3d, 0xd7, 0x5a, 0x9d, 0x5e, 0x83, 0xb9, 0xce, 0x20, 0xa0, 0x9a, 0x83, 0x20, 0xc5, 0xbe, 0xa3, 0x94, 0x82, 0xa8, 0xbd, 0x3f, 0xa6, 0x43, 0x82, 0x57, 0xb4, 0x6e, 0xa8, 0x50, 0x82, 0x69, 0xab, 0xed, 0xa9, 0xc2, 0x82, 0xdd, 0xa3, 0xa1, 0xaa, 0xd3, 0x83, 0x9e, 0x9b, 0x6a, 0xab, 0x9e, 0x84, 0x9e, 0x93, 0x42, 0xac, 0x50, 0x85, 0xcf, 0x8b, 0x4a, 0xac, 0xed, 0x87, 0x2a, 0x83, 0x71, 0xad, 0xc5, 0x89, 0x32, 0x7b, 0x93, 0xae, 0xb8, 0x8b, 0x82, 0x73, 0xbc, 0xaf, 0xc9, 0x8d, 0xb8, 0x6b, 0xfc, 0xb1, 0x04, 0x8f, 0xce, 0x64, 0x3c, 0xb2, 0x3f, 0x92, 0x47, 0x5c, 0xc0, 0xb3, 0xaf, 0x95, 0x45, 0x54, 0xae, 0xa4, 0x8c, 0x7c, 0x2b, 0xdb, 0x77, 0xa7, 0xa3, 0x7c, 0x1b, 0xd2, 0x75, 0xaa, 0x9e, 0x7b, 0xc9, 0xca, 0x00, 0xad, 0x79, 0x7b, 0x6f, 0xc1, 0xa5, 0xaf, 0xe9, 0x7b, 0x31, 0xb9, 0x1d, 0xb2, 0x0e, 0x7a, 0xfb, 0xb0, 0x96, 0xb3, 0x43, 0x7b, 0x7d, 0xa8, 0x72, 0xb4, 
0x59, 0x7c, 0x06, 0xa0, 0x56, 0xb4, 0xfd, 0x7d, 0x35, 0x98, 0x38, 0xb5, 0x8b, 0x7e, 0x5f, 0x90, 0x15, 0xb6, 0x52, 0x7f, 0x8d, 0x88, 0x44, 0xb7, 0x16, 0x80, 0x80, 0x80, 0x80, 0xb8, 0x39, 0x83, 0x4d, 0x78, 0xa3, 0xb9, 0x45, 0x85, 0xc0, 0x70, 0xd1, 0xba, 0xd0, 0x88, 0x3c, 0x69, 0x1c, 0xbc, 0x65, 0x8a, 0x8f, 0x61, 0x59, 0xbd, 0xf0, 0x8d, 0x76, 0x59, 0x37, 0xae, 0xd4, 0x73, 0xe1, 0xdf, 0x32, 0xb1, 0xe2, 0x73, 0xf4, 0xd6, 0x29, 0xb4, 0x99, 0x73, 0xe9, 0xcd, 0xb6, 0xb7, 0x0f, 0x73, 0xbe, 0xc5, 0x69, 0xb9, 0x40, 0x73, 0x9f, 0xbd, 0x26, 0xba, 0xfd, 0x73, 0xb3, 0xb4, 0xf0, 0xbc, 0x6d, 0x74, 0x07, 0xac, 0xe8, 0xbd, 0x79, 0x74, 0xb1, 0xa5, 0x0e, 0xbe, 0x59, 0x75, 0x93, 0x9d, 0x2e, 0xbe, 0xf4, 0x76, 0xdb, 0x95, 0x3e, 0xbf, 0x91, 0x78, 0x3b, 0x8d, 0x53, 0xc0, 0x58, 0x79, 0xb5, 0x85, 0x8b, 0xc1, 0x3e, 0x7b, 0x65, 0x7d, 0xb7, 0xc2, 0x6e, 0x7d, 0xdf, 0x75, 0xc3, 0xc3, 0xc1, 0x80, 0x60, 0x6d, 0xde, 0xc5, 0x5a, 0x83, 0x4d, 0x66, 0x0f, 0xc7, 0x01, 0x86, 0x5d, 0x5d, 0xa0, 0xb8, 0x7b, 0x6c, 0x2e, 0xe1, 0x80, 0xbb, 0x5d, 0x6c, 0x5d, 0xd8, 0xa5, 0xbd, 0xe7, 0x6c, 0x70, 0xd0, 0x79, 0xc0, 0x24, 0x6c, 0x61, 0xc8, 0x80, 0xc2, 0x3b, 0x6c, 0x4c, 0xc0, 0xae, 0xc3, 0xaa, 0x6c, 0x94, 0xb8, 0xfb, 0xc4, 0xf4, 0x6c, 0xde, 0xb1, 0x49, 0xc5, 0xf8, 0x6d, 0x95, 0xa9, 0xa7, 0xc6, 0xe7, 0x6e, 0x56, 0xa2, 0x02, 0xc7, 0xa7, 0x6f, 0x7d, 0x9a, 0x3d, 0xc8, 0x52, 0x70, 0xcc, 0x92, 0x74, 0xc9, 0x08, 0x72, 0x86, 0x8a, 0xa6, 0xc9, 0xc5, 0x74, 0x3a, 0x82, 0xd9, 0xca, 0xe1, 0x76, 0x85, 0x7a, 0xe2, 0xcc, 0x2c, 0x79, 0x0c, 0x72, 0xe8, 0xcd, 0x90, 0x7b, 0xf4, 0x6a, 0xcd, 0xcf, 0x50, 0x7f, 0x62, 0x61, 0xfb, 0xc2, 0x1c, 0x65, 0x4d, 0xe2, 0x81, 0xc4, 0xa3, 0x65, 0x5d, 0xda, 0x34, 0xc6, 0xde, 0x65, 0x5b, 0xd2, 0xb0, 0xc8, 0xda, 0x65, 0x66, 0xcb, 0x56, 0xca, 0xa3, 0x65, 0x6e, 0xc4, 0x0f, 0xcc, 0x15, 0x65, 0xad, 0xbc, 0xd0, 0xcd, 0x27, 0x66, 0x2f, 0xb5, 0x8a, 0xce, 0x23, 0x66, 0xce, 0xae, 0x3f, 0xcf, 0x00, 0x67, 0xd0, 0xa6, 0xee, 0xcf, 0xd4, 0x68, 0xc9, 0x9f, 0x8d, 0xd0, 0x73, 0x6a, 0x34, 0x97, 0xe4, 0xd1, 0x14, 0x6b, 0x8d, 
0x90, 0x34, 0xd1, 0xe9, 0x6d, 0x5c, 0x88, 0x2c, 0xd2, 0xc8, 0x6f, 0x0b, 0x80, 0x30, 0xd4, 0x5c, 0x71, 0xf6, 0x77, 0xcc, 0xd5, 0xcb, 0x74, 0xc9, 0x6f, 0x93, 0xd7, 0x64, 0x78, 0x69, 0x66, 0x03, 0xcb, 0xbc, 0x5e, 0x32, 0xe2, 0x77, 0xcd, 0xcb, 0x5e, 0x7a, 0xdb, 0x5e, 0xcf, 0xc5, 0x5e, 0xc1, 0xd4, 0x9b, 0xd1, 0xb8, 0x5e, 0xd5, 0xcd, 0xfc, 0xd3, 0x35, 0x5e, 0xde, 0xc7, 0x3c, 0xd4, 0x87, 0x5e, 0xf9, 0xc0, 0x73, 0xd5, 0x7d, 0x5f, 0x79, 0xb9, 0x97, 0xd6, 0x63, 0x5f, 0xff, 0xb2, 0xa8, 0xd7, 0x2b, 0x61, 0x06, 0xab, 0x9e, 0xd7, 0xe3, 0x62, 0x35, 0xa4, 0x7a, 0xd8, 0x8f, 0x63, 0x7b, 0x9d, 0x2e, 0xd9, 0x2f, 0x64, 0xf2, 0x95, 0xae, 0xd9, 0xcf, 0x66, 0x8a, 0x8d, 0xfd, 0xda, 0x80, 0x68, 0x8e, 0x85, 0xd0, 0xdb, 0x76, 0x6a, 0xbc, 0x7d, 0x71, 0xdd, 0x45, 0x6d, 0xda, 0x74, 0x89, 0xdf, 0x3e, 0x71, 0x46, 0x6b, 0x45, 0xd5, 0xe3, 0x57, 0x51, 0xe2, 0xf8, 0xd8, 0x09, 0x57, 0x81, 0xdc, 0xbb, 0xd9, 0xfc, 0x57, 0xd1, 0xd6, 0xc2, 0xdb, 0xcf, 0x58, 0x1f, 0xd0, 0xca, 0xdd, 0x03, 0x58, 0x98, 0xca, 0x90, 0xdd, 0xc9, 0x59, 0x4f, 0xc4, 0x21, 0xde, 0x7a, 0x5a, 0x01, 0xbd, 0x97, 0xdf, 0x3c, 0x5a, 0x9c, 0xb7, 0x04, 0xdf, 0xd4, 0x5b, 0x47, 0xb0, 0x53, 0xe0, 0x66, 0x5c, 0x2b, 0xa9, 0x6f, 0xe0, 0xfa, 0x5d, 0x17, 0xa2, 0x77, 0xe1, 0x9c, 0x5e, 0x49, 0x9b, 0x2d, 0xe2, 0x54, 0x5f, 0x92, 0x93, 0xbe, 0xe2, 0xfc, 0x61, 0x7a, 0x8b, 0xd0, 0xe3, 0xb9, 0x63, 0xa0, 0x83, 0x88, 0xe4, 0xb7, 0x66, 0x5a, 0x7a, 0xab, 0xe6, 0x54, 0x69, 0xb7, 0x71, 0x21, 0x5a, 0x8c, 0xc8, 0x37, 0xb7, 0x9b, 0x5b, 0xc2, 0xc9, 0x3e, 0xaf, 0x71, 0x5b, 0x8d, 0xcb, 0x52, 0xa7, 0x4b, 0x5b, 0x7c, 0xcd, 0x40, 0x9f, 0x57, 0x5a, 0x9b, 0xcf, 0x13, 0x96, 0x82, 0x5a, 0x83, 0xd0, 0x12, 0x8d, 0xfe, 0x59, 0xdc, 0xd1, 0x45, 0x85, 0x9d, 0x59, 0xd3, 0xd1, 0xf6, 0x7d, 0x9f, 0x59, 0xec, 0xd2, 0x8d, 0x76, 0x3b, 0x59, 0xf0, 0xd3, 0x24, 0x6e, 0xfc, 0x59, 0x9c, 0xd3, 0xf2, 0x68, 0x6a, 0x59, 0x3e, 0xd4, 0xb1, 0x61, 0xdc, 0x58, 0x6b, 0xd5, 0x16, 0x5a, 0xfc, 0x58, 0xe6, 0xd4, 0xbc, 0x54, 0x22, 0x58, 0x41, 0xd4, 0xd0, 0x4d, 0x58, 0x5a, 0xa6, 0xd3, 0x64, 0x47, 0x4b, 0x5b, 
0x13, 0xd2, 0xe1, 0x40, 0xcf, 0x60, 0x0f, 0xc3, 0x00, 0xba, 0x1d, 0x61, 0xd5, 0xc3, 0x80, 0xb1, 0x89, 0x62, 0x8f, 0xc4, 0xae, 0xa9, 0x4b, 0x62, 0xee, 0xc6, 0x2c, 0xa1, 0x29, 0x63, 0x27, 0xc7, 0x6d, 0x98, 0x86, 0x63, 0x4d, 0xc8, 0x9c, 0x8f, 0xcd, 0x63, 0x56, 0xc9, 0xe4, 0x87, 0x5a, 0x63, 0x49, 0xcb, 0x31, 0x7f, 0x0b, 0x63, 0x12, 0xcc, 0xa8, 0x77, 0x8e, 0x62, 0xdd, 0xcd, 0xfc, 0x70, 0x0d, 0x62, 0xd5, 0xcf, 0x4e, 0x69, 0x96, 0x62, 0xb4, 0xd0, 0x5c, 0x63, 0x3c, 0x62, 0x9b, 0xd0, 0xbc, 0x5c, 0x83, 0x62, 0x8f, 0xd0, 0x93, 0x55, 0x33, 0x62, 0xc0, 0xd0, 0x42, 0x4e, 0x50, 0x63, 0x88, 0xcf, 0x58, 0x48, 0x6b, 0x64, 0x5b, 0xce, 0x70, 0x42, 0x50, 0x64, 0xa7, 0xbe, 0x7f, 0xbd, 0xa8, 0x67, 0x12, 0xbe, 0x7b, 0xb3, 0xd6, 0x68, 0x9f, 0xbe, 0xf8, 0xab, 0x47, 0x69, 0xa2, 0xbf, 0xf6, 0xa2, 0xe0, 0x6a, 0x5e, 0xc0, 0xfb, 0x9a, 0x54, 0x6a, 0xf6, 0xc2, 0x08, 0x91, 0xab, 0x6b, 0x46, 0xc3, 0x4c, 0x89, 0x1d, 0x6b, 0x77, 0xc4, 0x9b, 0x80, 0x97, 0x6b, 0x5d, 0xc6, 0x50, 0x79, 0x38, 0x6b, 0x26, 0xc7, 0xec, 0x71, 0xe3, 0x6b, 0x1f, 0xc9, 0x73, 0x6b, 0x28, 0x6b, 0x0b, 0xca, 0xfc, 0x64, 0xb8, 0x6a, 0xe1, 0xcc, 0x3c, 0x5e, 0x2b, 0x6a, 0xf2, 0xcb, 0xf5, 0x56, 0x9b, 0x6b, 0x15, 0xcb, 0xc0, 0x4f, 0x4c, 0x6b, 0xeb, 0xca, 0xce, 0x49, 0x6e, 0x6c, 0xbd, 0xc9, 0xea, 0x43, 0x44, 0x69, 0x63, 0xb9, 0x9f, 0xc1, 0x49, 0x6c, 0x1f, 0xb9, 0x3f, 0xb6, 0x9d, 0x6e, 0x3f, 0xb9, 0x15, 0xad, 0xa2, 0x6f, 0xd1, 0xb9, 0xa2, 0xa5, 0x24, 0x71, 0x3d, 0xba, 0x59, 0x9c, 0x9f, 0x72, 0x50, 0xbb, 0x60, 0x93, 0xf7, 0x72, 0xf0, 0xbc, 0xac, 0x8b, 0x64, 0x73, 0x32, 0xbe, 0x2b, 0x82, 0xbf, 0x73, 0x3a, 0xc0, 0x1f, 0x7a, 0xfd, 0x73, 0x40, 0xc1, 0xdd, 0x73, 0xd6, 0x73, 0x4d, 0xc3, 0x7c, 0x6c, 0xee, 0x73, 0x62, 0xc5, 0x0c, 0x66, 0x62, 0x73, 0x50, 0xc6, 0xa9, 0x60, 0x01, 0x73, 0x83, 0xc6, 0xa1, 0x58, 0x6b, 0x73, 0xad, 0xc6, 0xaf, 0x50, 0xc6, 0x74, 0x72, 0xc6, 0x04, 0x4a, 0xc6, 0x75, 0x55, 0xc5, 0x63, 0x44, 0x63, 0x6e, 0x49, 0xb4, 0xd5, 0xc5, 0x7a, 0x71, 0x95, 0xb3, 0xec, 0xb9, 0xd7, 0x74, 0x3c, 0xb3, 0x36, 0xb0, 0x4b, 0x76, 0x55, 0xb3, 0x56, 
0xa7, 0xd3, 0x78, 0x48, 0xb3, 0x87, 0x9f, 0x64, 0x79, 0xc0, 0xb4, 0x68, 0x96, 0xb8, 0x7a, 0xeb, 0xb5, 0x6e, 0x8e, 0x2c, 0x7b, 0x68, 0xb6, 0xe1, 0x85, 0xed, 0x7b, 0xb4, 0xb8, 0x87, 0x7d, 0xe3, 0x7b, 0xa9, 0xba, 0x9e, 0x76, 0x8e, 0x7b, 0x90, 0xbc, 0xb0, 0x6f, 0x4e, 0x7b, 0xa2, 0xbe, 0xb4, 0x68, 0x63, 0x7b, 0xa2, 0xc0, 0x88, 0x61, 0xbd, 0x7b, 0xeb, 0xc1, 0x13, 0x5a, 0x81, 0x7c, 0x43, 0xc1, 0x43, 0x52, 0xf7, 0x7d, 0x10, 0xc1, 0x39, 0x4c, 0x83, 0x7e, 0x68, 0xc1, 0x1b, 0x45, 0x8d, 0x74, 0x68, 0xaf, 0x72, 0xc8, 0xa6, 0x78, 0x54, 0xad, 0xd5, 0xbc, 0xf3, 0x7b, 0x14, 0xad, 0x21, 0xb3, 0xac, 0x7d, 0x70, 0xac, 0xd9, 0xaa, 0xf7, 0x7f, 0x90, 0xac, 0xc0, 0xa2, 0x72, 0x81, 0x52, 0xad, 0x55, 0x99, 0xe0, 0x82, 0xda, 0xae, 0x2e, 0x91, 0x40, 0x83, 0x87, 0xaf, 0xa5, 0x89, 0x11, 0x84, 0x0c, 0xb1, 0x1a, 0x80, 0xfa, 0x84, 0x2d, 0xb3, 0x18, 0x79, 0x94, 0x84, 0x2c, 0xb5, 0x20, 0x72, 0x54, 0x84, 0x52, 0xb7, 0x07, 0x6b, 0x27, 0x84, 0x6c, 0xb8, 0xea, 0x64, 0x26, 0x84, 0xa7, 0xba, 0x5c, 0x5d, 0x1e, 0x85, 0x38, 0xba, 0xf7, 0x55, 0xc8, 0x85, 0xe9, 0xbb, 0x88, 0x4e, 0xc1, 0x87, 0x44, 0xbc, 0x2f, 0x48, 0x25, 0x7c, 0x20, 0xa8, 0x52, 0xcb, 0xf3, 0x80, 0x8b, 0xa6, 0xa7, 0xc0, 0x1b, 0x83, 0x1a, 0xa6, 0x36, 0xb7, 0x68, 0x85, 0x96, 0xa5, 0xd4, 0xae, 0xb8, 0x87, 0xbf, 0xa5, 0xbd, 0xa6, 0x18, 0x89, 0xa5, 0xa5, 0xf4, 0x9d, 0x8e, 0x8b, 0x00, 0xa6, 0xce, 0x95, 0x28, 0x8c, 0x04, 0xa7, 0xf7, 0x8c, 0xdb, 0x8c, 0x95, 0xa9, 0x92, 0x84, 0xbc, 0x8c, 0xe2, 0xab, 0x6b, 0x7c, 0xea, 0x8c, 0xdf, 0xad, 0x9f, 0x75, 0x8b, 0x8c, 0xd5, 0xaf, 0xc5, 0x6e, 0x2a, 0x8d, 0x36, 0xb1, 0x74, 0x66, 0xe5, 0x8d, 0x76, 0xb3, 0x24, 0x5f, 0xce, 0x8e, 0x4b, 0xb4, 0x1c, 0x58, 0xb6, 0x8f, 0x09, 0xb4, 0xfd, 0x51, 0x8f, 0x90, 0x5f, 0xb6, 0x34, 0x4a, 0xe6, 0x83, 0xc7, 0xa1, 0xa3, 0xcf, 0x20, 0x87, 0xea, 0xa0, 0x37, 0xc4, 0xbe, 0x8b, 0x3f, 0x9f, 0x44, 0xbb, 0x9f, 0x8d, 0xef, 0x9e, 0xb8, 0xb2, 0xc4, 0x90, 0x40, 0x9e, 0x8b, 0xaa, 0x0f, 0x92, 0x4b, 0x9e, 0x9d, 0xa1, 0x70, 0x93, 0x81, 0x9f, 0x6b, 0x99, 0x07, 0x94, 0x83, 0xa0, 0x55, 0x90, 0xa1, 0x95, 
0x40, 0xa1, 0xe8, 0x88, 0xa5, 0x95, 0xde, 0xa3, 0x7f, 0x80, 0xa8, 0x96, 0x12, 0xa5, 0x98, 0x79, 0x4d, 0x96, 0x1d, 0xa7, 0xc2, 0x72, 0x04, 0x96, 0x7b, 0xa9, 0x8a, 0x6a, 0x78, 0x96, 0xe5, 0xab, 0x2f, 0x62, 0xf3, 0x97, 0xba, 0xac, 0xcd, 0x5b, 0xe1, 0x98, 0xdf, 0xae, 0x3c, 0x55, 0x00, 0x9a, 0x29, 0xaf, 0xaf, 0x4d, 0xc7, 0x8b, 0xe7, 0x9a, 0xa0, 0xd3, 0x21, 0x90, 0x32, 0x99, 0x1f, 0xc9, 0x33, 0x94, 0x0b, 0x97, 0xfa, 0xc0, 0x1f, 0x96, 0xc1, 0x97, 0x8b, 0xb7, 0x4b, 0x99, 0x40, 0x97, 0x3e, 0xae, 0x9d, 0x9b, 0x33, 0x97, 0x7a, 0xa6, 0x13, 0x9c, 0xc6, 0x97, 0xe8, 0x9d, 0x95, 0x9d, 0xab, 0x98, 0xc3, 0x95, 0x2c, 0x9e, 0x6a, 0x99, 0xec, 0x8c, 0xee, 0x9e, 0xfb, 0x9b, 0x8d, 0x85, 0x06, 0x9f, 0x64, 0x9d, 0x63, 0x7d, 0x55, 0x9f, 0x6b, 0x9f, 0x9f, 0x76, 0x13, 0x9f, 0x91, 0xa1, 0xb6, 0x6e, 0xce, 0xa0, 0x74, 0xa3, 0x25, 0x66, 0xe1, 0xa1, 0x44, 0xa4, 0xb0, 0x5f, 0x40, 0xa2, 0x98, 0xa6, 0xb0, 0x58, 0x6c, 0xa3, 0xf8, 0xa8, 0xa4, 0x50, 0xe7, 0x95, 0x87, 0x93, 0x1a, 0xd6, 0xdc, 0x99, 0x94, 0x91, 0xf3, 0xcd, 0x1b, 0x9c, 0xc7, 0x91, 0x18, 0xc4, 0x58, 0x9f, 0x9e, 0x90, 0x89, 0xbb, 0xb7, 0xa2, 0x5b, 0x90, 0x24, 0xb3, 0x23, 0xa4, 0x60, 0x90, 0x35, 0xaa, 0x9c, 0xa5, 0xf5, 0x90, 0x87, 0xa2, 0x14, 0xa6, 0xe3, 0x91, 0x55, 0x99, 0xc6, 0xa7, 0x9a, 0x92, 0x45, 0x91, 0x85, 0xa8, 0x25, 0x93, 0xbf, 0x89, 0x99, 0xa8, 0x96, 0x95, 0x50, 0x81, 0xbe, 0xa9, 0x19, 0x97, 0x43, 0x7a, 0x26, 0xa9, 0x6e, 0x99, 0x53, 0x72, 0xc6, 0xaa, 0x12, 0x9b, 0x3f, 0x6b, 0x31, 0xaa, 0xf2, 0x9c, 0xf8, 0x63, 0x64, 0xac, 0x15, 0x9f, 0x00, 0x5b, 0xe1, 0xad, 0x8f, 0xa1, 0x52, 0x54, 0x11, 0xa0, 0x40, 0x8b, 0x60, 0xda, 0xd5, 0xa3, 0xc5, 0x8a, 0xbb, 0xd1, 0x3a, 0xa6, 0xbd, 0x8a, 0x1e, 0xc8, 0xc9, 0xa9, 0x9b, 0x89, 0x88, 0xc0, 0x70, 0xac, 0x0b, 0x89, 0x33, 0xb7, 0xd7, 0xae, 0x33, 0x88, 0xf5, 0xaf, 0x4b, 0xaf, 0x61, 0x89, 0x5c, 0xa6, 0xdd, 0xb0, 0x5e, 0x89, 0xe6, 0x9e, 0x78, 0xb0, 0xec, 0x8a, 0xf6, 0x96, 0x4e, 0xb1, 0x6d, 0x8c, 0x18, 0x8e, 0x35, 0xb1, 0xdb, 0x8d, 0x8a, 0x86, 0x65, 0xb2, 0x4a, 0x8f, 0x29, 0x7e, 0x94, 0xb3, 0x28, 0x91, 0x33, 
0x76, 0xe4, 0xb3, 0xe0, 0x93, 0x2e, 0x6f, 0x61, 0xb5, 0x07, 0x95, 0x3a, 0x67, 0xcb, 0xb6, 0x1a, 0x97, 0x27, 0x60, 0x21, 0xb7, 0x92, 0x9a, 0x03, 0x57, 0x84, 0xaa, 0x64, 0x84, 0x42, 0xdf, 0x1c, 0xad, 0x81, 0x83, 0xda, 0xd5, 0xed, 0xb0, 0x67, 0x83, 0x62, 0xcd, 0x3f, 0xb3, 0x13, 0x82, 0xeb, 0xc4, 0xd2, 0xb5, 0x71, 0x82, 0x82, 0xbc, 0x55, 0xb7, 0x67, 0x82, 0x45, 0xb3, 0xcf, 0xb8, 0xc1, 0x82, 0x6b, 0xab, 0x81, 0xb9, 0xae, 0x82, 0xd6, 0xa3, 0x57, 0xba, 0x52, 0x83, 0xa7, 0x9b, 0x33, 0xba, 0xc2, 0x84, 0xbb, 0x93, 0x10, 0xbb, 0x52, 0x85, 0xe2, 0x8b, 0x2c, 0xbb, 0xdf, 0x87, 0x1c, 0x83, 0x6e, 0xbc, 0xab, 0x88, 0xfc, 0x7b, 0xa9, 0xbd, 0xac, 0x8b, 0x29, 0x73, 0xea, 0xbe, 0xfa, 0x8d, 0x51, 0x6c, 0x2f, 0xc0, 0x94, 0x8f, 0x84, 0x64, 0x67, 0xc1, 0xea, 0x92, 0x23, 0x5c, 0x4a, 0xb4, 0x00, 0x7d, 0x0f, 0xe3, 0x23, 0xb7, 0x12, 0x7c, 0xa4, 0xd9, 0xe0, 0xb9, 0xda, 0x7c, 0x49, 0xd1, 0x49, 0xbc, 0x6c, 0x7b, 0xc2, 0xc8, 0xca, 0xbe, 0xda, 0x7b, 0x2b, 0xc0, 0x52, 0xc0, 0x7e, 0x7b, 0x22, 0xb8, 0x0c, 0xc1, 0xf1, 0x7b, 0x28, 0xaf, 0xdc, 0xc2, 0xc0, 0x7b, 0xae, 0xa7, 0xf8, 0xc3, 0x83, 0x7c, 0x2e, 0xa0, 0x12, 0xc3, 0xe5, 0x7d, 0x5c, 0x98, 0x05, 0xc4, 0x35, 0x7e, 0x7f, 0x8f, 0xeb, 0xc4, 0xfd, 0x7f, 0x9c, 0x88, 0x32, 0xc5, 0xbc, 0x80, 0x80, 0x80, 0x80, 0xc6, 0xd5, 0x83, 0x22, 0x78, 0xc5, 0xc7, 0xdd, 0x85, 0x74, 0x71, 0x14, 0xc9, 0x55, 0x88, 0x39, 0x69, 0x40, 0xcb, 0x01, 0x8b, 0x3e, 0x60, 0x6a, 0xbd, 0xd6, 0x74, 0xed, 0xe5, 0xe7, 0xc0, 0xc3, 0x74, 0xa8, 0xdc, 0xbb, 0xc3, 0x3f, 0x74, 0x76, 0xd4, 0x6e, 0xc5, 0x87, 0x74, 0x35, 0xcc, 0x3b, 0xc7, 0x91, 0x73, 0xee, 0xc4, 0x18, 0xc9, 0x29, 0x73, 0xdd, 0xbc, 0x19, 0xca, 0x52, 0x73, 0xf8, 0xb4, 0x34, 0xcb, 0x4b, 0x74, 0x58, 0xac, 0x69, 0xcc, 0x15, 0x75, 0x02, 0xa4, 0xb7, 0xcc, 0xbd, 0x75, 0xeb, 0x9c, 0xf9, 0xcd, 0x36, 0x77, 0x30, 0x95, 0x23, 0xcd, 0xc1, 0x78, 0x82, 0x8d, 0x4c, 0xce, 0x7d, 0x79, 0xe1, 0x85, 0x89, 0xcf, 0x60, 0x7b, 0x7d, 0x7d, 0xba, 0xd0, 0xb5, 0x7d, 0xd6, 0x75, 0xd8, 0xd2, 0x1d, 0x80, 0x4f, 0x6d, 0xeb, 0xd3, 0xa5, 0x83, 0xda, 0x65, 0x27, 0xc7, 
0x4d, 0x6d, 0x43, 0xe7, 0xa8, 0xca, 0x15, 0x6d, 0x18, 0xde, 0x86, 0xcc, 0x4a, 0x6d, 0x07, 0xd6, 0xc1, 0xce, 0x5e, 0x6c, 0xfa, 0xcf, 0x22, 0xd0, 0x21, 0x6c, 0xe4, 0xc7, 0x74, 0xd1, 0xb9, 0x6c, 0xcc, 0xbf, 0xdb, 0xd2, 0x9a, 0x6d, 0x0f, 0xb8, 0x54, 0xd3, 0x74, 0x6d, 0x4e, 0xb0, 0xc9, 0xd4, 0x43, 0x6e, 0x07, 0xa9, 0x4c, 0xd5, 0x0b, 0x6e, 0xc0, 0xa1, 0xc7, 0xd5, 0xb0, 0x6f, 0xe5, 0x9a, 0x21, 0xd6, 0x3e, 0x71, 0x3b, 0x92, 0x83, 0xd6, 0xe9, 0x72, 0xce, 0x8a, 0xba, 0xd7, 0xa8, 0x74, 0x59, 0x82, 0xdc, 0xd8, 0xee, 0x76, 0xb7, 0x7a, 0xb9, 0xda, 0x7a, 0x79, 0x61, 0x72, 0x91, 0xdc, 0x22, 0x7c, 0x62, 0x69, 0xe2, 0xd1, 0x1f, 0x65, 0xb4, 0xe7, 0xe7, 0xd3, 0x92, 0x65, 0xb1, 0xdf, 0xb9, 0xd5, 0x9a, 0x65, 0xaf, 0xd8, 0xc5, 0xd7, 0x98, 0x65, 0xa1, 0xd1, 0xe4, 0xd9, 0x05, 0x65, 0xac, 0xca, 0xcb, 0xda, 0x25, 0x65, 0xcc, 0xc3, 0x96, 0xdb, 0x0e, 0x66, 0x23, 0xbc, 0x68, 0xdb, 0xcf, 0x66, 0xa4, 0xb5, 0x3e, 0xdc, 0x84, 0x67, 0x46, 0xae, 0x0a, 0xdd, 0x2a, 0x68, 0x46, 0xa6, 0xbe, 0xdd, 0xcd, 0x69, 0x3f, 0x9f, 0x63, 0xde, 0x64, 0x6a, 0x97, 0x97, 0xdf, 0xde, 0xf5, 0x6b, 0xe2, 0x90, 0x58, 0xdf, 0x9f, 0x6d, 0x93, 0x88, 0x3d, 0xe0, 0x54, 0x6f, 0x11, 0x80, 0x22, 0xe2, 0x53, 0x72, 0x59, 0x77, 0x77, 0xe4, 0x5b, 0x75, 0x52, 0x6e, 0xe1, 0xdb, 0xc8, 0x5d, 0xd2, 0xe8, 0x56, 0xde, 0x47, 0x5d, 0xbb, 0xe1, 0x52, 0xe0, 0xe2, 0x5d, 0x46, 0xdb, 0x08, 0xe2, 0x6d, 0x5d, 0x7d, 0xd4, 0xbd, 0xe4, 0x13, 0x5d, 0x8f, 0xce, 0x75, 0xe4, 0x30, 0x5e, 0x6f, 0xc7, 0x8b, 0xe4, 0x51, 0x5f, 0x34, 0xc0, 0x89, 0xe4, 0xef, 0x5f, 0xd2, 0xb9, 0xba, 0xe5, 0x8a, 0x60, 0x7b, 0xb2, 0xe2, 0xe6, 0x13, 0x61, 0x84, 0xab, 0xe5, 0xe6, 0x96, 0x62, 0xaf, 0xa4, 0xc8, 0xe7, 0x17, 0x63, 0xe9, 0x9d, 0x7b, 0xe7, 0xa5, 0x65, 0x44, 0x95, 0xf7, 0xe8, 0x48, 0x66, 0xb2, 0x8e, 0x45, 0xe9, 0x31, 0x68, 0x84, 0x86, 0x07, 0xea, 0x42, 0x6a, 0xc9, 0x7d, 0x7a, 0xec, 0x06, 0x6e, 0x1a, 0x74, 0x28, 0x62, 0xa2, 0xcb, 0xfc, 0xba, 0xff, 0x63, 0x5f, 0xcd, 0x42, 0xb3, 0x2f, 0x62, 0x7d, 0xd0, 0x0b, 0xab, 0x16, 0x62, 0x69, 0xd1, 0xe7, 0xa3, 0x56, 0x62, 0x6a, 0xd3, 0x23, 
0x9b, 0x40, 0x62, 0x37, 0xd4, 0x14, 0x92, 0xb4, 0x61, 0xee, 0xd4, 0xed, 0x8a, 0x73, 0x61, 0xcb, 0xd5, 0x85, 0x82, 0x59, 0x61, 0xc2, 0xd6, 0x11, 0x7a, 0xe5, 0x61, 0x74, 0xd6, 0xbc, 0x73, 0xa2, 0x61, 0x2e, 0xd7, 0x5c, 0x6c, 0xea, 0x60, 0xcc, 0xd7, 0xfc, 0x66, 0xa8, 0x5f, 0xe3, 0xd8, 0xce, 0x60, 0x50, 0x5f, 0x75, 0xd8, 0xa7, 0x59, 0x92, 0x5f, 0xed, 0xd8, 0x1c, 0x52, 0xda, 0x60, 0x29, 0xd7, 0xa2, 0x4c, 0x61, 0x60, 0xdc, 0xd6, 0xd2, 0x46, 0x44, 0x67, 0xc3, 0xc7, 0x48, 0xbd, 0x6b, 0x69, 0x29, 0xc7, 0xee, 0xb5, 0x5f, 0x6a, 0x36, 0xc8, 0xd2, 0xad, 0x7c, 0x6a, 0xa4, 0xca, 0x3a, 0xa5, 0x63, 0x6a, 0xed, 0xcb, 0xa3, 0x9d, 0x2a, 0x6b, 0x1a, 0xcc, 0xc9, 0x94, 0x6c, 0x6b, 0x1c, 0xce, 0x0c, 0x8b, 0xe0, 0x6a, 0xf9, 0xcf, 0x63, 0x83, 0x8a, 0x6a, 0x9b, 0xd0, 0xa1, 0x7b, 0xed, 0x6a, 0x1f, 0xd1, 0xa9, 0x74, 0xcf, 0x69, 0xb5, 0xd2, 0xa0, 0x6e, 0x09, 0x69, 0x63, 0xd3, 0x8f, 0x67, 0xe7, 0x69, 0x08, 0xd4, 0x67, 0x61, 0xc5, 0x68, 0xc2, 0xd4, 0x7a, 0x5b, 0x04, 0x68, 0x89, 0xd4, 0x42, 0x53, 0xef, 0x68, 0xab, 0xd3, 0xd1, 0x4d, 0x46, 0x69, 0x45, 0xd3, 0x03, 0x47, 0x21, 0x6c, 0x7a, 0xc3, 0x33, 0xbf, 0xf6, 0x6e, 0x49, 0xc3, 0x5f, 0xb7, 0x9d, 0x70, 0x04, 0xc3, 0x83, 0xaf, 0x79, 0x70, 0xfd, 0xc4, 0x6f, 0xa7, 0x2e, 0x71, 0xd2, 0xc5, 0x6c, 0x9e, 0xef, 0x72, 0x63, 0xc6, 0x72, 0x96, 0x4f, 0x72, 0xc6, 0xc7, 0x97, 0x8d, 0xbd, 0x72, 0xd8, 0xc8, 0xeb, 0x85, 0x55, 0x72, 0xbf, 0xca, 0x5e, 0x7d, 0x6f, 0x72, 0x65, 0xcb, 0xe9, 0x76, 0x51, 0x71, 0xf8, 0xcd, 0x6d, 0x6f, 0x45, 0x71, 0xa7, 0xcf, 0x12, 0x69, 0x10, 0x71, 0x41, 0xd0, 0x6c, 0x63, 0x03, 0x70, 0xec, 0xd0, 0xe0, 0x5c, 0x67, 0x70, 0xad, 0xd0, 0xa5, 0x55, 0x08, 0x70, 0xaf, 0xd0, 0x61, 0x4d, 0xf5, 0x71, 0x7f, 0xcf, 0x06, 0x48, 0x05, 0x71, 0x1a, 0xbf, 0x41, 0xc3, 0x05, 0x73, 0x7d, 0xbe, 0xcc, 0xba, 0x0c, 0x75, 0xc1, 0xbe, 0x52, 0xb1, 0x9f, 0x77, 0x35, 0xbe, 0xe3, 0xa9, 0x35, 0x78, 0x69, 0xbf, 0xbc, 0xa0, 0xda, 0x79, 0x44, 0xc0, 0xa5, 0x98, 0x56, 0x7a, 0x03, 0xc1, 0x92, 0x8f, 0xd5, 0x7a, 0x46, 0xc2, 0xd1, 0x87, 0x72, 0x7a, 0x6f, 0xc4, 0x24, 0x7f, 0x3b, 0x7a, 
0x4c, 0xc5, 0xc4, 0x78, 0x3f, 0x7a, 0x07, 0xc7, 0x57, 0x71, 0x3f, 0x79, 0xd4, 0xc8, 0xe7, 0x6a, 0xc9, 0x79, 0x8c, 0xca, 0x79, 0x64, 0x85, 0x79, 0x37, 0xcb, 0xa9, 0x5e, 0x1b, 0x79, 0x2e, 0xcb, 0x41, 0x56, 0xc0, 0x79, 0x34, 0xca, 0xf4, 0x4f, 0x86, 0x79, 0xfe, 0xca, 0x2d, 0x49, 0x5a, 0x76, 0xce, 0xb9, 0xa7, 0xc7, 0xc2, 0x79, 0xa9, 0xb8, 0xf6, 0xbc, 0xec, 0x7b, 0xe4, 0xb8, 0x97, 0xb4, 0x58, 0x7d, 0xc2, 0xb8, 0xa7, 0xab, 0xeb, 0x7f, 0x4e, 0xb9, 0x20, 0xa3, 0x8b, 0x80, 0x96, 0xb9, 0xe6, 0x9b, 0x0b, 0x81, 0x98, 0xba, 0xe1, 0x92, 0x75, 0x82, 0x0d, 0xbc, 0x32, 0x8a, 0x1c, 0x82, 0x44, 0xbd, 0xa0, 0x81, 0xbc, 0x82, 0x26, 0xbf, 0x87, 0x7a, 0x62, 0x82, 0x0d, 0xc1, 0x37, 0x73, 0x75, 0x81, 0xf5, 0xc2, 0xce, 0x6c, 0xc3, 0x81, 0xd3, 0xc4, 0x5d, 0x66, 0x52, 0x81, 0x94, 0xc5, 0xed, 0x60, 0x05, 0x81, 0xb7, 0xc5, 0xcf, 0x58, 0xbf, 0x81, 0xda, 0xc5, 0xc4, 0x51, 0x77, 0x82, 0xcd, 0xc5, 0x88, 0x4a, 0x44, 0x7d, 0x22, 0xb4, 0x06, 0xcc, 0x02, 0x80, 0x71, 0xb3, 0x04, 0xbf, 0xc2, 0x82, 0xa2, 0xb2, 0xcd, 0xb7, 0x62, 0x84, 0xc0, 0xb2, 0xa4, 0xaf, 0x05, 0x86, 0x92, 0xb2, 0xce, 0xa6, 0xa3, 0x88, 0x33, 0xb3, 0x24, 0x9e, 0x38, 0x89, 0x5a, 0xb4, 0x00, 0x95, 0xa1, 0x8a, 0x23, 0xb5, 0x13, 0x8d, 0x39, 0x8a, 0x74, 0xb6, 0x79, 0x85, 0x17, 0x8a, 0x94, 0xb8, 0x18, 0x7d, 0x55, 0x8a, 0x6e, 0xba, 0x1c, 0x76, 0x3f, 0x8a, 0x38, 0xbc, 0x16, 0x6f, 0x3d, 0x8a, 0x1b, 0xbd, 0xee, 0x68, 0x71, 0x89, 0xdd, 0xbf, 0xce, 0x61, 0xd3, 0x8a, 0x15, 0xc0, 0x69, 0x5a, 0xbc, 0x8a, 0x70, 0xc0, 0xa4, 0x53, 0x68, 0x8b, 0x45, 0xc1, 0x01, 0x4c, 0x6a, 0x84, 0x2e, 0xae, 0x5b, 0xcf, 0x1c, 0x87, 0xa1, 0xad, 0x17, 0xc4, 0x11, 0x8a, 0x43, 0xac, 0x6e, 0xbb, 0x1c, 0x8c, 0x87, 0xac, 0x24, 0xb2, 0xab, 0x8e, 0x84, 0xac, 0x16, 0xaa, 0x36, 0x90, 0x51, 0xac, 0x2c, 0xa1, 0xba, 0x91, 0x76, 0xac, 0xec, 0x99, 0x39, 0x92, 0x5b, 0xad, 0xd0, 0x90, 0xa7, 0x92, 0xad, 0xaf, 0x5d, 0x88, 0x7d, 0x92, 0xf0, 0xb0, 0xdb, 0x80, 0x63, 0x92, 0xee, 0xb2, 0xd9, 0x79, 0x43, 0x92, 0xcf, 0xb4, 0xd7, 0x72, 0x3a, 0x92, 0xdb, 0xb6, 0x91, 0x6b, 0x3d, 0x92, 0xe2, 0xb8, 0x35, 
0x64, 0x60, 0x93, 0x11, 0xb9, 0x8b, 0x5d, 0x7c, 0x93, 0xb8, 0xba, 0x34, 0x56, 0x5a, 0x94, 0x77, 0xba, 0xf1, 0x4f, 0x05, 0x8b, 0x8c, 0xa8, 0x19, 0xd2, 0x4c, 0x8f, 0x26, 0xa6, 0xa6, 0xc8, 0x58, 0x92, 0x88, 0xa5, 0x8d, 0xbf, 0x41, 0x94, 0xda, 0xa5, 0x45, 0xb6, 0xaa, 0x96, 0xf4, 0xa5, 0x14, 0xae, 0x22, 0x98, 0xb0, 0xa5, 0x3e, 0xa5, 0x97, 0x9a, 0x0b, 0xa5, 0xac, 0x9d, 0x12, 0x9a, 0xc7, 0xa6, 0x89, 0x94, 0x8c, 0x9b, 0x5d, 0xa7, 0xbc, 0x8c, 0x4c, 0x9b, 0xcc, 0xa9, 0x47, 0x84, 0x57, 0x9b, 0xf0, 0xab, 0x1c, 0x7c, 0xb6, 0x9b, 0xbc, 0xad, 0x53, 0x75, 0x83, 0x9b, 0x90, 0xaf, 0x68, 0x6e, 0x4b, 0x9b, 0xe2, 0xb0, 0xdc, 0x67, 0x28, 0x9c, 0x1e, 0xb2, 0x4e, 0x60, 0x2f, 0x9d, 0x40, 0xb3, 0x6a, 0x59, 0x6e, 0x9e, 0x6e, 0xb4, 0x89, 0x52, 0x24, 0x93, 0x63, 0xa1, 0x69, 0xd6, 0x3a, 0x97, 0x7f, 0x9f, 0xdd, 0xcc, 0x4e, 0x9a, 0xec, 0x9e, 0xe6, 0xc3, 0x7f, 0x9d, 0x72, 0x9e, 0x5c, 0xba, 0xdc, 0x9f, 0x85, 0x9e, 0x04, 0xb2, 0x47, 0xa1, 0x3f, 0x9e, 0x25, 0xa9, 0xbb, 0xa2, 0xbb, 0x9e, 0x6b, 0xa1, 0x24, 0xa3, 0x79, 0x9f, 0x33, 0x98, 0xa0, 0xa4, 0x17, 0xa0, 0x0c, 0x90, 0x20, 0xa4, 0x92, 0xa1, 0xa1, 0x88, 0x67, 0xa4, 0xf5, 0xa3, 0x27, 0x80, 0x99, 0xa5, 0x02, 0xa5, 0x36, 0x79, 0x6c, 0xa4, 0xee, 0xa7, 0x57, 0x72, 0x54, 0xa5, 0x46, 0xa9, 0x09, 0x6a, 0xf0, 0xa5, 0xbd, 0xaa, 0x81, 0x63, 0x80, 0xa6, 0x91, 0xac, 0x19, 0x5c, 0x69, 0xa7, 0xc9, 0xad, 0xcb, 0x55, 0x04, 0x9d, 0xbc, 0x99, 0xc7, 0xda, 0x46, 0xa1, 0x72, 0x98, 0x85, 0xcf, 0xfe, 0xa4, 0x3e, 0x97, 0xe9, 0xc7, 0xa3, 0xa6, 0xdb, 0x97, 0x56, 0xbf, 0x4b, 0xa8, 0xf1, 0x97, 0x10, 0xb6, 0xcb, 0xaa, 0xbf, 0x96, 0xef, 0xae, 0x4b, 0xab, 0xec, 0x97, 0x3e, 0xa5, 0xa8, 0xac, 0xd2, 0x97, 0xc9, 0x9d, 0x28, 0xad, 0x63, 0x98, 0xbc, 0x94, 0xf0, 0xad, 0xd1, 0x99, 0xec, 0x8c, 0xe3, 0xad, 0xff, 0x9b, 0x74, 0x85, 0x0f, 0xae, 0x23, 0x9d, 0x26, 0x7d, 0x68, 0xae, 0x33, 0x9f, 0x1d, 0x76, 0x2c, 0xae, 0x50, 0xa1, 0x09, 0x6f, 0x00, 0xaf, 0x3a, 0xa2, 0xa8, 0x67, 0x5e, 0xaf, 0xf8, 0xa4, 0x42, 0x5f, 0xe1, 0xb1, 0x5a, 0xa6, 0x6a, 0x58, 0x30, 0xa7, 0xbd, 0x92, 0x7a, 0xde, 0x0b, 0xaa, 
0xd1, 0x91, 0x9f, 0xd4, 0xb0, 0xad, 0x90, 0x90, 0xe3, 0xcb, 0xf3, 0xb0, 0x2a, 0x90, 0x41, 0xc3, 0x9e, 0xb2, 0x50, 0x8f, 0xf2, 0xbb, 0x35, 0xb4, 0x1b, 0x8f, 0xc8, 0xb2, 0xbf, 0xb5, 0x46, 0x90, 0x04, 0xaa, 0x3e, 0xb6, 0x29, 0x90, 0x71, 0xa1, 0xb6, 0xb6, 0xa6, 0x91, 0x63, 0x99, 0x81, 0xb7, 0x08, 0x92, 0x6a, 0x91, 0x58, 0xb7, 0x4d, 0x93, 0xcd, 0x89, 0x8d, 0xb7, 0x7a, 0x95, 0x42, 0x81, 0xd2, 0xb7, 0xf4, 0x97, 0x15, 0x7a, 0x4f, 0xb8, 0x48, 0x98, 0xf7, 0x72, 0xf8, 0xb8, 0xf8, 0x9a, 0xd4, 0x6b, 0x74, 0xb9, 0xf8, 0x9c, 0x99, 0x63, 0xba, 0xbb, 0x25, 0x9e, 0xab, 0x5b, 0xa4, 0xb1, 0x9e, 0x8b, 0x65, 0xe2, 0x8a, 0xb4, 0x98, 0x8a, 0xcb, 0xd9, 0x3f, 0xb7, 0x46, 0x8a, 0x40, 0xd0, 0x77, 0xb9, 0xb3, 0x89, 0xc9, 0xc8, 0x0b, 0xbb, 0xf3, 0x89, 0x56, 0xbf, 0x9f, 0xbd, 0x89, 0x89, 0x2c, 0xb7, 0x37, 0xbe, 0xeb, 0x89, 0x1c, 0xae, 0xdb, 0xbf, 0x9e, 0x89, 0x86, 0xa6, 0x98, 0xc0, 0x31, 0x8a, 0x12, 0x9e, 0x5b, 0xc0, 0x71, 0x8b, 0x1c, 0x96, 0x35, 0xc0, 0xb1, 0x8c, 0x2f, 0x8e, 0x28, 0xc0, 0xea, 0x8d, 0x71, 0x86, 0x68, 0xc1, 0x2c, 0x8e, 0xda, 0x7e, 0xa7, 0xc2, 0x21, 0x90, 0xcc, 0x77, 0x12, 0xc2, 0xe0, 0x92, 0xb9, 0x6f, 0xa4, 0xc4, 0x29, 0x95, 0x03, 0x68, 0x08, 0xc5, 0x67, 0x97, 0x3b, 0x60, 0x1d, 0xba, 0xef, 0x84, 0x83, 0xe6, 0xa9, 0xbd, 0xe3, 0x84, 0x21, 0xdd, 0x4c, 0xc0, 0x69, 0x83, 0xba, 0xd4, 0xc8, 0xc2, 0xaa, 0x83, 0x42, 0xcc, 0x50, 0xc4, 0xbb, 0x82, 0xb4, 0xc3, 0xd5, 0xc6, 0x5d, 0x82, 0x6c, 0xbb, 0x78, 0xc7, 0xa7, 0x82, 0x54, 0xb3, 0x39, 0xc8, 0x77, 0x82, 0x92, 0xab, 0x25, 0xc8, 0xff, 0x83, 0x05, 0xa3, 0x25, 0xc9, 0x5e, 0x83, 0xd8, 0x9b, 0x19, 0xc9, 0x9e, 0x84, 0xe1, 0x93, 0x03, 0xca, 0x10, 0x85, 0xf3, 0x8b, 0x32, 0xca, 0x8b, 0x87, 0x10, 0x83, 0x88, 0xcb, 0x4b, 0x88, 0xca, 0x7b, 0xe2, 0xcc, 0x42, 0x8a, 0xdf, 0x74, 0x4a, 0xcd, 0x6a, 0x8d, 0x29, 0x6c, 0x99, 0xcf, 0x00, 0x8f, 0xe1, 0x64, 0x62, 0xc3, 0xd8, 0x7d, 0x69, 0xe9, 0xc4, 0xc6, 0x8f, 0x7d, 0x17, 0xe0, 0xdb, 0xc8, 0xfe, 0x7c, 0xa2, 0xd8, 0x67, 0xcb, 0x59, 0x7c, 0x24, 0xd0, 0x02, 0xcd, 0x51, 0x7b, 0xb1, 0xc7, 0xa9, 0xcf, 0x1d, 0x7b, 0x55, 
0xbf, 0x64, 0xd0, 0x2a, 0x7b, 0x47, 0xb7, 0x6b, 0xd1, 0x15, 0x7b, 0x4a, 0xaf, 0x7a, 0xd1, 0x95, 0x7b, 0xe3, 0xa7, 0xae, 0xd2, 0x0c, 0x7c, 0x77, 0x9f, 0xde, 0xd2, 0x62, 0x7d, 0x92, 0x97, 0xf6, 0xd2, 0xaf, 0x7e, 0x9a, 0x90, 0x05, 0xd3, 0x6b, 0x7f, 0xa7, 0x88, 0x44, 0xd4, 0x2e, 0x80, 0x80, 0x80, 0x80, 0xd5, 0x5b, 0x83, 0x24, 0x78, 0xce, 0xd6, 0x6d, 0x85, 0x7e, 0x71, 0x17, 0xd7, 0xcb, 0x88, 0x7d, 0x68, 0xca, 0xcd, 0x2a, 0x75, 0x9e, 0xec, 0x1c, 0xcf, 0xe7, 0x75, 0x3f, 0xe2, 0xe6, 0xd1, 0xfd, 0x74, 0xf8, 0xda, 0xe1, 0xd3, 0xf3, 0x74, 0xb4, 0xd3, 0x00, 0xd5, 0xa6, 0x74, 0x77, 0xcb, 0x1b, 0xd7, 0x20, 0x74, 0x40, 0xc3, 0x30, 0xd8, 0x30, 0x74, 0x38, 0xbb, 0x6a, 0xd8, 0xfe, 0x74, 0x52, 0xb3, 0xb8, 0xd9, 0xb8, 0x74, 0xbd, 0xac, 0x18, 0xda, 0x59, 0x75, 0x6c, 0xa4, 0x84, 0xda, 0xe7, 0x76, 0x50, 0x9c, 0xec, 0xdb, 0x60, 0x77, 0x78, 0x95, 0x50, 0xdb, 0xee, 0x78, 0xa3, 0x8d, 0x9a, 0xdc, 0xb9, 0x79, 0xea, 0x85, 0xab, 0xdd, 0xc9, 0x7b, 0x8b, 0x7d, 0xa2, 0xdf, 0x68, 0x7e, 0x0c, 0x75, 0x8b, 0xe1, 0x26, 0x80, 0xac, 0x6d, 0x44, 0xd6, 0x6b, 0x6d, 0xf7, 0xec, 0xf3, 0xd8, 0xc4, 0x6d, 0xb5, 0xe4, 0xa5, 0xda, 0xd4, 0x6d, 0x84, 0xdd, 0x12, 0xdc, 0xb3, 0x6d, 0x5c, 0xd5, 0xbb, 0xde, 0x66, 0x6d, 0x33, 0xce, 0x69, 0xdf, 0x80, 0x6d, 0x1f, 0xc6, 0xd0, 0xe0, 0x7f, 0x6d, 0x1d, 0xbf, 0x47, 0xe1, 0x33, 0x6d, 0x69, 0xb7, 0xf3, 0xe1, 0xe7, 0x6d, 0xb0, 0xb0, 0xa0, 0xe2, 0x8b, 0x6e, 0x77, 0xa9, 0x3a, 0xe3, 0x2f, 0x6f, 0x38, 0xa1, 0xc7, 0xe3, 0xc4, 0x70, 0x5d, 0x9a, 0x40, 0xe4, 0x4e, 0x71, 0xa9, 0x92, 0xbe, 0xe5, 0x0a, 0x73, 0x25, 0x8a, 0xe5, 0xe5, 0xee, 0x74, 0xaa, 0x82, 0xdd, 0xe7, 0x6d, 0x77, 0x0e, 0x7a, 0x90, 0xe9, 0x32, 0x79, 0xcb, 0x72, 0x14, 0xe0, 0x96, 0x66, 0x0f, 0xed, 0x63, 0xe2, 0xf2, 0x65, 0xd9, 0xe6, 0x00, 0xe5, 0x3b, 0x65, 0x8a, 0xdf, 0x02, 0xe6, 0xc5, 0x65, 0x96, 0xd8, 0x48, 0xe8, 0x3f, 0x65, 0xa9, 0xd1, 0x9a, 0xe8, 0xfc, 0x66, 0x0c, 0xca, 0x9a, 0xe9, 0x9d, 0x66, 0x6b, 0xc3, 0x89, 0xea, 0x51, 0x66, 0xce, 0xbc, 0x8f, 0xea, 0xfc, 0x67, 0x46, 0xb5, 0x9d, 0xeb, 0x7c, 0x67, 0xee, 0xae, 0x88, 0xec, 
0x10, 0x68, 0xd4, 0xa7, 0x51, 0xec, 0x81, 0x69, 0xc6, 0x9f, 0xf4, 0xed, 0x0b, 0x6a, 0xfe, 0x98, 0x58, 0xed, 0x9a, 0x6c, 0x33, 0x90, 0xb2, 0xee, 0xa1, 0x6e, 0x04, 0x88, 0x70, 0xef, 0xe6, 0x6f, 0xee, 0x80, 0x23, 0xf2, 0x7e, 0x72, 0xa0, 0x77, 0x09, 0x6a, 0xd0, 0xcf, 0x73, 0xbd, 0xff, 0x6b, 0x8f, 0xd0, 0xba, 0xb6, 0xae, 0x6b, 0xb0, 0xd2, 0x7c, 0xaf, 0x76, 0x6a, 0x9b, 0xd5, 0x09, 0xa7, 0x53, 0x6a, 0xc9, 0xd6, 0x5f, 0x9f, 0xd0, 0x6a, 0x74, 0xd7, 0x6c, 0x97, 0x5e, 0x6a, 0x14, 0xd8, 0x64, 0x8f, 0x10, 0x69, 0xab, 0xd9, 0x15, 0x87, 0x3a, 0x69, 0x6d, 0xd9, 0x93, 0x7f, 0x80, 0x68, 0xb9, 0xda, 0x51, 0x78, 0x88, 0x68, 0x5d, 0xda, 0xc4, 0x71, 0x81, 0x67, 0xc3, 0xdb, 0x5d, 0x6b, 0x42, 0x66, 0xaa, 0xdc, 0x26, 0x65, 0x0e, 0x66, 0x93, 0xdc, 0x46, 0x5e, 0xf2, 0x65, 0xed, 0xdc, 0x1e, 0x58, 0x55, 0x66, 0x16, 0xdb, 0xa1, 0x51, 0xc1, 0x66, 0x05, 0xdb, 0x39, 0x4b, 0x29, 0x6f, 0xd1, 0xcb, 0x08, 0xc0, 0x55, 0x71, 0x23, 0xcb, 0xbe, 0xb8, 0xbb, 0x72, 0x4c, 0xcc, 0x98, 0xb1, 0x6e, 0x72, 0xd9, 0xcd, 0xd2, 0xa9, 0x8e, 0x73, 0x1a, 0xcf, 0x3c, 0xa1, 0x98, 0x73, 0x2b, 0xd0, 0x87, 0x99, 0x10, 0x73, 0x0e, 0xd1, 0xbc, 0x90, 0x75, 0x72, 0xb5, 0xd2, 0xe6, 0x88, 0x68, 0x72, 0x44, 0xd3, 0xfa, 0x80, 0x74, 0x71, 0xa1, 0xd4, 0xf5, 0x79, 0x93, 0x70, 0xfa, 0xd5, 0xd6, 0x72, 0xaf, 0x70, 0x70, 0xd6, 0xb2, 0x6c, 0x66, 0x6f, 0xf0, 0xd7, 0x7d, 0x66, 0x68, 0x6f, 0x6a, 0xd8, 0x37, 0x60, 0x6a, 0x6f, 0x07, 0xd8, 0x07, 0x59, 0xa9, 0x6e, 0xa8, 0xd7, 0xca, 0x52, 0xc8, 0x6e, 0xad, 0xd7, 0x4f, 0x4c, 0x1e, 0x74, 0xd1, 0xc6, 0xf0, 0xc2, 0xe9, 0x76, 0x96, 0xc7, 0x22, 0xba, 0xd2, 0x78, 0x18, 0xc7, 0x8a, 0xb3, 0x34, 0x79, 0x1c, 0xc8, 0x52, 0xab, 0x53, 0x79, 0xac, 0xc9, 0x68, 0xa3, 0x3f, 0x7a, 0x11, 0xca, 0x8d, 0x9a, 0xe9, 0x7a, 0x5a, 0xcb, 0xc4, 0x92, 0x5a, 0x7a, 0x59, 0xcd, 0x20, 0x89, 0xf7, 0x7a, 0x33, 0xce, 0x8f, 0x81, 0x92, 0x79, 0xb5, 0xd0, 0x03, 0x7a, 0x7a, 0x79, 0x16, 0xd1, 0x26, 0x73, 0xbf, 0x78, 0x89, 0xd2, 0x3b, 0x6d, 0x67, 0x78, 0x10, 0xd3, 0x42, 0x67, 0x7b, 0x77, 0x90, 0xd4, 0x35, 0x61, 0x8f, 0x77, 0x20, 0xd4, 0x31, 
0x5a, 0xe3, 0x76, 0xc0, 0xd3, 0xec, 0x53, 0xd0, 0x76, 0xb6, 0xd3, 0x5e, 0x4c, 0xf8, 0x79, 0xe6, 0xc2, 0xd5, 0xc5, 0xec, 0x7b, 0xd7, 0xc2, 0xbd, 0xbd, 0x19, 0x7d, 0x9c, 0xc2, 0xcd, 0xb5, 0x3c, 0x7f, 0x1c, 0xc3, 0x1f, 0xad, 0x52, 0x80, 0x18, 0xc3, 0xf3, 0xa5, 0x27, 0x80, 0xe7, 0xc4, 0xe4, 0x9c, 0xf2, 0x81, 0x78, 0xc5, 0xef, 0x94, 0x7f, 0x81, 0xc1, 0xc7, 0x20, 0x8c, 0x26, 0x81, 0xc8, 0xc8, 0x71, 0x83, 0xd6, 0x81, 0x90, 0xc9, 0xe4, 0x7c, 0x5e, 0x81, 0x1f, 0xcb, 0x65, 0x75, 0x89, 0x80, 0x99, 0xcc, 0xe2, 0x6e, 0xd5, 0x80, 0x1b, 0xce, 0x67, 0x68, 0xb4, 0x7f, 0x89, 0xcf, 0xd7, 0x62, 0xb5, 0x7f, 0x18, 0xd0, 0x1c, 0x5c, 0x37, 0x7e, 0xc7, 0xcf, 0x98, 0x55, 0x13, 0x7e, 0xc6, 0xce, 0xf4, 0x4d, 0xf2, 0x7f, 0x30, 0xbe, 0x6c, 0xca, 0x3f, 0x81, 0x8f, 0xbe, 0x00, 0xbf, 0xaa, 0x83, 0xaa, 0xbd, 0xdf, 0xb7, 0xc1, 0x85, 0x9e, 0xbd, 0xd0, 0xaf, 0xda, 0x86, 0xd8, 0xbe, 0xa3, 0xa7, 0x92, 0x87, 0xe8, 0xbf, 0x8d, 0x9f, 0x52, 0x88, 0x8a, 0xc0, 0x79, 0x96, 0xd2, 0x89, 0x11, 0xc1, 0x64, 0x8e, 0x76, 0x89, 0x40, 0xc2, 0x84, 0x86, 0x58, 0x89, 0x49, 0xc3, 0xca, 0x7e, 0x8d, 0x89, 0x09, 0xc5, 0x63, 0x77, 0xd0, 0x88, 0xac, 0xc6, 0xed, 0x71, 0x11, 0x88, 0x54, 0xc8, 0x66, 0x6a, 0xb5, 0x87, 0xe7, 0xc9, 0xdb, 0x64, 0x7c, 0x87, 0x89, 0xca, 0xeb, 0x5e, 0x21, 0x87, 0x9b, 0xca, 0xaa, 0x57, 0x11, 0x87, 0xb8, 0xca, 0x76, 0x4f, 0x6f, 0x86, 0x1e, 0xb8, 0xef, 0xce, 0xa8, 0x88, 0xda, 0xb8, 0x42, 0xc3, 0x78, 0x8a, 0xe8, 0xb8, 0x02, 0xba, 0xfe, 0x8c, 0xc4, 0xb7, 0xfb, 0xb2, 0xee, 0x8e, 0x49, 0xb8, 0x51, 0xaa, 0xb6, 0x8f, 0x88, 0xb8, 0xe6, 0xa2, 0x66, 0x90, 0x58, 0xb9, 0xbf, 0x99, 0xed, 0x90, 0xef, 0xba, 0xb2, 0x91, 0x63, 0x91, 0x1d, 0xbb, 0xee, 0x89, 0x3a, 0x91, 0x31, 0xbd, 0x25, 0x81, 0x12, 0x90, 0xe2, 0xbf, 0x11, 0x7a, 0x00, 0x90, 0x97, 0xc0, 0xe0, 0x73, 0x3d, 0x90, 0x61, 0xc2, 0x64, 0x6c, 0xb8, 0x90, 0x20, 0xc3, 0xce, 0x66, 0x5b, 0x8f, 0xc5, 0xc5, 0x3c, 0x60, 0x13, 0x90, 0x13, 0xc5, 0x60, 0x58, 0xfd, 0x90, 0x61, 0xc5, 0x8e, 0x51, 0xb2, 0x8c, 0xe3, 0xb3, 0xef, 0xd1, 0xa7, 0x8f, 0xaf, 0xb2, 0xeb, 0xc7, 0x6d, 0x92, 
0x2b, 0xb2, 0x3e, 0xbe, 0x95, 0x94, 0x28, 0xb2, 0x40, 0xb6, 0x79, 0x95, 0xfb, 0xb2, 0x4e, 0xae, 0x59, 0x97, 0x6e, 0xb2, 0x9f, 0xa5, 0xf3, 0x98, 0x8f, 0xb3, 0x1d, 0x9d, 0x83, 0x99, 0x12, 0xb3, 0xf4, 0x94, 0xf2, 0x99, 0x6d, 0xb5, 0x05, 0x8c, 0xa7, 0x99, 0x96, 0xb6, 0x55, 0x84, 0xaf, 0x99, 0x84, 0xb7, 0xe8, 0x7d, 0x27, 0x99, 0x2a, 0xb9, 0xdb, 0x76, 0x3f, 0x98, 0xc1, 0xbb, 0xc1, 0x6f, 0x5d, 0x98, 0x9c, 0xbd, 0x50, 0x68, 0xae, 0x98, 0x5a, 0xbe, 0xe9, 0x62, 0x1d, 0x98, 0x93, 0xbf, 0xbc, 0x5b, 0x29, 0x99, 0x11, 0xc0, 0x2c, 0x53, 0xe5, 0x94, 0x00, 0xae, 0xb7, 0xd5, 0x1b, 0x97, 0x5f, 0xad, 0x24, 0xcb, 0x6e, 0x9a, 0x23, 0xac, 0x38, 0xc2, 0xbd, 0x9c, 0x48, 0xab, 0xde, 0xba, 0x67, 0x9e, 0x1c, 0xab, 0xb3, 0xb2, 0x2d, 0x9f, 0xa2, 0xab, 0xd9, 0xa9, 0xbe, 0xa1, 0x04, 0xac, 0x29, 0xa1, 0x3f, 0xa1, 0x6e, 0xac, 0xf3, 0x98, 0xa1, 0xa1, 0xa4, 0xad, 0xd1, 0x8f, 0xf9, 0xa1, 0xde, 0xaf, 0x44, 0x87, 0xfe, 0xa2, 0x0f, 0xb0, 0xa1, 0x80, 0x09, 0xa1, 0xe4, 0xb2, 0xa2, 0x79, 0x3a, 0xa1, 0x95, 0xb4, 0x97, 0x72, 0x66, 0xa1, 0x8c, 0xb6, 0x23, 0x6b, 0x8d, 0xa1, 0x9c, 0xb7, 0x82, 0x64, 0xc4, 0xa1, 0xdc, 0xb8, 0xc6, 0x5e, 0x05, 0xa2, 0xb8, 0xb9, 0xb9, 0x56, 0xed, 0x9c, 0xfa, 0xa7, 0x89, 0xd9, 0xb3, 0xa0, 0x5d, 0xa6, 0x11, 0xcf, 0x03, 0xa3, 0x02, 0xa5, 0x91, 0xc6, 0xc8, 0xa5, 0x54, 0xa5, 0x25, 0xbe, 0x94, 0xa6, 0xfb, 0xa5, 0x02, 0xb6, 0x40, 0xa8, 0x6d, 0xa4, 0xfb, 0xad, 0xdf, 0xa9, 0x83, 0xa5, 0x43, 0xa5, 0x3d, 0xaa, 0x4d, 0xa5, 0xc7, 0x9c, 0xb5, 0xaa, 0xb8, 0xa6, 0xa5, 0x94, 0x51, 0xaa, 0xe9, 0xa7, 0xcc, 0x8c, 0x2a, 0xaa, 0xe4, 0xa9, 0x32, 0x84, 0x3a, 0xaa, 0xcd, 0xaa, 0xe3, 0x7c, 0xb6, 0xaa, 0x9f, 0xac, 0xfa, 0x75, 0xba, 0xaa, 0x6d, 0xae, 0xff, 0x6e, 0xbc, 0xaa, 0xb5, 0xb0, 0x5c, 0x67, 0x9a, 0xaa, 0xf5, 0xb1, 0xa8, 0x60, 0xaa, 0xab, 0xf1, 0xb3, 0x04, 0x59, 0x9d, 0xa6, 0xdc, 0xa0, 0x60, 0xdd, 0x82, 0xa9, 0xed, 0x9f, 0x63, 0xd3, 0x82, 0xac, 0x69, 0x9e, 0xc7, 0xca, 0xf4, 0xae, 0x9a, 0x9e, 0x49, 0xc2, 0xc2, 0xb0, 0x3c, 0x9e, 0x0b, 0xba, 0x76, 0xb1, 0x8d, 0x9d, 0xf4, 0xb2, 0x1c, 0xb2, 0x67, 0x9e, 0x28, 
0xa9, 0x71, 0xb3, 0x0d, 0x9e, 0x76, 0xa0, 0xa9, 0xb3, 0x7b, 0x9f, 0x5a, 0x98, 0x71, 0xb3, 0xde, 0xa0, 0x41, 0x90, 0x3d, 0xb3, 0xe9, 0xa1, 0xc1, 0x88, 0x83, 0xb3, 0xd5, 0xa3, 0x30, 0x80, 0xb9, 0xb3, 0xde, 0xa5, 0x08, 0x79, 0x9d, 0xb3, 0xcf, 0xa6, 0xf0, 0x72, 0xa1, 0xb4, 0x28, 0xa8, 0xa3, 0x6b, 0x67, 0xb4, 0xac, 0xaa, 0x22, 0x64, 0x0f, 0xb5, 0x60, 0xab, 0xc9, 0x5c, 0x93, 0xb0, 0xfd, 0x99, 0x17, 0xe1, 0x79, 0xb3, 0xac, 0x98, 0x7a, 0xd8, 0x26, 0xb6, 0x0b, 0x97, 0xed, 0xcf, 0x78, 0xb8, 0x08, 0x97, 0x8a, 0xc7, 0x3d, 0xb9, 0xd2, 0x97, 0x3a, 0xbe, 0xf7, 0xbb, 0x1a, 0x97, 0x0f, 0xb6, 0x91, 0xbc, 0x2e, 0x97, 0x08, 0xae, 0x23, 0xbc, 0xb3, 0x97, 0x83, 0xa5, 0x91, 0xbd, 0x0f, 0x98, 0x2b, 0x9d, 0x1e, 0xbd, 0x2e, 0x99, 0x1c, 0x94, 0xdd, 0xbd, 0x39, 0x9a, 0x3f, 0x8c, 0xd7, 0xbd, 0x2c, 0x9b, 0xad, 0x85, 0x2b, 0xbd, 0x23, 0x9d, 0x3c, 0x7d, 0xa2, 0xbd, 0x11, 0x9e, 0xf7, 0x76, 0x69, 0xbd, 0x03, 0xa0, 0xad, 0x6f, 0x3e, 0xbe, 0x26, 0xa2, 0x57, 0x67, 0xa2, 0xbf, 0x19, 0xa3, 0xfd, 0x5f, 0xf3, 0xba, 0x14, 0x92, 0x46, 0xe6, 0x2e, 0xbc, 0xeb, 0x91, 0x9d, 0xdc, 0x96, 0xbf, 0x28, 0x91, 0x1a, 0xd3, 0xf5, 0xc1, 0x1f, 0x90, 0xba, 0xcb, 0x83, 0xc2, 0xdd, 0x90, 0x70, 0xc3, 0x30, 0xc4, 0x31, 0x90, 0x3f, 0xba, 0xe0, 0xc5, 0x3d, 0x90, 0x25, 0xb2, 0x8b, 0xc5, 0xcf, 0x90, 0x6c, 0xaa, 0x2d, 0xc6, 0x2a, 0x90, 0xda, 0xa1, 0xc9, 0xc6, 0x5f, 0x91, 0xbf, 0x99, 0xa0, 0xc6, 0x7f, 0x92, 0xb7, 0x91, 0x78, 0xc6, 0x94, 0x93, 0xf7, 0x89, 0xbc, 0xc6, 0x94, 0x95, 0x4a, 0x82, 0x13, 0xc6, 0xf7, 0x97, 0x06, 0x7a, 0xa2, 0xc7, 0x40, 0x98, 0xd3, 0x73, 0x53, 0xc7, 0xf9, 0x9a, 0xc8, 0x6b, 0xd2, 0xc9, 0x35, 0x9c, 0xd2, 0x64, 0x06, 0xc2, 0xfd, 0x8b, 0x85, 0xea, 0x48, 0xc5, 0xc9, 0x8a, 0xfe, 0xe0, 0xa8, 0xc7, 0xe5, 0x8a, 0x95, 0xd8, 0x37, 0xc9, 0xef, 0x8a, 0x2d, 0xcf, 0xdd, 0xcb, 0xaa, 0x89, 0xd0, 0xc7, 0x87, 0xcd, 0x41, 0x89, 0x80, 0xbf, 0x35, 0xce, 0x2b, 0x89, 0x75, 0xb6, 0xf3, 0xce, 0xee, 0x89, 0x81, 0xae, 0xb7, 0xcf, 0x28, 0x89, 0xf2, 0xa6, 0x95, 0xcf, 0x57, 0x8a, 0x7b, 0x9e, 0x70, 0xcf, 0x6f, 0x8b, 0x6a, 0x96, 0x4f, 0xcf, 
0x8b, 0x8c, 0x5d, 0x8e, 0x46, 0xcf, 0xc5, 0x8d, 0x6f, 0x86, 0xa3, 0xd0, 0x05, 0x8e, 0x9f, 0x7f, 0x02, 0xd0, 0xdf, 0x90, 0x91, 0x77, 0x83, 0xd1, 0x86, 0x92, 0x8a, 0x70, 0x20, 0xd2, 0xeb, 0x95, 0x44, 0x68, 0x44, 0xcb, 0x68, 0x84, 0xdd, 0xed, 0x8b, 0xcd, 0xf0, 0x84, 0x77, 0xe4, 0x7d, 0xd0, 0x17, 0x84, 0x0e, 0xdc, 0x39, 0xd1, 0xf5, 0x83, 0x9b, 0xd3, 0xc6, 0xd3, 0x9b, 0x83, 0x38, 0xcb, 0x61, 0xd5, 0x0d, 0x82, 0xe4, 0xc3, 0x0c, 0xd6, 0x1a, 0x82, 0xbd, 0xba, 0xe3, 0xd6, 0xef, 0x82, 0xae, 0xb2, 0xd0, 0xd7, 0x74, 0x82, 0xfe, 0xaa, 0xe4, 0xd7, 0xcb, 0x83, 0x78, 0xa3, 0x08, 0xd8, 0x21, 0x84, 0x39, 0x9b, 0x32, 0xd8, 0x6e, 0x85, 0x21, 0x93, 0x5b, 0xd8, 0xdd, 0x86, 0x29, 0x8b, 0x87, 0xd9, 0x50, 0x87, 0x50, 0x83, 0xb3, 0xda, 0x07, 0x89, 0x18, 0x7b, 0xea, 0xda, 0xee, 0x8b, 0x40, 0x74, 0x2d, 0xdb, 0xff, 0x8d, 0x8b, 0x6c, 0x3d, 0xd2, 0xc2, 0x7e, 0x26, 0xf0, 0xf0, 0xd5, 0x63, 0x7d, 0xce, 0xe7, 0xb3, 0xd7, 0x7b, 0x7d, 0x62, 0xdf, 0x96, 0xd9, 0x7a, 0x7c, 0xd4, 0xd7, 0x1d, 0xdb, 0x5c, 0x7c, 0x50, 0xce, 0xcc, 0xdc, 0xbb, 0x7c, 0x00, 0xc6, 0xb8, 0xdd, 0xf5, 0x7b, 0xbf, 0xbe, 0xb8, 0xde, 0xbc, 0x7b, 0xc1, 0xb6, 0xe3, 0xdf, 0x75, 0x7b, 0xd6, 0xaf, 0x15, 0xdf, 0xe8, 0x7c, 0x64, 0xa7, 0x6a, 0xe0, 0x56, 0x7c, 0xf0, 0x9f, 0xbe, 0xe0, 0xc7, 0x7d, 0xd8, 0x98, 0x27, 0xe1, 0x31, 0x7e, 0xab, 0x90, 0x89, 0xe2, 0x0c, 0x7f, 0xb1, 0x88, 0x86, 0xe2, 0xe1, 0x80, 0x80, 0x80, 0x80, 0xe4, 0x46, 0x83, 0x7c, 0x78, 0xa4, 0xe5, 0x91, 0x86, 0x11, 0x70, 0xa6, 0xdb, 0xb6, 0x76, 0xaa, 0xf1, 0xf2, 0xde, 0x0e, 0x76, 0x4b, 0xe9, 0x87, 0xe0, 0x1d, 0x75, 0xde, 0xe1, 0x9e, 0xe1, 0xf0, 0x75, 0x94, 0xd9, 0xc7, 0xe3, 0x9d, 0x75, 0x5a, 0xd2, 0x09, 0xe4, 0xde, 0x75, 0x28, 0xca, 0x57, 0xe5, 0xfb, 0x74, 0xf3, 0xc2, 0xb3, 0xe6, 0xd9, 0x74, 0xf9, 0xbb, 0x31, 0xe7, 0x97, 0x75, 0x1e, 0xb3, 0xbd, 0xe8, 0x3c, 0x75, 0x8a, 0xac, 0x3f, 0xe8, 0xc4, 0x76, 0x32, 0xa4, 0xb0, 0xe9, 0x43, 0x77, 0x04, 0x9d, 0x15, 0xe9, 0xba, 0x78, 0x0f, 0x95, 0x70, 0xea, 0x51, 0x79, 0x21, 0x8d, 0xa9, 0xeb, 0x47, 0x7a, 0x51, 0x85, 0xa7, 0xec, 0x7e, 0x7b, 0xd5, 
0x7d, 0x8f, 0xee, 0x51, 0x7e, 0x7a, 0x75, 0x06, 0xe5, 0x58, 0x6e, 0xca, 0xf2, 0xfa, 0xe7, 0x7d, 0x6e, 0x7e, 0xeb, 0x12, 0xe9, 0x85, 0x6e, 0x2c, 0xe3, 0x82, 0xeb, 0x28, 0x6e, 0x14, 0xdc, 0x2c, 0xec, 0x86, 0x6e, 0x1f, 0xd5, 0x09, 0xed, 0xb7, 0x6e, 0x25, 0xcd, 0xe5, 0xee, 0xad, 0x6e, 0x07, 0xc6, 0xb4, 0xef, 0x97, 0x6d, 0xf3, 0xbf, 0x94, 0xf0, 0x49, 0x6e, 0x35, 0xb8, 0x81, 0xf0, 0xf0, 0x6e, 0x7c, 0xb1, 0x66, 0xf1, 0x7f, 0x6f, 0x20, 0xaa, 0x01, 0xf1, 0xff, 0x6f, 0xdc, 0xa2, 0x79, 0xf2, 0x85, 0x71, 0x03, 0x9a, 0xcf, 0xf3, 0x11, 0x72, 0x36, 0x93, 0x1a, 0xf3, 0xf7, 0x73, 0xaf, 0x8b, 0x19, 0xf5, 0x2a, 0x75, 0x07, 0x82, 0xf4, 0xf9, 0x73, 0x77, 0x19, 0x79, 0x69, 0x73, 0x33, 0xd2, 0xf4, 0xc1, 0x26, 0x74, 0x02, 0xd4, 0x02, 0xb9, 0xe6, 0x74, 0x94, 0xd5, 0x4f, 0xb2, 0xe6, 0x74, 0x6f, 0xd6, 0xfe, 0xab, 0x77, 0x74, 0x32, 0xd8, 0x88, 0xa3, 0xe1, 0x73, 0xab, 0xda, 0x0a, 0x9c, 0x0b, 0x73, 0x08, 0xdb, 0x44, 0x93, 0xd6, 0x72, 0x48, 0xdc, 0x44, 0x8b, 0xfc, 0x71, 0x76, 0xdd, 0x0c, 0x84, 0x61, 0x70, 0xad, 0xdd, 0xac, 0x7d, 0x38, 0x6f, 0xf2, 0xde, 0x30, 0x76, 0x79, 0x6f, 0x3e, 0xde, 0xa7, 0x6f, 0xc3, 0x6e, 0x8e, 0xdf, 0x12, 0x69, 0xc5, 0x6d, 0xdf, 0xdf, 0x6e, 0x63, 0xc3, 0x6d, 0x44, 0xdf, 0x87, 0x5d, 0x9b, 0x6c, 0xc9, 0xdf, 0x43, 0x57, 0x3c, 0x6c, 0x4c, 0xde, 0xff, 0x50, 0xb8, 0x78, 0x99, 0xce, 0x7c, 0xc3, 0x0f, 0x79, 0xf8, 0xcf, 0x01, 0xbb, 0x84, 0x7a, 0xed, 0xd0, 0x00, 0xb4, 0x7c, 0x7b, 0x70, 0xd1, 0x2f, 0xad, 0x47, 0x7b, 0x4b, 0xd2, 0x98, 0xa5, 0x78, 0x7b, 0x13, 0xd4, 0x09, 0x9d, 0x99, 0x7a, 0xdf, 0xd5, 0x54, 0x95, 0x4f, 0x7a, 0x73, 0xd6, 0x90, 0x8d, 0x3b, 0x79, 0xd4, 0xd7, 0xa3, 0x85, 0x76, 0x79, 0x1d, 0xd8, 0x9a, 0x7e, 0x1c, 0x78, 0x60, 0xd9, 0x6f, 0x77, 0x75, 0x77, 0xa0, 0xda, 0x2e, 0x70, 0xc2, 0x77, 0x01, 0xda, 0xd8, 0x6a, 0xcd, 0x76, 0x62, 0xdb, 0x6f, 0x64, 0xe4, 0x75, 0xc4, 0xdb, 0xd4, 0x5e, 0xdf, 0x75, 0x3e, 0xdb, 0x81, 0x58, 0x52, 0x74, 0xb5, 0xdb, 0x2e, 0x51, 0xa3, 0x7d, 0xb3, 0xca, 0x69, 0xc5, 0xb2, 0x7f, 0x5c, 0xca, 0x76, 0xbd, 0x99, 0x80, 0xa2, 0xcb, 0x25, 0xb6, 0x5c, 0x81, 
0xc1, 0xcb, 0xfd, 0xaf, 0x33, 0x81, 0xfe, 0xcd, 0x2f, 0xa7, 0x42, 0x82, 0x0b, 0xce, 0x77, 0x9f, 0x5e, 0x82, 0x29, 0xcf, 0xd7, 0x96, 0xf3, 0x82, 0x18, 0xd1, 0x2d, 0x8e, 0xb3, 0x81, 0xb8, 0xd2, 0x66, 0x86, 0xa9, 0x81, 0x32, 0xd3, 0x8d, 0x7e, 0xf8, 0x80, 0x7d, 0xd4, 0x99, 0x78, 0x69, 0x7f, 0xc0, 0xd5, 0x93, 0x71, 0xcd, 0x7f, 0x23, 0xd6, 0x7e, 0x6b, 0xce, 0x7e, 0x8c, 0xd7, 0x52, 0x65, 0xf5, 0x7d, 0xf0, 0xd8, 0x13, 0x60, 0x1c, 0x7d, 0x5e, 0xd7, 0xb0, 0x59, 0x73, 0x7c, 0xd2, 0xd7, 0x46, 0x52, 0x9f, 0x83, 0x3e, 0xc6, 0x73, 0xc8, 0xb6, 0x85, 0x2e, 0xc6, 0x4f, 0xc0, 0x24, 0x86, 0xa7, 0xc6, 0xc5, 0xb8, 0xb0, 0x87, 0xff, 0xc7, 0x49, 0xb1, 0x51, 0x88, 0xc1, 0xc8, 0x3e, 0xa9, 0x7c, 0x89, 0x43, 0xc9, 0x59, 0xa1, 0xa6, 0x89, 0x84, 0xca, 0x80, 0x99, 0x5f, 0x89, 0x9d, 0xcb, 0xb9, 0x90, 0xfd, 0x89, 0x68, 0xcd, 0x07, 0x88, 0xdb, 0x89, 0x15, 0xce, 0x66, 0x80, 0xb0, 0x88, 0x76, 0xcf, 0xd0, 0x79, 0xec, 0x87, 0xc6, 0xd0, 0xf9, 0x73, 0x5d, 0x87, 0x22, 0xd2, 0x0e, 0x6d, 0x28, 0x86, 0x8f, 0xd3, 0x16, 0x67, 0x42, 0x85, 0xfa, 0xd4, 0x0d, 0x61, 0x5e, 0x85, 0x8e, 0xd3, 0xfd, 0x5a, 0xd4, 0x85, 0x2f, 0xd3, 0x90, 0x53, 0xe8, 0x89, 0x5b, 0xc2, 0x6e, 0xcb, 0xd2, 0x8b, 0x27, 0xc2, 0x3d, 0xc3, 0x24, 0x8c, 0xc0, 0xc2, 0x54, 0xbb, 0x30, 0x8e, 0x2e, 0xc2, 0x9a, 0xb3, 0x8a, 0x8f, 0x4a, 0xc3, 0x38, 0xab, 0xb8, 0x90, 0x19, 0xc4, 0x25, 0xa3, 0xcb, 0x90, 0xa0, 0xc5, 0x20, 0x9b, 0xb1, 0x90, 0xdf, 0xc6, 0x1e, 0x93, 0x56, 0x90, 0xe0, 0xc7, 0x34, 0x8b, 0x40, 0x90, 0xb6, 0xc8, 0x5a, 0x83, 0x3f, 0x90, 0x56, 0xc9, 0xc3, 0x7c, 0x1e, 0x8f, 0xcb, 0xcb, 0x3b, 0x75, 0x7f, 0x8f, 0x2d, 0xcc, 0xa2, 0x6e, 0xf6, 0x8e, 0x8b, 0xce, 0x07, 0x68, 0xd0, 0x8d, 0xd3, 0xcf, 0x63, 0x62, 0xbc, 0x8d, 0x87, 0xcf, 0xec, 0x5c, 0x54, 0x8d, 0xa3, 0xcf, 0xbb, 0x55, 0x2b, 0x8f, 0xf0, 0xbd, 0xde, 0xcf, 0x18, 0x91, 0xeb, 0xbd, 0x92, 0xc6, 0x70, 0x93, 0xce, 0xbd, 0x79, 0xbe, 0x44, 0x95, 0x5c, 0xbd, 0xcb, 0xb6, 0x7e, 0x96, 0xbc, 0xbe, 0x35, 0xae, 0xb8, 0x97, 0x81, 0xbf, 0x17, 0xa6, 0x8e, 0x98, 0x12, 0xbf, 0xff, 0x9e, 0x63, 0x98, 0x5e, 0xc0, 0xb7, 
0x96, 0x11, 0x98, 0x84, 0xc1, 0x85, 0x8d, 0xec, 0x98, 0x69, 0xc2, 0x80, 0x86, 0x1c, 0x98, 0x33, 0xc3, 0xa4, 0x7e, 0x99, 0x97, 0xcd, 0xc5, 0x2c, 0x78, 0x04, 0x97, 0x53, 0xc6, 0xa5, 0x71, 0x69, 0x96, 0xf4, 0xc7, 0xfa, 0x6b, 0x13, 0x96, 0x87, 0xc9, 0x4c, 0x64, 0xd3, 0x96, 0x21, 0xca, 0x79, 0x5e, 0x7d, 0x96, 0x3d, 0xca, 0xe5, 0x57, 0x63, 0x96, 0xfd, 0xb9, 0x26, 0xd2, 0xc3, 0x99, 0x60, 0xb8, 0x88, 0xca, 0x22, 0x9b, 0x75, 0xb8, 0x31, 0xc2, 0x00, 0x9d, 0x21, 0xb8, 0x40, 0xba, 0x1a, 0x9e, 0x9a, 0xb8, 0x6e, 0xb2, 0x4a, 0x9f, 0xa4, 0xb8, 0xe7, 0xaa, 0x26, 0xa0, 0x71, 0xb9, 0x80, 0xa1, 0xee, 0xa0, 0xac, 0xba, 0x3c, 0x99, 0x8a, 0xa0, 0xbb, 0xba, 0xfe, 0x91, 0x18, 0xa0, 0x97, 0xbc, 0x21, 0x89, 0x2d, 0xa0, 0x57, 0xbd, 0x3b, 0x81, 0x3b, 0x9f, 0xc2, 0xbf, 0x03, 0x7a, 0x43, 0x9f, 0x34, 0xc0, 0xb2, 0x73, 0x94, 0x9e, 0xee, 0xc2, 0x0d, 0x6d, 0x25, 0x9e, 0xce, 0xc3, 0x43, 0x66, 0xd7, 0x9e, 0x9a, 0xc4, 0x84, 0x60, 0x96, 0x9e, 0xd5, 0xc5, 0x50, 0x59, 0x82, 0x9e, 0x95, 0xb3, 0xc6, 0xd6, 0x8a, 0xa0, 0xe2, 0xb3, 0x00, 0xcd, 0xe6, 0xa3, 0x16, 0xb2, 0xb4, 0xc5, 0xfa, 0xa5, 0x09, 0xb2, 0x7c, 0xbe, 0x1c, 0xa6, 0x88, 0xb2, 0x8b, 0xb6, 0x2d, 0xa7, 0xd1, 0xb2, 0xab, 0xae, 0x28, 0xa8, 0xb3, 0xb3, 0x0a, 0xa5, 0xc4, 0xa9, 0x3f, 0xb3, 0x8c, 0x9d, 0x5c, 0xa9, 0x3c, 0xb4, 0x5a, 0x94, 0xe6, 0xa9, 0x25, 0xb5, 0x54, 0x8c, 0xc1, 0xa8, 0xf0, 0xb6, 0x82, 0x84, 0xf9, 0xa8, 0xac, 0xb7, 0xf5, 0x7d, 0x9e, 0xa8, 0x42, 0xb9, 0xd5, 0x76, 0xdd, 0xa7, 0xb1, 0xbb, 0xad, 0x70, 0x0c, 0xa7, 0x98, 0xbc, 0xf8, 0x69, 0x5c, 0xa7, 0x6a, 0xbe, 0x4a, 0x62, 0xc2, 0xa7, 0x86, 0xbf, 0x4a, 0x5b, 0xc5, 0xa7, 0xd2, 0xad, 0xce, 0xdb, 0x1e, 0xaa, 0x4c, 0xad, 0x2a, 0xd2, 0x1c, 0xac, 0x52, 0xac, 0xbe, 0xca, 0x15, 0xae, 0x25, 0xac, 0x59, 0xc2, 0x3e, 0xaf, 0x77, 0xac, 0x3b, 0xba, 0x3e, 0xb0, 0x8a, 0xac, 0x3e, 0xb2, 0x30, 0xb1, 0x47, 0xac, 0x7d, 0xa9, 0xbc, 0xb1, 0xda, 0xac, 0xd1, 0xa1, 0x25, 0xb1, 0xed, 0xad, 0x94, 0x98, 0xa2, 0xb1, 0xdb, 0xae, 0x63, 0x90, 0x13, 0xb1, 0x5c, 0xaf, 0x8d, 0x88, 0x05, 0xb0, 0xe8, 0xb0, 0xa9, 0x80, 0x13, 0xb0, 
0xe1, 0xb2, 0x92, 0x79, 0xa1, 0xb0, 0xb0, 0xb4, 0x71, 0x73, 0x16, 0xb0, 0xa4, 0xb5, 0xf3, 0x6c, 0x54, 0xb0, 0xc2, 0xb7, 0x2c, 0x65, 0x84, 0xb0, 0xe6, 0xb8, 0x65, 0x5e, 0x8f, 0xb1, 0xc4, 0xa6, 0xb7, 0xdf, 0xa8, 0xb4, 0x14, 0xa6, 0x53, 0xd6, 0xaf, 0xb6, 0x03, 0xa5, 0xf3, 0xce, 0x79, 0xb7, 0x87, 0xa5, 0xaa, 0xc6, 0x8a, 0xb8, 0xd7, 0xa5, 0x76, 0xbe, 0x89, 0xb9, 0xbb, 0xa5, 0x72, 0xb6, 0x4a, 0xba, 0x75, 0xa5, 0x8c, 0xad, 0xf2, 0xba, 0xc8, 0xa6, 0x08, 0xa5, 0x4f, 0xba, 0xf3, 0xa6, 0xa7, 0x9c, 0xca, 0xba, 0xe7, 0xa7, 0x77, 0x94, 0x6c, 0xba, 0xc2, 0xa8, 0x8a, 0x8c, 0x61, 0xba, 0x78, 0xa9, 0xda, 0x84, 0xa8, 0xba, 0x2d, 0xab, 0x55, 0x7d, 0x3a, 0xb9, 0xe7, 0xad, 0x21, 0x76, 0x50, 0xb9, 0x95, 0xae, 0xf1, 0x6f, 0x6d, 0xb9, 0xd2, 0xb0, 0x4b, 0x68, 0x41, 0xba, 0x00, 0xb1, 0x94, 0x61, 0x25, 0xbb, 0x8b, 0x9f, 0xf7, 0xe4, 0x7b, 0xbd, 0xba, 0x9f, 0x71, 0xdb, 0xaa, 0xbf, 0x88, 0x9f, 0x00, 0xd3, 0x7a, 0xc0, 0xe4, 0x9e, 0xc2, 0xcb, 0x47, 0xc1, 0xfd, 0x9e, 0xa2, 0xc3, 0x09, 0xc2, 0xdd, 0x9e, 0x84, 0xba, 0xb1, 0xc3, 0x96, 0x9e, 0x68, 0xb2, 0x47, 0xc3, 0xdc, 0x9e, 0xcc, 0xa9, 0xbd, 0xc3, 0xf7, 0x9f, 0x5b, 0xa1, 0x21, 0xc3, 0xcc, 0xa0, 0x25, 0x98, 0xb8, 0xc3, 0xa2, 0xa0, 0xfb, 0x90, 0x5a, 0xc3, 0x88, 0xa2, 0x6d, 0x88, 0xd6, 0xc3, 0x58, 0xa3, 0xd5, 0x81, 0x47, 0xc3, 0x32, 0xa5, 0x6b, 0x7a, 0x28, 0xc2, 0xfd, 0xa7, 0x0b, 0x73, 0x26, 0xc3, 0x45, 0xa8, 0xa5, 0x6b, 0xea, 0xc3, 0xd2, 0xaa, 0x29, 0x64, 0x58, 0xc4, 0x88, 0x99, 0x4e, 0xe8, 0xc6, 0xc6, 0xc7, 0x98, 0xc7, 0xdf, 0xe6, 0xc8, 0x59, 0x98, 0x7a, 0xd7, 0xc0, 0xc9, 0xda, 0x98, 0x33, 0xcf, 0xa8, 0xcb, 0x1c, 0x98, 0x04, 0xc7, 0x75, 0xcc, 0x47, 0x97, 0xd9, 0xbf, 0x44, 0xcc, 0xd5, 0x97, 0xdf, 0xb6, 0xdc, 0xcd, 0x44, 0x97, 0xfa, 0xae, 0x75, 0xcd, 0x45, 0x98, 0x66, 0xa6, 0x0a, 0xcd, 0x3d, 0x98, 0xf4, 0x9d, 0xac, 0xcd, 0x22, 0x99, 0xd1, 0x95, 0x71, 0xcc, 0xfe, 0x9a, 0xd7, 0x8d, 0x67, 0xcc, 0xc2, 0x9c, 0x31, 0x85, 0xb9, 0xcc, 0x83, 0x9d, 0xa8, 0x7e, 0x21, 0xcc, 0x54, 0x9f, 0x4a, 0x76, 0xe1, 0xcc, 0x1c, 0xa0, 0xdf, 0x6f, 0xb5, 0xcd, 0x81, 0xa2, 0xc0, 
0x68, 0x02, 0xcc, 0x9f, 0x92, 0xad, 0xec, 0xb7, 0xce, 0xbb, 0x92, 0x15, 0xe3, 0xe7, 0xd0, 0x75, 0x91, 0xa5, 0xdb, 0xb5, 0xd1, 0xf2, 0x91, 0x5e, 0xd3, 0x96, 0xd3, 0x41, 0x91, 0x21, 0xcb, 0x68, 0xd4, 0x68, 0x90, 0xf0, 0xc3, 0x2a, 0xd5, 0x1f, 0x90, 0xf2, 0xba, 0xdc, 0xd5, 0x96, 0x91, 0x14, 0xb2, 0x84, 0xd5, 0xcc, 0x91, 0x62, 0xaa, 0x52, 0xd5, 0xe1, 0x91, 0xc1, 0xa2, 0x2c, 0xd5, 0xec, 0x92, 0x7d, 0x9a, 0x16, 0xd5, 0xeb, 0x93, 0x51, 0x92, 0x00, 0xd5, 0xed, 0x94, 0x7b, 0x8a, 0x3a, 0xd5, 0xdf, 0x95, 0xc2, 0x82, 0x87, 0xd6, 0x35, 0x97, 0x70, 0x7b, 0x08, 0xd6, 0x88, 0x99, 0x34, 0x73, 0xa9, 0xd7, 0x3d, 0x9b, 0x33, 0x6c, 0x01, 0xd4, 0x3e, 0x8c, 0x2f, 0xf0, 0x64, 0xd6, 0x46, 0x8b, 0xb9, 0xe7, 0x98, 0xd7, 0xfb, 0x8b, 0x52, 0xdf, 0x77, 0xd9, 0x85, 0x8a, 0xf5, 0xd7, 0x40, 0xda, 0xfe, 0x8a, 0x99, 0xcf, 0x10, 0xdc, 0x1a, 0x8a, 0x63, 0xc6, 0xdf, 0xdd, 0x16, 0x8a, 0x3a, 0xbe, 0xb4, 0xdd, 0x9a, 0x8a, 0x51, 0xb6, 0x8b, 0xde, 0x0f, 0x8a, 0x78, 0xae, 0x74, 0xde, 0x50, 0x8a, 0xdd, 0xa6, 0x98, 0xde, 0x8e, 0x8b, 0x51, 0x9e, 0xbe, 0xde, 0xc0, 0x8c, 0x19, 0x96, 0xe7, 0xde, 0xe8, 0x8c, 0xeb, 0x8f, 0x05, 0xde, 0xed, 0x8e, 0x29, 0x87, 0x07, 0xde, 0xe5, 0x8f, 0x82, 0x7f, 0x0e, 0xdf, 0xbb, 0x91, 0x7d, 0x77, 0x6c, 0xe0, 0x82, 0x93, 0x6e, 0x6f, 0xb6, 0xda, 0xf7, 0x85, 0xe1, 0xf3, 0xba, 0xdd, 0x16, 0x85, 0x8a, 0xeb, 0x45, 0xde, 0xc9, 0x85, 0x10, 0xe3, 0x33, 0xe0, 0x66, 0x84, 0x93, 0xda, 0xf2, 0xe1, 0xe7, 0x84, 0x29, 0xd2, 0x92, 0xe3, 0x18, 0x83, 0xe8, 0xca, 0x6c, 0xe4, 0x25, 0x83, 0xb8, 0xc2, 0x68, 0xe4, 0xec, 0x83, 0xb1, 0xba, 0x75, 0xe5, 0x99, 0x83, 0xb8, 0xb2, 0x8b, 0xe6, 0x13, 0x84, 0x03, 0xaa, 0xc3, 0xe6, 0x74, 0x84, 0x69, 0xa3, 0x04, 0xe6, 0xd8, 0x85, 0x0e, 0x9b, 0x4e, 0xe7, 0x3d, 0x85, 0xd8, 0x93, 0x99, 0xe7, 0xc8, 0x86, 0xcd, 0x8b, 0xc5, 0xe8, 0x66, 0x87, 0xef, 0x83, 0xd9, 0xe9, 0x6d, 0x89, 0xcd, 0x7b, 0xea, 0xea, 0xe9, 0x8c, 0x20, 0x73, 0xce, 0xe1, 0xf4, 0x7f, 0xb4, 0xf7, 0x2c, 0xe3, 0x4d, 0x7f, 0x93, 0xee, 0xeb, 0xe5, 0x3f, 0x7e, 0xf2, 0xe6, 0xae, 0xe6, 0xf8, 0x7e, 0x71, 0xde, 0x80, 0xe8, 
0xaf, 0x7d, 0xf5, 0xd6, 0x0c, 0xea, 0x2c, 0x7d, 0x93, 0xcd, 0xde, 0xeb, 0x4c, 0x7d, 0x56, 0xc6, 0x1b, 0xec, 0x5a, 0x7d, 0x25, 0xbe, 0x69, 0xed, 0x2a, 0x7d, 0x24, 0xb6, 0xc9, 0xed, 0xef, 0x7d, 0x2d, 0xaf, 0x2d, 0xee, 0x57, 0x7d, 0x99, 0xa7, 0x7c, 0xee, 0xbd, 0x7d, 0xff, 0x9f, 0xc7, 0xef, 0x21, 0x7e, 0xbd, 0x98, 0x02, 0xef, 0x7f, 0x7f, 0x68, 0x90, 0x39, 0xf0, 0x7d, 0x80, 0x1a, 0x88, 0x5d, 0xf1, 0x70, 0x80, 0x80, 0x80, 0x80, 0xf5, 0xd4, 0x83, 0xf5, 0x77, 0xa1, 0xeb, 0x5e, 0x77, 0xab, 0xf8, 0x75, 0xec, 0xd8, 0x77, 0xad, 0xf0, 0x4e, 0xee, 0x7d, 0x77, 0x4b, 0xe8, 0x6c, 0xf0, 0x07, 0x76, 0xee, 0xe0, 0x96, 0xf1, 0x64, 0x76, 0xbf, 0xd8, 0xe5, 0xf2, 0xaa, 0x76, 0x99, 0xd1, 0x47, 0xf3, 0xbf, 0x76, 0x67, 0xc9, 0xf1, 0xf4, 0xce, 0x76, 0x3d, 0xc2, 0xb9, 0xf5, 0xae, 0x76, 0x44, 0xbb, 0x77, 0xf6, 0x6e, 0x76, 0x64, 0xb4, 0x26, 0xf7, 0x13, 0x76, 0xc8, 0xac, 0xb6, 0xf7, 0xa2, 0x77, 0x72, 0xa5, 0x21, 0xf8, 0x32, 0x78, 0x2c, 0x9d, 0x7b, 0xf8, 0xc1, 0x78, 0xfb, 0x95, 0xc0, 0xf9, 0x69, 0x79, 0xbe, 0x8d, 0xe4, 0xfa, 0x6b, 0x7a, 0x5e, 0x85, 0xc8, 0xfe, 0x21, 0x7b, 0xb4, 0x7c, 0x51, 0x7b, 0xa9, 0xd7, 0x96, 0xc5, 0x82, 0x7c, 0xcd, 0xd7, 0x4a, 0xbc, 0xf6, 0x7d, 0x5a, 0xd8, 0x5c, 0xb6, 0x02, 0x7d, 0xb7, 0xd9, 0xb2, 0xaf, 0x33, 0x7d, 0x3f, 0xdb, 0x77, 0xa7, 0xb4, 0x7c, 0x73, 0xdd, 0x49, 0xa0, 0x55, 0x7b, 0xc6, 0xde, 0xb2, 0x98, 0x68, 0x7a, 0xe1, 0xdf, 0xfd, 0x90, 0x85, 0x79, 0xe2, 0xe1, 0x3c, 0x89, 0x2d, 0x78, 0xc5, 0xe2, 0x28, 0x81, 0xb5, 0x77, 0xea, 0xe2, 0xad, 0x7b, 0x0b, 0x77, 0x15, 0xe2, 0xf2, 0x74, 0x7a, 0x76, 0x4c, 0xe3, 0x32, 0x6e, 0x32, 0x75, 0xa4, 0xe3, 0x80, 0x68, 0x64, 0x75, 0x02, 0xe3, 0xd6, 0x62, 0x92, 0x74, 0x45, 0xe3, 0xb4, 0x5c, 0x76, 0x73, 0x92, 0xe3, 0x3c, 0x56, 0x0f, 0x81, 0xaf, 0xd2, 0xb2, 0xc6, 0x8c, 0x83, 0x36, 0xd2, 0x9d, 0xbe, 0x8a, 0x83, 0xed, 0xd3, 0xb7, 0xb7, 0xaf, 0x84, 0x6f, 0xd4, 0xd5, 0xb0, 0xeb, 0x84, 0x43, 0xd6, 0x3c, 0xa9, 0x7d, 0x83, 0xdd, 0xd7, 0xce, 0xa2, 0x0e, 0x83, 0x76, 0xd9, 0x33, 0x9a, 0x29, 0x82, 0xee, 0xda, 0x78, 0x92, 0x1a, 0x82, 0x1e, 0xdb, 0x8a, 
0x8a, 0x6f, 0x81, 0x2e, 0xdc, 0x82, 0x82, 0xec, 0x80, 0x3f, 0xdd, 0x4a, 0x7c, 0x12, 0x7f, 0x54, 0xdd, 0xfe, 0x75, 0x80, 0x7e, 0x76, 0xde, 0xa2, 0x6f, 0x14, 0x7d, 0xc4, 0xdf, 0x1f, 0x69, 0x49, 0x7d, 0x25, 0xdf, 0x9b, 0x63, 0x84, 0x7c, 0x66, 0xdf, 0xae, 0x5d, 0x7f, 0x7b, 0xe7, 0xdf, 0x65, 0x57, 0x45, 0x87, 0x61, 0xce, 0x95, 0xc8, 0x8d, 0x89, 0x4e, 0xce, 0x81, 0xc0, 0xc6, 0x8a, 0x4a, 0xcf, 0x61, 0xb9, 0xb3, 0x8b, 0x0e, 0xd0, 0x65, 0xb2, 0xc4, 0x8b, 0x55, 0xd1, 0x96, 0xab, 0x80, 0x8b, 0x33, 0xd2, 0xf0, 0xa4, 0x04, 0x8a, 0xf8, 0xd4, 0x4e, 0x9c, 0x42, 0x8a, 0x9e, 0xd5, 0x8f, 0x94, 0x0d, 0x8a, 0x0e, 0xd6, 0xbc, 0x8c, 0x24, 0x89, 0x44, 0xd7, 0xcc, 0x84, 0x6f, 0x88, 0x67, 0xd8, 0xc7, 0x7d, 0x55, 0x87, 0x82, 0xd9, 0xa9, 0x76, 0xd8, 0x86, 0x9c, 0xda, 0x7c, 0x70, 0x57, 0x85, 0xf2, 0xdb, 0x40, 0x6a, 0x7d, 0x85, 0x53, 0xdb, 0xf8, 0x64, 0xac, 0x84, 0xbd, 0xdc, 0x74, 0x5e, 0xcd, 0x84, 0x90, 0xdc, 0x56, 0x58, 0xb2, 0x8d, 0x56, 0xca, 0xaa, 0xcb, 0x53, 0x8f, 0x25, 0xca, 0x94, 0xc3, 0x7d, 0x90, 0x96, 0xcb, 0x20, 0xbc, 0x1c, 0x91, 0x83, 0xcc, 0x00, 0xb4, 0xf3, 0x92, 0x53, 0xcd, 0x0a, 0xad, 0xca, 0x92, 0x70, 0xce, 0x22, 0xa6, 0x29, 0x92, 0x78, 0xcf, 0x51, 0x9e, 0x88, 0x92, 0x31, 0xd0, 0x8d, 0x96, 0x46, 0x91, 0xc1, 0xd1, 0xbb, 0x8e, 0x26, 0x91, 0x20, 0xd2, 0xd8, 0x86, 0x56, 0x90, 0x6b, 0xd3, 0xf5, 0x7e, 0xd4, 0x8f, 0x8f, 0xd4, 0xfd, 0x78, 0x5d, 0x8e, 0xae, 0xd5, 0xf8, 0x71, 0xe4, 0x8d, 0xfe, 0xd6, 0xfe, 0x6b, 0xe0, 0x8d, 0x67, 0xd7, 0xfe, 0x66, 0x03, 0x8c, 0xc4, 0xd8, 0xec, 0x60, 0x29, 0x8c, 0xe9, 0xd8, 0xfb, 0x5a, 0x08, 0x93, 0xc0, 0xc7, 0x0c, 0xce, 0x33, 0x95, 0x9a, 0xc7, 0x34, 0xc6, 0x55, 0x97, 0x43, 0xc7, 0x7f, 0xbe, 0xbf, 0x98, 0x5e, 0xc8, 0x25, 0xb7, 0x7b, 0x99, 0x7e, 0xc8, 0xd3, 0xb0, 0x60, 0x99, 0xc6, 0xc9, 0x94, 0xa8, 0xb0, 0x99, 0xe5, 0xca, 0x4e, 0xa1, 0x0c, 0x99, 0xcd, 0xcb, 0x47, 0x99, 0x0c, 0x99, 0xba, 0xcc, 0x6f, 0x91, 0x19, 0x99, 0x1c, 0xcd, 0x7d, 0x89, 0x58, 0x98, 0x8b, 0xce, 0xaa, 0x81, 0xa2, 0x97, 0xc6, 0xcf, 0xe8, 0x7a, 0xce, 0x96, 0xec, 0xd0, 0xf8, 0x74, 0x25, 0x96, 
0x44, 0xd2, 0x1c, 0x6d, 0xd6, 0x95, 0xbe, 0xd3, 0x4a, 0x67, 0xda, 0x95, 0x28, 0xd4, 0x6d, 0x61, 0xe1, 0x95, 0x90, 0xd5, 0x3c, 0x5b, 0xad, 0x9a, 0x7a, 0xc3, 0x88, 0xd1, 0x3b, 0x9c, 0x67, 0xc3, 0x77, 0xc9, 0x3a, 0x9e, 0x33, 0xc3, 0x92, 0xc1, 0xa7, 0x9f, 0x82, 0xc3, 0xf3, 0xba, 0x41, 0xa0, 0xb9, 0xc4, 0x66, 0xb3, 0x05, 0xa1, 0x5a, 0xc4, 0xe7, 0xab, 0x77, 0xa1, 0x89, 0xc5, 0x72, 0xa3, 0xac, 0xa1, 0x82, 0xc6, 0x17, 0x9b, 0xc2, 0xa1, 0x6b, 0xc6, 0xf0, 0x93, 0xc8, 0xa0, 0xee, 0xc7, 0xc7, 0x8b, 0xfd, 0xa0, 0x5a, 0xc8, 0xb6, 0x84, 0x65, 0x9f, 0xc9, 0xc9, 0xdf, 0x7d, 0x53, 0x9f, 0x1b, 0xcb, 0x28, 0x76, 0xb9, 0x9e, 0x7d, 0xcc, 0x74, 0x70, 0x30, 0x9e, 0x19, 0xcd, 0xc2, 0x6a, 0x03, 0x9d, 0xa3, 0xcf, 0x1a, 0x63, 0xe2, 0x9d, 0x86, 0xd0, 0x86, 0x5d, 0xac, 0xa1, 0xa9, 0xbf, 0x34, 0xd4, 0x5f, 0xa3, 0xe8, 0xbe, 0xe2, 0xcc, 0x8a, 0xa5, 0xcf, 0xbe, 0xce, 0xc5, 0x2e, 0xa7, 0x72, 0xbe, 0xde, 0xbd, 0xd8, 0xa8, 0x89, 0xbf, 0x36, 0xb6, 0x5b, 0xa9, 0x6f, 0xbf, 0x9c, 0xae, 0xd5, 0xa9, 0xa7, 0xc0, 0x3c, 0xa6, 0xe8, 0xa9, 0xb1, 0xc0, 0xcc, 0x9e, 0xfd, 0xa9, 0x5f, 0xc1, 0x7f, 0x96, 0xe0, 0xa9, 0x09, 0xc2, 0x46, 0x8e, 0xe5, 0xa8, 0x72, 0xc3, 0x31, 0x87, 0x45, 0xa7, 0xf1, 0xc4, 0x3c, 0x7f, 0xcd, 0xa7, 0x42, 0xc5, 0x9e, 0x79, 0x1c, 0xa6, 0x94, 0xc6, 0xfc, 0x72, 0x7b, 0xa6, 0x3b, 0xc8, 0x48, 0x6c, 0x1f, 0xa5, 0xfe, 0xc9, 0x8b, 0x65, 0xed, 0xa6, 0x1e, 0xcb, 0x28, 0x60, 0x00, 0xaa, 0x60, 0xba, 0x02, 0xd8, 0x8e, 0xac, 0x99, 0xb9, 0x7f, 0xd0, 0xa2, 0xae, 0x79, 0xb9, 0x6d, 0xc9, 0x3e, 0xb0, 0x0d, 0xb9, 0x4c, 0xc1, 0xec, 0xb1, 0x18, 0xb9, 0x79, 0xba, 0x58, 0xb1, 0xd2, 0xb9, 0xaf, 0xb2, 0xa9, 0xb2, 0x33, 0xba, 0x19, 0xaa, 0xb4, 0xb2, 0x4a, 0xba, 0x8f, 0xa2, 0x96, 0xb2, 0x02, 0xbb, 0x44, 0x9a, 0x71, 0xb1, 0x8f, 0xbc, 0x10, 0x92, 0x49, 0xb0, 0xfd, 0xbd, 0x13, 0x8a, 0xa1, 0xb0, 0x64, 0xbe, 0x2c, 0x83, 0x1f, 0xaf, 0xba, 0xbf, 0xa9, 0x7c, 0x05, 0xae, 0xfe, 0xc1, 0x2b, 0x75, 0x1a, 0xae, 0x73, 0xc2, 0x8b, 0x6e, 0x65, 0xae, 0x4b, 0xc3, 0xa2, 0x67, 0xf3, 0xae, 0x4e, 0xc4, 0xf1, 0x61, 0xa5, 0xb3, 0xd2, 0xb4, 0x4f, 
0xdc, 0xdb, 0xb5, 0xc1, 0xb4, 0x0d, 0xd5, 0x02, 0xb7, 0xc0, 0xb4, 0x11, 0xcd, 0x7b, 0xb8, 0xd8, 0xb3, 0xe3, 0xc5, 0xf7, 0xb9, 0xd6, 0xb3, 0xd8, 0xbe, 0x69, 0xba, 0x60, 0xb4, 0x08, 0xb6, 0x89, 0xba, 0xcf, 0xb4, 0x47, 0xae, 0x9c, 0xba, 0xc9, 0xb4, 0xb7, 0xa6, 0x34, 0xba, 0xae, 0xb5, 0x41, 0x9d, 0xdc, 0xba, 0x2b, 0xb5, 0xe2, 0x95, 0x7a, 0xb9, 0xa3, 0xb6, 0xb3, 0x8d, 0x6f, 0xb9, 0x06, 0xb7, 0xbf, 0x85, 0xeb, 0xb8, 0x72, 0xb8, 0xf3, 0x7e, 0xa1, 0xb7, 0xdd, 0xba, 0x7d, 0x77, 0xe7, 0xb7, 0x3a, 0xbc, 0x0a, 0x71, 0x1f, 0xb6, 0xfb, 0xbd, 0x55, 0x6a, 0x4f, 0xb6, 0xd0, 0xbe, 0xab, 0x63, 0x83, 0xbd, 0xb6, 0xae, 0x1c, 0xe1, 0xc1, 0xbf, 0x67, 0xad, 0xcd, 0xd9, 0xbe, 0xc1, 0x06, 0xad, 0xa9, 0xd2, 0x17, 0xc2, 0x03, 0xad, 0x97, 0xca, 0x63, 0xc2, 0xd4, 0xad, 0x93, 0xc2, 0xb0, 0xc3, 0x53, 0xad, 0xb8, 0xba, 0xbf, 0xc3, 0x9c, 0xad, 0xed, 0xb2, 0xaa, 0xc3, 0x89, 0xae, 0x4c, 0xaa, 0x3b, 0xc3, 0x55, 0xae, 0xc7, 0xa1, 0xa1, 0xc2, 0xe5, 0xaf, 0x66, 0x99, 0x2e, 0xc2, 0x6c, 0xb0, 0x0a, 0x90, 0xbb, 0xc1, 0xec, 0xb1, 0x1c, 0x89, 0x1a, 0xc1, 0x5c, 0xb2, 0x2e, 0x81, 0x86, 0xc0, 0xe6, 0xb3, 0x98, 0x7a, 0xb6, 0xc0, 0x61, 0xb5, 0x07, 0x73, 0xff, 0xc0, 0x07, 0xb6, 0x64, 0x6d, 0x29, 0xbf, 0xf1, 0xb7, 0xbe, 0x66, 0x12, 0xc7, 0x92, 0xa7, 0xb4, 0xe6, 0xf8, 0xc9, 0x13, 0xa7, 0x41, 0xde, 0xae, 0xca, 0x82, 0xa7, 0x54, 0xd7, 0x10, 0xcb, 0x57, 0xa7, 0x1b, 0xcf, 0x54, 0xcc, 0x17, 0xa7, 0x13, 0xc7, 0x6f, 0xcc, 0xab, 0xa6, 0xf6, 0xbf, 0x7d, 0xcc, 0xc8, 0xa7, 0x1b, 0xb7, 0x26, 0xcc, 0xb9, 0xa7, 0x2f, 0xae, 0xbb, 0xcc, 0x61, 0xa7, 0x90, 0xa6, 0x26, 0xcc, 0x0d, 0xa8, 0x1c, 0x9d, 0xae, 0xcb, 0x97, 0xa8, 0xdd, 0x95, 0x66, 0xcb, 0x18, 0xa9, 0xcd, 0x8d, 0x57, 0xca, 0x8a, 0xab, 0x0a, 0x85, 0xa6, 0xca, 0x09, 0xac, 0x56, 0x7e, 0x25, 0xc9, 0xa1, 0xad, 0xd7, 0x77, 0x23, 0xc9, 0x28, 0xaf, 0x58, 0x70, 0x2e, 0xc9, 0x53, 0xb0, 0xa2, 0x68, 0xa1, 0xd0, 0x69, 0xa0, 0xbd, 0xeb, 0x79, 0xd1, 0x9d, 0xa0, 0x5f, 0xe3, 0x59, 0xd2, 0xa3, 0xa0, 0x41, 0xdb, 0x7d, 0xd3, 0x7d, 0xa0, 0x30, 0xd3, 0xb3, 0xd4, 0x3b, 0xa0, 0x1d, 0xcb, 0xc3, 0xd4, 
0xc2, 0x9f, 0xfc, 0xc3, 0xa1, 0xd5, 0x17, 0xa0, 0x03, 0xbb, 0x5d, 0xd5, 0x36, 0xa0, 0x1f, 0xb2, 0xf5, 0xd5, 0x17, 0xa0, 0x6e, 0xaa, 0x8e, 0xd4, 0xd3, 0xa0, 0xd9, 0xa2, 0x21, 0xd4, 0x93, 0xa1, 0x88, 0x99, 0xde, 0xd4, 0x51, 0xa2, 0x4e, 0x91, 0xac, 0xd3, 0xfc, 0xa3, 0x95, 0x89, 0xeb, 0xd3, 0x77, 0xa4, 0xda, 0x82, 0x31, 0xd3, 0x38, 0xa6, 0x56, 0x7a, 0xee, 0xd2, 0xf4, 0xa7, 0xd5, 0x73, 0xdb, 0xd2, 0xed, 0xa9, 0x2c, 0x6c, 0x5c, 0xd7, 0xfc, 0x9a, 0x0a, 0xef, 0x25, 0xd9, 0x02, 0x99, 0xc3, 0xe7, 0x0c, 0xda, 0x04, 0x99, 0x95, 0xdf, 0x25, 0xdb, 0x16, 0x99, 0x64, 0xd7, 0x47, 0xdc, 0x1d, 0x99, 0x34, 0xcf, 0x6c, 0xdc, 0xae, 0x99, 0x21, 0xc7, 0x47, 0xdd, 0x34, 0x99, 0x17, 0xbf, 0x22, 0xdd, 0x64, 0x99, 0x4b, 0xb6, 0xe0, 0xdd, 0x8b, 0x99, 0x89, 0xae, 0xa5, 0xdd, 0x60, 0x99, 0xe5, 0xa6, 0x82, 0xdd, 0x35, 0x9a, 0x5a, 0x9e, 0x5f, 0xdd, 0x13, 0x9b, 0x13, 0x96, 0x4d, 0xdc, 0xef, 0x9b, 0xf0, 0x8e, 0x4d, 0xdc, 0xc2, 0x9d, 0x47, 0x86, 0x90, 0xdc, 0x96, 0x9e, 0xa6, 0x7e, 0xe1, 0xdc, 0x9e, 0xa0, 0x34, 0x77, 0x7d, 0xdc, 0xbe, 0xa2, 0x0b, 0x70, 0x14, 0xdf, 0x1b, 0x93, 0x6e, 0xf2, 0xd0, 0xe0, 0x60, 0x93, 0x2d, 0xea, 0x9e, 0xe1, 0x59, 0x93, 0x04, 0xe2, 0xb5, 0xe2, 0x55, 0x92, 0xcf, 0xda, 0xc7, 0xe3, 0x4a, 0x92, 0x91, 0xd2, 0xdd, 0xe4, 0x02, 0x92, 0x6f, 0xca, 0xd5, 0xe4, 0xa1, 0x92, 0x5b, 0xc2, 0xc0, 0xe5, 0x0e, 0x92, 0x7f, 0xba, 0xa8, 0xe5, 0x63, 0x92, 0xbf, 0xb2, 0x8c, 0xe5, 0x8f, 0x93, 0x03, 0xaa, 0x8e, 0xe5, 0xb6, 0x93, 0x56, 0xa2, 0xa1, 0xe5, 0xd4, 0x93, 0xf3, 0x9a, 0xb8, 0xe5, 0xf7, 0x94, 0xb4, 0x92, 0xd2, 0xe6, 0x2b, 0x95, 0xdd, 0x8a, 0xf9, 0xe6, 0x44, 0x97, 0x27, 0x83, 0x27, 0xe6, 0xdf, 0x98, 0xe5, 0x7b, 0x86, 0xe7, 0xaf, 0x9a, 0xfd, 0x73, 0x3d, 0xe5, 0x13, 0x8d, 0x6d, 0xf6, 0xbe, 0xe6, 0x5d, 0x8d, 0x27, 0xee, 0x63, 0xe7, 0x74, 0x8c, 0xe8, 0xe6, 0x67, 0xe8, 0x7e, 0x8c, 0xa9, 0xde, 0x69, 0xe9, 0x89, 0x8c, 0x5f, 0xd6, 0x56, 0xea, 0x84, 0x8c, 0x1d, 0xce, 0x4a, 0xeb, 0x50, 0x8b, 0xfe, 0xc6, 0x54, 0xec, 0x09, 0x8b, 0xec, 0xbe, 0x66, 0xec, 0x83, 0x8c, 0x10, 0xb6, 0x7f, 0xec, 0xf3, 0x8c, 0x3a, 
0xae, 0x9c, 0xed, 0x4d, 0x8c, 0x86, 0xa6, 0xc6, 0xed, 0xaa, 0x8c, 0xe1, 0x9e, 0xef, 0xee, 0x04, 0x8d, 0xa8, 0x97, 0x1e, 0xee, 0x63, 0x8e, 0x7a, 0x8f, 0x46, 0xee, 0xd8, 0x8f, 0xb1, 0x87, 0x5e, 0xef, 0x6d, 0x90, 0xf8, 0x7f, 0x72, 0xf2, 0x5a, 0x93, 0x2d, 0x76, 0xfc, 0xeb, 0x7b, 0x87, 0x50, 0xfa, 0x32, 0xec, 0xa7, 0x87, 0x21, 0xf2, 0x05, 0xed, 0xc6, 0x86, 0xe6, 0xea, 0x07, 0xee, 0xc9, 0x86, 0xa6, 0xe2, 0x13, 0xef, 0xe7, 0x86, 0x4f, 0xd9, 0xeb, 0xf1, 0x24, 0x85, 0xf2, 0xd1, 0xae, 0xf2, 0x29, 0x85, 0xc0, 0xc9, 0xd8, 0xf3, 0x28, 0x85, 0xaf, 0xc2, 0x2b, 0xf3, 0xfd, 0x85, 0xbb, 0xba, 0x7f, 0xf4, 0xb8, 0x85, 0xc6, 0xb2, 0xd1, 0xf5, 0x54, 0x86, 0x0e, 0xab, 0x21, 0xf5, 0xe0, 0x86, 0x6e, 0xa3, 0x6c, 0xf6, 0x7b, 0x87, 0x06, 0x9b, 0xb8, 0xf7, 0x14, 0x87, 0xbd, 0x94, 0x03, 0xf8, 0x23, 0x88, 0xb6, 0x8c, 0x4a, 0xf9, 0xf3, 0x8a, 0x1c, 0x84, 0x83, 0xfe, 0x8c, 0x8b, 0x51, 0x7a, 0xfc, 0xf2, 0x9e, 0x80, 0xad, 0xfd, 0x9a, 0xf3, 0x8b, 0x80, 0xcb, 0xf5, 0x8c, 0xf4, 0x8e, 0x80, 0xbd, 0xed, 0xa7, 0xf5, 0x8d, 0x80, 0x79, 0xe5, 0xa6, 0xf6, 0x8c, 0x80, 0x27, 0xdd, 0x99, 0xf7, 0xd1, 0x7f, 0xa2, 0xd5, 0x52, 0xf9, 0x24, 0x7f, 0x33, 0xcd, 0x70, 0xfa, 0x24, 0x7f, 0x13, 0xc5, 0xf8, 0xfb, 0x21, 0x7e, 0xfb, 0xbe, 0x85, 0xfb, 0xf9, 0x7e, 0xf7, 0xb7, 0x03, 0xfc, 0xca, 0x7f, 0x04, 0xaf, 0x7d, 0xfd, 0x74, 0x7f, 0x67, 0xa7, 0xdd, 0xfe, 0x1c, 0x7f, 0xd0, 0xa0, 0x37, 0xfe, 0xbf, 0x80, 0x47, 0x98, 0x8d, 0xff, 0x9f, 0x80, 0x93, 0x90, 0xd3, 0xff, 0xff, 0x80, 0xf1, 0x88, 0xd7, 0xff, 0xff, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x41, 0x42, 0x20, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xf8, 0x00, 0x00, 0x01, 0x3c, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0xff, 
0xff, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0xff, 0xff, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xb0, 0xba, 0xff, 0xfe, 0x4f, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xad, 0x17, 0xff, 0xff, 0x52, 0xe9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x81, 0x00, 0x00, 0x80, 0x81, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x55, 0x55, 0x00, 0x01, 0x9e, 0x6d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x5e, 0x4c, 0x00, 0x00, 0x02, 0x30, 0xff, 0xff, 0xd7, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x55, 0x55, 0x00, 0x01, 0x8f, 0x97, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x08, 0x70, 0x00, 0x00, 0x02, 0x44, 0xff, 0xff, 0xd7, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x55, 0x55, 0x00, 0x01, 0xe4, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xf3, 0x4d, 0x00, 0x00, 0x01, 0xdf, 0xff, 0xff, 0xd7, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x0f, 0x84, 0xb6, 0xc2, 0x62, 0x96, 0xb7, 0x86, 0x18, 0xd9, 0x87, 0x35, 0xc7, 0x0a, 0xcf, 0x9c, 0x6f, 0xa0, 0x38, 0xf5, 0x03, 0x90, 0x94, 0x3e, 0x48, 0x79, 0xba, 0x53, 0xd2, 0x36, 0xf0, 0x7b, 0x1c, 0x6a, 0xf6, 0xd5, 0xff, 0xff, 0xd3, 0x2c, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x02, 0x66, 0x66, 0x00, 0x00, 0xf1, 0x63, 0x00, 0x00, 0x0d, 0x47, 0x00, 0x00, 0x13, 0x90, 0x00, 0x00, 0x0a, 0x0f, 0x00, 0x00, 0x03, 0x33, 0x00, 0x00, 0x03, 0x33, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x02, 0x66, 0x66, 0x00, 0x00, 0xf1, 0x63, 0x00, 0x00, 0x0d, 0x47, 
0x00, 0x00, 0x13, 0x90, 0x00, 0x00, 0x0a, 0x0f, 0x00, 0x00, 0x03, 0x33, 0x00, 0x00, 0x03, 0x33, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x02, 0x66, 0x66, 0x00, 0x00, 0xf1, 0x63, 0x00, 0x00, 0x0d, 0x47, 0x00, 0x00, 0x13, 0x90, 0x00, 0x00, 0x0a, 0x0f, 0x00, 0x00, 0x03, 0x33, 0x00, 0x00, 0x03, 0x33, 0x6d, 0x42, 0x41, 0x20, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x50, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0, 0x00, 0x00, 0x73, 0xec, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x11, 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x1b, 0x3f, 0xa2, 0xc4, 0xaa, 0x74, 0x1f, 0x99, 0xa0, 0x5a, 0xa3, 0xd1, 0x20, 0x6b, 0x9f, 0x0e, 0xa1, 0x1a, 0x22, 0x7a, 0x9d, 0xa0, 0x9b, 0xa8, 0x25, 0x65, 0x9a, 0xdb, 0x94, 0xc2, 0x24, 0x38, 0x9c, 0x4c, 0x8f, 0x4e, 0x26, 0xf4, 0x9a, 0xb1, 0x8a, 0x8a, 0x26, 0xbd, 0x99, 0x7f, 0x88, 0x14, 0x28, 0x73, 0x8e, 0x71, 0x74, 0x7a, 0x28, 0xe8, 0x8c, 0xaa, 0x66, 0xa3, 0x28, 0xb5, 0x8c, 0x5d, 0x64, 0x91, 0x28, 0x80, 0x8c, 0x11, 0x62, 0x79, 0x20, 0xee, 0x87, 0xb3, 0x4a, 0xc5, 0x21, 0x0d, 0x88, 
0xc9, 0x27, 0x85, 0x21, 0x95, 0x89, 0x20, 0x24, 0xad, 0x22, 0x1f, 0x89, 0x6e, 0x22, 0x33, 0x20, 0x61, 0x87, 0x83, 0x1c, 0xd3, 0x17, 0xca, 0xa1, 0x46, 0xb7, 0xba, 0x1a, 0x79, 0x9f, 0x8e, 0xa8, 0xfb, 0x20, 0x13, 0x9d, 0x11, 0xa0, 0x5a, 0x21, 0x12, 0x9b, 0x51, 0x9c, 0xeb, 0x23, 0xf6, 0x99, 0x9e, 0x95, 0x6b, 0x25, 0x92, 0x97, 0x1c, 0x8f, 0xa2, 0x24, 0x29, 0x98, 0xc6, 0x89, 0x8a, 0x29, 0xe5, 0x86, 0x64, 0x75, 0x1a, 0x29, 0x99, 0x8a, 0xb9, 0x72, 0xac, 0x27, 0xcf, 0x8b, 0xdc, 0x64, 0x36, 0x28, 0xd3, 0x8a, 0xd8, 0x62, 0xd8, 0x28, 0x31, 0x89, 0x8c, 0x5c, 0xf8, 0x21, 0xc5, 0x7f, 0x65, 0x28, 0xea, 0x22, 0xb0, 0x80, 0xd6, 0x26, 0x1d, 0x22, 0x0f, 0x86, 0xf7, 0x22, 0x4c, 0x22, 0xdf, 0x87, 0x95, 0x1f, 0xe4, 0x23, 0x9d, 0x88, 0x1c, 0x1d, 0xd9, 0x16, 0xb0, 0x9f, 0x0d, 0xb7, 0x7a, 0x15, 0xfd, 0x9c, 0xc5, 0xb3, 0x12, 0x1a, 0x31, 0x9a, 0xec, 0xa5, 0xbb, 0x20, 0xb7, 0x98, 0x65, 0x9b, 0x64, 0x21, 0xfa, 0x95, 0xe7, 0x96, 0xcd, 0x25, 0x15, 0x93, 0xfc, 0x8d, 0xdb, 0x23, 0x4e, 0x83, 0xc5, 0x78, 0x74, 0x27, 0xc7, 0x88, 0x45, 0x78, 0x0b, 0x29, 0x16, 0x84, 0x63, 0x6b, 0xa0, 0x29, 0xe3, 0x84, 0xe1, 0x63, 0x30, 0x29, 0x1d, 0x86, 0x55, 0x5e, 0x11, 0x23, 0xae, 0x7d, 0xd6, 0x49, 0x0c, 0x23, 0x9f, 0x78, 0x66, 0x27, 0xb8, 0x23, 0xca, 0x7e, 0x1e, 0x24, 0x44, 0x24, 0x98, 0x80, 0xdf, 0x21, 0x7d, 0x24, 0x64, 0x85, 0xbd, 0x1d, 0xc8, 0x25, 0x21, 0x86, 0x75, 0x1b, 0xca, 0x0f, 0xe2, 0x9d, 0xc8, 0xc8, 0x38, 0x12, 0x22, 0x98, 0xc5, 0xb5, 0x60, 0x16, 0x01, 0x96, 0x94, 0xac, 0x3a, 0x1a, 0x61, 0x90, 0xa4, 0x9c, 0xa3, 0x1d, 0x7f, 0x8f, 0xca, 0x94, 0x8e, 0x1f, 0x46, 0x81, 0x4c, 0x7d, 0xeb, 0x23, 0xf4, 0x7e, 0x04, 0x75, 0x79, 0x27, 0x59, 0x7c, 0x13, 0x6d, 0x1c, 0x29, 0x2b, 0x79, 0x72, 0x63, 0x0b, 0x29, 0x62, 0x73, 0x7b, 0x51, 0x6f, 0x28, 0xff, 0x76, 0x79, 0x4d, 0x31, 0x25, 0x35, 0x6d, 0xa1, 0x2a, 0x66, 0x26, 0xec, 0x74, 0x5c, 0x26, 0xcc, 0x25, 0xf6, 0x77, 0x40, 0x22, 0xe2, 0x26, 0x41, 0x7c, 0xfa, 0x1f, 0xcf, 0x27, 0x09, 0x80, 0xe9, 0x1c, 0xc1, 0x26, 0xfa, 0x84, 0x87, 0x19, 0x6f, 0x0c, 0x33, 0x94, 0xcb, 0xc8, 0x74, 
0x10, 0x4e, 0x8b, 0x2f, 0xb2, 0xb9, 0x11, 0x81, 0x88, 0x5b, 0xa5, 0x66, 0x15, 0x19, 0x84, 0xbd, 0x98, 0x3a, 0x1b, 0x8f, 0x7b, 0x05, 0x85, 0x38, 0x1d, 0x38, 0x73, 0x90, 0x75, 0x64, 0x22, 0x92, 0x73, 0x11, 0x6e, 0x0c, 0x27, 0x99, 0x6e, 0xd5, 0x63, 0x78, 0x29, 0x4b, 0x68, 0xff, 0x52, 0xc3, 0x29, 0x25, 0x6d, 0x11, 0x4e, 0xcd, 0x26, 0xf0, 0x66, 0xdb, 0x36, 0xb3, 0x25, 0x5c, 0x66, 0x0f, 0x25, 0xab, 0x27, 0x4e, 0x6a, 0x99, 0x22, 0xbe, 0x28, 0xca, 0x72, 0x46, 0x21, 0x75, 0x28, 0xb5, 0x76, 0x30, 0x1e, 0x4d, 0x29, 0x01, 0x7b, 0xe0, 0x1b, 0x97, 0x57, 0xc9, 0x8b, 0x8e, 0x15, 0x45, 0x09, 0x38, 0x89, 0x7a, 0xca, 0x5d, 0x0a, 0x15, 0x88, 0xbd, 0xc0, 0x75, 0x0f, 0xa7, 0x7c, 0x8b, 0xa1, 0xae, 0x14, 0x84, 0x74, 0xf7, 0x8e, 0x4d, 0x19, 0x90, 0x6f, 0x45, 0x81, 0x83, 0x1a, 0xaf, 0x67, 0xeb, 0x71, 0x1d, 0x20, 0xe4, 0x64, 0x05, 0x63, 0xdd, 0x27, 0x13, 0x61, 0xd9, 0x59, 0x7e, 0x28, 0x0f, 0x5e, 0xec, 0x4b, 0xb5, 0x28, 0xa6, 0x58, 0x98, 0x38, 0xf9, 0x28, 0x4e, 0x58, 0x00, 0x28, 0xab, 0x28, 0x65, 0x5d, 0x09, 0x23, 0xc8, 0x27, 0x8d, 0x62, 0xe7, 0x1c, 0xfd, 0x29, 0x4f, 0x68, 0x68, 0x1b, 0xf4, 0x45, 0x94, 0x72, 0x05, 0x19, 0xb9, 0x58, 0xcb, 0x7f, 0xca, 0x18, 0x4e, 0x69, 0x41, 0x8e, 0x5b, 0x19, 0xba, 0x07, 0x04, 0x7d, 0xae, 0xcb, 0xa1, 0x06, 0x55, 0x7d, 0xf8, 0xc2, 0xa2, 0x0d, 0xc4, 0x6e, 0xfb, 0x9e, 0x00, 0x12, 0x22, 0x69, 0xae, 0x8d, 0x76, 0x16, 0xa1, 0x60, 0xd8, 0x7a, 0xdd, 0x16, 0x7b, 0x59, 0xac, 0x6a, 0x90, 0x1f, 0x2f, 0x54, 0xb7, 0x5b, 0x33, 0x25, 0x15, 0x51, 0xac, 0x4f, 0x1a, 0x28, 0xc4, 0x4d, 0xe7, 0x3e, 0x80, 0x2b, 0x08, 0x49, 0xce, 0x2c, 0xe0, 0x2a, 0x85, 0x4e, 0xc6, 0x27, 0xcd, 0x2c, 0x55, 0x53, 0x00, 0x1d, 0x61, 0x39, 0xff, 0x5a, 0xe6, 0x17, 0x62, 0x49, 0xdc, 0x67, 0x5a, 0x17, 0xa1, 0x59, 0x3e, 0x72, 0x34, 0x18, 0xa8, 0x64, 0x29, 0x7c, 0x19, 0x19, 0x03, 0x70, 0x11, 0x87, 0xdf, 0x19, 0x0f, 0x02, 0x2c, 0x6c, 0x56, 0xce, 0x38, 0x01, 0x6a, 0x6a, 0x88, 0xc4, 0x64, 0x09, 0xeb, 0x5d, 0x3a, 0x9e, 0x92, 0x0a, 0xa3, 0x57, 0x7f, 0x88, 0x18, 0x0f, 0x2e, 0x51, 0x79, 0x74, 0x3f, 0x13, 0x34, 0x48, 
0x2e, 0x60, 0x8c, 0x1e, 0x0c, 0x43, 0xe1, 0x53, 0xa3, 0x24, 0x7b, 0x40, 0x79, 0x44, 0x6e, 0x29, 0x88, 0x3d, 0x2c, 0x35, 0x06, 0x2c, 0xde, 0x3e, 0xee, 0x2b, 0x51, 0x34, 0x48, 0x45, 0x33, 0x23, 0x54, 0x3e, 0xca, 0x4d, 0xc2, 0x1c, 0xf9, 0x4b, 0xbd, 0x58, 0x78, 0x19, 0x0a, 0x56, 0x7f, 0x61, 0xdf, 0x16, 0xc7, 0x63, 0x74, 0x6e, 0x43, 0x18, 0xfc, 0x71, 0x61, 0x79, 0x48, 0x19, 0xd6, 0x7e, 0xe0, 0x84, 0xe5, 0x1a, 0x09, 0x0c, 0x5d, 0x5b, 0x46, 0xce, 0x93, 0x03, 0xd4, 0x59, 0xc8, 0xc5, 0x69, 0x04, 0x89, 0x4d, 0x3a, 0xa0, 0xe7, 0x06, 0xc3, 0x48, 0x98, 0x89, 0x80, 0x0c, 0x83, 0x40, 0xb8, 0x6f, 0x73, 0x16, 0x3e, 0x39, 0x2b, 0x5d, 0xfe, 0x1b, 0xee, 0x30, 0xe6, 0x4a, 0x6c, 0x23, 0x93, 0x2e, 0x1c, 0x3a, 0xdc, 0x28, 0xf6, 0x28, 0xf6, 0x28, 0xf6, 0x37, 0x64, 0x35, 0x31, 0x29, 0x14, 0x42, 0x82, 0x3e, 0xb7, 0x23, 0xd5, 0x4e, 0x2a, 0x49, 0x49, 0x1f, 0x55, 0x58, 0x96, 0x54, 0x28, 0x1c, 0x0f, 0x62, 0x73, 0x5d, 0xd3, 0x18, 0x86, 0x6e, 0xa8, 0x69, 0x78, 0x19, 0xd2, 0x7a, 0xcc, 0x75, 0xa7, 0x1b, 0x53, 0x87, 0x9e, 0x80, 0x01, 0x1c, 0x37, 0x2b, 0x67, 0x30, 0x11, 0xd1, 0x03, 0x1a, 0x21, 0x33, 0x49, 0xc8, 0x70, 0x04, 0x3e, 0x3d, 0x75, 0xa6, 0x64, 0x08, 0x4d, 0x36, 0xfa, 0x89, 0xb6, 0x16, 0xd4, 0x30, 0xd9, 0x73, 0x41, 0x20, 0xf9, 0x2c, 0x2b, 0x5e, 0x4c, 0x28, 0xc6, 0x27, 0xa3, 0x4c, 0xa4, 0x31, 0x23, 0x25, 0xe0, 0x3d, 0x9b, 0x38, 0x57, 0x24, 0x9c, 0x2e, 0x71, 0x44, 0x92, 0x2c, 0x59, 0x29, 0xb6, 0x4f, 0x6a, 0x35, 0xd1, 0x24, 0xdc, 0x5a, 0x9f, 0x3f, 0x36, 0x21, 0x6e, 0x64, 0xe1, 0x4b, 0x34, 0x1e, 0x2d, 0x6d, 0x8e, 0x56, 0x63, 0x1a, 0x2b, 0x79, 0x63, 0x62, 0x66, 0x1b, 0x4f, 0x8a, 0xa4, 0x72, 0x7e, 0x1e, 0x21, 0x91, 0xfd, 0x78, 0xb0, 0x1e, 0x9b, 0x2b, 0x7e, 0x30, 0x00, 0xd1, 0x09, 0x2b, 0x43, 0x2f, 0xf1, 0xd0, 0xe0, 0x17, 0x13, 0x29, 0xef, 0xb2, 0x35, 0x18, 0x75, 0x27, 0x84, 0x8f, 0xf3, 0x23, 0xee, 0x25, 0xfb, 0x77, 0x2e, 0x2d, 0x1c, 0x23, 0x06, 0x61, 0xd7, 0x36, 0xbf, 0x1d, 0xf9, 0x4f, 0x5c, 0x3e, 0x34, 0x1d, 0x46, 0x3f, 0xb9, 0x45, 0xf9, 0x1c, 0x00, 0x31, 0x52, 0x51, 0xb5, 0x26, 0x5e, 0x28, 0xaf, 
0x5b, 0xa1, 0x2a, 0x1a, 0x25, 0x98, 0x64, 0xa2, 0x34, 0xde, 0x21, 0x94, 0x6f, 0xa8, 0x40, 0x24, 0x1e, 0xb0, 0x76, 0xea, 0x4a, 0x6a, 0x1b, 0x17, 0x85, 0xf6, 0x57, 0xfa, 0x1c, 0x77, 0x92, 0xfe, 0x65, 0x14, 0x1f, 0xd7, 0x96, 0xde, 0x70, 0x60, 0x1f, 0x5d, 0x2b, 0x95, 0x2f, 0xf1, 0xd1, 0x0f, 0x2b, 0x70, 0x2f, 0xd3, 0xd0, 0xec, 0x1f, 0x92, 0x24, 0xed, 0xbe, 0x3c, 0x22, 0xe0, 0x1e, 0xb2, 0x9b, 0x30, 0x34, 0x86, 0x1e, 0xa5, 0x7e, 0x4c, 0x42, 0x0b, 0x1f, 0x72, 0x6c, 0xf6, 0x4c, 0x54, 0x1f, 0x35, 0x5d, 0x09, 0x52, 0x9b, 0x1c, 0xcb, 0x4e, 0x21, 0x57, 0x95, 0x1a, 0x8d, 0x3c, 0x9b, 0x5e, 0x45, 0x1d, 0x19, 0x2d, 0xf9, 0x65, 0x55, 0x23, 0x0e, 0x24, 0xfb, 0x6f, 0xdd, 0x25, 0xad, 0x21, 0xa2, 0x77, 0x8c, 0x2f, 0x75, 0x1d, 0xf6, 0x86, 0x31, 0x43, 0x06, 0x1b, 0x84, 0x90, 0x18, 0x48, 0xab, 0x1d, 0x1d, 0x96, 0xe9, 0x54, 0xf8, 0x1e, 0xa3, 0xa0, 0x4b, 0x61, 0x89, 0x1f, 0xeb, 0x2b, 0xab, 0x2f, 0xe2, 0xd1, 0x16, 0x2b, 0x9d, 0x2f, 0xb4, 0xd0, 0xfa, 0x27, 0x15, 0x20, 0xa8, 0xc6, 0xa2, 0x42, 0xec, 0x1b, 0xe3, 0xa5, 0x15, 0x50, 0x81, 0x1e, 0xd8, 0x8b, 0xfd, 0x5b, 0x78, 0x20, 0x37, 0x7b, 0x94, 0x60, 0x5b, 0x1f, 0xc6, 0x69, 0x04, 0x66, 0x19, 0x1c, 0x22, 0x5a, 0x6b, 0x68, 0xc5, 0x18, 0x9e, 0x48, 0xed, 0x6e, 0xef, 0x1a, 0xe1, 0x39, 0x04, 0x73, 0xad, 0x1e, 0xfa, 0x28, 0xc4, 0x7b, 0xd4, 0x20, 0x7e, 0x22, 0xfe, 0x83, 0xfd, 0x21, 0xf7, 0x20, 0x01, 0x8b, 0x4a, 0x2a, 0xb6, 0x18, 0x91, 0x95, 0x53, 0x3f, 0xb4, 0x1a, 0x67, 0x9b, 0x44, 0x4c, 0x52, 0x1d, 0xf7, 0xa5, 0xe9, 0x55, 0x2a, 0x1e, 0x60, 0x2b, 0xc2, 0x2f, 0xd2, 0xd1, 0x1c, 0x2b, 0xca, 0x2f, 0x95, 0xd1, 0x06, 0x4d, 0x51, 0x14, 0x5d, 0xc6, 0xae, 0x5f, 0x75, 0x1b, 0x52, 0xaa, 0xd5, 0x6e, 0x81, 0x1e, 0x42, 0x9a, 0x52, 0x72, 0x67, 0x1f, 0xd4, 0x86, 0xfc, 0x75, 0x0c, 0x1e, 0x71, 0x78, 0xa7, 0x79, 0x2a, 0x1a, 0xd3, 0x67, 0xa0, 0x7c, 0x34, 0x18, 0xe8, 0x56, 0x5e, 0x80, 0x62, 0x19, 0x3e, 0x46, 0x03, 0x84, 0x6e, 0x1a, 0xe4, 0x37, 0x9f, 0x87, 0xee, 0x1e, 0x80, 0x25, 0xdd, 0x91, 0x74, 0x20, 0x80, 0x22, 0xfb, 0x97, 0xc4, 0x21, 0x10, 0x1f, 0xd4, 0xa1, 0x04, 0x22, 
0xc4, 0x1e, 0x71, 0xa8, 0x99, 0x40, 0xe8, 0x1b, 0x1f, 0xac, 0x4f, 0x4a, 0xce, 0x1c, 0x74, 0x2c, 0x77, 0x2e, 0xe3, 0xd0, 0x78, 0x2c, 0x9a, 0x2e, 0x99, 0xd0, 0x69, 0x77, 0xca, 0x16, 0x54, 0xca, 0x59, 0x83, 0xd5, 0x17, 0x67, 0xc0, 0x4c, 0x85, 0xec, 0x1b, 0x71, 0xa7, 0x04, 0x88, 0xa3, 0x1c, 0xd5, 0x99, 0x0f, 0x8b, 0xe3, 0x1c, 0x1c, 0x88, 0x20, 0x90, 0x70, 0x1a, 0x9c, 0x76, 0xef, 0x92, 0x97, 0x1b, 0xd5, 0x63, 0xd8, 0x95, 0x62, 0x1b, 0x7c, 0x54, 0x65, 0x97, 0x95, 0x1b, 0x75, 0x44, 0xd0, 0x9b, 0xf8, 0x1c, 0xa9, 0x35, 0xfb, 0x9a, 0xc4, 0x1f, 0x40, 0x25, 0x2a, 0xa2, 0x7a, 0x1e, 0xc1, 0x21, 0x98, 0xae, 0x38, 0x1f, 0xdc, 0x1f, 0x5b, 0xc1, 0xb4, 0x24, 0xf1, 0x21, 0xe0, 0xc3, 0xb7, 0x27, 0x6a, 0x23, 0x1d, 0x2d, 0xc2, 0x36, 0xec, 0xca, 0x1f, 0x81, 0x40, 0x1c, 0xbf, 0xd2, 0x0a, 0x90, 0x23, 0x1b, 0x9a, 0xcc, 0xfa, 0x9a, 0xcf, 0x1a, 0x87, 0xc5, 0x30, 0x9a, 0x2c, 0x1c, 0x8c, 0xb4, 0x0b, 0x9f, 0xd9, 0x1d, 0x84, 0xab, 0x6f, 0xa1, 0xec, 0x1d, 0x08, 0x9a, 0x58, 0x9f, 0xc6, 0x1c, 0x03, 0x82, 0x9d, 0x9e, 0x80, 0x1c, 0x79, 0x6b, 0x94, 0xa0, 0xd8, 0x1d, 0x54, 0x5b, 0x92, 0xa2, 0xf6, 0x1c, 0x2f, 0x4a, 0xeb, 0xa2, 0xfd, 0x1b, 0x8e, 0x3b, 0x96, 0xa6, 0xed, 0x1b, 0x2d, 0x25, 0xd0, 0xbc, 0xc3, 0x1b, 0x27, 0x21, 0xc1, 0xc3, 0x25, 0x22, 0x1e, 0x23, 0xf9, 0xc4, 0xd6, 0x25, 0x35, 0x24, 0xb6, 0xc5, 0xe4, 0x27, 0x2a, 0x25, 0x2e, 0x88, 0x14, 0x1f, 0xb6, 0xd4, 0xe7, 0x97, 0x5f, 0x1f, 0xa7, 0xd1, 0xd6, 0x9f, 0x85, 0x1d, 0x54, 0xcd, 0xaf, 0xa2, 0xcd, 0x1b, 0x5a, 0xc8, 0xe9, 0xb2, 0xaf, 0x1c, 0xbe, 0xc6, 0x7a, 0xbd, 0x1e, 0x1d, 0x23, 0xc2, 0x40, 0xb2, 0x50, 0x1e, 0x12, 0xa5, 0xd8, 0xb5, 0x4d, 0x1d, 0x0d, 0x96, 0x09, 0xbe, 0xbc, 0x1c, 0x02, 0x81, 0xf2, 0xaf, 0xe4, 0x1d, 0x1b, 0x64, 0x53, 0xaf, 0x13, 0x1c, 0x3d, 0x53, 0x5d, 0xae, 0x2b, 0x1a, 0xbe, 0x41, 0x99, 0xb3, 0xf2, 0x14, 0xf1, 0x2f, 0x22, 0xc4, 0x8c, 0x1f, 0x68, 0x26, 0x2f, 0xc5, 0xf1, 0x23, 0x0e, 0x26, 0x63, 0xc6, 0xcd, 0x25, 0x63, 0x26, 0x86, 0xc7, 0x62, 0x26, 0xff, 0x26, 0xa0, 0x1b, 0xb9, 0xa4, 0x53, 0xac, 0x88, 0x1f, 0x4f, 0xa1, 0xee, 0xa5, 0x97, 
0x20, 0x28, 0xa0, 0xa5, 0xa2, 0xd2, 0x22, 0x41, 0x9f, 0x2c, 0x9d, 0x3e, 0x25, 0x46, 0x9c, 0x37, 0x96, 0x17, 0x24, 0x24, 0x9d, 0x80, 0x90, 0x7b, 0x26, 0xd7, 0x9b, 0xcd, 0x8b, 0x9b, 0x26, 0x9d, 0x9a, 0x97, 0x89, 0x04, 0x28, 0x2e, 0x8f, 0x59, 0x75, 0x4f, 0x28, 0xdd, 0x8d, 0x45, 0x67, 0x5e, 0x28, 0xa5, 0x8c, 0xf0, 0x65, 0x0d, 0x28, 0x67, 0x8c, 0x9a, 0x62, 0xb2, 0x20, 0xea, 0x88, 0xde, 0x4b, 0x41, 0x20, 0xfa, 0x89, 0xd5, 0x28, 0x02, 0x21, 0x8a, 0x8a, 0x1f, 0x24, 0xef, 0x1f, 0x8d, 0x88, 0xfa, 0x1f, 0x37, 0x1e, 0x1f, 0x8f, 0xd5, 0x1a, 0xc4, 0x18, 0x89, 0xa3, 0x1a, 0xb9, 0xc7, 0x1a, 0x78, 0xa1, 0x57, 0xab, 0x73, 0x1f, 0xc5, 0x9e, 0xb8, 0xa2, 0x3d, 0x20, 0xd2, 0x9c, 0xf7, 0x9e, 0xb0, 0x23, 0xd0, 0x9b, 0x21, 0x96, 0xed, 0x25, 0x74, 0x98, 0x6c, 0x90, 0xeb, 0x24, 0x1a, 0x99, 0xdb, 0x8a, 0x9b, 0x27, 0x15, 0x96, 0xfc, 0x86, 0x2d, 0x29, 0x30, 0x8c, 0x23, 0x73, 0x4e, 0x27, 0xc5, 0x8c, 0x65, 0x64, 0xa0, 0x28, 0xc4, 0x8b, 0x46, 0x63, 0x26, 0x27, 0xef, 0x89, 0xa7, 0x5b, 0xa7, 0x20, 0xea, 0x86, 0x0f, 0x47, 0xf4, 0x21, 0xac, 0x85, 0x99, 0x25, 0xad, 0x22, 0x0d, 0x87, 0xf2, 0x22, 0x6a, 0x22, 0xbc, 0x88, 0x69, 0x1f, 0x9d, 0x23, 0x8b, 0x88, 0xeb, 0x1d, 0x70, 0x17, 0x8e, 0xa1, 0x1c, 0xb9, 0xcd, 0x16, 0xdf, 0x9f, 0x1b, 0xb5, 0xb7, 0x19, 0xbc, 0x9c, 0xb3, 0xa8, 0x54, 0x20, 0x69, 0x9a, 0x0d, 0x9d, 0x55, 0x21, 0xc3, 0x97, 0x78, 0x98, 0x7f, 0x24, 0xfb, 0x95, 0x4b, 0x8f, 0x2e, 0x23, 0x1b, 0x85, 0x10, 0x79, 0xf1, 0x27, 0xb6, 0x88, 0xed, 0x78, 0xe9, 0x28, 0xb5, 0x87, 0xdf, 0x6f, 0x9e, 0x28, 0x65, 0x87, 0x10, 0x63, 0x6c, 0x28, 0xfd, 0x87, 0xf4, 0x5f, 0x80, 0x22, 0x76, 0x7f, 0xd6, 0x48, 0x8d, 0x22, 0xeb, 0x7b, 0xe2, 0x27, 0xd1, 0x23, 0xa2, 0x7e, 0xcf, 0x24, 0x40, 0x23, 0xf2, 0x85, 0x3f, 0x20, 0x02, 0x24, 0x64, 0x86, 0x79, 0x1d, 0x4a, 0x25, 0x33, 0x87, 0x30, 0x1b, 0x2b, 0x10, 0x36, 0xa0, 0x60, 0xca, 0xad, 0x12, 0x39, 0x9b, 0x40, 0xb8, 0x08, 0x15, 0xdb, 0x98, 0xc3, 0xae, 0xf5, 0x1a, 0x00, 0x92, 0x9c, 0x9e, 0xe3, 0x1d, 0x7a, 0x92, 0x05, 0x96, 0xa3, 0x1f, 0x68, 0x86, 0x3f, 0x83, 0x7f, 0x25, 0xdd, 0x7e, 
0xec, 0x78, 0x77, 0x27, 0x55, 0x7d, 0x6f, 0x6e, 0x13, 0x29, 0x25, 0x7b, 0x94, 0x64, 0xf4, 0x29, 0xc1, 0x77, 0xed, 0x56, 0xac, 0x28, 0x89, 0x77, 0xec, 0x4c, 0xb2, 0x24, 0x6f, 0x70, 0xc7, 0x2d, 0x33, 0x26, 0xe3, 0x75, 0xe5, 0x27, 0x00, 0x25, 0xa9, 0x7a, 0x82, 0x22, 0x24, 0x26, 0x66, 0x7d, 0x86, 0x1f, 0x48, 0x26, 0x7e, 0x84, 0x28, 0x1a, 0x93, 0x27, 0x3f, 0x85, 0x27, 0x18, 0x8d, 0x0c, 0xb6, 0x97, 0xe1, 0xcb, 0x17, 0x0f, 0xbc, 0x96, 0x3c, 0xc4, 0x46, 0x11, 0x40, 0x8a, 0x9f, 0xa8, 0xe5, 0x14, 0xdc, 0x87, 0x88, 0x9a, 0xdd, 0x1b, 0x03, 0x7d, 0xfe, 0x88, 0x08, 0x1e, 0x38, 0x78, 0x3e, 0x79, 0x82, 0x22, 0x2b, 0x74, 0x5f, 0x6f, 0x80, 0x27, 0x9c, 0x70, 0x55, 0x64, 0x7c, 0x28, 0xd3, 0x6c, 0x8d, 0x56, 0xb7, 0x29, 0x0e, 0x6d, 0xd6, 0x4e, 0xe7, 0x26, 0xb4, 0x67, 0xef, 0x36, 0x7a, 0x24, 0xe5, 0x67, 0x9d, 0x25, 0x76, 0x27, 0xa1, 0x6c, 0xfe, 0x22, 0xb2, 0x29, 0x18, 0x73, 0xf3, 0x20, 0xe6, 0x28, 0xf0, 0x79, 0x3b, 0x1c, 0xcf, 0x29, 0xc7, 0x7c, 0xbc, 0x1a, 0x8c, 0x58, 0x35, 0x8e, 0x8a, 0x15, 0x3d, 0x0a, 0xc0, 0x8d, 0x3d, 0xcd, 0x5f, 0x0a, 0xee, 0x8c, 0xa4, 0xc4, 0xc9, 0x0b, 0x10, 0x86, 0x6c, 0xaf, 0xe4, 0x12, 0x56, 0x7c, 0x6e, 0x98, 0xa4, 0x19, 0x79, 0x72, 0xce, 0x85, 0x55, 0x1a, 0x6d, 0x6b, 0xe4, 0x75, 0x53, 0x20, 0x2e, 0x67, 0x43, 0x67, 0x6d, 0x26, 0xe5, 0x63, 0x89, 0x5a, 0xa7, 0x27, 0xcf, 0x61, 0x3a, 0x4d, 0xc9, 0x28, 0x89, 0x5c, 0x36, 0x3b, 0xf6, 0x27, 0xe4, 0x59, 0xd2, 0x28, 0xb8, 0x28, 0x35, 0x5e, 0x1c, 0x22, 0x54, 0x28, 0xa7, 0x64, 0xa6, 0x1c, 0x39, 0x32, 0x44, 0x6c, 0xfd, 0x1a, 0x4a, 0x47, 0xbb, 0x75, 0x11, 0x19, 0x35, 0x5a, 0x4d, 0x80, 0xc1, 0x18, 0x3a, 0x6d, 0xcf, 0x93, 0x36, 0x1a, 0x83, 0x09, 0x8c, 0x81, 0xd7, 0xce, 0xa6, 0x07, 0x1c, 0x81, 0x8e, 0xc6, 0x20, 0x0d, 0x17, 0x77, 0x3a, 0xab, 0xb8, 0x0f, 0x78, 0x70, 0x11, 0x95, 0x09, 0x16, 0xa7, 0x65, 0x61, 0x80, 0x51, 0x17, 0x85, 0x5d, 0x42, 0x6e, 0x55, 0x1e, 0x65, 0x57, 0xdf, 0x5e, 0x98, 0x25, 0x1f, 0x54, 0x4e, 0x50, 0xf5, 0x27, 0x75, 0x50, 0x94, 0x40, 0xcd, 0x29, 0xaa, 0x4c, 0xce, 0x2f, 0x42, 0x2a, 0x79, 0x50, 0x2d, 0x25, 0xa2, 
0x32, 0x20, 0x55, 0x59, 0x1b, 0xc2, 0x3f, 0x4d, 0x5e, 0xf8, 0x16, 0x79, 0x4b, 0x91, 0x69, 0xac, 0x17, 0xa5, 0x5b, 0x0b, 0x75, 0x30, 0x18, 0xdd, 0x67, 0x53, 0x80, 0x94, 0x18, 0xcc, 0x73, 0xee, 0x8b, 0xe0, 0x19, 0x98, 0x05, 0x1d, 0x6f, 0xf8, 0xd1, 0xc7, 0x01, 0x36, 0x6e, 0xee, 0xc8, 0x58, 0x07, 0x4c, 0x6b, 0xd1, 0xb2, 0xdc, 0x0b, 0x86, 0x5e, 0xaf, 0x91, 0x6b, 0x0e, 0xa8, 0x57, 0x07, 0x7a, 0x3d, 0x12, 0x76, 0x4a, 0xb4, 0x65, 0x21, 0x1c, 0x5e, 0x47, 0x3c, 0x55, 0xc4, 0x23, 0x69, 0x42, 0xda, 0x46, 0xd0, 0x29, 0x88, 0x40, 0xd3, 0x38, 0x0a, 0x2d, 0xaf, 0x3f, 0x8e, 0x28, 0x17, 0x36, 0xd8, 0x47, 0x77, 0x21, 0xd2, 0x41, 0xb9, 0x50, 0x85, 0x1c, 0x61, 0x4d, 0xac, 0x5a, 0xc9, 0x17, 0xf0, 0x5a, 0x6a, 0x66, 0x7f, 0x18, 0x62, 0x67, 0x36, 0x71, 0xe3, 0x19, 0x33, 0x74, 0x82, 0x7c, 0xd4, 0x19, 0xe7, 0x7f, 0x75, 0x86, 0x19, 0x19, 0xeb, 0x0e, 0x95, 0x60, 0x24, 0xd2, 0x11, 0x03, 0x6c, 0x5e, 0x9a, 0xca, 0x2f, 0x04, 0x57, 0x51, 0x3d, 0xa6, 0xa9, 0x06, 0x64, 0x4c, 0xa1, 0x8e, 0xc2, 0x09, 0x79, 0x45, 0x3c, 0x73, 0xf3, 0x1c, 0x68, 0x3f, 0xd8, 0x64, 0xae, 0x26, 0x0c, 0x3b, 0xa6, 0x54, 0xfd, 0x2e, 0x1b, 0x38, 0xf2, 0x45, 0xbc, 0x34, 0x77, 0x35, 0x6a, 0x35, 0x89, 0x3a, 0x77, 0x37, 0x42, 0x27, 0x86, 0x45, 0x1c, 0x41, 0x36, 0x22, 0xd5, 0x50, 0x35, 0x4b, 0xf3, 0x1e, 0x9b, 0x5a, 0xdf, 0x56, 0xd4, 0x1a, 0xf5, 0x65, 0xa3, 0x61, 0x20, 0x18, 0x8f, 0x72, 0x97, 0x6d, 0xb8, 0x1a, 0x3b, 0x7f, 0xc6, 0x7a, 0xb6, 0x1b, 0xa2, 0x89, 0xd3, 0x81, 0xd7, 0x1b, 0xae, 0x2b, 0x74, 0x30, 0x1b, 0xd1, 0x0e, 0x0d, 0xbf, 0x4a, 0x53, 0xcb, 0x10, 0x04, 0x85, 0x43, 0x6a, 0xaf, 0xd5, 0x02, 0x0a, 0x37, 0x6c, 0x8d, 0xcc, 0x15, 0x9c, 0x36, 0xf9, 0x75, 0xb8, 0x26, 0x71, 0x33, 0xb3, 0x65, 0xe6, 0x32, 0x85, 0x31, 0x14, 0x55, 0x98, 0x3a, 0xfe, 0x2f, 0x7f, 0x46, 0xcc, 0x41, 0xb7, 0x2e, 0x7e, 0x37, 0xd6, 0x47, 0x88, 0x2e, 0x95, 0x28, 0x36, 0x52, 0xa8, 0x39, 0x21, 0x23, 0xce, 0x5c, 0xf0, 0x43, 0x47, 0x20, 0x47, 0x66, 0xa8, 0x4e, 0x3f, 0x1d, 0x0a, 0x6f, 0xb2, 0x5a, 0x12, 0x1a, 0x48, 0x7c, 0xff, 0x65, 0x90, 0x1b, 0x97, 0x8b, 0xa3, 0x73, 
0x7d, 0x1e, 0x2f, 0x94, 0xa1, 0x7b, 0xac, 0x1e, 0xa3, 0x2b, 0x8b, 0x30, 0x0b, 0xd1, 0x14, 0x2b, 0x5c, 0x30, 0x06, 0xd0, 0xf7, 0x10, 0xbb, 0x30, 0x13, 0xba, 0x2a, 0x12, 0x22, 0x29, 0xf8, 0x94, 0x08, 0x25, 0x7c, 0x29, 0x73, 0x7b, 0x1c, 0x35, 0x04, 0x28, 0xa7, 0x69, 0x12, 0x40, 0xe1, 0x27, 0xb2, 0x59, 0x65, 0x48, 0x50, 0x26, 0x2e, 0x4a, 0x7d, 0x4e, 0xfb, 0x25, 0x21, 0x3a, 0x31, 0x53, 0xb1, 0x24, 0x40, 0x29, 0x0a, 0x5d, 0x9d, 0x2c, 0x2d, 0x24, 0xbf, 0x67, 0x49, 0x36, 0xe5, 0x20, 0xa0, 0x70, 0xbc, 0x42, 0x59, 0x1e, 0x1b, 0x7a, 0x01, 0x4e, 0x4c, 0x1a, 0xf5, 0x89, 0x03, 0x5b, 0xbc, 0x1d, 0x33, 0x95, 0x76, 0x68, 0x7f, 0x1f, 0xb6, 0x9b, 0x9f, 0x74, 0x30, 0x1f, 0x2a, 0x2b, 0xa0, 0x2f, 0xfb, 0xd1, 0x1b, 0x2b, 0x89, 0x2f, 0xe7, 0xd1, 0x03, 0x1f, 0x90, 0x25, 0x61, 0xc1, 0xfe, 0x25, 0x83, 0x1c, 0xe8, 0x9a, 0x4b, 0x37, 0x8e, 0x1d, 0xa6, 0x81, 0x39, 0x46, 0xd3, 0x1e, 0xcb, 0x6b, 0x65, 0x4f, 0x5c, 0x1e, 0x9b, 0x5e, 0x26, 0x56, 0xe8, 0x1c, 0xe9, 0x4f, 0x35, 0x5a, 0xd6, 0x1a, 0x6c, 0x3d, 0xe9, 0x61, 0x38, 0x1c, 0x77, 0x2f, 0x95, 0x68, 0x16, 0x22, 0x39, 0x24, 0x7d, 0x71, 0x36, 0x27, 0x25, 0x20, 0xe8, 0x7a, 0x3d, 0x31, 0xdc, 0x1c, 0x97, 0x88, 0x7d, 0x44, 0x7c, 0x1b, 0xdf, 0x94, 0x9f, 0x50, 0xb0, 0x1e, 0x75, 0x97, 0xe6, 0x57, 0xe0, 0x1e, 0xaa, 0xa1, 0x6e, 0x64, 0xd8, 0x1f, 0xc4, 0x2b, 0xb7, 0x2f, 0xeb, 0xd1, 0x21, 0x2b, 0xb5, 0x2f, 0xc7, 0xd1, 0x10, 0x2c, 0x68, 0x1d, 0x58, 0xc7, 0x3d, 0x45, 0x42, 0x1b, 0x7c, 0xa8, 0xce, 0x53, 0xe4, 0x1e, 0x82, 0x90, 0x96, 0x60, 0x39, 0x21, 0xf4, 0x7a, 0x10, 0x66, 0x28, 0x1f, 0xdc, 0x6b, 0xde, 0x69, 0x9a, 0x1b, 0x43, 0x5c, 0x78, 0x6c, 0x43, 0x18, 0x54, 0x4a, 0x75, 0x72, 0x12, 0x1a, 0xa4, 0x3b, 0x3f, 0x76, 0x87, 0x1c, 0xf7, 0x2b, 0x5e, 0x7c, 0xcd, 0x20, 0x43, 0x23, 0x1d, 0x86, 0x39, 0x22, 0x43, 0x1f, 0x61, 0x8f, 0x50, 0x2e, 0x7a, 0x18, 0xe8, 0x96, 0xcf, 0x42, 0xc5, 0x1a, 0xa0, 0x9f, 0xe0, 0x50, 0xbe, 0x1d, 0xf2, 0xaa, 0x86, 0x55, 0x77, 0x1d, 0x8b, 0x2b, 0xce, 0x2f, 0xdc, 0xd1, 0x27, 0x2c, 0x83, 0x2e, 0xca, 0xd0, 0x74, 0x53, 0xba, 0x13, 0x51, 0xcc, 0xa7, 
0x69, 0x93, 0x15, 0xf3, 0xb7, 0xe4, 0x71, 0xde, 0x1e, 0x1b, 0x9b, 0xf1, 0x77, 0x43, 0x20, 0xc4, 0x88, 0x18, 0x7b, 0xf7, 0x1f, 0x99, 0x78, 0x90, 0x7b, 0xec, 0x19, 0xa4, 0x68, 0x98, 0x80, 0x6c, 0x19, 0x1e, 0x57, 0xa8, 0x82, 0xb9, 0x19, 0xe1, 0x47, 0xfe, 0x88, 0x2b, 0x1a, 0xa7, 0x38, 0xd5, 0x8b, 0xc5, 0x1d, 0xa8, 0x27, 0x90, 0x95, 0x00, 0x20, 0x3a, 0x22, 0xb7, 0x9b, 0x49, 0x21, 0x18, 0x1f, 0x2f, 0xa5, 0x98, 0x25, 0xf5, 0x1e, 0x62, 0xaa, 0xf4, 0x42, 0xbc, 0x1b, 0x26, 0xad, 0x5f, 0x4e, 0x1f, 0x1c, 0x50, 0x2a, 0xf7, 0x33, 0x32, 0xd0, 0x1f, 0x6d, 0xeb, 0x1c, 0xc5, 0xd6, 0x41, 0x7d, 0x7d, 0x19, 0x9a, 0xce, 0x12, 0x89, 0x37, 0x17, 0xe6, 0xc3, 0x9a, 0x88, 0x21, 0x1b, 0x99, 0xaa, 0x4c, 0x8c, 0x55, 0x1d, 0x39, 0x9a, 0xd8, 0x93, 0x82, 0x1d, 0x37, 0x8e, 0x70, 0x92, 0xe1, 0x1a, 0xee, 0x77, 0x7e, 0x96, 0x4b, 0x1c, 0x8c, 0x65, 0xeb, 0x96, 0xba, 0x1b, 0x91, 0x54, 0xd7, 0x9a, 0xcc, 0x1b, 0x95, 0x45, 0x92, 0x9f, 0x65, 0x1b, 0xf0, 0x36, 0x1f, 0x9f, 0xe2, 0x1e, 0x3a, 0x25, 0x13, 0xa8, 0xf0, 0x1e, 0x21, 0x20, 0xf4, 0xbf, 0xcd, 0x22, 0x11, 0x20, 0xd7, 0xc2, 0xf7, 0x26, 0x1c, 0x22, 0xbe, 0xc4, 0xaf, 0x28, 0x52, 0x23, 0xc8, 0x7b, 0x3b, 0x1f, 0xd5, 0xd8, 0x7d, 0x85, 0xa3, 0x1e, 0xc6, 0xd4, 0x11, 0x95, 0x6d, 0x1d, 0xd3, 0xcf, 0x68, 0x9f, 0x3d, 0x1a, 0xdb, 0xc8, 0x3d, 0xaa, 0x73, 0x1c, 0x00, 0xc2, 0x87, 0xa2, 0xaf, 0x1d, 0xdb, 0xac, 0x3e, 0xa6, 0x4a, 0x1d, 0x50, 0x9b, 0x8c, 0xa8, 0xde, 0x1b, 0xf6, 0x88, 0x99, 0xa5, 0xbb, 0x1c, 0x9f, 0x6f, 0x84, 0xa5, 0x82, 0x1d, 0x54, 0x5e, 0x37, 0xa4, 0x9c, 0x1c, 0xb2, 0x4c, 0x71, 0xab, 0x1c, 0x1a, 0x80, 0x3f, 0x5c, 0xae, 0x01, 0x18, 0x7c, 0x27, 0xb3, 0xc1, 0xcd, 0x1e, 0x29, 0x23, 0xdf, 0xc4, 0x68, 0x23, 0x47, 0x24, 0xd8, 0xc5, 0xcf, 0x26, 0x1b, 0x25, 0x61, 0xc6, 0xae, 0x27, 0xe5, 0x25, 0xba, 0x8d, 0x04, 0x21, 0x40, 0xd6, 0x05, 0x99, 0x3d, 0x21, 0x05, 0xd3, 0x91, 0xa3, 0xd0, 0x1e, 0xd0, 0xcf, 0xe0, 0xa7, 0xa5, 0x1a, 0xd3, 0xcc, 0xaa, 0xb4, 0xb5, 0x1c, 0xd7, 0xc8, 0x7a, 0xc1, 0x8a, 0x1d, 0x7a, 0xc3, 0xef, 0xc3, 0x6d, 0x1e, 0x7d, 0xb2, 0xfb, 0xc3, 0x1d, 0x1d, 
0xbc, 0x9d, 0xfc, 0xc1, 0xe2, 0x1c, 0x2f, 0x83, 0x92, 0xc0, 0x03, 0x1c, 0x2b, 0x70, 0x40, 0xb0, 0xc5, 0x1c, 0x57, 0x54, 0x10, 0xaf, 0xf4, 0x1a, 0xc4, 0x42, 0x53, 0xbd, 0x1f, 0x11, 0x99, 0x27, 0x36, 0xc5, 0xd3, 0x20, 0x8f, 0x27, 0x10, 0xc6, 0xeb, 0x23, 0xf4, 0x27, 0x0f, 0xc7, 0x98, 0x26, 0x1e, 0x27, 0x12, 0xc8, 0x0d, 0x27, 0x9d, 0x27, 0x16, 0x1c, 0x37, 0xa5, 0xe4, 0xae, 0x9d, 0x1f, 0x13, 0xa3, 0xb0, 0xa7, 0x8d, 0x1f, 0xdc, 0xa2, 0x6d, 0xa4, 0xbe, 0x21, 0xfe, 0xa0, 0xee, 0x9f, 0x09, 0x25, 0x21, 0x9d, 0xc6, 0x97, 0x9a, 0x24, 0x0d, 0x9e, 0xe5, 0x91, 0xd7, 0x26, 0xb3, 0x9d, 0x1a, 0x8c, 0xdc, 0x26, 0x76, 0x9b, 0xe4, 0x8a, 0x21, 0x28, 0x58, 0x91, 0xb6, 0x77, 0x12, 0x2a, 0xe6, 0x8a, 0xfd, 0x67, 0x60, 0x28, 0x8f, 0x8d, 0xaa, 0x65, 0xab, 0x22, 0xcf, 0x8d, 0x2f, 0x5f, 0xe1, 0x23, 0x2e, 0x9a, 0x63, 0x5c, 0x55, 0x20, 0xe3, 0x8b, 0x09, 0x28, 0x92, 0x1e, 0x9e, 0x8a, 0xa8, 0x22, 0x79, 0x1d, 0x4e, 0x90, 0xcb, 0x1d, 0xcc, 0x1d, 0xc0, 0x91, 0x3f, 0x1a, 0x4a, 0x19, 0x4a, 0xa4, 0xef, 0xbb, 0xd0, 0x1b, 0x1d, 0xa3, 0x59, 0xae, 0x01, 0x1f, 0x6a, 0xa0, 0x9b, 0xa4, 0x63, 0x20, 0x85, 0x9e, 0xda, 0xa0, 0xbb, 0x23, 0x9a, 0x9c, 0xe4, 0x98, 0xb4, 0x25, 0x50, 0x99, 0xf9, 0x92, 0x6d, 0x24, 0x05, 0x9b, 0x27, 0x8b, 0xe2, 0x26, 0xf6, 0x98, 0x23, 0x87, 0x34, 0x28, 0xc2, 0x8d, 0x8b, 0x74, 0x2a, 0x28, 0xfd, 0x8c, 0x4a, 0x66, 0xa8, 0x28, 0xb1, 0x8b, 0xd8, 0x63, 0x90, 0x27, 0xa5, 0x89, 0xfb, 0x5a, 0x79, 0x20, 0xab, 0x88, 0x32, 0x46, 0xdf, 0x21, 0x49, 0x88, 0xa1, 0x26, 0x30, 0x22, 0x09, 0x89, 0x19, 0x22, 0x8d, 0x20, 0xb7, 0x86, 0x9f, 0x1c, 0x74, 0x1f, 0x78, 0x8d, 0xdd, 0x17, 0x37, 0x18, 0x6b, 0xa3, 0x2e, 0xbc, 0x1e, 0x17, 0xd9, 0xa1, 0x7d, 0xb8, 0x67, 0x19, 0x4f, 0x9e, 0xd3, 0xab, 0x51, 0x20, 0x0a, 0x9b, 0xfe, 0x9f, 0x9a, 0x21, 0x80, 0x99, 0x59, 0x9a, 0x83, 0x24, 0xd8, 0x96, 0xe2, 0x90, 0xc9, 0x27, 0x78, 0x8e, 0xa3, 0x84, 0xe5, 0x27, 0xa1, 0x89, 0xd5, 0x7a, 0x18, 0x28, 0x97, 0x89, 0x4e, 0x71, 0x28, 0x28, 0x49, 0x88, 0xbc, 0x65, 0x09, 0x28, 0xdf, 0x89, 0x52, 0x60, 0x9f, 0x21, 0x1d, 0x81, 0xe4, 0x48, 0x18, 
0x22, 0x2c, 0x7e, 0xd3, 0x28, 0x15, 0x23, 0x52, 0x81, 0x74, 0x24, 0x03, 0x23, 0x60, 0x86, 0x7f, 0x1f, 0x7f, 0x24, 0x66, 0x87, 0x5b, 0x1c, 0xb1, 0x25, 0x48, 0x88, 0x10, 0x1a, 0x6e, 0x10, 0x6d, 0xa2, 0x97, 0xcd, 0x67, 0x12, 0x50, 0x9d, 0xf4, 0xba, 0xf9, 0x15, 0xf9, 0x9b, 0x62, 0xb2, 0x28, 0x17, 0x69, 0x97, 0xec, 0xa6, 0x96, 0x1d, 0x22, 0x94, 0xfd, 0x99, 0xd7, 0x23, 0x36, 0x8f, 0x75, 0x8e, 0xae, 0x22, 0xee, 0x85, 0x47, 0x7b, 0x8e, 0x27, 0x6c, 0x81, 0xb2, 0x73, 0x9e, 0x29, 0x08, 0x7e, 0x37, 0x67, 0xe1, 0x29, 0xa9, 0x7a, 0xa0, 0x59, 0x67, 0x28, 0x2a, 0x79, 0x6d, 0x4c, 0x71, 0x24, 0x2f, 0x72, 0xa4, 0x2d, 0xa0, 0x24, 0xcd, 0x75, 0x8a, 0x25, 0xff, 0x25, 0x68, 0x7d, 0x0d, 0x21, 0x66, 0x26, 0x8d, 0x80, 0xf3, 0x1d, 0x9a, 0x26, 0xc3, 0x84, 0xe1, 0x19, 0x9d, 0x27, 0xbd, 0x86, 0x1e, 0x17, 0xf3, 0x0e, 0x2e, 0x9a, 0x10, 0xcd, 0xe2, 0x0f, 0xff, 0x99, 0xe2, 0xc7, 0x72, 0x12, 0x21, 0x94, 0x63, 0xb5, 0x47, 0x14, 0x12, 0x89, 0xe0, 0x9e, 0x33, 0x1c, 0x09, 0x84, 0xb8, 0x92, 0x0e, 0x1e, 0x2c, 0x7d, 0x67, 0x7f, 0x5d, 0x21, 0xdd, 0x77, 0xb5, 0x74, 0x02, 0x27, 0x0b, 0x75, 0x09, 0x69, 0xff, 0x28, 0xb3, 0x71, 0xb7, 0x5d, 0x16, 0x29, 0xc1, 0x6e, 0xdc, 0x4f, 0xdd, 0x26, 0x58, 0x6a, 0x6b, 0x38, 0xa4, 0x25, 0x31, 0x69, 0x87, 0x25, 0xff, 0x28, 0x04, 0x6f, 0x56, 0x22, 0x7b, 0x28, 0xca, 0x73, 0xd8, 0x1e, 0xec, 0x29, 0x62, 0x7b, 0x6e, 0x1b, 0x2e, 0x2d, 0xe8, 0x81, 0x9a, 0x1a, 0x96, 0x5a, 0xf9, 0x90, 0x3c, 0x15, 0x50, 0x0e, 0xc3, 0x90, 0x16, 0xce, 0xf4, 0x0b, 0x9e, 0x90, 0x4d, 0xc8, 0x22, 0x0a, 0xe0, 0x89, 0x6c, 0xb3, 0x65, 0x13, 0xeb, 0x7f, 0x03, 0x9c, 0x11, 0x19, 0x93, 0x76, 0xa1, 0x88, 0xf9, 0x1a, 0x50, 0x70, 0x4f, 0x7a, 0x58, 0x1f, 0x0f, 0x6a, 0x92, 0x6a, 0xe0, 0x26, 0x86, 0x65, 0x90, 0x5d, 0x09, 0x27, 0x96, 0x63, 0x62, 0x4f, 0xa4, 0x27, 0xbc, 0x5e, 0xaf, 0x3d, 0x7f, 0x28, 0x30, 0x5c, 0xb7, 0x2a, 0xfc, 0x27, 0xc9, 0x5f, 0x52, 0x1f, 0xbc, 0x2c, 0x27, 0x66, 0x7c, 0x1a, 0x61, 0x36, 0xc8, 0x6f, 0xae, 0x19, 0x1a, 0x4d, 0xbc, 0x7a, 0x2b, 0x18, 0x9a, 0x5d, 0xcb, 0x80, 0xc9, 0x16, 0xfe, 0x6e, 0xab, 0x94, 
0x34, 0x1a, 0xa8, 0x0c, 0x7a, 0x86, 0x01, 0xd1, 0xa3, 0x08, 0xbd, 0x84, 0x37, 0xc9, 0xc9, 0x09, 0x5b, 0x7f, 0x6d, 0xb7, 0x89, 0x0e, 0xa2, 0x74, 0xb5, 0x9a, 0xa6, 0x14, 0x7b, 0x6a, 0x6d, 0x83, 0xf9, 0x17, 0xd8, 0x61, 0x1b, 0x72, 0x49, 0x1e, 0x55, 0x5b, 0x92, 0x62, 0xa8, 0x25, 0x2a, 0x58, 0x4e, 0x53, 0xec, 0x28, 0x25, 0x53, 0xa0, 0x44, 0x10, 0x29, 0x8f, 0x50, 0x8a, 0x32, 0x3c, 0x2b, 0x8d, 0x4f, 0xdb, 0x1f, 0xeb, 0x35, 0x11, 0x57, 0x7c, 0x1a, 0x0c, 0x41, 0xed, 0x61, 0xb9, 0x16, 0x8c, 0x4d, 0x0d, 0x6b, 0xdd, 0x17, 0xab, 0x5e, 0x1d, 0x78, 0x53, 0x18, 0xfc, 0x69, 0x69, 0x82, 0x56, 0x18, 0xdf, 0x79, 0x54, 0x91, 0xb5, 0x1a, 0x69, 0x08, 0x84, 0x73, 0x1a, 0xd5, 0x42, 0x01, 0x90, 0x73, 0xcf, 0xcd, 0x24, 0x07, 0x35, 0x6f, 0xed, 0xb7, 0x5d, 0x0a, 0x6a, 0x65, 0xce, 0x9b, 0x73, 0x0e, 0x4d, 0x5c, 0x0a, 0x80, 0x4b, 0x1b, 0xe3, 0x54, 0x29, 0x6e, 0x67, 0x26, 0x4f, 0x50, 0x76, 0x5f, 0xbd, 0x2c, 0xfa, 0x4d, 0x0f, 0x50, 0xc6, 0x32, 0x61, 0x4a, 0x0b, 0x41, 0x03, 0x35, 0xd9, 0x48, 0x89, 0x30, 0x32, 0x39, 0x6f, 0x49, 0x69, 0x1f, 0xa7, 0x44, 0xc5, 0x53, 0x74, 0x1b, 0x10, 0x50, 0x56, 0x5d, 0x8a, 0x16, 0xa8, 0x5d, 0xa5, 0x69, 0xf5, 0x18, 0x89, 0x6a, 0x40, 0x75, 0x4b, 0x19, 0x64, 0x76, 0x0f, 0x7f, 0x17, 0x19, 0xeb, 0x83, 0x0e, 0x8b, 0x2b, 0x1a, 0xbc, 0x10, 0xf2, 0x65, 0x16, 0xd5, 0xab, 0x08, 0x30, 0x65, 0x88, 0xce, 0x97, 0x01, 0xcd, 0x60, 0x06, 0xba, 0x83, 0x07, 0x00, 0x54, 0x38, 0x98, 0xc5, 0x17, 0x16, 0x4e, 0x30, 0x7f, 0x0a, 0x24, 0xc2, 0x49, 0xe9, 0x6d, 0xda, 0x30, 0x13, 0x45, 0x79, 0x5e, 0x15, 0x37, 0xb8, 0x42, 0x0b, 0x4e, 0xe7, 0x3e, 0x87, 0x3f, 0x88, 0x3f, 0xa5, 0x43, 0x3b, 0x41, 0x4f, 0x30, 0x22, 0x47, 0x98, 0x43, 0x78, 0x20, 0xd6, 0x52, 0x68, 0x4e, 0x9c, 0x1d, 0x89, 0x5d, 0x1a, 0x59, 0x99, 0x19, 0xc0, 0x68, 0xc2, 0x65, 0x44, 0x19, 0x4f, 0x75, 0xd0, 0x71, 0x1d, 0x1a, 0x72, 0x82, 0x02, 0x7c, 0x3f, 0x1b, 0xd0, 0x8c, 0x76, 0x85, 0x99, 0x1c, 0x18, 0x2b, 0x80, 0x30, 0x25, 0xd1, 0x1a, 0x11, 0x61, 0x51, 0xeb, 0xd0, 0x5d, 0x03, 0x60, 0x4c, 0x23, 0xbd, 0xb9, 0x0e, 0x42, 0x41, 0xfd, 0x97, 0xf2, 
0x23, 0x05, 0x41, 0x2c, 0x80, 0x98, 0x32, 0x28, 0x3e, 0x19, 0x6e, 0x37, 0x3c, 0x88, 0x3a, 0xb1, 0x5e, 0x33, 0x44, 0xed, 0x38, 0xb1, 0x4f, 0x86, 0x4b, 0x1e, 0x37, 0xb6, 0x40, 0x26, 0x50, 0x6e, 0x38, 0x69, 0x30, 0xe4, 0x54, 0xdf, 0x3a, 0xc2, 0x22, 0x6b, 0x5e, 0xf8, 0x46, 0x26, 0x1f, 0x67, 0x68, 0x98, 0x51, 0x95, 0x1b, 0xf8, 0x73, 0x0d, 0x5d, 0x49, 0x1a, 0xa9, 0x80, 0xd8, 0x69, 0xe8, 0x1c, 0x89, 0x8e, 0x33, 0x76, 0x5d, 0x1e, 0x60, 0x96, 0xba, 0x7e, 0x81, 0x1e, 0x9b, 0x31, 0xb1, 0x2e, 0x91, 0xd0, 0x71, 0x2b, 0x75, 0x30, 0x19, 0xd1, 0x0e, 0x09, 0x40, 0x35, 0x83, 0xbe, 0xc1, 0x1d, 0x50, 0x34, 0xeb, 0xa0, 0xff, 0x31, 0xfc, 0x33, 0xc8, 0x82, 0xfb, 0x41, 0xa1, 0x32, 0xb0, 0x70, 0x87, 0x4c, 0xb2, 0x31, 0x36, 0x61, 0x96, 0x52, 0x59, 0x2f, 0xb2, 0x53, 0xd3, 0x57, 0x56, 0x2e, 0x62, 0x43, 0x38, 0x5c, 0x52, 0x2d, 0xb4, 0x32, 0x77, 0x60, 0xa1, 0x2f, 0x10, 0x23, 0x05, 0x69, 0x96, 0x39, 0x56, 0x1f, 0xde, 0x73, 0x9d, 0x45, 0xcb, 0x1c, 0x8e, 0x7e, 0x17, 0x52, 0x90, 0x1b, 0x7a, 0x8d, 0x08, 0x62, 0x4b, 0x1f, 0x51, 0x96, 0xcd, 0x6d, 0xc5, 0x20, 0x4e, 0x9e, 0x94, 0x74, 0xea, 0x1f, 0x1e, 0x2b, 0xae, 0x30, 0x04, 0xd1, 0x25, 0x2b, 0xa2, 0x2f, 0xfb, 0xd1, 0x1b, 0x21, 0x99, 0x25, 0x6d, 0xc4, 0x22, 0x33, 0x3c, 0x26, 0x14, 0xa8, 0xc3, 0x44, 0x65, 0x28, 0x1e, 0x8c, 0xb7, 0x50, 0xf3, 0x29, 0x07, 0x78, 0x2f, 0x59, 0x56, 0x28, 0x38, 0x67, 0xcb, 0x5f, 0x2d, 0x26, 0x01, 0x58, 0x27, 0x63, 0xd9, 0x23, 0xbb, 0x47, 0x92, 0x67, 0x5e, 0x21, 0x62, 0x35, 0x11, 0x6a, 0xa6, 0x20, 0x43, 0x23, 0xf9, 0x73, 0x4d, 0x29, 0x7b, 0x1f, 0x98, 0x7c, 0xec, 0x35, 0x1a, 0x1b, 0x78, 0x89, 0xa1, 0x45, 0xc3, 0x1c, 0x10, 0x95, 0xc2, 0x53, 0xe8, 0x1e, 0x97, 0x9d, 0xd3, 0x5f, 0x2c, 0x20, 0x56, 0xa8, 0xa2, 0x6c, 0x75, 0x1d, 0xa5, 0x2b, 0xc4, 0x2f, 0xf6, 0xd1, 0x2d, 0x2b, 0xce, 0x2f, 0xdc, 0xd1, 0x27, 0x39, 0x29, 0x16, 0x34, 0xcb, 0x7c, 0x49, 0x60, 0x1a, 0x18, 0xaf, 0xea, 0x5a, 0x12, 0x1d, 0xd5, 0x91, 0xfb, 0x62, 0xdf, 0x20, 0xbb, 0x7e, 0xcd, 0x69, 0x6b, 0x1f, 0xa7, 0x6d, 0xfc, 0x6c, 0x06, 0x1c, 0x84, 0x5d, 0x2d, 0x70, 0xb4, 0x1a, 
0x3f, 0x4c, 0xf8, 0x75, 0x2d, 0x1a, 0x6c, 0x3c, 0xa4, 0x7a, 0xb6, 0x1d, 0x54, 0x2e, 0xa8, 0x7f, 0xd7, 0x20, 0x54, 0x23, 0x27, 0x88, 0x12, 0x24, 0x3a, 0x1e, 0x46, 0x92, 0x5e, 0x33, 0x50, 0x19, 0x75, 0x99, 0xf0, 0x44, 0xdd, 0x1a, 0xcf, 0xa6, 0x5b, 0x53, 0xd1, 0x1e, 0x2a, 0xac, 0x02, 0x59, 0x97, 0x1d, 0x9e, 0x2b, 0x83, 0x31, 0xa4, 0xcf, 0xe7, 0x2c, 0x9b, 0x2e, 0xdd, 0xd0, 0x8b, 0x63, 0x98, 0x19, 0x07, 0xd2, 0x77, 0x71, 0x09, 0x14, 0x64, 0xbe, 0x3a, 0x76, 0x7b, 0x1a, 0xea, 0xa1, 0xff, 0x7d, 0x62, 0x1c, 0xd2, 0x8f, 0x7c, 0x7f, 0x60, 0x1b, 0xbb, 0x7c, 0x74, 0x80, 0x70, 0x19, 0x0e, 0x6a, 0x95, 0x83, 0xed, 0x19, 0x45, 0x59, 0xdf, 0x86, 0xc0, 0x1a, 0x3b, 0x49, 0xc4, 0x8b, 0x9a, 0x1a, 0x68, 0x3a, 0x39, 0x8f, 0x2c, 0x1c, 0xe2, 0x29, 0x19, 0x97, 0xf0, 0x20, 0x0d, 0x22, 0x76, 0xa0, 0x71, 0x21, 0xa7, 0x1e, 0xdb, 0xaa, 0xd4, 0x2a, 0x89, 0x1e, 0x0e, 0xad, 0x5c, 0x47, 0x44, 0x1c, 0xb1, 0xae, 0xb8, 0x51, 0x99, 0x1c, 0x68, 0x2c, 0x90, 0x2e, 0xf6, 0xd0, 0x91, 0x75, 0xb2, 0x1f, 0x88, 0xd8, 0xf3, 0x82, 0x1c, 0x1c, 0xbd, 0xd1, 0xcc, 0x8f, 0x17, 0x18, 0x5b, 0xc7, 0xee, 0x92, 0x54, 0x1b, 0x3c, 0xb6, 0x12, 0x99, 0x12, 0x1e, 0x8f, 0xa5, 0xc5, 0x98, 0xec, 0x1e, 0x08, 0x92, 0x38, 0x98, 0xec, 0x1b, 0xf8, 0x7c, 0x19, 0x98, 0x4f, 0x1c, 0xed, 0x67, 0x0c, 0x9a, 0x19, 0x1c, 0x23, 0x56, 0x6c, 0x9d, 0x0e, 0x1b, 0xa8, 0x45, 0x94, 0xa0, 0xe5, 0x1b, 0x8f, 0x36, 0x7a, 0xa3, 0xc8, 0x1d, 0x5c, 0x25, 0x39, 0xae, 0x88, 0x1d, 0x39, 0x20, 0x2a, 0xc1, 0x98, 0x23, 0xb9, 0x22, 0x13, 0xc4, 0x3a, 0x27, 0x46, 0x23, 0x9d, 0xc5, 0xa7, 0x29, 0x38, 0x24, 0x73, 0x80, 0xbf, 0x21, 0xb0, 0xd9, 0xe7, 0x8b, 0x1f, 0x20, 0xd4, 0xd5, 0xd9, 0x98, 0x41, 0x1f, 0xe2, 0xd2, 0x08, 0xa2, 0x91, 0x1b, 0xb7, 0xcb, 0xe1, 0xae, 0xd8, 0x1c, 0x87, 0xc5, 0x44, 0xaf, 0x67, 0x1d, 0xe2, 0xb7, 0x5f, 0xaf, 0x2f, 0x1d, 0xd9, 0xa2, 0xdf, 0xaf, 0x54, 0x1c, 0x63, 0x8d, 0x49, 0xae, 0x64, 0x1c, 0x9f, 0x75, 0xa8, 0xae, 0x47, 0x1c, 0xe8, 0x61, 0x50, 0xad, 0xd0, 0x1b, 0xbf, 0x50, 0x09, 0xad, 0x38, 0x1a, 0x81, 0x40, 0x13, 0xb8, 0x98, 0x13, 0x68, 0x27, 0x6e, 
0xc3, 0x9b, 0x1f, 0xcd, 0x25, 0x1f, 0xc5, 0xac, 0x24, 0x71, 0x25, 0xb8, 0xc6, 0xc7, 0x27, 0x01, 0x26, 0x0e, 0xc7, 0x78, 0x28, 0xa0, 0x26, 0x45, 0x91, 0x11, 0x23, 0x37, 0xd8, 0x2f, 0x9b, 0x1a, 0x22, 0x66, 0xd5, 0x48, 0xa6, 0x62, 0x20, 0x65, 0xd2, 0x22, 0xb5, 0x06, 0x1c, 0xce, 0xcf, 0x3b, 0xb7, 0xfb, 0x1d, 0x33, 0xcb, 0x39, 0xc6, 0x1f, 0x1d, 0xd9, 0xc5, 0xe1, 0xc5, 0x08, 0x1e, 0xa5, 0xb3, 0xdb, 0xc4, 0xd3, 0x1d, 0xd5, 0x9e, 0xd8, 0xc3, 0x8f, 0x1c, 0x46, 0x84, 0x2a, 0xc2, 0x39, 0x1c, 0x2a, 0x71, 0x19, 0xc1, 0xe5, 0x1a, 0x48, 0x5d, 0x63, 0xc2, 0x51, 0x18, 0xf9, 0x4d, 0x29, 0xc3, 0x2b, 0x14, 0xda, 0x30, 0x50, 0xc7, 0x18, 0x21, 0xb8, 0x27, 0xef, 0xc7, 0xe4, 0x24, 0xd8, 0x27, 0xba, 0xc8, 0x62, 0x26, 0xd9, 0x27, 0x9e, 0xc8, 0xb7, 0x28, 0x3b, 0x27, 0x8c, 0x1c, 0xb8, 0xa7, 0x78, 0xb0, 0xb4, 0x1f, 0x73, 0xa5, 0x93, 0xa9, 0x85, 0x1f, 0x85, 0xa4, 0x70, 0xa6, 0xeb, 0x21, 0xb0, 0xa2, 0xee, 0xa1, 0x14, 0x24, 0xf7, 0x9f, 0x91, 0x99, 0x59, 0x23, 0xf0, 0xa0, 0x84, 0x93, 0x6c, 0x26, 0x86, 0x9e, 0xa7, 0x8e, 0x59, 0x26, 0x76, 0x9d, 0xcc, 0x8b, 0xbb, 0x28, 0x7e, 0x94, 0x19, 0x78, 0xd8, 0x28, 0xc8, 0x94, 0xe4, 0x70, 0x8e, 0x28, 0x74, 0x8e, 0xa3, 0x66, 0x7e, 0x26, 0xa8, 0x9b, 0x16, 0x64, 0x05, 0x23, 0xa2, 0x9c, 0x6b, 0x5d, 0x7b, 0x1d, 0x9e, 0x8c, 0x9e, 0x27, 0x07, 0x1c, 0x62, 0x92, 0x06, 0x21, 0xd2, 0x1c, 0xd2, 0x92, 0x7a, 0x1d, 0x78, 0x1d, 0x4b, 0x97, 0xe1, 0x1a, 0x37, 0x1a, 0x0c, 0xa6, 0xc3, 0xbd, 0xd9, 0x1b, 0xc9, 0xa5, 0x5d, 0xb0, 0x8f, 0x1f, 0x03, 0xa2, 0xc4, 0xa6, 0xd8, 0x20, 0x2a, 0xa1, 0x0d, 0xa3, 0x16, 0x23, 0x4c, 0x9e, 0xfe, 0x9a, 0xd8, 0x25, 0x26, 0x9b, 0xd4, 0x94, 0x3d, 0x23, 0xec, 0x9c, 0xc0, 0x8d, 0x71, 0x26, 0xd0, 0x99, 0x96, 0x88, 0x7f, 0x28, 0x67, 0x8e, 0xb6, 0x75, 0x5c, 0x28, 0xf0, 0x8d, 0x2d, 0x67, 0xd4, 0x28, 0x97, 0x8c, 0xa6, 0x64, 0x21, 0x21, 0x22, 0x8c, 0xb7, 0x57, 0xd2, 0x20, 0x67, 0x8b, 0x77, 0x45, 0x00, 0x21, 0x33, 0x8a, 0x12, 0x26, 0xbb, 0x1f, 0xa7, 0x88, 0x56, 0x1f, 0xd5, 0x1e, 0x5a, 0x8f, 0x86, 0x1a, 0x0f, 0x1f, 0xb3, 0x90, 0x9f, 0x17, 0x4a, 0x19, 0x4a, 0xa5, 
0x3f, 0xbe, 0x6f, 0x18, 0xd5, 0xa3, 0xe3, 0xbb, 0x15, 0x1a, 0x40, 0xa1, 0x9c, 0xae, 0xb2, 0x1f, 0x9a, 0x9e, 0x4f, 0xa2, 0x4b, 0x21, 0x2b, 0x9b, 0x9f, 0x9c, 0xf2, 0x24, 0xaa, 0x98, 0xdb, 0x92, 0xc6, 0x25, 0x06, 0x96, 0x1c, 0x8c, 0x65, 0x27, 0x7f, 0x8b, 0x23, 0x7b, 0xcb, 0x2a, 0x59, 0x87, 0xa6, 0x72, 0x49, 0x28, 0x37, 0x89, 0xde, 0x65, 0xc3, 0x28, 0xc8, 0x89, 0xb1, 0x60, 0x9b, 0x20, 0xe5, 0x83, 0xa1, 0x48, 0xa9, 0x21, 0xac, 0x80, 0x0a, 0x28, 0xc8, 0x22, 0x59, 0x86, 0x0d, 0x23, 0x27, 0x23, 0x3c, 0x87, 0x9e, 0x1f, 0x07, 0x24, 0x64, 0x88, 0x71, 0x1b, 0xf4, 0x21, 0xa9, 0x8a, 0xc8, 0x12, 0x3f, 0x11, 0x93, 0xa4, 0xf3, 0xd0, 0x2a, 0x12, 0xb8, 0xa0, 0xef, 0xbe, 0x2c, 0x16, 0x86, 0x9e, 0x74, 0xb5, 0xca, 0x16, 0xdd, 0x9a, 0xb1, 0xaa, 0x39, 0x1c, 0xe0, 0x97, 0x61, 0x9c, 0x83, 0x23, 0x0a, 0x91, 0xb6, 0x91, 0x17, 0x23, 0x47, 0x88, 0x25, 0x7e, 0x7f, 0x26, 0x2a, 0x80, 0x5f, 0x72, 0xe0, 0x28, 0xee, 0x81, 0x2e, 0x6b, 0x1e, 0x2a, 0x1e, 0x7f, 0x14, 0x5e, 0xcf, 0x27, 0xcd, 0x7b, 0x17, 0x4c, 0x7b, 0x21, 0x40, 0x75, 0xe3, 0x34, 0x25, 0x24, 0x3c, 0x78, 0xed, 0x25, 0xec, 0x25, 0x76, 0x7d, 0xeb, 0x20, 0xd9, 0x26, 0x05, 0x84, 0x70, 0x1b, 0x52, 0x27, 0x1a, 0x85, 0xc9, 0x18, 0x6b, 0x44, 0x49, 0x90, 0xe9, 0x11, 0x90, 0x13, 0x56, 0x9c, 0x22, 0xcf, 0xcb, 0x10, 0x41, 0x9d, 0xd0, 0xca, 0xfd, 0x12, 0x57, 0x97, 0xca, 0xb9, 0x2f, 0x12, 0x3d, 0x94, 0x72, 0xad, 0x0f, 0x1b, 0xb7, 0x89, 0x96, 0x97, 0x93, 0x1d, 0xad, 0x82, 0x64, 0x85, 0x4d, 0x21, 0x62, 0x7b, 0x2f, 0x75, 0x3c, 0x26, 0xc1, 0x77, 0x80, 0x6c, 0xd7, 0x28, 0xac, 0x74, 0x35, 0x5f, 0x90, 0x29, 0xae, 0x70, 0x8a, 0x50, 0x87, 0x26, 0x0a, 0x6c, 0xd4, 0x3a, 0xf5, 0x24, 0xc7, 0x6b, 0x5f, 0x26, 0xbf, 0x28, 0x83, 0x71, 0x96, 0x21, 0xf2, 0x29, 0x60, 0x76, 0xc0, 0x1d, 0x04, 0x2d, 0xe2, 0x7c, 0xbe, 0x1c, 0x0d, 0x51, 0xce, 0x8c, 0xad, 0x14, 0xbd, 0x5d, 0xd1, 0x92, 0x16, 0x15, 0x67, 0x11, 0x0b, 0x92, 0xc6, 0xd1, 0xd6, 0x0c, 0xa5, 0x94, 0x42, 0xcb, 0xec, 0x0a, 0xff, 0x8d, 0x75, 0xb8, 0x89, 0x11, 0x40, 0x87, 0x2a, 0xa7, 0x4e, 0x15, 0x7c, 0x7e, 0xc3, 0x93, 0x01, 
0x1c, 0x40, 0x74, 0x81, 0x7d, 0x8a, 0x1e, 0x42, 0x6d, 0xa9, 0x6e, 0x35, 0x26, 0x09, 0x6a, 0x75, 0x62, 0xba, 0x28, 0xe0, 0x66, 0x9a, 0x53, 0x22, 0x27, 0xe8, 0x62, 0x01, 0x40, 0x98, 0x25, 0x74, 0x5e, 0x50, 0x2b, 0x1d, 0x27, 0xa7, 0x5d, 0xf9, 0x17, 0xf9, 0x32, 0xad, 0x68, 0xe2, 0x19, 0xca, 0x43, 0xfb, 0x72, 0xa5, 0x19, 0xcf, 0x53, 0xd1, 0x7f, 0x51, 0x18, 0x52, 0x67, 0x2d, 0x89, 0x5d, 0x17, 0x05, 0x6f, 0xa8, 0x95, 0x56, 0x1a, 0xd1, 0x0f, 0x48, 0x89, 0x8b, 0xd4, 0xa8, 0x0b, 0x54, 0x88, 0xc9, 0xce, 0x2e, 0x08, 0x2c, 0x86, 0xc9, 0xc2, 0xb2, 0x0f, 0x0d, 0x7a, 0x15, 0xa2, 0xab, 0x14, 0x85, 0x70, 0xaf, 0x8a, 0xc6, 0x1c, 0xd2, 0x68, 0x5d, 0x7a, 0x90, 0x26, 0x1a, 0x63, 0xb0, 0x6b, 0x7f, 0x2c, 0xde, 0x5f, 0xfa, 0x5d, 0x3d, 0x30, 0x74, 0x5c, 0x55, 0x4d, 0xc3, 0x31, 0xe5, 0x59, 0x22, 0x3b, 0x2c, 0x33, 0xcc, 0x58, 0x7f, 0x29, 0x23, 0x36, 0x69, 0x58, 0x9b, 0x16, 0xfa, 0x45, 0xf1, 0x64, 0x00, 0x17, 0x3c, 0x54, 0x1c, 0x70, 0x35, 0x18, 0xb0, 0x61, 0x5e, 0x7b, 0x71, 0x18, 0xdf, 0x6d, 0xe2, 0x87, 0xc4, 0x18, 0xe4, 0x7c, 0xb9, 0x95, 0x7e, 0x1a, 0xcf, 0x0b, 0xcd, 0x77, 0xc0, 0xd8, 0xca, 0x06, 0x30, 0x79, 0x43, 0xd2, 0x15, 0x06, 0x14, 0x79, 0x0e, 0xc2, 0xb4, 0x0a, 0x21, 0x6b, 0x8c, 0xa3, 0x48, 0x14, 0x96, 0x64, 0x37, 0x8b, 0x59, 0x24, 0x77, 0x5d, 0xd3, 0x78, 0x90, 0x2e, 0x67, 0x59, 0x19, 0x69, 0x0d, 0x35, 0xbe, 0x55, 0x3a, 0x59, 0x5d, 0x3a, 0x55, 0x52, 0x4b, 0x49, 0xc7, 0x3e, 0x70, 0x51, 0x63, 0x38, 0xec, 0x42, 0x46, 0x53, 0x26, 0x29, 0x3f, 0x46, 0x50, 0x56, 0x04, 0x19, 0x2d, 0x53, 0x22, 0x60, 0x8e, 0x16, 0x3f, 0x60, 0x4d, 0x6c, 0xf4, 0x18, 0xc6, 0x6d, 0xd8, 0x79, 0x24, 0x19, 0xa7, 0x7b, 0xb8, 0x84, 0x6e, 0x19, 0xf7, 0x88, 0x07, 0x91, 0xe2, 0x1c, 0x31, 0x13, 0xe9, 0x68, 0xd5, 0xd8, 0xf8, 0x0c, 0x79, 0x6a, 0xef, 0xd3, 0xce, 0x01, 0x63, 0x66, 0xfd, 0xc4, 0x40, 0x10, 0x2d, 0x5d, 0xaf, 0xa3, 0x4b, 0x21, 0xf0, 0x57, 0x94, 0x89, 0x2e, 0x2f, 0x77, 0x53, 0x44, 0x77, 0x56, 0x39, 0x3d, 0x4e, 0x86, 0x67, 0x25, 0x41, 0x3f, 0x4b, 0x35, 0x58, 0x31, 0x47, 0xd8, 0x48, 0xd3, 0x48, 0xf7, 0x4c, 0x5c, 0x4b, 
0x31, 0x39, 0x4f, 0x50, 0x68, 0x4d, 0x3b, 0x2a, 0x9d, 0x54, 0xe9, 0x51, 0x5a, 0x1c, 0x51, 0x5f, 0x50, 0x5c, 0x99, 0x19, 0x3a, 0x6c, 0x1d, 0x68, 0xb8, 0x19, 0x92, 0x78, 0x42, 0x74, 0xb9, 0x1b, 0x15, 0x85, 0x8b, 0x80, 0x40, 0x1b, 0xf4, 0x8f, 0x4d, 0x89, 0x9d, 0x1c, 0xf3, 0x2b, 0x8d, 0x30, 0x2f, 0xd1, 0x25, 0x15, 0x78, 0x57, 0xdb, 0xd5, 0x37, 0x03, 0x10, 0x52, 0xe2, 0xc7, 0xc7, 0x1a, 0x6e, 0x4e, 0xf2, 0xa7, 0x4e, 0x2e, 0x49, 0x4b, 0x56, 0x8c, 0x81, 0x3d, 0x4c, 0x47, 0x7b, 0x77, 0x8e, 0x47, 0x3e, 0x44, 0x73, 0x67, 0x5a, 0x4f, 0x09, 0x42, 0xa7, 0x58, 0xe4, 0x54, 0xa6, 0x41, 0xa4, 0x49, 0x99, 0x59, 0x71, 0x42, 0x22, 0x39, 0xd7, 0x5d, 0x4a, 0x44, 0x8b, 0x2c, 0x22, 0x61, 0xf7, 0x49, 0xb0, 0x1e, 0xed, 0x6b, 0x11, 0x54, 0xaa, 0x1a, 0xfb, 0x77, 0x8f, 0x61, 0x94, 0x1b, 0x8f, 0x86, 0x2f, 0x6f, 0xce, 0x1d, 0xaf, 0x90, 0xcf, 0x79, 0x50, 0x1e, 0x67, 0x97, 0xe3, 0x80, 0x1d, 0x1d, 0x94, 0x2b, 0xa2, 0x30, 0x1f, 0xd1, 0x2b, 0x2b, 0x8d, 0x30, 0x2e, 0xd1, 0x25, 0x15, 0x1b, 0x3f, 0x32, 0xca, 0xc0, 0x2a, 0x17, 0x3f, 0xb0, 0xad, 0x0c, 0x3e, 0xa8, 0x3e, 0x1d, 0x8b, 0x31, 0x4d, 0x88, 0x3c, 0x60, 0x79, 0x0b, 0x57, 0x2c, 0x3a, 0xb2, 0x6a, 0x2f, 0x5c, 0x17, 0x39, 0x6f, 0x5c, 0x8c, 0x61, 0x28, 0x37, 0x4f, 0x4c, 0x49, 0x65, 0x1c, 0x37, 0x35, 0x3b, 0x96, 0x68, 0xc4, 0x38, 0x94, 0x2c, 0xaa, 0x6c, 0xa0, 0x3c, 0x2e, 0x1f, 0x63, 0x75, 0x37, 0x48, 0xe1, 0x1b, 0xb6, 0x83, 0x1c, 0x57, 0x10, 0x1c, 0x27, 0x90, 0x6d, 0x65, 0x59, 0x1f, 0xbf, 0x97, 0xd7, 0x70, 0x33, 0x1f, 0x5d, 0xa2, 0x37, 0x77, 0x5c, 0x1e, 0xd8, 0x2b, 0xb9, 0x30, 0x0f, 0xd1, 0x32, 0x2b, 0xb9, 0x30, 0x0f, 0xd1, 0x32, 0x2f, 0x4e, 0x2f, 0x4e, 0xc9, 0xae, 0x3f, 0x3a, 0x2f, 0x72, 0xb3, 0x8f, 0x53, 0x0c, 0x32, 0xa0, 0x92, 0x2e, 0x5e, 0x1d, 0x32, 0xfb, 0x7e, 0xb1, 0x64, 0x0e, 0x31, 0x8f, 0x6f, 0xc3, 0x68, 0xcb, 0x2f, 0x9c, 0x60, 0xc5, 0x6c, 0xeb, 0x2d, 0x1f, 0x4f, 0xb5, 0x70, 0x51, 0x2b, 0x74, 0x3e, 0x2b, 0x73, 0x6a, 0x2a, 0x44, 0x2d, 0x07, 0x76, 0xc0, 0x2c, 0xc5, 0x1e, 0x6b, 0x7e, 0xa3, 0x38, 0xee, 0x1b, 0x3a, 0x8c, 0x80, 0x47, 0xb8, 0x1c, 0x9e, 
0x97, 0x2e, 0x55, 0x12, 0x1e, 0x99, 0xa1, 0x2b, 0x62, 0x3a, 0x1f, 0xd5, 0xab, 0xe0, 0x6f, 0xca, 0x1d, 0x4c, 0x2b, 0xd0, 0x30, 0x00, 0xd1, 0x38, 0x2c, 0x85, 0x2f, 0x0f, 0xd0, 0x96, 0x45, 0x7a, 0x1e, 0x75, 0xd9, 0x88, 0x58, 0x83, 0x22, 0x81, 0xbc, 0xaf, 0x65, 0xf9, 0x27, 0x09, 0x9e, 0xdc, 0x6e, 0xab, 0x29, 0x44, 0x88, 0x4d, 0x72, 0xac, 0x27, 0xa9, 0x77, 0xc0, 0x76, 0x9a, 0x25, 0x5c, 0x67, 0x02, 0x79, 0xcf, 0x23, 0x34, 0x54, 0xfe, 0x7b, 0x89, 0x1f, 0x86, 0x42, 0xac, 0x7d, 0x01, 0x1b, 0xc4, 0x2f, 0xde, 0x82, 0xea, 0x20, 0x62, 0x23, 0x40, 0x89, 0x8d, 0x26, 0x71, 0x1b, 0x4b, 0x94, 0x80, 0x36, 0x8d, 0x19, 0xae, 0x9f, 0x34, 0x48, 0xd5, 0x1d, 0x1d, 0xaa, 0xea, 0x54, 0x32, 0x1d, 0x54, 0xad, 0x3e, 0x5d, 0xaa, 0x1d, 0xb6, 0x2c, 0x85, 0x2f, 0x10, 0xd0, 0x96, 0x2c, 0xb2, 0x2e, 0xf2, 0xd0, 0xa4, 0x6c, 0x87, 0x1d, 0xfd, 0xd8, 0x57, 0x73, 0xdb, 0x17, 0x07, 0xc4, 0xf3, 0x7f, 0x58, 0x1d, 0x3f, 0xa4, 0xb8, 0x85, 0x0d, 0x21, 0x0b, 0x8d, 0xd0, 0x82, 0xd6, 0x1d, 0x4c, 0x80, 0x5e, 0x85, 0xa6, 0x1a, 0xc8, 0x6f, 0x86, 0x88, 0x20, 0x19, 0xa3, 0x5c, 0x05, 0x8a, 0xf0, 0x1a, 0x83, 0x4b, 0xcf, 0x8d, 0xf4, 0x1c, 0x3c, 0x3d, 0x4a, 0x93, 0x42, 0x1d, 0xb1, 0x2c, 0x27, 0x99, 0xc9, 0x20, 0x3d, 0x22, 0x9a, 0xa4, 0xf2, 0x23, 0x12, 0x1e, 0xe7, 0xb0, 0x2c, 0x30, 0x84, 0x1c, 0x22, 0xb0, 0x4d, 0x46, 0xc0, 0x1c, 0x88, 0xb0, 0x36, 0x53, 0x1e, 0x1c, 0x49, 0x2c, 0x9b, 0x2f, 0x01, 0xd0, 0x9c, 0x7b, 0xed, 0x21, 0xb8, 0xdb, 0x3a, 0x88, 0xd0, 0x1f, 0xfe, 0xd5, 0x26, 0x95, 0x2a, 0x1c, 0x64, 0xcd, 0x3f, 0x9d, 0xaa, 0x1a, 0x5b, 0xbf, 0x69, 0x9d, 0xd4, 0x1d, 0x45, 0xaa, 0x8b, 0x9b, 0xf6, 0x1e, 0x7c, 0x94, 0x28, 0x9d, 0x36, 0x1b, 0x6f, 0x7e, 0x6e, 0x9c, 0x0f, 0x1d, 0x8b, 0x69, 0x21, 0x9e, 0xf1, 0x1d, 0x11, 0x58, 0x87, 0xa1, 0xdb, 0x1b, 0xca, 0x47, 0xa1, 0xa3, 0x1a, 0x1a, 0xff, 0x36, 0xff, 0xa9, 0x94, 0x1a, 0xe2, 0x24, 0x90, 0xbb, 0xec, 0x1e, 0x34, 0x20, 0xb1, 0xc3, 0x61, 0x25, 0x5f, 0x23, 0x4f, 0xc5, 0x7a, 0x28, 0x72, 0x24, 0x7b, 0xc6, 0x9f, 0x2a, 0x1e, 0x25, 0x1d, 0x86, 0x29, 0x23, 0x7e, 0xdb, 0x3b, 0x90, 0x79, 0x22, 
0xd8, 0xd7, 0x95, 0x9b, 0x12, 0x21, 0xf6, 0xd4, 0xa3, 0xa6, 0x13, 0x1e, 0x75, 0xcf, 0xa5, 0xb3, 0xc8, 0x1c, 0x99, 0xc9, 0xf2, 0xba, 0x0f, 0x1c, 0xe6, 0xc1, 0xc5, 0xb3, 0x34, 0x1e, 0x0c, 0xa5, 0xf0, 0xb6, 0x58, 0x1c, 0xde, 0x92, 0xe6, 0xb4, 0x66, 0x1c, 0xe8, 0x79, 0x42, 0xb1, 0x1c, 0x1d, 0x20, 0x63, 0x2f, 0xb0, 0x4e, 0x1b, 0xeb, 0x51, 0x23, 0xaf, 0xde, 0x1a, 0x83, 0x40, 0xff, 0xbb, 0xf2, 0x12, 0x2d, 0x26, 0xdd, 0xc5, 0x67, 0x21, 0x71, 0x26, 0x5c, 0xc6, 0xee, 0x25, 0x9a, 0x26, 0x95, 0xc7, 0xbf, 0x27, 0xe7, 0x26, 0xb9, 0xc8, 0x41, 0x29, 0x5c, 0x26, 0xd0, 0x94, 0x35, 0x24, 0x87, 0xd9, 0xb8, 0x9c, 0xf5, 0x23, 0xca, 0xd6, 0xfe, 0xa8, 0x7a, 0x22, 0x13, 0xd4, 0x51, 0xba, 0x17, 0x1d, 0x65, 0xd2, 0xcf, 0xbd, 0x13, 0x1d, 0x87, 0xce, 0x1c, 0xc5, 0xce, 0x1f, 0x81, 0xc4, 0x8c, 0xc7, 0x07, 0x1e, 0xd9, 0xb5, 0x11, 0xc6, 0xf8, 0x1d, 0xf6, 0xa0, 0x07, 0xc5, 0xa4, 0x1c, 0x61, 0x85, 0x03, 0xc4, 0x50, 0x1c, 0x27, 0x71, 0xd7, 0xc4, 0x13, 0x1a, 0x34, 0x5e, 0x30, 0xc5, 0x1b, 0x1b, 0x08, 0x4e, 0xca, 0xc5, 0xde, 0x17, 0x28, 0x32, 0x6c, 0xc8, 0x5b, 0x22, 0xe0, 0x28, 0xd0, 0xc8, 0xdd, 0x25, 0xbe, 0x28, 0x67, 0xc9, 0x97, 0x2a, 0x17, 0x29, 0x3f, 0xc9, 0xd4, 0x29, 0xb0, 0x28, 0x00, 0x1d, 0x3d, 0xa9, 0x0d, 0xb2, 0xce, 0x1f, 0xd3, 0xa7, 0x79, 0xab, 0x7d, 0x1f, 0xe7, 0xa6, 0x9a, 0xa9, 0x29, 0x21, 0x53, 0xa5, 0x3b, 0xa3, 0x6e, 0x24, 0xc3, 0xa1, 0xa9, 0x9b, 0x60, 0x23, 0xce, 0xa2, 0x71, 0x95, 0x49, 0x26, 0x82, 0xa0, 0xbd, 0x90, 0x38, 0x26, 0xb6, 0xa0, 0x4d, 0x8d, 0xc8, 0x28, 0xa0, 0x96, 0x83, 0x7a, 0xa1, 0x29, 0x58, 0x9a, 0xae, 0x75, 0x11, 0x2a, 0xff, 0x91, 0x39, 0x6a, 0x56, 0x24, 0xa9, 0x9f, 0x00, 0x62, 0x40, 0x24, 0x12, 0x9e, 0x77, 0x5e, 0xa3, 0x1b, 0x73, 0x93, 0xb2, 0x27, 0x57, 0x1b, 0xbb, 0x94, 0x13, 0x21, 0xc8, 0x1c, 0xb2, 0x9c, 0x41, 0x1d, 0x7a, 0x20, 0x23, 0xa0, 0xd7, 0x1d, 0x0f, 0x1a, 0xcf, 0xa8, 0x99, 0xbf, 0xe0, 0x1c, 0x77, 0xa7, 0x65, 0xb3, 0x21, 0x1f, 0x53, 0xa5, 0x3b, 0xa9, 0x75, 0x1f, 0xbe, 0xa3, 0x9f, 0xa5, 0xdb, 0x22, 0xea, 0xa1, 0x7f, 0x9d, 0x66, 0x24, 0xf1, 0x9e, 0x18, 0x96, 0x6f, 
0x23, 0xca, 0x9e, 0xbf, 0x8f, 0x63, 0x26, 0x9c, 0x9b, 0x79, 0x8a, 0x2b, 0x28, 0x4e, 0x91, 0x7e, 0x77, 0xa4, 0x28, 0x83, 0x91, 0xeb, 0x6f, 0x5a, 0x28, 0x6f, 0x8d, 0xd8, 0x64, 0xfc, 0x22, 0xa5, 0x95, 0xb7, 0x5c, 0x19, 0x20, 0x04, 0x90, 0x2b, 0x44, 0x9b, 0x1e, 0x6c, 0x8a, 0x75, 0x25, 0x06, 0x1d, 0x31, 0x90, 0xe1, 0x1e, 0x62, 0x1e, 0x51, 0x91, 0xc0, 0x19, 0xe5, 0x20, 0x84, 0x99, 0x75, 0x18, 0xbe, 0x1a, 0x2c, 0xa7, 0x50, 0xc0, 0xbf, 0x19, 0xd3, 0xa6, 0x49, 0xbd, 0xc0, 0x19, 0xe4, 0xa4, 0xfc, 0xb4, 0xc9, 0x1c, 0x26, 0xa2, 0x7c, 0xa7, 0x68, 0x20, 0xbe, 0x9e, 0x6d, 0x9f, 0xf0, 0x24, 0x68, 0x9b, 0x5f, 0x95, 0x4d, 0x24, 0xe7, 0x98, 0x21, 0x8e, 0x62, 0x27, 0x87, 0x92, 0xea, 0x82, 0xa4, 0x29, 0xf8, 0x89, 0x5d, 0x73, 0x6c, 0x28, 0x1e, 0x8b, 0x8a, 0x67, 0xac, 0x28, 0x94, 0x8a, 0x2d, 0x60, 0x15, 0x20, 0xec, 0x85, 0xac, 0x49, 0x95, 0x20, 0xe2, 0x83, 0x34, 0x29, 0xf1, 0x21, 0xd6, 0x88, 0x4d, 0x23, 0x67, 0x23, 0x0c, 0x89, 0x0f, 0x1e, 0x6c, 0x20, 0x91, 0x8c, 0x52, 0x14, 0x9e, 0x22, 0xa5, 0x8f, 0xc6, 0x13, 0x5a, 0x13, 0x25, 0xa7, 0x50, 0xd2, 0xe0, 0x14, 0x48, 0xa4, 0x11, 0xc1, 0x45, 0x17, 0xf5, 0xa1, 0xe8, 0xb9, 0xae, 0x17, 0x48, 0x9e, 0x79, 0xae, 0xda, 0x1c, 0x55, 0x9a, 0xd5, 0xa0, 0xb2, 0x22, 0xcb, 0x94, 0x18, 0x93, 0x9f, 0x23, 0x0e, 0x8d, 0x8c, 0x86, 0x0b, 0x25, 0x91, 0x82, 0x69, 0x74, 0xf0, 0x28, 0xd9, 0x83, 0x1a, 0x6d, 0x23, 0x2a, 0x1b, 0x80, 0x9f, 0x60, 0x3c, 0x27, 0xd1, 0x7d, 0x5a, 0x4e, 0x02, 0x20, 0xa2, 0x79, 0xa9, 0x38, 0x2f, 0x23, 0x77, 0x7c, 0x67, 0x25, 0xf7, 0x25, 0x78, 0x81, 0x4a, 0x1f, 0x70, 0x26, 0x55, 0x85, 0x94, 0x19, 0xfe, 0x27, 0xcb, 0x87, 0x3c, 0x17, 0x8c, 0x45, 0x19, 0x93, 0x3c, 0x11, 0x38, 0x14, 0xe7, 0x9f, 0xc8, 0xd2, 0xb8, 0x11, 0x14, 0xa2, 0x0d, 0xce, 0xdf, 0x12, 0x90, 0x9b, 0xaa, 0xbd, 0xb1, 0x11, 0xed, 0x98, 0x71, 0xb1, 0xc7, 0x1b, 0x79, 0x8d, 0x91, 0x9b, 0xbb, 0x1c, 0xcb, 0x86, 0xc9, 0x8b, 0xc7, 0x21, 0xe7, 0x7d, 0x12, 0x79, 0x61, 0x26, 0xc3, 0x79, 0x7d, 0x6e, 0x41, 0x28, 0xa7, 0x76, 0xff, 0x62, 0x7d, 0x28, 0x70, 0x73, 0x4d, 0x52, 0x4f, 0x25, 0x7c, 0x70, 
0x38, 0x3d, 0xf2, 0x24, 0xf1, 0x6e, 0x7a, 0x2a, 0x29, 0x29, 0x49, 0x73, 0x9b, 0x20, 0xa7, 0x2d, 0xdb, 0x7a, 0x0a, 0x1b, 0xbd, 0x31, 0xaa, 0x80, 0xe3, 0x19, 0xaa, 0x54, 0x51, 0x8e, 0xdc, 0x14, 0xb8, 0x60, 0xc0, 0x94, 0x1d, 0x15, 0x7e, 0x13, 0x44, 0x96, 0x4f, 0xd4, 0x94, 0x0f, 0x8e, 0x98, 0x68, 0xcf, 0xcb, 0x0f, 0xf9, 0x95, 0xa0, 0xc6, 0x1b, 0x10, 0xd8, 0x8b, 0x6d, 0xad, 0x3e, 0x14, 0xe2, 0x83, 0x8b, 0x98, 0xd2, 0x1d, 0xbd, 0x7a, 0xd4, 0x85, 0x05, 0x27, 0x44, 0x76, 0x02, 0x77, 0xb5, 0x2c, 0x46, 0x71, 0xc0, 0x6a, 0x1c, 0x2f, 0x86, 0x6d, 0xe2, 0x5b, 0xd8, 0x2f, 0xad, 0x6a, 0x01, 0x4a, 0x15, 0x2f, 0x25, 0x67, 0xce, 0x36, 0x28, 0x30, 0x89, 0x67, 0x2e, 0x22, 0x3b, 0x38, 0xb3, 0x6c, 0x50, 0x18, 0xb3, 0x46, 0x63, 0x75, 0xe0, 0x19, 0x35, 0x55, 0xcd, 0x81, 0x15, 0x18, 0x29, 0x6b, 0x35, 0x8e, 0xdc, 0x17, 0x4c, 0x6f, 0x61, 0x97, 0xaa, 0x18, 0xc2, 0x12, 0xc0, 0x8b, 0x98, 0xd7, 0x1c, 0x0e, 0xf0, 0x8d, 0x6d, 0xd2, 0x88, 0x09, 0x33, 0x8b, 0xfe, 0xc8, 0x74, 0x0b, 0x82, 0x82, 0x53, 0xaf, 0x0a, 0x1a, 0x5b, 0x77, 0x5c, 0x94, 0x94, 0x25, 0x3d, 0x71, 0x06, 0x82, 0xd8, 0x2d, 0x13, 0x6c, 0x00, 0x74, 0x8b, 0x33, 0x71, 0x67, 0x5e, 0x64, 0xe8, 0x37, 0x8e, 0x63, 0x76, 0x56, 0x46, 0x38, 0xc6, 0x60, 0xa5, 0x44, 0x81, 0x3b, 0xac, 0x60, 0x77, 0x31, 0xc3, 0x3f, 0x5d, 0x61, 0x85, 0x20, 0x67, 0x48, 0x6d, 0x67, 0xff, 0x17, 0xb5, 0x57, 0xcf, 0x74, 0x2a, 0x18, 0xf4, 0x64, 0x9f, 0x7f, 0xf0, 0x18, 0x9b, 0x71, 0xf3, 0x8c, 0x0e, 0x19, 0x6d, 0x7e, 0x92, 0x96, 0xb2, 0x1b, 0x0a, 0x0f, 0x7b, 0x7c, 0x5f, 0xdc, 0x25, 0x0e, 0xe1, 0x80, 0xef, 0xd6, 0x06, 0x07, 0x11, 0x7f, 0x2a, 0xc9, 0xba, 0x0c, 0xef, 0x73, 0x39, 0xad, 0xe9, 0x23, 0x64, 0x6c, 0xbb, 0x94, 0x85, 0x2d, 0x69, 0x66, 0xb1, 0x82, 0xb7, 0x36, 0x99, 0x61, 0x34, 0x72, 0x16, 0x3d, 0x7b, 0x5d, 0x31, 0x62, 0x12, 0x42, 0x76, 0x5a, 0x69, 0x52, 0x99, 0x47, 0x2b, 0x59, 0xfb, 0x42, 0x13, 0x4b, 0x47, 0x5b, 0xa6, 0x32, 0x0d, 0x4f, 0x2e, 0x5e, 0x84, 0x22, 0x6a, 0x56, 0x08, 0x63, 0xc4, 0x16, 0xe9, 0x65, 0x37, 0x70, 0xcb, 0x19, 0x60, 0x71, 0xa2, 0x7c, 0x5e, 0x19, 0xda, 
0x7d, 0xe3, 0x86, 0xfc, 0x19, 0xe3, 0x8a, 0x25, 0x94, 0xb4, 0x1c, 0x85, 0x17, 0x41, 0x6d, 0xb8, 0xdb, 0x8d, 0x10, 0xfa, 0x70, 0x73, 0xd9, 0x2a, 0x00, 0x00, 0x6f, 0x82, 0xd1, 0x73, 0x16, 0x5e, 0x69, 0x45, 0xb1, 0xe2, 0x2a, 0xa2, 0x61, 0x27, 0x97, 0x49, 0x38, 0x85, 0x5b, 0x6b, 0x81, 0x47, 0x42, 0xa1, 0x57, 0x09, 0x70, 0x9c, 0x4a, 0xa9, 0x54, 0x14, 0x61, 0x54, 0x51, 0x08, 0x52, 0x03, 0x52, 0x2d, 0x55, 0xc5, 0x53, 0xf0, 0x42, 0xd3, 0x59, 0xc6, 0x56, 0xf2, 0x33, 0x9b, 0x5e, 0x00, 0x5b, 0x26, 0x25, 0xad, 0x61, 0xc2, 0x5f, 0xec, 0x18, 0x8d, 0x6f, 0x6d, 0x6c, 0x47, 0x19, 0xd8, 0x7c, 0x6d, 0x79, 0x0e, 0x1b, 0x54, 0x88, 0x10, 0x82, 0x47, 0x1b, 0x6d, 0x96, 0x48, 0x90, 0x7d, 0x1e, 0x71, 0x1d, 0x61, 0x5b, 0x89, 0xdb, 0x6b, 0x19, 0xc1, 0x5d, 0xde, 0xda, 0x2c, 0x0e, 0xbe, 0x60, 0x86, 0xd4, 0x2b, 0x25, 0x87, 0x59, 0x8d, 0xb3, 0xde, 0x39, 0x12, 0x54, 0xa6, 0x95, 0x04, 0x47, 0xde, 0x50, 0x76, 0x81, 0x65, 0x51, 0x7e, 0x4d, 0x94, 0x71, 0x09, 0x58, 0xc6, 0x4b, 0xfd, 0x62, 0x0e, 0x5e, 0x2a, 0x4b, 0x47, 0x52, 0xa3, 0x62, 0xb2, 0x4b, 0x6d, 0x43, 0x23, 0x66, 0xad, 0x4e, 0xd4, 0x34, 0xeb, 0x6a, 0x49, 0x52, 0x4f, 0x27, 0x40, 0x6d, 0x9f, 0x58, 0x6e, 0x1b, 0x0a, 0x7a, 0x25, 0x64, 0x28, 0x1b, 0x4f, 0x89, 0x3b, 0x72, 0xde, 0x1d, 0xe4, 0x93, 0x8a, 0x7c, 0x6b, 0x1e, 0x79, 0x9d, 0x13, 0x85, 0x29, 0x1e, 0xc7, 0x2b, 0xb0, 0x30, 0x28, 0xd1, 0x36, 0x2a, 0xef, 0x31, 0xd4, 0xd1, 0xc5, 0x20, 0xba, 0x4a, 0xbc, 0xd9, 0x54, 0x34, 0xfc, 0x4b, 0x22, 0xb6, 0x6c, 0x4b, 0x9a, 0x48, 0x2c, 0x95, 0x61, 0x58, 0xad, 0x46, 0x06, 0x82, 0x9c, 0x60, 0xc9, 0x44, 0x8c, 0x73, 0x79, 0x65, 0x53, 0x43, 0x59, 0x65, 0x86, 0x69, 0xff, 0x41, 0x6b, 0x54, 0xbd, 0x6e, 0x40, 0x40, 0xe4, 0x44, 0x88, 0x72, 0x25, 0x43, 0x54, 0x35, 0x85, 0x75, 0x45, 0x46, 0x91, 0x28, 0x0d, 0x78, 0xa7, 0x4c, 0xae, 0x1a, 0xaf, 0x85, 0x9b, 0x5a, 0x3a, 0x1c, 0xd7, 0x93, 0x66, 0x68, 0x87, 0x1f, 0xa9, 0x9c, 0x67, 0x74, 0x23, 0x1f, 0x29, 0xa6, 0xb0, 0x7b, 0x9a, 0x1f, 0x83, 0x2b, 0xc6, 0x30, 0x19, 0xd1, 0x3c, 0x2b, 0xd3, 0x30, 0x23, 0xd1, 0x49, 0x39, 0x52, 0x37, 
0x10, 0xe0, 0xc4, 0x4c, 0xb3, 0x38, 0x23, 0xbe, 0x99, 0x5f, 0xdd, 0x3b, 0xa8, 0x9a, 0xb7, 0x69, 0x52, 0x3b, 0xf8, 0x87, 0x10, 0x6d, 0x9f, 0x3a, 0xc4, 0x77, 0xfe, 0x71, 0x90, 0x38, 0xff, 0x69, 0x4e, 0x75, 0xf6, 0x36, 0x82, 0x58, 0x20, 0x79, 0x37, 0x34, 0xa7, 0x46, 0xb7, 0x7c, 0x33, 0x34, 0x0f, 0x35, 0x9e, 0x7f, 0x4c, 0x36, 0xce, 0x27, 0x75, 0x81, 0xca, 0x3a, 0x48, 0x19, 0x2d, 0x90, 0x2b, 0x4b, 0xe8, 0x1d, 0x4a, 0x99, 0xd5, 0x5b, 0x39, 0x20, 0x58, 0xa2, 0xee, 0x65, 0xad, 0x1f, 0x8c, 0xad, 0x2a, 0x72, 0xc1, 0x1d, 0x84, 0x2b, 0x8b, 0x30, 0x72, 0xcd, 0xab, 0x2c, 0x9d, 0x2f, 0x23, 0xd0, 0xae, 0x53, 0x42, 0x25, 0xe6, 0xe9, 0x5a, 0x66, 0x89, 0x29, 0xee, 0xc7, 0xe4, 0x74, 0x1d, 0x30, 0x7c, 0xa4, 0x87, 0x79, 0xc2, 0x32, 0x7f, 0x8f, 0xe0, 0x7c, 0x00, 0x31, 0x2b, 0x7f, 0xb0, 0x80, 0x16, 0x2e, 0x3f, 0x6e, 0xcc, 0x82, 0xc7, 0x2b, 0xd6, 0x5c, 0xbf, 0x84, 0xd1, 0x29, 0x4d, 0x4a, 0xd3, 0x87, 0x19, 0x26, 0xdb, 0x39, 0x04, 0x89, 0x2e, 0x25, 0xb9, 0x27, 0xcb, 0x8b, 0xd1, 0x28, 0x3f, 0x18, 0x71, 0x98, 0x86, 0x3b, 0x11, 0x1a, 0x13, 0xa5, 0x32, 0x4d, 0x46, 0x1d, 0x76, 0xac, 0x78, 0x55, 0x8e, 0x1d, 0x34, 0xae, 0xd8, 0x63, 0x08, 0x1d, 0x85, 0x2c, 0x91, 0x2f, 0x18, 0xd0, 0xa2, 0x2c, 0xcb, 0x2f, 0x05, 0xd0, 0xbc, 0x76, 0x57, 0x22, 0xe8, 0xdd, 0xdf, 0x81, 0x11, 0x1e, 0xbb, 0xd0, 0x5f, 0x8a, 0x80, 0x24, 0xe7, 0xb1, 0xfe, 0x8e, 0x1e, 0x28, 0x41, 0x99, 0xfa, 0x8c, 0xdf, 0x25, 0xa8, 0x89, 0x85, 0x8f, 0x5e, 0x23, 0x9a, 0x77, 0x86, 0x8f, 0xd1, 0x20, 0xd7, 0x62, 0x16, 0x91, 0x19, 0x1e, 0x55, 0x50, 0x03, 0x91, 0x64, 0x1c, 0x09, 0x3e, 0x7a, 0x97, 0xa0, 0x1d, 0xaf, 0x2f, 0x4d, 0x9e, 0x8e, 0x1f, 0x83, 0x21, 0xd2, 0xab, 0x19, 0x26, 0xfd, 0x1e, 0xf8, 0xb7, 0x22, 0x36, 0xf8, 0x1b, 0x7e, 0xbf, 0x74, 0x44, 0x0b, 0x1b, 0xd8, 0xc3, 0x8f, 0x55, 0x6a, 0x1e, 0x0c, 0x2c, 0xa8, 0x2f, 0x09, 0xd0, 0xa9, 0x82, 0x30, 0x24, 0x4f, 0xdd, 0x8d, 0x8f, 0x5d, 0x23, 0x36, 0xd8, 0x69, 0x9d, 0x19, 0x20, 0xc4, 0xd2, 0x62, 0xa4, 0x96, 0x1a, 0xe4, 0xc5, 0xa2, 0xa3, 0x08, 0x1d, 0xe3, 0xad, 0xd9, 0xa4, 0x56, 0x1d, 0x25, 0x99, 0xf7, 
0xa3, 0x99, 0x1c, 0x23, 0x82, 0x3d, 0xa4, 0x94, 0x1d, 0x59, 0x6d, 0xe4, 0xa4, 0xcf, 0x1d, 0x4e, 0x5b, 0xa7, 0xa5, 0x7c, 0x1c, 0xc9, 0x4a, 0x6a, 0xaa, 0x9a, 0x19, 0xe7, 0x39, 0xe3, 0xaf, 0x96, 0x18, 0xc8, 0x25, 0x2c, 0xc1, 0x4d, 0x20, 0xcc, 0x22, 0x98, 0xc5, 0x2a, 0x27, 0x07, 0x24, 0x8a, 0xc6, 0xbb, 0x29, 0x9b, 0x25, 0x57, 0xc6, 0xc3, 0x43, 0x89, 0x21, 0xa8, 0x88, 0xa4, 0x25, 0x13, 0xdc, 0xf9, 0x94, 0x2d, 0x25, 0x30, 0xda, 0xb1, 0x9f, 0x80, 0x24, 0x4f, 0xd6, 0xf0, 0xa9, 0x94, 0x21, 0x42, 0xd3, 0x5c, 0xb8, 0x06, 0x1c, 0xc7, 0xce, 0x9f, 0xbf, 0x75, 0x1d, 0x4c, 0xc4, 0xda, 0xbe, 0xa1, 0x1e, 0x60, 0xad, 0x89, 0xc0, 0x27, 0x1c, 0xac, 0x97, 0x9a, 0xc0, 0x36, 0x1b, 0xfe, 0x7f, 0x5f, 0xbe, 0x21, 0x1b, 0xfe, 0x6b, 0x46, 0xbe, 0x3c, 0x1a, 0x26, 0x58, 0x11, 0xbc, 0x8e, 0x17, 0x31, 0x43, 0x65, 0xc2, 0x5a, 0x13, 0xdb, 0x29, 0x32, 0xc7, 0x31, 0x23, 0x14, 0x27, 0x9a, 0xc8, 0x30, 0x26, 0xc3, 0x27, 0x74, 0xc8, 0xb7, 0x28, 0xcd, 0x27, 0x64, 0xc8, 0x30, 0x2d, 0xd3, 0x29, 0xef, 0x98, 0xc1, 0x25, 0xdf, 0xda, 0xcb, 0x9e, 0xd1, 0x25, 0x2f, 0xd8, 0xb1, 0xaa, 0x96, 0x23, 0xc6, 0xd6, 0x7a, 0xbd, 0xc8, 0x1e, 0xcb, 0xd7, 0x64, 0xc2, 0xc6, 0x1d, 0xe3, 0xd1, 0xcf, 0xc8, 0x75, 0x1f, 0xd8, 0xc7, 0x20, 0xc9, 0x99, 0x1f, 0x1c, 0xb6, 0xc3, 0xc9, 0xbe, 0x1e, 0x21, 0xa1, 0xa9, 0xc8, 0x49, 0x1c, 0x84, 0x86, 0x37, 0xc6, 0xf4, 0x1c, 0x24, 0x72, 0xe0, 0xc7, 0x1c, 0x1c, 0x66, 0x60, 0x22, 0xc7, 0xe2, 0x1d, 0x19, 0x50, 0x6e, 0xc9, 0x92, 0x19, 0xc5, 0x34, 0x4d, 0xca, 0x29, 0x24, 0xf6, 0x29, 0xb7, 0xca, 0x50, 0x27, 0x84, 0x29, 0x16, 0xca, 0x66, 0x29, 0x27, 0x28, 0xb5, 0xca, 0x74, 0x2a, 0x48, 0x28, 0x73, 0x1d, 0xc6, 0xaa, 0xa5, 0xb4, 0xea, 0x20, 0x32, 0xa9, 0x5f, 0xad, 0x74, 0x20, 0x4f, 0xa8, 0xc4, 0xab, 0x67, 0x21, 0x2e, 0xa8, 0xc7, 0xa8, 0x57, 0x24, 0x86, 0xa4, 0x20, 0x9d, 0xc6, 0x23, 0xa4, 0xa4, 0xc0, 0x97, 0x87, 0x26, 0xc4, 0xa3, 0x41, 0x92, 0x4e, 0x26, 0xf4, 0xa2, 0xd4, 0x8f, 0xdb, 0x28, 0x30, 0xa3, 0x71, 0x85, 0xc8, 0x29, 0xac, 0xa3, 0x23, 0x7d, 0x2c, 0x28, 0x81, 0x9a, 0xfd, 0x6f, 0xb5, 0x25, 0x15, 0xa1, 
0x0d, 0x63, 0x6e, 0x24, 0x7f, 0xa0, 0x84, 0x5f, 0xcc, 0x15, 0x21, 0x97, 0x73, 0x23, 0xb4, 0x1a, 0xe8, 0x9f, 0x47, 0x20, 0xe5, 0x20, 0x26, 0xa6, 0xdc, 0x20, 0xa2, 0x23, 0xad, 0xb3, 0x71, 0x21, 0x25, 0x1b, 0x95, 0xaa, 0x6e, 0xc1, 0xe6, 0x1d, 0x2c, 0xa9, 0x6f, 0xb5, 0xb5, 0x1f, 0xd3, 0xa7, 0xb9, 0xac, 0x0b, 0x1f, 0xee, 0xa6, 0x93, 0xa8, 0xf2, 0x22, 0x6e, 0xa4, 0x8d, 0xa0, 0x7f, 0x24, 0xad, 0xa0, 0xeb, 0x99, 0x29, 0x23, 0xa0, 0xa1, 0x51, 0x91, 0xe2, 0x26, 0x79, 0x9e, 0x4b, 0x8c, 0x9c, 0x28, 0x8d, 0x95, 0x89, 0x7a, 0xa6, 0x28, 0xcf, 0x95, 0xa3, 0x71, 0xb9, 0x24, 0xf6, 0x8e, 0xae, 0x65, 0x79, 0x23, 0xc3, 0x9b, 0xf4, 0x5e, 0xc4, 0x16, 0x79, 0x98, 0x30, 0x3d, 0x92, 0x1b, 0xda, 0x92, 0xd8, 0x24, 0xf6, 0x19, 0x0f, 0x93, 0x84, 0x1a, 0xfd, 0x1f, 0xbc, 0x9d, 0x8b, 0x1b, 0xb2, 0x23, 0x69, 0xa3, 0x01, 0x1b, 0xfc, 0x1b, 0x10, 0xa9, 0x63, 0xc3, 0x0a, 0x1a, 0xd5, 0xa8, 0xaf, 0xc0, 0x67, 0x1a, 0xf5, 0xa7, 0xcd, 0xb8, 0x26, 0x1c, 0x79, 0xa5, 0xb0, 0xab, 0x54, 0x20, 0x30, 0xa1, 0xfa, 0xa3, 0xb6, 0x24, 0x09, 0x9e, 0xae, 0x98, 0x9b, 0x24, 0xb8, 0x9a, 0xee, 0x91, 0x20, 0x27, 0x53, 0x96, 0x4f, 0x85, 0xd2, 0x29, 0x57, 0x8b, 0xcb, 0x75, 0x90, 0x28, 0x02, 0x8d, 0x6d, 0x69, 0xe2, 0x23, 0xc1, 0x8a, 0xbd, 0x5e, 0x65, 0x20, 0xe9, 0x88, 0xb8, 0x4a, 0xd7, 0x20, 0x1d, 0x89, 0xbc, 0x2f, 0x42, 0x1f, 0x9a, 0x87, 0xe1, 0x21, 0x3a, 0x1f, 0x0b, 0x8e, 0x4c, 0x18, 0x93, 0x21, 0x81, 0x90, 0xf8, 0x15, 0x7a, 0x24, 0xbe, 0x9a, 0xd5, 0x16, 0xae, 0x14, 0xba, 0xa9, 0xae, 0xd5, 0x96, 0x11, 0x8a, 0xac, 0x22, 0xd2, 0xcb, 0x19, 0x66, 0xa5, 0x60, 0xbd, 0x8f, 0x18, 0xf8, 0xa2, 0xee, 0xb4, 0x03, 0x1b, 0x95, 0x9e, 0x8a, 0xa5, 0x80, 0x22, 0x61, 0x97, 0x86, 0x97, 0x3f, 0x26, 0x95, 0x8d, 0x14, 0x88, 0x75, 0x27, 0xcd, 0x85, 0xd8, 0x78, 0xf9, 0x29, 0xe9, 0x83, 0x1a, 0x6d, 0xd1, 0x28, 0xaa, 0x83, 0x2a, 0x62, 0xdf, 0x27, 0x59, 0x7e, 0xf7, 0x50, 0x3c, 0x20, 0xf2, 0x7d, 0x5e, 0x3a, 0xa5, 0x22, 0xa3, 0x7f, 0x56, 0x26, 0x75, 0x24, 0xd3, 0x85, 0x2a, 0x1d, 0x42, 0x26, 0xc9, 0x87, 0x33, 0x18, 0x1d, 0x26, 0x60, 0x8a, 0x0c, 0x10, 0x03, 
0x46, 0x08, 0x95, 0xef, 0x10, 0xca, 0x16, 0x77, 0xa3, 0x7c, 0xd5, 0xb1, 0x13, 0x52, 0xa5, 0xfb, 0xd2, 0x98, 0x10, 0x08, 0xa3, 0x97, 0xcb, 0x50, 0x12, 0x1a, 0x9d, 0x8f, 0xb7, 0xcc, 0x17, 0x51, 0x95, 0x1f, 0xa5, 0x67, 0x1d, 0x50, 0x89, 0xa7, 0x8f, 0x10, 0x25, 0xf6, 0x85, 0x23, 0x81, 0xf9, 0x2b, 0xb3, 0x82, 0x3b, 0x76, 0xd5, 0x2e, 0x18, 0x7e, 0x5f, 0x69, 0x19, 0x2e, 0xdd, 0x7b, 0x0b, 0x59, 0xd6, 0x2d, 0x11, 0x78, 0x3e, 0x46, 0xf4, 0x2a, 0x5f, 0x76, 0x1e, 0x31, 0xc7, 0x2c, 0x5a, 0x74, 0x22, 0x1d, 0x52, 0x32, 0xfa, 0x7c, 0xc8, 0x1a, 0x7e, 0x44, 0x7e, 0x85, 0xd0, 0x13, 0x40, 0x56, 0xec, 0x91, 0x5f, 0x14, 0xaf, 0x6b, 0x3c, 0x9d, 0xca, 0x16, 0x23, 0x15, 0x7a, 0x99, 0xdb, 0xd7, 0x60, 0x12, 0x74, 0x9c, 0x96, 0xd3, 0xad, 0x10, 0x87, 0x9a, 0xf0, 0xcb, 0xc7, 0x12, 0x28, 0x92, 0xec, 0xb8, 0x6d, 0x15, 0x63, 0x8a, 0x0a, 0xa1, 0x5e, 0x23, 0xf9, 0x83, 0x68, 0x8e, 0xf3, 0x2c, 0x46, 0x7e, 0x37, 0x7f, 0xd2, 0x32, 0x1e, 0x79, 0x0e, 0x71, 0xe6, 0x35, 0x1c, 0x75, 0x2b, 0x63, 0x5d, 0x36, 0x2a, 0x71, 0xe3, 0x52, 0xa5, 0x36, 0x27, 0x6f, 0xc2, 0x3e, 0x30, 0x39, 0x2a, 0x70, 0x07, 0x2c, 0x45, 0x40, 0x06, 0x6f, 0x68, 0x1b, 0x2e, 0x4a, 0xb8, 0x7a, 0x42, 0x18, 0xa2, 0x5a, 0x97, 0x81, 0xcd, 0x16, 0x73, 0x6d, 0xda, 0x91, 0xc6, 0x17, 0x75, 0x71, 0x8d, 0x99, 0x48, 0x19, 0x01, 0x15, 0x6e, 0x8f, 0x96, 0xda, 0x37, 0x12, 0x85, 0x92, 0x16, 0xd6, 0xda, 0x0d, 0xe1, 0x91, 0x05, 0xcd, 0xc4, 0x0a, 0x33, 0x88, 0xda, 0xb8, 0x05, 0x21, 0x3a, 0x7f, 0xf2, 0x9f, 0x35, 0x2c, 0x03, 0x79, 0x7b, 0x8d, 0x89, 0x34, 0x8a, 0x74, 0x08, 0x7c, 0xd7, 0x39, 0x6b, 0x6f, 0x30, 0x6c, 0xe5, 0x3e, 0x3b, 0x6b, 0x6a, 0x5e, 0x19, 0x40, 0xea, 0x68, 0xe2, 0x4d, 0x2d, 0x43, 0xd3, 0x68, 0xbf, 0x3a, 0x25, 0x49, 0x58, 0x6b, 0x24, 0x2a, 0x75, 0x4e, 0x34, 0x6d, 0xef, 0x1b, 0xe8, 0x5b, 0x0f, 0x77, 0xdd, 0x19, 0x1f, 0x67, 0x4b, 0x82, 0x97, 0x18, 0xb5, 0x77, 0xf0, 0x91, 0x46, 0x1a, 0x4c, 0x80, 0x88, 0x98, 0x17, 0x1b, 0x47, 0x13, 0x1d, 0x80, 0xfa, 0xdf, 0x7b, 0x13, 0x6f, 0x86, 0x20, 0xda, 0xa3, 0x0c, 0xe0, 0x86, 0x44, 0xd2, 0x1d, 0x15, 0xa0, 0x7e, 
0x0d, 0xb9, 0xab, 0x28, 0x25, 0x75, 0xe7, 0xa0, 0xbd, 0x34, 0xc5, 0x6f, 0x1f, 0x8c, 0xc4, 0x3d, 0xea, 0x69, 0x97, 0x7b, 0xa3, 0x45, 0xcb, 0x65, 0x9d, 0x6b, 0x27, 0x4b, 0x09, 0x63, 0x26, 0x5b, 0x76, 0x50, 0x03, 0x62, 0xaa, 0x4b, 0x22, 0x54, 0x47, 0x64, 0x36, 0x3b, 0x15, 0x58, 0x74, 0x67, 0x79, 0x2c, 0x1f, 0x5d, 0x1a, 0x6a, 0xce, 0x1d, 0xb3, 0x67, 0x71, 0x74, 0x69, 0x19, 0x40, 0x74, 0x30, 0x7f, 0x73, 0x19, 0xe4, 0x81, 0xbe, 0x8b, 0x51, 0x1a, 0x7d, 0x8e, 0xeb, 0x99, 0x08, 0x1d, 0x25, 0x19, 0xe0, 0x72, 0x45, 0xdf, 0x46, 0x15, 0xb1, 0x76, 0x13, 0xde, 0xa8, 0x0a, 0xcb, 0x78, 0xc9, 0xdc, 0xa2, 0x21, 0x8c, 0x72, 0x23, 0xbc, 0x64, 0x34, 0x0a, 0x6a, 0x43, 0xa0, 0xcc, 0x42, 0x01, 0x63, 0xa9, 0x8b, 0x90, 0x4c, 0x29, 0x5f, 0x6c, 0x7a, 0xb2, 0x53, 0xf3, 0x5d, 0x03, 0x6a, 0xb4, 0x5a, 0x2b, 0x5b, 0x22, 0x5b, 0x4e, 0x5e, 0xaa, 0x5c, 0xc5, 0x4b, 0xcb, 0x62, 0x7c, 0x5f, 0x97, 0x3c, 0x97, 0x66, 0x5a, 0x63, 0x18, 0x2e, 0x13, 0x6a, 0x4f, 0x67, 0x28, 0x20, 0x6b, 0x72, 0xa8, 0x6f, 0xe9, 0x1a, 0x24, 0x7f, 0xca, 0x7c, 0x5a, 0x1b, 0xb7, 0x8b, 0xb8, 0x86, 0x98, 0x1b, 0xf0, 0x98, 0x8d, 0x94, 0x26, 0x1f, 0x06, 0x1f, 0xcf, 0x60, 0xa5, 0xde, 0xd7, 0x1f, 0xc2, 0x64, 0x5e, 0xde, 0x6b, 0x1a, 0x7c, 0x69, 0xc4, 0xdf, 0xae, 0x30, 0x56, 0x64, 0x26, 0xbf, 0x17, 0x43, 0xfb, 0x5d, 0xaa, 0xa0, 0xb5, 0x52, 0x20, 0x59, 0x3b, 0x8c, 0x0b, 0x5b, 0xab, 0x56, 0xa7, 0x7a, 0xf6, 0x62, 0x3e, 0x54, 0xdd, 0x6b, 0x64, 0x67, 0x1c, 0x54, 0x42, 0x5b, 0xbe, 0x6b, 0x83, 0x54, 0x82, 0x4b, 0xf1, 0x6f, 0x6d, 0x57, 0xd6, 0x3d, 0x66, 0x73, 0x3e, 0x5b, 0xe1, 0x2f, 0xb2, 0x76, 0x3e, 0x60, 0x40, 0x22, 0x74, 0x7e, 0x3f, 0x69, 0x00, 0x1c, 0x4e, 0x8b, 0xd5, 0x75, 0x43, 0x1d, 0x9e, 0x96, 0xe9, 0x7e, 0x98, 0x1d, 0x8b, 0xa2, 0xfd, 0x8c, 0x1c, 0x1f, 0xf3, 0x2b, 0xbc, 0x30, 0x33, 0xd1, 0x42, 0x2b, 0x03, 0x36, 0x32, 0xd3, 0xc6, 0x2d, 0x02, 0x56, 0x2e, 0xe5, 0x2d, 0x42, 0x5e, 0x54, 0x9b, 0xc2, 0x30, 0x56, 0xe7, 0x51, 0x16, 0xa1, 0x22, 0x62, 0xca, 0x4f, 0x48, 0x8d, 0x1f, 0x6a, 0xa1, 0x4e, 0x0a, 0x7d, 0x1b, 0x6e, 0xbe, 0x4c, 0xf8, 0x6e, 0xb2, 
0x73, 0x3c, 0x4b, 0x61, 0x5d, 0x55, 0x77, 0x8f, 0x4a, 0x5d, 0x4c, 0xf6, 0x7b, 0x6c, 0x4d, 0x13, 0x3d, 0xb2, 0x7e, 0xcb, 0x51, 0x0e, 0x30, 0x86, 0x81, 0xe8, 0x55, 0xdc, 0x22, 0xca, 0x89, 0x28, 0x5e, 0x43, 0x1d, 0x65, 0x96, 0x83, 0x6d, 0x3a, 0x20, 0x54, 0xa1, 0x61, 0x75, 0xa8, 0x1f, 0x20, 0xab, 0x82, 0x7f, 0xbe, 0x1f, 0xbe, 0x2b, 0xd3, 0x30, 0x24, 0xd1, 0x49, 0x2b, 0xd0, 0x30, 0x30, 0xd1, 0x80, 0x48, 0xf7, 0x40, 0xe8, 0xed, 0xe8, 0x5b, 0x09, 0x42, 0xb0, 0xc9, 0x0b, 0x6b, 0x14, 0x44, 0xfc, 0xa4, 0xe2, 0x73, 0x16, 0x45, 0x89, 0x91, 0x08, 0x77, 0x49, 0x44, 0xaa, 0x81, 0x2e, 0x7b, 0x24, 0x43, 0x76, 0x71, 0xbe, 0x7f, 0x9b, 0x40, 0x87, 0x5f, 0xf1, 0x82, 0xe5, 0x3f, 0x0d, 0x4f, 0x35, 0x86, 0x04, 0x3e, 0xed, 0x3e, 0xa6, 0x89, 0x19, 0x42, 0x09, 0x30, 0x15, 0x8b, 0xd0, 0x45, 0x20, 0x21, 0xd0, 0x94, 0x00, 0x52, 0x1f, 0x1e, 0x80, 0x9f, 0x8e, 0x5f, 0xa8, 0x20, 0x32, 0xab, 0x58, 0x6c, 0x12, 0x1d, 0x5f, 0xae, 0x8e, 0x74, 0x7c, 0x1d, 0xa9, 0x2c, 0x86, 0x2f, 0x31, 0xd0, 0xa7, 0x2c, 0xb4, 0x2f, 0x37, 0xd0, 0xc6, 0x63, 0xe0, 0x2f, 0xa7, 0xf6, 0x39, 0x75, 0xd6, 0x33, 0x69, 0xd2, 0x11, 0x7f, 0xf7, 0x38, 0x63, 0xaf, 0x92, 0x83, 0x51, 0x3a, 0xe0, 0x99, 0x7d, 0x85, 0x86, 0x3a, 0x52, 0x88, 0x2d, 0x89, 0x72, 0x38, 0x15, 0x76, 0xa1, 0x8c, 0x90, 0x35, 0x77, 0x64, 0x6f, 0x8e, 0xda, 0x33, 0xc4, 0x53, 0x5c, 0x91, 0x0f, 0x31, 0x43, 0x42, 0x31, 0x93, 0x7f, 0x30, 0x30, 0x30, 0x86, 0x96, 0x31, 0x33, 0x38, 0x21, 0x2e, 0x9e, 0x3c, 0x3e, 0x7c, 0x1a, 0x7a, 0xaa, 0xc8, 0x50, 0x78, 0x1c, 0xfe, 0xae, 0x69, 0x59, 0xbe, 0x1d, 0x33, 0xb5, 0xac, 0x66, 0xa3, 0x1b, 0x71, 0x2c, 0x9d, 0x2f, 0x23, 0xd0, 0xae, 0x6f, 0xbb, 0x28, 0xf8, 0xe7, 0x4c, 0x7e, 0x9d, 0x27, 0xdc, 0xe3, 0x1d, 0x90, 0xb0, 0x26, 0x12, 0xdc, 0x53, 0x96, 0xeb, 0x2b, 0x7e, 0xbf, 0x85, 0x96, 0xb0, 0x2d, 0x8d, 0xa8, 0xb3, 0x97, 0x82, 0x2d, 0x85, 0x93, 0x09, 0x99, 0x5d, 0x2b, 0x7e, 0x7e, 0x46, 0x9a, 0xa4, 0x2a, 0x15, 0x6a, 0x45, 0x9c, 0x07, 0x28, 0x06, 0x58, 0x33, 0x9d, 0x21, 0x25, 0x35, 0x46, 0x3d, 0x9f, 0xc0, 0x21, 0xe3, 0x33, 0xc8, 0xa4, 0x74, 0x1e, 
0x49, 0x20, 0x4f, 0xb1, 0x0d, 0x29, 0x2d, 0x1d, 0x5a, 0xbb, 0xe5, 0x3a, 0x8b, 0x1b, 0xd6, 0xc2, 0xee, 0x4c, 0x25, 0x1e, 0x10, 0xc5, 0x56, 0x5a, 0x7f, 0x1e, 0xd4, 0x2c, 0xb4, 0x2f, 0x14, 0xd0, 0xb5, 0x88, 0x4d, 0x26, 0xda, 0xdf, 0xbe, 0x95, 0x18, 0x26, 0xd0, 0xdc, 0xda, 0xa4, 0x11, 0x25, 0x06, 0xd7, 0xd1, 0xaf, 0x2b, 0x1b, 0xd6, 0xcf, 0xcb, 0xad, 0x58, 0x1d, 0x2e, 0xb8, 0x40, 0xab, 0xa4, 0x1d, 0xad, 0x9f, 0x6b, 0xab, 0xd9, 0x1b, 0xec, 0x87, 0x1d, 0xab, 0xec, 0x1d, 0x1f, 0x71, 0xc2, 0xac, 0x24, 0x1d, 0xf3, 0x5f, 0x6b, 0xae, 0x53, 0x1b, 0x5e, 0x4d, 0x70, 0xaf, 0x7e, 0x19, 0x53, 0x3a, 0xf9, 0xb8, 0xfc, 0x13, 0xc5, 0x25, 0x83, 0xc4, 0x60, 0x23, 0x9f, 0x24, 0xb6, 0xc6, 0xf0, 0x28, 0xad, 0x25, 0xc4, 0xc7, 0xfb, 0x2a, 0xc6, 0x26, 0x34, 0xc8, 0x2c, 0x49, 0x82, 0x22, 0xb2, 0x8a, 0xbb, 0x26, 0xa2, 0xde, 0xc5, 0x99, 0x39, 0x27, 0x07, 0xdc, 0x60, 0xa4, 0x4c, 0x26, 0x62, 0xd9, 0x84, 0xaf, 0x52, 0x23, 0xec, 0xd7, 0xe3, 0xbe, 0x5b, 0x1d, 0x11, 0xd5, 0x98, 0xc5, 0xbd, 0x1d, 0xc8, 0xc9, 0x48, 0xc5, 0x23, 0x1e, 0xa0, 0xb3, 0x47, 0xc4, 0xce, 0x1c, 0xd0, 0x9a, 0x17, 0xc3, 0xfb, 0x1c, 0x31, 0x81, 0x48, 0xc2, 0xf0, 0x1b, 0xf8, 0x6d, 0x72, 0xc3, 0x21, 0x19, 0xf2, 0x5a, 0x1d, 0xc4, 0x7b, 0x19, 0x80, 0x47, 0xd8, 0xc6, 0xe1, 0x17, 0xca, 0x2c, 0x9d, 0xc9, 0x75, 0x27, 0xbe, 0x29, 0xae, 0xc9, 0xe7, 0x28, 0xc8, 0x28, 0x54, 0xca, 0x1a, 0x2a, 0x85, 0x28, 0x0b, 0xca, 0x39, 0x2b, 0xa0, 0x27, 0xde, 0x9d, 0x3c, 0x27, 0x31, 0xdb, 0xdd, 0xa3, 0xae, 0x26, 0xd6, 0xda, 0x17, 0xac, 0xb5, 0x25, 0x7f, 0xd8, 0x9c, 0xc0, 0xc1, 0x21, 0x56, 0xda, 0x42, 0xcb, 0xd4, 0x1e, 0x1e, 0xd6, 0xe7, 0xcc, 0x01, 0x20, 0x52, 0xca, 0x94, 0xcc, 0xef, 0x1f, 0x86, 0xb9, 0x23, 0xcc, 0xc9, 0x1d, 0x2a, 0xa1, 0x84, 0xcb, 0xb5, 0x1c, 0xb2, 0x87, 0xe5, 0xca, 0x51, 0x1d, 0x63, 0x74, 0xb5, 0xca, 0x26, 0x1e, 0xb5, 0x62, 0x16, 0xca, 0xa6, 0x1f, 0x2e, 0x52, 0x0d, 0xcc, 0x1e, 0x1c, 0x10, 0x36, 0x60, 0xcb, 0x58, 0x26, 0x12, 0x2a, 0x90, 0xcb, 0x39, 0x28, 0x61, 0x29, 0xbd, 0xcb, 0x24, 0x29, 0xdb, 0x29, 0x3c, 0xcb, 0x14, 0x2a, 0xe2, 0x28, 0xe5, 
0x1e, 0x52, 0xac, 0x3f, 0xb7, 0x07, 0x20, 0x91, 0xab, 0x46, 0xaf, 0x6b, 0x20, 0xb7, 0xaa, 0xf0, 0xad, 0xa4, 0x21, 0xa5, 0xac, 0x75, 0xaa, 0xd3, 0x22, 0x09, 0xaf, 0xc5, 0xa7, 0xa1, 0x23, 0x71, 0xa7, 0x95, 0x9a, 0x43, 0x27, 0x03, 0xa5, 0xca, 0x94, 0x68, 0x27, 0x31, 0xa5, 0x61, 0x91, 0xf2, 0x28, 0x78, 0xa5, 0x39, 0x87, 0x27, 0x29, 0xd0, 0xa4, 0x7e, 0x7e, 0x08, 0x29, 0x68, 0xa3, 0x93, 0x74, 0xc1, 0x25, 0x7f, 0xa3, 0x1d, 0x64, 0x9f, 0x24, 0xae, 0xa2, 0x70, 0x5f, 0xb4, 0x18, 0x77, 0xa4, 0x19, 0x26, 0x2c, 0x1f, 0xa0, 0xb0, 0x47, 0x24, 0x88, 0x24, 0x03, 0xbb, 0xf2, 0x24, 0xfb, 0x26, 0x29, 0xbf, 0x24, 0x24, 0x6a, 0x1c, 0x5e, 0xac, 0x43, 0xc3, 0xea, 0x1d, 0xe5, 0xab, 0x7c, 0xb8, 0x4b, 0x20, 0x50, 0xaa, 0x39, 0xae, 0xa3, 0x20, 0x7c, 0xa9, 0x8e, 0xac, 0x09, 0x21, 0xca, 0xaa, 0x76, 0xa7, 0x7a, 0x24, 0x55, 0xa4, 0x88, 0x9c, 0xa3, 0x23, 0x64, 0xa4, 0xc2, 0x95, 0x34, 0x26, 0xe1, 0xa2, 0x86, 0x90, 0x14, 0x27, 0x75, 0xa2, 0xbc, 0x86, 0x25, 0x29, 0xe7, 0xa1, 0x76, 0x7a, 0xb3, 0x28, 0x69, 0x9b, 0x8a, 0x6d, 0x72, 0x24, 0x83, 0x9f, 0x78, 0x60, 0xc8, 0x15, 0x7a, 0x9c, 0x11, 0x3e, 0x5d, 0x16, 0x2b, 0x98, 0xc1, 0x21, 0x8f, 0x1d, 0x37, 0x9f, 0xa9, 0x1e, 0xaa, 0x22, 0xad, 0xaa, 0x50, 0x1e, 0xb5, 0x25, 0xb1, 0xb2, 0x61, 0x1f, 0x51, 0x16, 0x36, 0xb4, 0xe6, 0xd7, 0x1e, 0x1b, 0xd9, 0xab, 0x14, 0xc3, 0x0e, 0x1c, 0x0b, 0xaa, 0xa0, 0xbb, 0x7e, 0x1d, 0x91, 0xa9, 0x07, 0xaf, 0x35, 0x1f, 0xfd, 0xa6, 0x83, 0xa8, 0x7a, 0x23, 0x75, 0xa3, 0x3c, 0x9d, 0x1f, 0x24, 0x6f, 0x9f, 0x17, 0x95, 0x2e, 0x24, 0xa3, 0x9a, 0x76, 0x89, 0x22, 0x29, 0xf4, 0x8f, 0xcf, 0x7a, 0x56, 0x2b, 0x1d, 0x8c, 0xa0, 0x6e, 0xea, 0x28, 0x9c, 0x88, 0xf1, 0x60, 0xd2, 0x20, 0x76, 0x8f, 0x36, 0x4c, 0xcb, 0x1d, 0x3d, 0x8c, 0x83, 0x30, 0x52, 0x1c, 0xe3, 0x91, 0x1b, 0x20, 0x0d, 0x1d, 0xec, 0x94, 0x8b, 0x17, 0x98, 0x23, 0xb9, 0x9d, 0x25, 0x18, 0x9d, 0x27, 0xa3, 0xa5, 0x01, 0x1a, 0x68, 0x16, 0x4f, 0xac, 0x0b, 0xd8, 0x49, 0x13, 0xe3, 0xb0, 0x36, 0xd5, 0xdf, 0x1a, 0xde, 0xa8, 0xd9, 0xc1, 0x6a, 0x1a, 0xb1, 0xa7, 0x67, 0xb9, 0x26, 0x1b, 0x99, 0xa3, 
0xe9, 0xab, 0xfa, 0x21, 0xa7, 0x9c, 0xdd, 0x9c, 0xda, 0x25, 0xaf, 0x93, 0x4f, 0x8b, 0x29, 0x29, 0xd2, 0x90, 0x10, 0x82, 0x34, 0x2d, 0x59, 0x8d, 0xdb, 0x77, 0x15, 0x2d, 0xba, 0x8a, 0xe9, 0x68, 0x62, 0x2c, 0xd6, 0x87, 0xbd, 0x57, 0xa3, 0x26, 0xe7, 0x85, 0xd2, 0x42, 0x23, 0x21, 0x31, 0x83, 0xe0, 0x28, 0x12, 0x24, 0xfe, 0x87, 0xc6, 0x1b, 0x2f, 0x25, 0x34, 0x8a, 0xc0, 0x0f, 0xfd, 0x42, 0x07, 0x95, 0xc8, 0x10, 0x67, 0x55, 0xa1, 0xa0, 0x6d, 0x12, 0x72, 0x18, 0x04, 0xa7, 0x3d, 0xd8, 0xb6, 0x15, 0x87, 0xa9, 0x3c, 0xd6, 0x55, 0x10, 0x54, 0xa9, 0xd8, 0xd2, 0x53, 0x14, 0x49, 0xa3, 0xd4, 0xbe, 0xbf, 0x1d, 0x4a, 0x97, 0x1c, 0xa8, 0xda, 0x24, 0xeb, 0x93, 0x42, 0x99, 0x3f, 0x29, 0xa3, 0x8f, 0x63, 0x8c, 0x57, 0x30, 0x41, 0x8a, 0x07, 0x7e, 0xc8, 0x33, 0x45, 0x85, 0xbb, 0x71, 0x21, 0x33, 0x97, 0x82, 0xe3, 0x61, 0x8b, 0x32, 0xf3, 0x80, 0x4b, 0x4e, 0xd5, 0x30, 0x87, 0x7e, 0x14, 0x38, 0xce, 0x34, 0x0f, 0x7e, 0x3d, 0x24, 0xbf, 0x38, 0x97, 0x81, 0x7c, 0x17, 0x40, 0x46, 0xec, 0x8d, 0x2f, 0x13, 0xbf, 0x58, 0xdb, 0x94, 0x52, 0x14, 0x74, 0x6c, 0x03, 0xa1, 0x59, 0x15, 0x85, 0x17, 0xb3, 0x9d, 0x68, 0xda, 0x3e, 0x15, 0x5e, 0xa0, 0x41, 0xd7, 0x90, 0x13, 0xdc, 0xa0, 0xd2, 0xd1, 0xd2, 0x0f, 0xa2, 0x9c, 0x78, 0xc4, 0x5c, 0x1f, 0x86, 0x92, 0x3d, 0xab, 0xe2, 0x29, 0x42, 0x8c, 0x20, 0x99, 0x48, 0x31, 0xca, 0x86, 0x8a, 0x87, 0xfc, 0x36, 0x6b, 0x81, 0x6c, 0x79, 0xd3, 0x39, 0x3a, 0x7c, 0xb3, 0x6a, 0xdf, 0x3b, 0xb3, 0x79, 0xd5, 0x5a, 0x0c, 0x3d, 0x09, 0x77, 0xbf, 0x46, 0x17, 0x41, 0x08, 0x78, 0x26, 0x33, 0x91, 0x46, 0x8d, 0x79, 0xbc, 0x22, 0xc7, 0x4e, 0x63, 0x7e, 0x7b, 0x18, 0x41, 0x5e, 0x28, 0x86, 0xb6, 0x16, 0x16, 0x6f, 0x32, 0x96, 0x66, 0x1a, 0xda, 0x77, 0xdb, 0x9d, 0x50, 0x1a, 0xed, 0x18, 0x1d, 0x93, 0x8a, 0xdd, 0x57, 0x16, 0x16, 0x96, 0xc0, 0xdb, 0x24, 0x13, 0x31, 0x97, 0x10, 0xd4, 0x7b, 0x12, 0x1e, 0x91, 0xf7, 0xc3, 0x93, 0x25, 0x85, 0x89, 0x07, 0xab, 0xfa, 0x32, 0x4e, 0x81, 0xe4, 0x97, 0x4e, 0x39, 0xe7, 0x7c, 0xa0, 0x85, 0x86, 0x40, 0xcc, 0x77, 0xec, 0x75, 0x98, 0x45, 0x9a, 0x74, 0x5d, 0x66, 0x0a, 
0x49, 0x5e, 0x72, 0x01, 0x55, 0xe0, 0x4c, 0xd8, 0x71, 0x61, 0x43, 0xa2, 0x52, 0xc5, 0x73, 0xfe, 0x33, 0xa2, 0x57, 0xc8, 0x76, 0x9a, 0x24, 0x4c, 0x5e, 0xaf, 0x7b, 0xb2, 0x19, 0x39, 0x6c, 0x80, 0x88, 0x14, 0x18, 0xbc, 0x7a, 0x6a, 0x95, 0x7b, 0x1a, 0x7c, 0x82, 0xa1, 0x99, 0xb6, 0x1b, 0x87, 0x19, 0x5d, 0x87, 0x9e, 0xe1, 0x8f, 0x17, 0xf3, 0x8b, 0x4f, 0xdf, 0x27, 0x14, 0x3b, 0x8d, 0x43, 0xda, 0x59, 0x1e, 0x2a, 0x86, 0xd5, 0xc5, 0x76, 0x30, 0x37, 0x7e, 0x4c, 0xab, 0x4d, 0x3c, 0x6e, 0x77, 0x8e, 0x97, 0x22, 0x46, 0xac, 0x72, 0x25, 0x85, 0x86, 0x4e, 0x2e, 0x6e, 0xa5, 0x74, 0x89, 0x53, 0xc2, 0x6c, 0x39, 0x64, 0x6b, 0x58, 0xd8, 0x6b, 0xba, 0x53, 0xd0, 0x5d, 0x0e, 0x6c, 0xe1, 0x43, 0xfb, 0x61, 0xae, 0x70, 0x4d, 0x34, 0xcd, 0x66, 0x6a, 0x73, 0xad, 0x26, 0x58, 0x6a, 0xe5, 0x78, 0x2f, 0x19, 0x79, 0x78, 0x34, 0x83, 0x46, 0x19, 0xc1, 0x85, 0xca, 0x91, 0xd3, 0x1c, 0x07, 0x91, 0x24, 0x9a, 0x9c, 0x1d, 0x4c, 0x1c, 0xac, 0x76, 0xeb, 0xe3, 0x1f, 0x1a, 0xc8, 0x7b, 0x39, 0xe4, 0x16, 0x15, 0xb7, 0x82, 0x21, 0xe7, 0x7b, 0x2b, 0x65, 0x7a, 0xc7, 0xc7, 0xe9, 0x3d, 0x52, 0x72, 0xa2, 0xab, 0x4f, 0x4b, 0x36, 0x6c, 0x41, 0x96, 0x61, 0x55, 0x75, 0x68, 0x79, 0x85, 0x05, 0x5c, 0xff, 0x66, 0x13, 0x74, 0x70, 0x63, 0x6c, 0x64, 0x67, 0x64, 0x98, 0x67, 0x71, 0x65, 0x42, 0x54, 0x49, 0x6a, 0xf8, 0x68, 0x11, 0x45, 0x62, 0x6e, 0xf0, 0x6b, 0xc1, 0x36, 0x6f, 0x72, 0xd0, 0x6f, 0x2a, 0x28, 0x50, 0x75, 0xf8, 0x73, 0x7d, 0x1a, 0x52, 0x82, 0xe8, 0x80, 0x03, 0x1b, 0xaa, 0x8e, 0xe5, 0x8a, 0xdd, 0x1c, 0xfb, 0x9c, 0xff, 0x98, 0xbf, 0x1f, 0x8a, 0x23, 0xe9, 0x65, 0xe5, 0xe1, 0x73, 0x24, 0x62, 0x6a, 0x7d, 0xe3, 0x8d, 0x26, 0x86, 0x73, 0x11, 0xea, 0xac, 0x3c, 0x3a, 0x6c, 0x5b, 0xca, 0x31, 0x4e, 0x80, 0x66, 0x34, 0xab, 0xca, 0x5b, 0xd8, 0x61, 0xdf, 0x96, 0xb1, 0x65, 0x8e, 0x5f, 0x71, 0x85, 0x68, 0x6b, 0xc9, 0x5e, 0x28, 0x75, 0x0d, 0x70, 0xa7, 0x5d, 0x44, 0x64, 0xc2, 0x74, 0xb1, 0x5d, 0x22, 0x54, 0xac, 0x78, 0x7d, 0x60, 0x3a, 0x45, 0xe2, 0x7b, 0xe4, 0x64, 0x1d, 0x37, 0x7f, 0x7f, 0x30, 0x68, 0x4d, 0x29, 0xf6, 0x81, 0x9a, 0x6d, 
0x18, 0x1d, 0x06, 0x8f, 0x2c, 0x79, 0x8d, 0x1e, 0x3e, 0x99, 0xb4, 0x82, 0xb0, 0x1e, 0x60, 0xa7, 0xcc, 0x91, 0x5f, 0x20, 0x32, 0x2b, 0xc8, 0x30, 0x3d, 0xd1, 0x4d, 0x2e, 0x91, 0x54, 0x36, 0xe2, 0x18, 0x3a, 0x40, 0x5f, 0xdb, 0xef, 0x9f, 0x4f, 0xf5, 0x5d, 0x40, 0xcd, 0xf9, 0x61, 0xbd, 0x5a, 0x29, 0xac, 0x3b, 0x6c, 0xa8, 0x58, 0x56, 0x97, 0xc4, 0x74, 0x56, 0x57, 0x0c, 0x87, 0x29, 0x78, 0xc0, 0x56, 0x59, 0x78, 0x3b, 0x7d, 0x5b, 0x54, 0xc7, 0x66, 0x85, 0x81, 0x15, 0x53, 0xa7, 0x55, 0x98, 0x85, 0x1c, 0x56, 0x3f, 0x46, 0x2e, 0x88, 0x05, 0x59, 0xb3, 0x37, 0xe8, 0x8a, 0x7a, 0x5d, 0x68, 0x2a, 0xb3, 0x8c, 0x87, 0x64, 0xd3, 0x1f, 0x37, 0x98, 0xd0, 0x70, 0x07, 0x1f, 0x5f, 0xa4, 0xb0, 0x79, 0x35, 0x1e, 0xfa, 0xad, 0xf7, 0x83, 0x4f, 0x1f, 0xfb, 0x2b, 0xc3, 0x30, 0x26, 0xd1, 0x75, 0x2c, 0xfe, 0x2e, 0x97, 0xd2, 0x7b, 0x57, 0x0d, 0x4b, 0x13, 0xf7, 0xb0, 0x67, 0xe1, 0x4d, 0x9e, 0xd3, 0xb5, 0x76, 0x03, 0x4e, 0x8b, 0xb0, 0x32, 0x7d, 0x14, 0x4f, 0x34, 0x9b, 0xbf, 0x81, 0x39, 0x4e, 0x54, 0x8b, 0x16, 0x85, 0x57, 0x4d, 0x2a, 0x7a, 0xaf, 0x89, 0xe2, 0x4a, 0x80, 0x68, 0x61, 0x8d, 0x38, 0x49, 0x49, 0x57, 0xa6, 0x90, 0x77, 0x4a, 0x0e, 0x47, 0xb4, 0x93, 0x62, 0x4c, 0xc9, 0x38, 0x0c, 0x95, 0x9c, 0x50, 0x83, 0x2b, 0x1f, 0x97, 0xb8, 0x55, 0xa3, 0x1e, 0x9b, 0xa2, 0xca, 0x63, 0x1b, 0x1f, 0x9d, 0xad, 0x3f, 0x6f, 0x1d, 0x1d, 0x45, 0xb2, 0xb3, 0x77, 0x4f, 0x1d, 0xff, 0x2c, 0xef, 0x2e, 0x69, 0xd2, 0x5f, 0x2d, 0x30, 0x2e, 0x74, 0xd2, 0x8a, 0x71, 0x63, 0x3a, 0x03, 0xff, 0xff, 0x82, 0xdf, 0x3d, 0xf9, 0xdb, 0xfe, 0x8a, 0xf2, 0x42, 0x90, 0xbc, 0x4d, 0x8d, 0x7c, 0x44, 0xa8, 0xa3, 0xc3, 0x8f, 0x26, 0x44, 0xe2, 0x91, 0xc8, 0x93, 0x59, 0x42, 0xcd, 0x7e, 0xb7, 0x96, 0xfc, 0x3f, 0x57, 0x6c, 0x13, 0x99, 0x96, 0x3d, 0xab, 0x5b, 0x35, 0x9b, 0xe9, 0x3c, 0x38, 0x4a, 0x8c, 0x9e, 0x92, 0x3b, 0x3a, 0x39, 0x12, 0xa1, 0x9b, 0x3d, 0x89, 0x29, 0x69, 0xa3, 0xf8, 0x42, 0x10, 0x1a, 0x81, 0xad, 0xb7, 0x53, 0x55, 0x1c, 0xbf, 0xb4, 0x1e, 0x5e, 0x85, 0x19, 0xe0, 0xbc, 0x9a, 0x6d, 0xe2, 0x1a, 0x35, 0x2c, 0xa9, 0x2f, 0x2d, 0xd0, 0xba, 
0x76, 0xd1, 0x2c, 0x60, 0xea, 0xbe, 0x88, 0xe7, 0x2c, 0x93, 0xe7, 0xdc, 0x9f, 0x1f, 0x30, 0xf4, 0xe7, 0x16, 0xa2, 0x50, 0x34, 0x23, 0xcc, 0x66, 0xa0, 0xb7, 0x37, 0xac, 0xb1, 0x90, 0xa0, 0x34, 0x38, 0x0d, 0x9b, 0xf3, 0xa3, 0x1c, 0x35, 0x9a, 0x85, 0x73, 0xa5, 0x50, 0x33, 0x78, 0x71, 0xc6, 0xa6, 0xf5, 0x31, 0xae, 0x5f, 0xee, 0xa8, 0x8b, 0x2f, 0xd0, 0x4e, 0x8b, 0xab, 0xf1, 0x2a, 0xb6, 0x3b, 0x70, 0xaf, 0x61, 0x27, 0x37, 0x27, 0x90, 0xb9, 0x77, 0x2d, 0x2d, 0x1d, 0x24, 0xc2, 0x37, 0x40, 0xa9, 0x1e, 0x32, 0xc5, 0x9c, 0x52, 0x36, 0x1f, 0x92, 0xc6, 0xb6, 0x61, 0xb5, 0x1f, 0x60, 0x7f, 0x38, 0x29, 0x91, 0xe5, 0x5d, 0x8c, 0xad, 0x29, 0x43, 0xe2, 0x2b, 0x9b, 0x4b, 0x29, 0xd8, 0xdf, 0xf8, 0xab, 0x06, 0x29, 0x6f, 0xdd, 0x32, 0xba, 0xb1, 0x24, 0xeb, 0xdc, 0x71, 0xb8, 0xee, 0x26, 0xc9, 0xc3, 0x80, 0xb7, 0x15, 0x26, 0xe1, 0xa9, 0xe9, 0xb6, 0xdc, 0x25, 0x54, 0x90, 0x4d, 0xb6, 0xcc, 0x24, 0x25, 0x7a, 0x34, 0xb6, 0x5b, 0x23, 0x1f, 0x66, 0x11, 0xb6, 0xf1, 0x1f, 0xf1, 0x53, 0x40, 0xb9, 0xa8, 0x19, 0x75, 0x3e, 0x01, 0xbd, 0x7c, 0x12, 0xb9, 0x28, 0xf4, 0xc7, 0x6e, 0x26, 0x71, 0x26, 0xd2, 0xc8, 0xb5, 0x2a, 0x53, 0x26, 0xfc, 0xc9, 0x4a, 0x3f, 0x7b, 0x23, 0xc8, 0xc9, 0xbe, 0x52, 0x16, 0x22, 0xe8, 0x8d, 0x88, 0x28, 0x45, 0xe0, 0x7a, 0x9e, 0x2b, 0x28, 0xd7, 0xde, 0x07, 0xa9, 0x1e, 0x28, 0x81, 0xdc, 0x16, 0xb5, 0x30, 0x27, 0x14, 0xdb, 0xae, 0xc5, 0x2c, 0x21, 0xe7, 0xdb, 0xf2, 0xcd, 0xde, 0x1e, 0x6d, 0xd0, 0x83, 0xca, 0x96, 0x1f, 0x38, 0xb7, 0x72, 0xca, 0x9e, 0x1d, 0x06, 0x9d, 0xb5, 0xc9, 0x34, 0x1d, 0x50, 0x84, 0x3d, 0xc8, 0x01, 0x1e, 0x2d, 0x70, 0x53, 0xc8, 0xf2, 0x1e, 0x58, 0x5d, 0x0d, 0xc9, 0x88, 0x1c, 0xd5, 0x4b, 0x0e, 0xcb, 0x13, 0x1b, 0x74, 0x2f, 0xcf, 0xcb, 0x36, 0x27, 0x3a, 0x2a, 0x11, 0xcb, 0x16, 0x29, 0xe7, 0x29, 0x2b, 0xcb, 0x02, 0x2b, 0x63, 0x28, 0xb1, 0xca, 0xf5, 0x2c, 0x53, 0x28, 0x65, 0xa0, 0x31, 0x28, 0x72, 0xdd, 0x1c, 0xa7, 0xd0, 0x28, 0x38, 0xdb, 0xc0, 0xae, 0xd8, 0x27, 0x3e, 0xda, 0xb8, 0xc3, 0xb6, 0x23, 0xe6, 0xdd, 0x1f, 0xd3, 0x70, 0x1f, 0x48, 0xdd, 0x5d, 0xd0, 0xfd, 0x21, 
0x03, 0xcf, 0x6e, 0xd1, 0xbb, 0x20, 0x11, 0xbc, 0xc1, 0xd1, 0xd2, 0x1d, 0x76, 0xa4, 0x9c, 0xd0, 0x10, 0x1e, 0x05, 0x8a, 0x84, 0xcd, 0xd7, 0x20, 0x20, 0x77, 0x1b, 0xcd, 0x26, 0x21, 0x09, 0x64, 0x14, 0xcd, 0x6b, 0x21, 0x4a, 0x53, 0xb1, 0xce, 0xa6, 0x1e, 0x5a, 0x38, 0x71, 0xcc, 0x88, 0x27, 0x31, 0x2b, 0x69, 0xcc, 0x22, 0x29, 0x3f, 0x2a, 0x64, 0xcb, 0xe1, 0x2a, 0x90, 0x29, 0xc3, 0xcb, 0xb4, 0x2b, 0x7a, 0x29, 0x57, 0x1e, 0xe1, 0xad, 0xdb, 0xb9, 0x26, 0x20, 0xf0, 0xad, 0x2c, 0xb1, 0x62, 0x21, 0x1e, 0xad, 0x1d, 0xaf, 0xe1, 0x22, 0x48, 0xaf, 0xea, 0xad, 0x45, 0x22, 0x63, 0xb3, 0x4b, 0xaa, 0xe7, 0x23, 0x2c, 0xab, 0x21, 0x9d, 0xae, 0x27, 0x3b, 0xa8, 0x5e, 0x96, 0x8b, 0x27, 0x99, 0xa7, 0x76, 0x93, 0xdc, 0x28, 0xc0, 0xa7, 0x02, 0x88, 0x79, 0x29, 0xf3, 0xa5, 0xd8, 0x7e, 0xe6, 0x29, 0x80, 0xa5, 0x47, 0x75, 0x2d, 0x25, 0xe6, 0xa5, 0x2e, 0x65, 0xd0, 0x18, 0x93, 0xa5, 0x97, 0x45, 0x55, 0x1e, 0x0c, 0xbc, 0xd5, 0x28, 0xdf, 0x23, 0x85, 0xc3, 0x5c, 0x28, 0x32, 0x26, 0x8a, 0xc6, 0xbf, 0x27, 0xcf, 0x28, 0x26, 0xc7, 0x2e, 0x27, 0x0d, 0x1d, 0x26, 0xae, 0x18, 0xc5, 0xea, 0x1d, 0x81, 0xae, 0x2d, 0xbe, 0x86, 0x20, 0xcf, 0xac, 0xba, 0xb1, 0x3a, 0x21, 0x0a, 0xac, 0x8f, 0xaf, 0x1e, 0x22, 0x7b, 0xaf, 0x2b, 0xab, 0x3c, 0x21, 0xb5, 0xb0, 0xb4, 0xa7, 0x3d, 0x23, 0x7c, 0xa9, 0x6c, 0x99, 0x83, 0x27, 0x46, 0xa6, 0xd1, 0x93, 0x9d, 0x27, 0xe8, 0xa6, 0x2b, 0x88, 0xba, 0x29, 0xda, 0xa4, 0xdb, 0x7e, 0x6e, 0x29, 0x06, 0xa3, 0x14, 0x71, 0x02, 0x25, 0x39, 0xa3, 0x05, 0x62, 0xcd, 0x16, 0x95, 0xa1, 0xcb, 0x40, 0x7a, 0x1a, 0xf0, 0xa6, 0x53, 0x25, 0x0d, 0x22, 0x0b, 0xb1, 0x3e, 0x23, 0x01, 0x25, 0xe8, 0xbb, 0x2f, 0x23, 0x4f, 0x27, 0xfa, 0xbd, 0xea, 0x22, 0xc0, 0x17, 0xbe, 0xb7, 0xad, 0xd9, 0x4b, 0x1c, 0xe0, 0xad, 0x7c, 0xc5, 0xaf, 0x1d, 0x28, 0xad, 0x73, 0xbe, 0xd4, 0x1e, 0xb1, 0xac, 0x62, 0xb3, 0x17, 0x20, 0xdf, 0xab, 0x53, 0xad, 0x70, 0x22, 0x66, 0xab, 0xa7, 0xa6, 0x4c, 0x23, 0xec, 0xa5, 0xe3, 0x9b, 0xc3, 0x23, 0xed, 0xa1, 0xc2, 0x90, 0x14, 0x29, 0x97, 0x9c, 0xd1, 0x84, 0x69, 0x2d, 0x55, 0x98, 0x71, 0x76, 0xcf, 
0x2b, 0x5c, 0x95, 0xc2, 0x66, 0xcf, 0x1f, 0xc2, 0x96, 0x78, 0x4f, 0xd8, 0x16, 0xb0, 0x99, 0x28, 0x36, 0x67, 0x1a, 0x2c, 0x99, 0xcd, 0x1e, 0x2b, 0x22, 0x2a, 0xa1, 0x77, 0x1b, 0xf2, 0x26, 0xb8, 0xab, 0x1a, 0x1c, 0x68, 0x29, 0xd2, 0xb1, 0xd1, 0x1e, 0x04, 0x17, 0xe9, 0xae, 0x66, 0xda, 0xfa, 0x16, 0x2b, 0xb4, 0x3f, 0xd8, 0xde, 0x1c, 0x5c, 0xac, 0x52, 0xc5, 0x3f, 0x1c, 0x77, 0xab, 0xe2, 0xbe, 0x40, 0x1d, 0xbd, 0xaa, 0x2e, 0xb2, 0xde, 0x25, 0x4a, 0xa3, 0x09, 0xa3, 0x6f, 0x28, 0xfb, 0x9e, 0x16, 0x96, 0xb4, 0x2d, 0x76, 0x9a, 0xf7, 0x8b, 0xe9, 0x31, 0x14, 0x96, 0x33, 0x7e, 0xe6, 0x32, 0x70, 0x93, 0x16, 0x70, 0xd0, 0x31, 0x1d, 0x8f, 0xfd, 0x60, 0x40, 0x2b, 0xb5, 0x8f, 0x72, 0x48, 0x20, 0x25, 0x3b, 0x8d, 0x8d, 0x2d, 0xd5, 0x25, 0xd7, 0x8c, 0x9c, 0x15, 0xee, 0x27, 0x4c, 0x94, 0xb8, 0x11, 0x51, 0x41, 0xce, 0x9d, 0x77, 0x0f, 0x40, 0x62, 0xd0, 0xaa, 0x32, 0x11, 0x45, 0x19, 0x92, 0xab, 0x0d, 0xdb, 0xc8, 0x17, 0xc1, 0xac, 0x7c, 0xda, 0x0d, 0x13, 0xaa, 0xaf, 0x57, 0xd7, 0x51, 0x17, 0x68, 0xaa, 0x69, 0xc6, 0x0b, 0x1d, 0x1d, 0xa3, 0xa4, 0xb7, 0x00, 0x28, 0xa8, 0x9d, 0xd8, 0xa4, 0x53, 0x2e, 0xbb, 0x97, 0xdf, 0x95, 0x5c, 0x33, 0xc2, 0x92, 0xaa, 0x86, 0xa5, 0x37, 0x2b, 0x8e, 0x24, 0x78, 0xdc, 0x38, 0x1a, 0x8a, 0xb1, 0x69, 0xb3, 0x37, 0xaa, 0x87, 0xff, 0x56, 0xd5, 0x36, 0xe1, 0x87, 0x04, 0x40, 0x1c, 0x39, 0x93, 0x87, 0xb7, 0x2a, 0xba, 0x3c, 0xe3, 0x86, 0xf3, 0x16, 0x6a, 0x4a, 0x35, 0x91, 0x8b, 0x13, 0x7a, 0x5d, 0x25, 0x9c, 0x5d, 0x14, 0x3b, 0x6b, 0xf5, 0xa7, 0xaf, 0x14, 0x53, 0x19, 0xe9, 0xa0, 0xfd, 0xdd, 0x22, 0x1a, 0xc9, 0xa3, 0x6a, 0xda, 0x68, 0x17, 0x6a, 0xa6, 0xc5, 0xd7, 0xe0, 0x0f, 0x2d, 0xa6, 0x8c, 0xcf, 0xcb, 0x21, 0xac, 0x9d, 0x17, 0xb8, 0x38, 0x2f, 0x83, 0x94, 0x72, 0xa2, 0xbb, 0x36, 0xae, 0x8e, 0x8b, 0x91, 0x1b, 0x3c, 0x38, 0x8a, 0x13, 0x82, 0x82, 0x40, 0xcf, 0x85, 0x49, 0x73, 0x7d, 0x42, 0xa5, 0x82, 0x89, 0x62, 0x4f, 0x44, 0x52, 0x80, 0x4b, 0x4e, 0x56, 0x48, 0x42, 0x80, 0xb2, 0x3a, 0x15, 0x4e, 0x5e, 0x83, 0x05, 0x2b, 0x3e, 0x55, 0x1e, 0x83, 0x55, 0x19, 0x8a, 0x63, 0xfe, 0x8d, 
0xe5, 0x16, 0x82, 0x70, 0x6e, 0x98, 0x34, 0x17, 0x6a, 0x7c, 0x65, 0xa1, 0x9a, 0x1b, 0xb1, 0x1a, 0xc5, 0x97, 0x83, 0xe0, 0x74, 0x1b, 0x38, 0x99, 0xb0, 0xdd, 0xdb, 0x18, 0x89, 0x9d, 0x1d, 0xdb, 0x36, 0x1b, 0x04, 0x9b, 0x57, 0xcf, 0x31, 0x2b, 0xa9, 0x92, 0x54, 0xb7, 0xe9, 0x38, 0x89, 0x8a, 0x76, 0xa2, 0x06, 0x42, 0x12, 0x85, 0x0a, 0x8f, 0x6c, 0x48, 0xd9, 0x80, 0xf4, 0x7e, 0xa2, 0x4e, 0x14, 0x7d, 0x93, 0x6e, 0xfd, 0x52, 0x42, 0x7b, 0x55, 0x5e, 0x47, 0x55, 0xdc, 0x7a, 0x6f, 0x4b, 0xee, 0x5a, 0x62, 0x7c, 0x76, 0x3a, 0xbc, 0x5f, 0xca, 0x7f, 0x84, 0x2c, 0xe5, 0x66, 0xcf, 0x82, 0x3c, 0x1f, 0x22, 0x6f, 0xf6, 0x8c, 0x3d, 0x19, 0x42, 0x7c, 0xd3, 0x97, 0xa0, 0x1a, 0xc5, 0x8d, 0x80, 0xa1, 0xeb, 0x1d, 0x69, 0x1d, 0xee, 0x8b, 0x1c, 0xe2, 0xc7, 0x1d, 0x6e, 0x8f, 0xd3, 0xe2, 0x39, 0x1c, 0xb2, 0x93, 0x60, 0xe1, 0x42, 0x27, 0x20, 0x8f, 0x74, 0xd0, 0x3d, 0x37, 0xfb, 0x86, 0xdc, 0xb6, 0x5e, 0x44, 0xf8, 0x80, 0x1d, 0xa1, 0xac, 0x4f, 0x58, 0x7a, 0xfc, 0x8f, 0x3f, 0x56, 0xc8, 0x77, 0xdb, 0x7d, 0xd6, 0x5c, 0xe6, 0x75, 0x7f, 0x6d, 0xb4, 0x61, 0xbc, 0x74, 0xca, 0x5c, 0xc0, 0x66, 0x00, 0x75, 0xc8, 0x4c, 0x6a, 0x6a, 0x2f, 0x78, 0xa7, 0x3c, 0xcd, 0x6e, 0x67, 0x7b, 0xde, 0x2e, 0x64, 0x73, 0x88, 0x7f, 0xff, 0x20, 0x36, 0x7c, 0x19, 0x87, 0xea, 0x19, 0xf4, 0x89, 0x03, 0x95, 0xb5, 0x1c, 0x7f, 0x97, 0x78, 0xa2, 0x5c, 0x1e, 0x41, 0x20, 0x54, 0x7b, 0x2f, 0xe5, 0xd1, 0x22, 0x09, 0x7f, 0x0a, 0xe5, 0x9b, 0x26, 0x1f, 0x84, 0xe2, 0xe5, 0x75, 0x36, 0x87, 0x82, 0xee, 0xd1, 0x8c, 0x47, 0x70, 0x7b, 0x09, 0xb6, 0x60, 0x54, 0xa1, 0x74, 0xd5, 0xa1, 0x48, 0x5e, 0x5b, 0x71, 0x3f, 0x8f, 0x02, 0x66, 0x48, 0x6f, 0x53, 0x7e, 0x57, 0x6c, 0xf9, 0x6d, 0xf4, 0x6e, 0x28, 0x70, 0x5b, 0x6e, 0x13, 0x5d, 0x0d, 0x74, 0x16, 0x70, 0x61, 0x4d, 0x72, 0x77, 0xad, 0x73, 0xb6, 0x3e, 0x8f, 0x7b, 0x6a, 0x77, 0xb1, 0x30, 0x57, 0x7e, 0xe2, 0x7b, 0x7b, 0x21, 0xfc, 0x86, 0x4c, 0x82, 0xb6, 0x1b, 0x2d, 0x94, 0x53, 0x90, 0x60, 0x1e, 0x23, 0xa0, 0x5c, 0x9b, 0x11, 0x20, 0x28, 0x26, 0xa0, 0x6b, 0x2a, 0xe5, 0x16, 0x29, 0x6b, 0x70, 0x63, 0xe8, 0x6a, 
0x33, 0x72, 0x74, 0x3f, 0xe6, 0xdb, 0x49, 0x1b, 0x74, 0x78, 0xd2, 0xfb, 0x59, 0x21, 0x6e, 0xb2, 0xb6, 0xfc, 0x65, 0xd0, 0x6a, 0x86, 0xa1, 0x22, 0x6f, 0x4d, 0x68, 0xa9, 0x8f, 0xc8, 0x75, 0x5d, 0x67, 0x7a, 0x7f, 0x30, 0x7a, 0x28, 0x66, 0xd7, 0x6e, 0x34, 0x7e, 0x2b, 0x66, 0x70, 0x5d, 0xcb, 0x81, 0xad, 0x68, 0x3f, 0x4e, 0x33, 0x84, 0xf4, 0x6b, 0xea, 0x3f, 0x5f, 0x88, 0x29, 0x70, 0x20, 0x31, 0x99, 0x8a, 0xd5, 0x74, 0x91, 0x24, 0x09, 0x92, 0x6d, 0x7d, 0x1e, 0x1e, 0x49, 0x9e, 0x8e, 0x87, 0x3f, 0x1f, 0x0b, 0xab, 0xf3, 0x94, 0x15, 0x21, 0x1c, 0x2b, 0xd5, 0x30, 0x47, 0xd1, 0x59, 0x33, 0x21, 0x5a, 0x64, 0xe6, 0x71, 0x47, 0x68, 0x65, 0x71, 0xed, 0x39, 0x5d, 0xbe, 0x65, 0xdc, 0xd5, 0xcf, 0x6c, 0xc0, 0x63, 0x16, 0xb8, 0x37, 0x76, 0xf5, 0x61, 0x9c, 0xa2, 0x52, 0x7e, 0x92, 0x60, 0x74, 0x91, 0x72, 0x82, 0xc5, 0x5f, 0x9b, 0x82, 0x08, 0x87, 0x41, 0x5e, 0x2a, 0x6f, 0xaa, 0x8b, 0x4f, 0x5d, 0x64, 0x5e, 0xb7, 0x8e, 0xf5, 0x5e, 0xfb, 0x4e, 0xaa, 0x91, 0xa7, 0x62, 0x48, 0x3f, 0xfa, 0x94, 0x3b, 0x66, 0xd5, 0x32, 0x74, 0x96, 0x50, 0x6b, 0x3c, 0x25, 0xba, 0x9d, 0x28, 0x74, 0x02, 0x1f, 0x29, 0xa9, 0xbc, 0x7d, 0xc8, 0x1f, 0xbc, 0xb2, 0xbb, 0x89, 0x11, 0x1f, 0xc7, 0x2b, 0xd0, 0x30, 0x30, 0xd1, 0x80, 0x2d, 0x18, 0x2e, 0xaa, 0xd2, 0x92, 0x63, 0x62, 0x53, 0xba, 0xf1, 0xeb, 0x75, 0xa8, 0x57, 0xbd, 0xda, 0xe2, 0x81, 0x22, 0x58, 0x26, 0xbd, 0x58, 0x87, 0x93, 0x58, 0xbc, 0xa7, 0x31, 0x8b, 0xc5, 0x58, 0x0f, 0x95, 0xb2, 0x90, 0x20, 0x56, 0xe8, 0x84, 0x48, 0x94, 0x6d, 0x54, 0x43, 0x71, 0x94, 0x97, 0xbe, 0x53, 0x0e, 0x60, 0x39, 0x9b, 0x1a, 0x53, 0xde, 0x50, 0x6a, 0x9d, 0xf4, 0x56, 0x9e, 0x40, 0x87, 0x9f, 0xcb, 0x5a, 0x7f, 0x33, 0xa2, 0xa1, 0x77, 0x5f, 0x66, 0x26, 0x34, 0xa8, 0x43, 0x67, 0xe3, 0x1e, 0xd4, 0xaf, 0xb5, 0x72, 0x71, 0x1d, 0x80, 0xb7, 0xd3, 0x7c, 0x17, 0x1e, 0x1e, 0x2c, 0xfc, 0x2e, 0x73, 0xd2, 0x6a, 0x2d, 0x4a, 0x2e, 0x87, 0xd2, 0xa2, 0x7d, 0x88, 0x40, 0xea, 0xf9, 0x73, 0x92, 0x22, 0x49, 0x0a, 0xe5, 0x5b, 0x98, 0x73, 0x4c, 0x9d, 0xc8, 0xbd, 0x98, 0x2c, 0x4e, 0xea, 0xaf, 0x48, 0x99, 0xb5, 0x4f, 
0x23, 0x9c, 0xb1, 0x9e, 0x0d, 0x4c, 0xae, 0x88, 0x25, 0xa1, 0xfa, 0x49, 0x58, 0x74, 0x58, 0xa4, 0x8c, 0x47, 0x97, 0x63, 0x4f, 0xa7, 0x13, 0x46, 0xe3, 0x52, 0x84, 0xa9, 0xd7, 0x46, 0xbb, 0x41, 0x7e, 0xac, 0x1c, 0x4a, 0x59, 0x31, 0x5e, 0xae, 0x95, 0x4d, 0x00, 0x22, 0x22, 0xb3, 0xc3, 0x54, 0x4b, 0x19, 0xe0, 0xbc, 0xf1, 0x63, 0x8d, 0x17, 0xa1, 0xc3, 0x84, 0x70, 0xad, 0x1a, 0xe2, 0x2c, 0xb4, 0x2f, 0x37, 0xd0, 0xc6, 0x7e, 0x2e, 0x2f, 0xb4, 0xed, 0xd9, 0x8a, 0x9f, 0x34, 0x4f, 0xea, 0x83, 0xab, 0xb4, 0x3b, 0xa9, 0xf1, 0x72, 0xae, 0x24, 0x3e, 0xca, 0xd7, 0x1a, 0xab, 0x78, 0x42, 0xea, 0xbb, 0xf1, 0xaa, 0x80, 0x43, 0x17, 0xa6, 0x4f, 0xae, 0x1d, 0x3e, 0xfe, 0x8e, 0x90, 0xb0, 0x4b, 0x3b, 0x8b, 0x79, 0x6f, 0xb2, 0x76, 0x38, 0xe4, 0x67, 0x69, 0xb4, 0x78, 0x36, 0xd0, 0x56, 0x78, 0xb7, 0x5f, 0x33, 0xcc, 0x43, 0xe2, 0xbb, 0x18, 0x31, 0x3c, 0x2e, 0xdc, 0xbd, 0xf4, 0x33, 0xc6, 0x1c, 0xb8, 0xc5, 0xdb, 0x47, 0x12, 0x20, 0x8d, 0xc7, 0xba, 0x57, 0x9f, 0x21, 0x0e, 0xc8, 0x63, 0x66, 0x37, 0x20, 0x3a, 0x82, 0x07, 0x2b, 0x91, 0xe7, 0x88, 0x8f, 0xd9, 0x2b, 0xa0, 0xe4, 0xcd, 0xa1, 0x62, 0x2c, 0xd8, 0xe3, 0x02, 0xb2, 0x13, 0x2e, 0x04, 0xe2, 0x74, 0xc5, 0xff, 0x2f, 0xcf, 0xe6, 0xd3, 0xc4, 0x3e, 0x30, 0x3f, 0xce, 0xa5, 0xc2, 0x5f, 0x30, 0x34, 0xb4, 0xd9, 0xc2, 0x4a, 0x2e, 0x2c, 0x9a, 0x8d, 0xc2, 0x44, 0x2c, 0x29, 0x83, 0x36, 0xc2, 0x50, 0x29, 0xe5, 0x6e, 0x2e, 0xc3, 0x3a, 0x26, 0xaf, 0x5a, 0xdf, 0xc5, 0xaf, 0x20, 0xea, 0x45, 0x90, 0xc9, 0x49, 0x1b, 0x5a, 0x30, 0x5a, 0xca, 0xde, 0x2a, 0x13, 0x28, 0xe8, 0xca, 0xd1, 0x2c, 0xbe, 0x28, 0x27, 0xca, 0xaf, 0x45, 0xab, 0x24, 0xbe, 0xcb, 0x6f, 0x57, 0x4b, 0x23, 0xc4, 0x92, 0xa4, 0x29, 0xeb, 0xe1, 0x9d, 0xa2, 0x41, 0x2a, 0x9a, 0xdf, 0xc3, 0xad, 0xf7, 0x2a, 0xad, 0xde, 0xa6, 0xc0, 0x34, 0x2a, 0x26, 0xe0, 0x49, 0xcc, 0xa9, 0x26, 0xf4, 0xe1, 0xee, 0xda, 0x59, 0x1e, 0x21, 0xdf, 0x55, 0xd7, 0xf1, 0x1d, 0xf2, 0xc3, 0x7e, 0xd7, 0x2a, 0x1c, 0x9b, 0xa7, 0x91, 0xd1, 0x97, 0x1f, 0xb5, 0x89, 0xb5, 0xce, 0xf2, 0x21, 0x53, 0x74, 0xc8, 0xce, 0x23, 0x21, 0xe3, 0x61, 0x7a, 
0xce, 0x82, 0x20, 0x3a, 0x4e, 0x3b, 0xcf, 0xe9, 0x1f, 0xac, 0x32, 0xcb, 0xcc, 0xe2, 0x28, 0xd0, 0x2b, 0x43, 0xcc, 0x42, 0x2b, 0x06, 0x2a, 0x02, 0xcb, 0xe9, 0x2c, 0x40, 0x29, 0x56, 0xcb, 0xb1, 0x2d, 0x09, 0x28, 0xea, 0xa1, 0xc9, 0x29, 0xaa, 0xde, 0x8b, 0xab, 0xf5, 0x29, 0xa1, 0xdd, 0x6c, 0xb3, 0x88, 0x29, 0x42, 0xdd, 0x9a, 0xc6, 0xa8, 0x26, 0x7c, 0xdf, 0xf7, 0xd9, 0xb6, 0x23, 0x21, 0xe1, 0xd2, 0xd6, 0x89, 0x23, 0xd3, 0xd4, 0xf2, 0xd6, 0x5e, 0x23, 0x74, 0xc0, 0xd2, 0xd6, 0x4d, 0x21, 0x53, 0xa8, 0x69, 0xd3, 0xb9, 0x22, 0x24, 0x8d, 0xa1, 0xd1, 0x1a, 0x23, 0x2c, 0x79, 0x72, 0xd0, 0x1e, 0x23, 0x63, 0x66, 0x11, 0xd0, 0x2c, 0x23, 0x6b, 0x55, 0x59, 0xd1, 0x2c, 0x20, 0xa4, 0x3a, 0x7f, 0xcd, 0xb6, 0x28, 0x50, 0x2c, 0x40, 0xcd, 0x0b, 0x2a, 0x1d, 0x2b, 0x0a, 0xcc, 0x9d, 0x2b, 0x45, 0x2a, 0x4a, 0xcc, 0x53, 0x2c, 0x13, 0x29, 0xc8, 0x1e, 0x58, 0xc1, 0x4b, 0xd5, 0xe6, 0x21, 0x4f, 0xaf, 0x15, 0xb3, 0x58, 0x21, 0x84, 0xaf, 0x4c, 0xb2, 0x1d, 0x22, 0xde, 0xb3, 0x78, 0xaf, 0xc5, 0x22, 0xbc, 0xb6, 0xda, 0xae, 0x2d, 0x23, 0x05, 0xb6, 0xe9, 0xa8, 0x2e, 0x27, 0x70, 0xaa, 0xfa, 0x98, 0xb3, 0x27, 0xda, 0xaa, 0x57, 0x95, 0xb5, 0x2a, 0x54, 0xad, 0x50, 0x8d, 0x05, 0x2a, 0xd1, 0xa7, 0xdc, 0x7f, 0x38, 0x29, 0x94, 0xa7, 0x11, 0x75, 0xa8, 0x26, 0x4b, 0xa7, 0x43, 0x67, 0x02, 0x19, 0xe9, 0xa9, 0xa9, 0x47, 0x0c, 0x21, 0x9b, 0xc7, 0x81, 0x2b, 0x7e, 0x24, 0xe8, 0xc7, 0xfa, 0x29, 0x75, 0x27, 0x14, 0xc8, 0x43, 0x28, 0x3b, 0x28, 0x9c, 0xc8, 0x76, 0x27, 0x6a, 0x1e, 0x38, 0xc0, 0xb0, 0xd7, 0x22, 0x1e, 0x2c, 0xc0, 0x98, 0xd5, 0x24, 0x1f, 0xe2, 0xaf, 0x63, 0xb4, 0x96, 0x21, 0x97, 0xaf, 0x91, 0xb2, 0x33, 0x23, 0x1b, 0xb4, 0x00, 0xaf, 0x0f, 0x22, 0x46, 0xb6, 0x6b, 0xac, 0x6a, 0x24, 0x3a, 0xb5, 0x1b, 0xa3, 0x5c, 0x27, 0xa9, 0xab, 0x2b, 0x97, 0x31, 0x2a, 0x40, 0xac, 0x09, 0x8d, 0xc6, 0x2a, 0xd6, 0xa8, 0x91, 0x7f, 0xff, 0x29, 0x3a, 0xa6, 0xcf, 0x72, 0xb4, 0x25, 0xea, 0xa6, 0x9a, 0x64, 0xd3, 0x18, 0xb3, 0xa8, 0x12, 0x43, 0x1b, 0x1f, 0xc3, 0xbb, 0xcc, 0x28, 0x3f, 0x25, 0x19, 0xc2, 0x20, 0x26, 0x8b, 0x28, 0x74, 0xc6, 
0xae, 0x26, 0xa0, 0x2a, 0x08, 0xc7, 0x33, 0x25, 0xfb, 0x18, 0xff, 0xb9, 0xc9, 0xdb, 0x9c, 0x1b, 0x5a, 0xbf, 0x01, 0xd8, 0xbd, 0x1d, 0xe0, 0xbf, 0x61, 0xd3, 0xd1, 0x1f, 0xdc, 0xaf, 0xc0, 0xb6, 0xf8, 0x21, 0x73, 0xb0, 0x05, 0xb2, 0xd1, 0x23, 0x21, 0xb3, 0xe3, 0xad, 0xc6, 0x25, 0x81, 0xb0, 0x87, 0xa6, 0x44, 0x29, 0x1d, 0xab, 0xe7, 0x99, 0xb0, 0x2a, 0x90, 0xaa, 0xb3, 0x8e, 0xda, 0x2b, 0x34, 0xa7, 0xef, 0x80, 0xde, 0x28, 0xb1, 0xa4, 0xd8, 0x6f, 0xd0, 0x20, 0x95, 0xa1, 0xad, 0x57, 0x64, 0x17, 0xb8, 0xa1, 0xfa, 0x37, 0xf6, 0x1f, 0x07, 0xa8, 0x2a, 0x22, 0x56, 0x26, 0x15, 0xb1, 0xf6, 0x20, 0x67, 0x29, 0x09, 0xba, 0x0c, 0x20, 0xcf, 0x49, 0x77, 0xc4, 0x2b, 0x1c, 0xbb, 0x17, 0x5d, 0xb2, 0xec, 0xdf, 0x13, 0x18, 0x5e, 0xb8, 0x3d, 0xdb, 0xc8, 0x1a, 0x53, 0xbb, 0x89, 0xd7, 0xf7, 0x1e, 0x4b, 0xb0, 0x5e, 0xc3, 0x53, 0x1f, 0xfb, 0xb0, 0x76, 0xb9, 0xb8, 0x21, 0x7b, 0xb2, 0xaf, 0xb3, 0x80, 0x2b, 0xd5, 0xab, 0x0d, 0xa3, 0x2d, 0x31, 0xe9, 0xa4, 0x46, 0x94, 0xad, 0x34, 0x05, 0xa0, 0x75, 0x87, 0xa4, 0x34, 0xf6, 0x9d, 0x41, 0x79, 0x36, 0x34, 0x5e, 0x9a, 0x9a, 0x67, 0x9f, 0x30, 0x37, 0x98, 0xd2, 0x4f, 0xcf, 0x29, 0xf8, 0x99, 0x01, 0x32, 0x78, 0x2b, 0x39, 0x98, 0x75, 0x1a, 0x80, 0x2d, 0x58, 0xa2, 0x3a, 0x13, 0xe5, 0x45, 0xde, 0xa6, 0x82, 0x12, 0x52, 0x66, 0x0d, 0xae, 0x66, 0x13, 0x12, 0x1b, 0x54, 0xae, 0xbd, 0xde, 0xc4, 0x19, 0xfe, 0xaf, 0xbb, 0xdd, 0xc2, 0x16, 0xeb, 0xb4, 0xc8, 0xdc, 0x35, 0x18, 0x91, 0xb6, 0xd6, 0xd5, 0x8f, 0x1b, 0xa2, 0xb2, 0xb5, 0xc6, 0x99, 0x29, 0xd6, 0xa9, 0xd5, 0xb1, 0x09, 0x34, 0x03, 0xa1, 0x68, 0x9d, 0xd2, 0x38, 0x76, 0x9b, 0xcd, 0x8f, 0xa2, 0x3b, 0xa5, 0x97, 0x86, 0x81, 0xb9, 0x3d, 0xf7, 0x94, 0x7e, 0x72, 0x3f, 0x3d, 0x70, 0x91, 0xd1, 0x5e, 0xab, 0x3c, 0xd2, 0x90, 0xbd, 0x47, 0x47, 0x3f, 0xce, 0x90, 0xeb, 0x30, 0x44, 0x43, 0xd1, 0x91, 0xba, 0x1c, 0x3a, 0x4e, 0x27, 0x97, 0xa6, 0x12, 0xf9, 0x65, 0x5f, 0xa5, 0xd6, 0x13, 0xc9, 0x70, 0xaf, 0xac, 0x2a, 0x17, 0x15, 0x1c, 0x1a, 0xa4, 0x9a, 0xe0, 0x0c, 0x1d, 0x3f, 0xa7, 0xf1, 0xde, 0x85, 0x1b, 0x02, 0xac, 0xc7, 0xdd, 0xf7, 
0x14, 0xfd, 0xb0, 0xd3, 0xdb, 0x91, 0x26, 0x97, 0xa7, 0x66, 0xc4, 0x84, 0x35, 0x19, 0x9e, 0x4a, 0xad, 0xbd, 0x3c, 0xae, 0x97, 0x73, 0x9a, 0x0d, 0x43, 0x08, 0x93, 0x05, 0x8b, 0x9c, 0x49, 0x28, 0x8e, 0xb6, 0x7c, 0xaf, 0x4b, 0x28, 0x8c, 0x5f, 0x6b, 0x52, 0x4d, 0x54, 0x8a, 0x5a, 0x57, 0x51, 0x51, 0x06, 0x8a, 0x67, 0x42, 0x26, 0x56, 0x81, 0x8c, 0x1f, 0x32, 0x13, 0x5c, 0xd2, 0x8d, 0x71, 0x21, 0x01, 0x67, 0x5c, 0x92, 0x9b, 0x16, 0x90, 0x74, 0x9d, 0xa0, 0x7d, 0x19, 0xe3, 0x7f, 0xc2, 0xab, 0x56, 0x18, 0x01, 0x1e, 0x8a, 0x9a, 0xb1, 0xe1, 0xf4, 0x1f, 0x96, 0x9d, 0x28, 0xe0, 0xd1, 0x1e, 0x5e, 0xa2, 0xd9, 0xe1, 0x47, 0x22, 0x00, 0xa5, 0x05, 0xda, 0x99, 0x31, 0xc3, 0x9c, 0x1a, 0xc3, 0x7b, 0x40, 0x57, 0x93, 0x9b, 0xac, 0xab, 0x4a, 0x01, 0x8d, 0xc4, 0x99, 0x33, 0x51, 0x3e, 0x8a, 0x30, 0x87, 0xd3, 0x56, 0x97, 0x87, 0x14, 0x78, 0x4d, 0x5a, 0xfa, 0x84, 0xff, 0x67, 0x8b, 0x5e, 0xec, 0x84, 0x2a, 0x55, 0x04, 0x63, 0x46, 0x85, 0xc6, 0x43, 0x7f, 0x69, 0x44, 0x88, 0x84, 0x34, 0x51, 0x6e, 0x6e, 0x8b, 0x51, 0x26, 0x43, 0x74, 0x86, 0x90, 0x59, 0x19, 0xd0, 0x7f, 0x9f, 0x9a, 0x6d, 0x1b, 0x16, 0x92, 0x76, 0xa9, 0xe5, 0x1a, 0xf5, 0x22, 0x51, 0x8e, 0x0d, 0xe2, 0xfd, 0x23, 0x73, 0x92, 0x74, 0xe2, 0xbc, 0x26, 0x5e, 0x96, 0xda, 0xe2, 0x8a, 0x31, 0x4b, 0x97, 0xd3, 0xd9, 0xa6, 0x40, 0xc8, 0x8f, 0xc8, 0xc2, 0x54, 0x4d, 0xd8, 0x88, 0xdf, 0xac, 0x6c, 0x57, 0xd4, 0x84, 0x2c, 0x99, 0x66, 0x5f, 0x84, 0x81, 0x2a, 0x87, 0x60, 0x65, 0xdf, 0x7e, 0xf0, 0x77, 0x15, 0x6a, 0xee, 0x7e, 0x00, 0x65, 0x9b, 0x6f, 0x0c, 0x7e, 0xdd, 0x55, 0x04, 0x73, 0x01, 0x81, 0x65, 0x45, 0x4e, 0x76, 0xda, 0x84, 0xc0, 0x36, 0x2c, 0x7b, 0x77, 0x88, 0x12, 0x27, 0xc6, 0x7f, 0xd6, 0x8b, 0xeb, 0x1a, 0x83, 0x8c, 0x9c, 0x99, 0x90, 0x1c, 0xf7, 0x98, 0xe3, 0xa4, 0x63, 0x1e, 0x9e, 0x24, 0xce, 0x7d, 0x5a, 0xe6, 0x0c, 0x28, 0x00, 0x82, 0x7c, 0xe5, 0xde, 0x2e, 0xbd, 0x88, 0xac, 0xe5, 0xfb, 0x42, 0xc5, 0x8a, 0x8f, 0xd9, 0x84, 0x51, 0xd7, 0x83, 0x57, 0xc1, 0xbc, 0x5e, 0x50, 0x7d, 0x66, 0xac, 0x2b, 0x67, 0x99, 0x7a, 0xa2, 0x99, 0x3f, 0x6f, 0x97, 0x78, 
0xc5, 0x88, 0x0c, 0x76, 0x7e, 0x77, 0x7e, 0x77, 0xb4, 0x79, 0x8f, 0x77, 0x34, 0x66, 0x15, 0x7d, 0x07, 0x78, 0xef, 0x55, 0xf1, 0x80, 0x73, 0x7b, 0xe7, 0x46, 0xfd, 0x83, 0xc5, 0x7f, 0x70, 0x38, 0x08, 0x87, 0x36, 0x83, 0x6a, 0x29, 0xc5, 0x8a, 0x0e, 0x88, 0x4f, 0x1c, 0x9e, 0x98, 0x45, 0x94, 0xbc, 0x1e, 0xfe, 0xa4, 0x1c, 0x9f, 0xa4, 0x20, 0xdd, 0x29, 0xb2, 0x70, 0x2d, 0xe8, 0x78, 0x2e, 0xcc, 0x73, 0x70, 0xe8, 0x5a, 0x3c, 0xc1, 0x7a, 0x40, 0xe8, 0x39, 0x56, 0x80, 0x7b, 0xe4, 0xd9, 0xda, 0x63, 0xd9, 0x77, 0x34, 0xc1, 0xdc, 0x70, 0x53, 0x73, 0x98, 0xab, 0x7b, 0x78, 0xcf, 0x71, 0xb9, 0x99, 0xcf, 0x7f, 0x1b, 0x70, 0xc5, 0x88, 0xf2, 0x84, 0x0a, 0x70, 0x11, 0x77, 0xbb, 0x87, 0xcc, 0x6f, 0x80, 0x66, 0xab, 0x8b, 0x60, 0x70, 0x90, 0x56, 0x4c, 0x8e, 0x4c, 0x73, 0xac, 0x47, 0x66, 0x91, 0x26, 0x77, 0x56, 0x38, 0xa3, 0x93, 0x94, 0x7b, 0xae, 0x2b, 0x3e, 0x96, 0xa1, 0x81, 0x15, 0x1e, 0xba, 0xa3, 0x70, 0x8c, 0x61, 0x20, 0x02, 0xae, 0xa7, 0x97, 0xbd, 0x21, 0x8a, 0x2b, 0xc6, 0x30, 0x4a, 0xd1, 0x86, 0x3a, 0x20, 0x60, 0xcb, 0xec, 0xae, 0x53, 0x24, 0x6b, 0x06, 0xec, 0x32, 0x6c, 0x04, 0x6d, 0xfe, 0xdc, 0x2b, 0x77, 0xdf, 0x6b, 0xde, 0xc3, 0xf0, 0x81, 0x74, 0x6b, 0x08, 0xad, 0xc5, 0x89, 0x0f, 0x6a, 0x1a, 0x9c, 0x70, 0x8c, 0xfd, 0x68, 0xfd, 0x8b, 0xe6, 0x91, 0x98, 0x67, 0x47, 0x79, 0x06, 0x95, 0xa1, 0x66, 0xc4, 0x67, 0xac, 0x99, 0x01, 0x67, 0xde, 0x57, 0x08, 0x9b, 0x57, 0x6a, 0x77, 0x47, 0xd1, 0x9d, 0x93, 0x6d, 0xac, 0x38, 0xd9, 0x9f, 0xbe, 0x72, 0x95, 0x2c, 0x91, 0xa2, 0x22, 0x79, 0x3f, 0x20, 0xf8, 0xad, 0xc6, 0x81, 0xaf, 0x1f, 0xe8, 0xb7, 0x9a, 0x8d, 0x14, 0x20, 0x17, 0x2b, 0xdb, 0x30, 0x39, 0xd1, 0x8c, 0x2d, 0x31, 0x2e, 0xbd, 0xd2, 0xa9, 0x6e, 0xfb, 0x5a, 0x90, 0xf1, 0x12, 0x81, 0x1e, 0x62, 0xb7, 0xdf, 0xe4, 0x8b, 0xc7, 0x61, 0xf6, 0xca, 0x94, 0x92, 0x47, 0x62, 0x6f, 0xb4, 0x09, 0x96, 0xda, 0x62, 0x34, 0xa1, 0x7b, 0x9b, 0x1e, 0x60, 0x9a, 0x8e, 0xc7, 0x9f, 0x37, 0x5d, 0xd3, 0x7b, 0x01, 0xa2, 0x72, 0x5c, 0xa2, 0x68, 0xfa, 0xa5, 0x8d, 0x5d, 0x4a, 0x58, 0xfb, 0xa7, 0xea, 0x5f, 0x8e, 0x48, 0xaf, 
0xaa, 0x04, 0x62, 0x38, 0x38, 0xa9, 0xab, 0xa4, 0x65, 0xcf, 0x2b, 0xe5, 0xac, 0xe0, 0x6b, 0xdc, 0x1f, 0xa3, 0xb6, 0x47, 0x75, 0x97, 0x1d, 0x3f, 0xbf, 0x0b, 0x81, 0xc0, 0x1b, 0xf4, 0x2d, 0x0a, 0x2e, 0x7c, 0xd2, 0x77, 0x2d, 0x63, 0x2e, 0x9a, 0xd2, 0xb8, 0x86, 0x0d, 0x4a, 0x77, 0xf5, 0x27, 0x9e, 0x31, 0x52, 0x09, 0xee, 0x75, 0xa1, 0xb3, 0x57, 0x08, 0xd3, 0xea, 0xa4, 0x17, 0x59, 0x45, 0xbc, 0x2c, 0xa5, 0x38, 0x59, 0x73, 0xa8, 0x38, 0xa9, 0x53, 0x56, 0x54, 0x92, 0x99, 0xac, 0xef, 0x53, 0x13, 0x7d, 0x77, 0xaf, 0x72, 0x51, 0x5c, 0x6b, 0xcb, 0xb1, 0xeb, 0x50, 0xb5, 0x5a, 0xc3, 0xb4, 0xcb, 0x50, 0xab, 0x49, 0x78, 0xb7, 0x2c, 0x53, 0x37, 0x37, 0xcb, 0xb8, 0xe3, 0x55, 0x5f, 0x28, 0xc4, 0xb9, 0x7f, 0x5d, 0x2d, 0x1c, 0xe8, 0xc3, 0x58, 0x6a, 0x60, 0x1b, 0xd2, 0xc6, 0x20, 0x76, 0x30, 0x1b, 0x8c, 0x2c, 0xc1, 0x2f, 0x40, 0xd0, 0xd2, 0x81, 0x76, 0x35, 0xf2, 0xed, 0x9c, 0x99, 0x20, 0x36, 0x2e, 0xf1, 0x03, 0xb2, 0x70, 0x42, 0xb1, 0xec, 0x75, 0xba, 0x7c, 0x4a, 0x10, 0xe1, 0xae, 0xb8, 0x89, 0x4c, 0x6d, 0xc8, 0xa7, 0xb6, 0x70, 0x4d, 0x1c, 0xb1, 0x60, 0xb9, 0x59, 0x48, 0xf7, 0x98, 0x69, 0xbb, 0x97, 0x45, 0x14, 0x81, 0xc9, 0xbd, 0xd3, 0x42, 0x35, 0x6f, 0xbf, 0xc0, 0x56, 0x3f, 0x8e, 0x5e, 0x24, 0xc2, 0x8b, 0x3c, 0x41, 0x4a, 0xc5, 0xc6, 0x1d, 0x3a, 0x42, 0x35, 0x57, 0xc8, 0x3a, 0x3d, 0x16, 0x23, 0x3d, 0xc9, 0x5b, 0x4f, 0x48, 0x22, 0xe0, 0xca, 0x26, 0x5d, 0x20, 0x22, 0x61, 0xca, 0x5d, 0x6a, 0x14, 0x20, 0x29, 0x87, 0x72, 0x2d, 0x66, 0xe9, 0x50, 0x95, 0x28, 0x2e, 0x0e, 0xe6, 0xf9, 0xa7, 0x2a, 0x2f, 0xcf, 0xe6, 0x05, 0xb2, 0x7f, 0x34, 0x85, 0xe3, 0xdd, 0xd1, 0x25, 0x3a, 0xed, 0xf1, 0x25, 0xcf, 0x13, 0x3b, 0x24, 0xd9, 0x0e, 0xcc, 0x39, 0x3a, 0xfc, 0xbe, 0x49, 0xcc, 0x68, 0x38, 0x5d, 0xa3, 0x55, 0xcc, 0xcc, 0x35, 0x53, 0x8b, 0x7f, 0xcd, 0x3b, 0x31, 0xd8, 0x75, 0xfa, 0xce, 0xb7, 0x2e, 0x01, 0x62, 0x5c, 0xd1, 0x69, 0x28, 0x28, 0x4d, 0x2b, 0xd5, 0x00, 0x23, 0xea, 0x36, 0xcc, 0xcd, 0xb4, 0x2a, 0xf0, 0x2a, 0xf0, 0xcc, 0x8c, 0x2f, 0x2f, 0x29, 0x1b, 0xcc, 0x99, 0x4b, 0x9e, 0x25, 0xaf, 0xcc, 0x5f, 0x5a, 
0x73, 0x24, 0x8e, 0x97, 0xa2, 0x2b, 0x86, 0xe2, 0xb8, 0xa4, 0x7f, 0x2c, 0x55, 0xe1, 0xc4, 0xb1, 0xd8, 0x2c, 0xe6, 0xe1, 0x40, 0xc5, 0xd9, 0x2d, 0x8d, 0xe4, 0x27, 0xd4, 0x33, 0x2c, 0x55, 0xe8, 0x08, 0xd7, 0xbb, 0x2e, 0x5a, 0xda, 0x3c, 0xd8, 0x8b, 0x2c, 0xce, 0xc4, 0xe4, 0xd8, 0xa5, 0x2a, 0x26, 0xa9, 0xac, 0xd7, 0x6f, 0x28, 0x73, 0x8f, 0x9b, 0xd4, 0x2d, 0x27, 0xba, 0x79, 0x4a, 0xd2, 0xb4, 0x26, 0xf0, 0x65, 0x21, 0xd2, 0x98, 0x25, 0x54, 0x50, 0x1b, 0xd3, 0xd9, 0x23, 0x52, 0x35, 0xee, 0xce, 0x8b, 0x2a, 0x66, 0x2c, 0x75, 0xcd, 0x6d, 0x2c, 0x25, 0x2a, 0xd8, 0xcc, 0xd0, 0x2d, 0x1f, 0x29, 0xfa, 0xcd, 0x48, 0x48, 0x67, 0x26, 0xae, 0xa3, 0x60, 0x2a, 0xe4, 0xdf, 0xf8, 0xaf, 0xe2, 0x2b, 0x0c, 0xdf, 0x24, 0xb8, 0x6b, 0x2b, 0x43, 0xe0, 0x0f, 0xca, 0xf1, 0x29, 0x43, 0xe3, 0x6d, 0xd5, 0xde, 0x2b, 0x35, 0xdc, 0x66, 0xd7, 0xea, 0x29, 0xd2, 0xd6, 0x93, 0xd7, 0xb2, 0x29, 0x18, 0xc2, 0x92, 0xd7, 0xd2, 0x27, 0x23, 0xaa, 0xd1, 0xd6, 0x68, 0x26, 0x7b, 0x90, 0x34, 0xd3, 0x95, 0x26, 0xa8, 0x7b, 0x8c, 0xd2, 0x57, 0x26, 0x80, 0x67, 0xf4, 0xd2, 0x7f, 0x26, 0x0e, 0x57, 0x03, 0xd3, 0xae, 0x22, 0xf0, 0x3c, 0x8a, 0xce, 0xe3, 0x29, 0x6f, 0x2d, 0x19, 0xcd, 0xf2, 0x2a, 0xfb, 0x2b, 0xb0, 0xcd, 0x5a, 0x2b, 0xfa, 0x2a, 0xd1, 0xcc, 0xf1, 0x2c, 0xac, 0x2a, 0x3a, 0x1c, 0x9f, 0xc4, 0x6f, 0xd9, 0x49, 0x21, 0xac, 0xb1, 0x00, 0xb5, 0x4f, 0x21, 0xe9, 0xb1, 0x7b, 0xb4, 0x59, 0x23, 0x62, 0xb7, 0x1e, 0xb2, 0x52, 0x23, 0x18, 0xba, 0x6d, 0xb1, 0x75, 0x23, 0x9d, 0xba, 0xcb, 0xab, 0x7e, 0x27, 0x2d, 0xb5, 0x06, 0xa1, 0xc4, 0x28, 0x2e, 0xad, 0x32, 0x97, 0x43, 0x2a, 0x52, 0xaf, 0xbc, 0x8d, 0xf1, 0x2b, 0x1d, 0xb1, 0x55, 0x88, 0x14, 0x29, 0xa3, 0xa8, 0xf0, 0x76, 0x30, 0x26, 0xaf, 0xa9, 0x59, 0x68, 0x33, 0x26, 0x06, 0xcb, 0x0c, 0x5b, 0x53, 0x22, 0x67, 0xc9, 0xe7, 0x2c, 0x29, 0x25, 0x8e, 0xc9, 0xd4, 0x29, 0xfc, 0x27, 0x9f, 0xc9, 0xc7, 0x28, 0xaa, 0x29, 0x12, 0xc9, 0xbd, 0x27, 0xc7, 0x1e, 0xc7, 0xc2, 0xc2, 0xd9, 0x32, 0x1c, 0x8c, 0xc4, 0x50, 0xd9, 0x08, 0x20, 0xac, 0xb1, 0xb0, 0xb7, 0x61, 0x21, 0xdd, 0xb2, 0xa9, 0xb5, 0xb1, 
0x1f, 0x3b, 0xbc, 0x13, 0xb5, 0xd6, 0x22, 0xdc, 0xbc, 0x35, 0xb1, 0x9a, 0x25, 0x19, 0xba, 0x59, 0xa7, 0xcd, 0x27, 0xa1, 0xb2, 0x11, 0x9c, 0x91, 0x2a, 0x4c, 0xb0, 0x54, 0x90, 0x44, 0x2b, 0x24, 0xb1, 0xcf, 0x88, 0x4f, 0x29, 0x5b, 0xaa, 0x5b, 0x74, 0x41, 0x26, 0x77, 0xab, 0x62, 0x67, 0xbd, 0x1d, 0x8d, 0xc7, 0x09, 0x50, 0x65, 0x22, 0xe8, 0xc7, 0xb3, 0x2a, 0xa9, 0x26, 0xdd, 0xc8, 0x3d, 0x28, 0x5b, 0x29, 0x21, 0xc8, 0x87, 0x27, 0x27, 0x2a, 0x96, 0xc8, 0xb5, 0x26, 0x6a, 0x1b, 0x2b, 0xbc, 0x9e, 0xde, 0x11, 0x1c, 0xa5, 0xc2, 0xed, 0xdb, 0x4c, 0x1c, 0x68, 0xc4, 0x09, 0xd8, 0x98, 0x1b, 0x5e, 0xca, 0xb7, 0xd6, 0x54, 0x22, 0x90, 0xb5, 0xab, 0xb7, 0x9d, 0x1f, 0x63, 0xbf, 0x8f, 0xb8, 0x56, 0x22, 0x48, 0xc1, 0x33, 0xb5, 0x6d, 0x26, 0x1a, 0xbc, 0x14, 0xa7, 0x2a, 0x29, 0x0f, 0xb9, 0xbe, 0x9a, 0x3c, 0x2a, 0x1c, 0xb6, 0xa5, 0x8a, 0xd6, 0x27, 0xf5, 0xb3, 0xa1, 0x78, 0x6f, 0x20, 0xee, 0xb1, 0x7b, 0x5d, 0x3b, 0x18, 0xa0, 0xb1, 0xcb, 0x3c, 0xc5, 0x22, 0x7c, 0xba, 0xab, 0x25, 0x7f, 0x28, 0x69, 0xc0, 0xe5, 0x23, 0xf0, 0x2b, 0x88, 0xc6, 0x90, 0x24, 0xda, 0x4b, 0xcd, 0xc6, 0xe3, 0x1d, 0x89, 0x18, 0xe1, 0xb6, 0xdb, 0xe1, 0xf8, 0x1a, 0x5b, 0xbb, 0xce, 0xde, 0xbd, 0x1c, 0x15, 0xc0, 0x74, 0xdb, 0xd3, 0x1c, 0x15, 0xc3, 0x56, 0xd7, 0xab, 0x1a, 0x11, 0xc7, 0xf9, 0xd2, 0xf1, 0x1c, 0x6e, 0xc3, 0xb7, 0xc4, 0xbc, 0x28, 0xce, 0xba, 0xfb, 0xb1, 0xda, 0x31, 0x92, 0xb2, 0xad, 0xa0, 0xa6, 0x34, 0xe1, 0xae, 0x88, 0x92, 0x79, 0x37, 0x9d, 0xab, 0x79, 0x83, 0xed, 0x37, 0x02, 0xa8, 0x86, 0x70, 0x57, 0x33, 0x91, 0xa5, 0xea, 0x56, 0xe2, 0x2d, 0x0a, 0xa7, 0x98, 0x36, 0x0a, 0x2d, 0xda, 0xa7, 0xa5, 0x1a, 0x9a, 0x42, 0x3d, 0xbb, 0xd5, 0x1a, 0xa4, 0x51, 0x50, 0xc0, 0xb3, 0x19, 0xb9, 0x72, 0xfa, 0xc5, 0x1d, 0x18, 0x43, 0x1d, 0x1e, 0xb1, 0x3c, 0xe1, 0x9f, 0x1c, 0x40, 0xb2, 0xf8, 0xe1, 0x75, 0x1a, 0x17, 0xba, 0x2a, 0xe0, 0xfd, 0x1b, 0x97, 0xbe, 0x42, 0xdc, 0xb6, 0x1a, 0x1c, 0xc1, 0x1f, 0xd5, 0xe1, 0x29, 0x90, 0xb7, 0xd9, 0xbf, 0x43, 0x35, 0xc8, 0xae, 0x0b, 0xaa, 0x94, 0x3c, 0x9f, 0xa7, 0xad, 0x9a, 0x1f, 0x41, 0xaf, 0xa2, 
0xa8, 0x8a, 0xe5, 0x44, 0xf5, 0xa0, 0x40, 0x7b, 0xcb, 0x45, 0x2e, 0x9d, 0x9c, 0x67, 0x32, 0x44, 0x34, 0x9c, 0x1c, 0x4e, 0x96, 0x46, 0xd8, 0x9c, 0x8e, 0x35, 0x39, 0x4b, 0x4c, 0x9e, 0x31, 0x20, 0xff, 0x52, 0x62, 0xa1, 0x5f, 0x0e, 0xa2, 0x68, 0x09, 0xab, 0x6f, 0x13, 0x1c, 0x74, 0xac, 0xb0, 0x6f, 0x16, 0xd9, 0x1f, 0x03, 0xa7, 0x1e, 0xe1, 0xf6, 0x20, 0x24, 0xac, 0x22, 0xe2, 0x06, 0x1f, 0x12, 0xb2, 0x6a, 0xe3, 0x9c, 0x1b, 0x2b, 0xba, 0xf1, 0xe6, 0xfe, 0x29, 0x9b, 0xb3, 0x31, 0xd1, 0x62, 0x37, 0xe7, 0xaa, 0x57, 0xba, 0xad, 0x43, 0xa6, 0xa1, 0x7b, 0xa3, 0x49, 0x4a, 0xa3, 0x9c, 0xf7, 0x94, 0xf2, 0x51, 0x8d, 0x99, 0x05, 0x85, 0xac, 0x54, 0x08, 0x96, 0xfc, 0x74, 0xbc, 0x56, 0xde, 0x95, 0x45, 0x60, 0xba, 0x5a, 0xbc, 0x95, 0x3c, 0x4b, 0x43, 0x5f, 0xd9, 0x96, 0x9a, 0x38, 0x23, 0x64, 0x91, 0x98, 0xb2, 0x28, 0x3d, 0x6a, 0xd8, 0x9a, 0x26, 0x16, 0x93, 0x7a, 0x7e, 0xa7, 0x33, 0x1a, 0x00, 0x8d, 0x19, 0xb8, 0xe1, 0x16, 0x43, 0x23, 0xd0, 0x9b, 0xbb, 0xe1, 0x38, 0x24, 0x53, 0x9f, 0xcf, 0xe1, 0xca, 0x25, 0xab, 0xa5, 0xf4, 0xe2, 0xc5, 0x29, 0xa6, 0xae, 0x88, 0xe5, 0x96, 0x39, 0x7b, 0xa5, 0xc8, 0xce, 0x5e, 0x47, 0x6a, 0x9d, 0x71, 0xb8, 0x1d, 0x51, 0xd1, 0x97, 0x39, 0xa2, 0xdb, 0x59, 0xc6, 0x93, 0xda, 0x91, 0x5d, 0x5f, 0x56, 0x90, 0xd8, 0x81, 0x9c, 0x64, 0x02, 0x8e, 0xda, 0x70, 0xd0, 0x68, 0x31, 0x8e, 0x74, 0x5d, 0xc9, 0x6c, 0x35, 0x8f, 0x87, 0x4c, 0x36, 0x71, 0xce, 0x91, 0xc8, 0x3a, 0xdc, 0x76, 0x90, 0x94, 0x85, 0x2c, 0xb7, 0x79, 0x87, 0x97, 0x91, 0x1c, 0x2d, 0x89, 0x92, 0xa1, 0xa0, 0x1d, 0x5f, 0x96, 0x0d, 0xaf, 0x5a, 0x19, 0xfc, 0x25, 0xe6, 0x90, 0xe1, 0xe3, 0x22, 0x28, 0x86, 0x94, 0x63, 0xe2, 0xa5, 0x2c, 0xb0, 0x99, 0xea, 0xe3, 0x5f, 0x3a, 0xac, 0xa0, 0x6f, 0xe3, 0x91, 0x4a, 0x05, 0x98, 0x95, 0xcc, 0x3a, 0x56, 0xab, 0x91, 0x11, 0xb6, 0xc1, 0x60, 0x78, 0x8d, 0x64, 0xa3, 0x33, 0x68, 0x9b, 0x8a, 0xa0, 0x91, 0x20, 0x6f, 0x13, 0x88, 0x7e, 0x80, 0x91, 0x74, 0x4e, 0x87, 0x82, 0x6e, 0xe2, 0x78, 0x3c, 0x88, 0x46, 0x5d, 0xc7, 0x7b, 0xcf, 0x8a, 0x61, 0x4d, 0xb1, 0x7f, 0x47, 0x8d, 0x41, 0x3e, 0x10, 
0x83, 0x2d, 0x90, 0x43, 0x2e, 0xd2, 0x86, 0x7f, 0x94, 0x50, 0x1f, 0xfc, 0x91, 0xc2, 0x9f, 0x5f, 0x1d, 0x87, 0x9f, 0xe2, 0xab, 0xfa, 0x1d, 0xd9, 0x28, 0x9b, 0x7f, 0xdf, 0xe6, 0x2c, 0x2c, 0x94, 0x85, 0xb3, 0xe6, 0x04, 0x35, 0x66, 0x8c, 0xad, 0xe4, 0xfc, 0x4d, 0x9a, 0x92, 0xbb, 0xe2, 0x8d, 0x5b, 0x78, 0x8c, 0x7d, 0xcc, 0x0b, 0x67, 0x34, 0x86, 0x70, 0xb6, 0x97, 0x71, 0x11, 0x83, 0xad, 0xa3, 0xb4, 0x79, 0x11, 0x82, 0x49, 0x91, 0xf9, 0x80, 0x14, 0x81, 0x1a, 0x81, 0x50, 0x83, 0x2f, 0x80, 0xad, 0x6f, 0x38, 0x86, 0x75, 0x81, 0xc0, 0x5e, 0x87, 0x89, 0xa2, 0x84, 0x0c, 0x4f, 0x06, 0x8c, 0xa3, 0x87, 0x88, 0x40, 0x43, 0x8f, 0xcd, 0x8b, 0x8e, 0x31, 0x4d, 0x92, 0x84, 0x8f, 0xbd, 0x23, 0x47, 0x9c, 0x51, 0x98, 0xc5, 0x1f, 0x6c, 0xa9, 0xbc, 0xa6, 0x23, 0x21, 0x0c, 0x2d, 0x44, 0x72, 0xdd, 0xe8, 0x4e, 0x32, 0xec, 0x74, 0xad, 0xe6, 0xd2, 0x49, 0xc8, 0x7d, 0xf1, 0xe7, 0x01, 0x61, 0x49, 0x84, 0x36, 0xe3, 0x46, 0x6e, 0x46, 0x80, 0x8a, 0xcc, 0xf1, 0x7a, 0x4f, 0x7c, 0x1f, 0xb5, 0xbf, 0x83, 0x22, 0x7b, 0x2c, 0xa4, 0x6c, 0x89, 0x33, 0x7a, 0x80, 0x93, 0x40, 0x8e, 0x0d, 0x79, 0x97, 0x81, 0x83, 0x91, 0xdd, 0x78, 0xe6, 0x6f, 0xeb, 0x95, 0x1f, 0x79, 0x9c, 0x5e, 0xe8, 0x98, 0x38, 0x7b, 0xe2, 0x4f, 0x5a, 0x9a, 0x99, 0x7f, 0x3e, 0x40, 0xb9, 0x9c, 0xd1, 0x83, 0xc7, 0x32, 0xde, 0x9f, 0x3d, 0x88, 0x1b, 0x24, 0xfe, 0xa8, 0x23, 0x90, 0xa8, 0x20, 0x2a, 0xb0, 0x8a, 0x9a, 0xf0, 0x22, 0x59, 0x33, 0x20, 0x5f, 0xe7, 0xe9, 0x43, 0x46, 0xc8, 0x67, 0xbf, 0xec, 0xac, 0x5c, 0xf6, 0x6f, 0xc3, 0xea, 0x9c, 0x77, 0x82, 0x76, 0xe2, 0xe6, 0x58, 0x82, 0x0c, 0x75, 0xe4, 0xcf, 0xde, 0x8b, 0xe3, 0x74, 0x9d, 0xba, 0x3b, 0x93, 0x82, 0x73, 0xd7, 0xa7, 0x92, 0x97, 0x7e, 0x73, 0x3e, 0x96, 0x57, 0x9c, 0x00, 0x71, 0x52, 0x82, 0xd2, 0x9f, 0xc8, 0x70, 0x4c, 0x70, 0xe2, 0xa2, 0xad, 0x70, 0x90, 0x5f, 0xc1, 0xa4, 0xfe, 0x72, 0xa7, 0x4f, 0xf7, 0xa7, 0x13, 0x75, 0x94, 0x40, 0xdd, 0xa9, 0x49, 0x7a, 0x50, 0x33, 0x28, 0xaa, 0xf4, 0x7f, 0x6b, 0x26, 0x10, 0xb1, 0x57, 0x85, 0x25, 0x1f, 0x9d, 0xb9, 0xf8, 0x90, 0x7b, 0x20, 0x6d, 0x2c, 0xfe, 0x2e, 
0x97, 0xd2, 0x7c, 0x60, 0x2d, 0x57, 0x80, 0xf0, 0xd1, 0x76, 0xc4, 0x60, 0xe0, 0xef, 0xa7, 0x8c, 0x71, 0x6b, 0xdd, 0xea, 0x7d, 0x96, 0x9f, 0x6c, 0x15, 0xd5, 0x20, 0x9d, 0xdd, 0x6c, 0x9f, 0xc0, 0x47, 0xa1, 0xf7, 0x6c, 0x8f, 0xac, 0xfc, 0xa5, 0xdf, 0x6a, 0xf1, 0x99, 0x53, 0xa9, 0xfb, 0x67, 0xfe, 0x84, 0xaa, 0xac, 0xec, 0x66, 0x5d, 0x72, 0x5e, 0xaf, 0x58, 0x66, 0x67, 0x61, 0x94, 0xb1, 0x77, 0x68, 0x05, 0x51, 0x01, 0xb3, 0xa3, 0x6a, 0x67, 0x40, 0x4c, 0xb4, 0xdb, 0x6e, 0x98, 0x32, 0x03, 0xb5, 0xd6, 0x73, 0x96, 0x24, 0x6f, 0xbb, 0x97, 0x7a, 0x0a, 0x1c, 0x4d, 0xc3, 0x8f, 0x85, 0x6d, 0x1b, 0xca, 0x2d, 0x17, 0x2e, 0x86, 0xd2, 0x83, 0x78, 0x96, 0x4a, 0x1e, 0xf6, 0xff, 0x8b, 0x3c, 0x51, 0xaa, 0xf3, 0x23, 0xa1, 0x5f, 0x5b, 0xfb, 0xec, 0x12, 0xab, 0x3a, 0x63, 0x33, 0xdb, 0xfd, 0xaf, 0xcd, 0x63, 0x9e, 0xc8, 0x04, 0xb1, 0x0d, 0x63, 0xb8, 0xb3, 0xbf, 0xb4, 0x7a, 0x60, 0xa1, 0x9d, 0x2d, 0xb7, 0xfd, 0x5c, 0xf2, 0x87, 0x17, 0xb9, 0xf7, 0x5b, 0x63, 0x74, 0x89, 0xbb, 0xe7, 0x5a, 0x79, 0x63, 0x08, 0xbe, 0x71, 0x59, 0xe8, 0x51, 0x1d, 0xc0, 0x8f, 0x5b, 0x22, 0x3e, 0xa4, 0xc1, 0xe7, 0x5d, 0x4e, 0x2d, 0x90, 0xc2, 0x8d, 0x64, 0x5a, 0x20, 0xc3, 0xc6, 0xe3, 0x6f, 0x52, 0x1e, 0x2f, 0xc8, 0x38, 0x7a, 0x69, 0x1d, 0x35, 0x2d, 0x30, 0x2e, 0x74, 0xd2, 0x8b, 0x8d, 0x54, 0x36, 0x17, 0xf3, 0x7d, 0xa0, 0xf4, 0x3f, 0x2d, 0xf0, 0xf2, 0xb0, 0x5a, 0x4b, 0xc2, 0xec, 0xe9, 0xc6, 0x7e, 0x54, 0xa2, 0xeb, 0xf0, 0xc1, 0xd1, 0x57, 0x1a, 0xd3, 0x5a, 0xc1, 0xdc, 0x57, 0x79, 0xbc, 0x90, 0xc3, 0xfe, 0x53, 0x69, 0xa3, 0x5d, 0xc5, 0xc2, 0x4f, 0x84, 0x8b, 0xc7, 0xc7, 0x68, 0x4c, 0x80, 0x78, 0x7f, 0xc9, 0x34, 0x49, 0xa8, 0x66, 0x4c, 0xcb, 0xbe, 0x46, 0xa6, 0x52, 0x3e, 0xce, 0x8c, 0x44, 0xb5, 0x3c, 0xc3, 0xd0, 0xca, 0x47, 0x14, 0x29, 0x5b, 0xcd, 0x39, 0x56, 0x0d, 0x25, 0x65, 0xcb, 0xff, 0x63, 0xb5, 0x22, 0xbc, 0xcb, 0x7e, 0x6e, 0x0c, 0x21, 0x86, 0x8d, 0x2a, 0x2f, 0x44, 0xea, 0x72, 0x9a, 0x72, 0x31, 0x1d, 0xe8, 0xa7, 0xa9, 0xae, 0x32, 0xde, 0xe7, 0x3e, 0xc1, 0x58, 0x3a, 0x2f, 0xee, 0x0d, 0xd3, 0x34, 0x42, 0x1c, 0xef, 0x37, 
0xd6, 0x66, 0x47, 0xbe, 0xe1, 0xbd, 0xd3, 0x88, 0x47, 0xa1, 0xc6, 0xb3, 0xd5, 0x39, 0x43, 0xe0, 0xac, 0x90, 0xd5, 0xb4, 0x40, 0x0b, 0x93, 0x8e, 0xd5, 0xd5, 0x3c, 0x44, 0x7d, 0xed, 0xd7, 0x42, 0x38, 0xe1, 0x6a, 0xba, 0xda, 0xcf, 0x33, 0x13, 0x53, 0xed, 0xd9, 0x08, 0x2e, 0xda, 0x3a, 0xa7, 0xd0, 0x7c, 0x2f, 0x8e, 0x2c, 0xfa, 0xcf, 0x60, 0x41, 0xd6, 0x28, 0xc2, 0xce, 0x73, 0x54, 0xb2, 0x26, 0x99, 0xcd, 0xea, 0x5f, 0xa5, 0x25, 0x43, 0x9b, 0x37, 0x2d, 0x9f, 0xe4, 0xfe, 0xa6, 0xbc, 0x2e, 0x14, 0xe3, 0xc2, 0xb4, 0x9a, 0x2f, 0x2f, 0xe3, 0xcc, 0xc7, 0x51, 0x32, 0xa1, 0xe5, 0x59, 0xd4, 0x23, 0x33, 0x17, 0xe2, 0x1c, 0xd8, 0x83, 0x34, 0x43, 0xda, 0x55, 0xd9, 0xf3, 0x32, 0xfe, 0xc6, 0xd8, 0xda, 0x48, 0x30, 0xc2, 0xac, 0x4a, 0xd9, 0xf1, 0x2f, 0x04, 0x92, 0xc6, 0xd7, 0xc0, 0x2d, 0x05, 0x7c, 0x8f, 0xd5, 0xe4, 0x2b, 0x80, 0x67, 0xf8, 0xd5, 0x67, 0x2a, 0x01, 0x53, 0x0c, 0xd6, 0xae, 0x27, 0xcf, 0x3b, 0xaa, 0xd0, 0x31, 0x2b, 0xfe, 0x2d, 0xa6, 0xce, 0x95, 0x2d, 0x45, 0x2b, 0xad, 0xcd, 0xb4, 0x2d, 0xff, 0x2a, 0xa1, 0xce, 0xaa, 0x52, 0xd6, 0x27, 0x00, 0xa4, 0xf7, 0x2c, 0x1e, 0xe1, 0x64, 0xb1, 0xb2, 0x2c, 0x8a, 0xe0, 0xd9, 0xc7, 0x1b, 0x2d, 0x04, 0xe3, 0xcf, 0xd0, 0x7a, 0x2b, 0xfc, 0xe6, 0xe6, 0xd6, 0x95, 0x2e, 0xfb, 0xdc, 0x27, 0xd8, 0xe5, 0x2e, 0x30, 0xd7, 0xbb, 0xd8, 0xa2, 0x2d, 0x21, 0xc3, 0xac, 0xd8, 0xe3, 0x2b, 0x58, 0xab, 0x92, 0xd8, 0x63, 0x2a, 0x40, 0x92, 0x2b, 0xd5, 0xa7, 0x29, 0xae, 0x7d, 0x46, 0xd4, 0x3c, 0x29, 0x35, 0x69, 0x8b, 0xd4, 0x5e, 0x28, 0xac, 0x58, 0x7b, 0xd5, 0x89, 0x25, 0xc9, 0x3e, 0x9f, 0xd0, 0x0e, 0x2a, 0x8f, 0x2d, 0xf1, 0xce, 0xd9, 0x2b, 0xd9, 0x2c, 0x55, 0xce, 0x15, 0x2c, 0xae, 0x2b, 0x58, 0xcd, 0x8f, 0x2d, 0x44, 0x2a, 0xab, 0x1c, 0x4f, 0xc7, 0xf3, 0xdb, 0xd1, 0x1c, 0xef, 0xce, 0x5e, 0xda, 0x46, 0x22, 0x0d, 0xb3, 0xd9, 0xb6, 0xfe, 0x1f, 0x68, 0xbe, 0x05, 0xb7, 0xd8, 0x23, 0x73, 0xbe, 0x06, 0xb4, 0xba, 0x24, 0x35, 0xbe, 0xa9, 0xae, 0xc7, 0x27, 0x3f, 0xb9, 0x64, 0xa5, 0xa4, 0x28, 0x52, 0xb6, 0x8c, 0x9e, 0xfe, 0x2a, 0x6a, 0xb1, 0xf9, 0x8e, 0xb6, 0x2b, 0x2c, 0xb2, 
0xcf, 0x89, 0x1d, 0x2e, 0x84, 0xcb, 0x34, 0x85, 0x9f, 0x2d, 0xbc, 0xcb, 0x69, 0x76, 0xc0, 0x28, 0x8b, 0xcc, 0x55, 0x5c, 0xaa, 0x23, 0x36, 0xcc, 0x4e, 0x2c, 0xd4, 0x26, 0x34, 0xcb, 0xb1, 0x2a, 0x81, 0x28, 0x2a, 0xcb, 0x4b, 0x29, 0x18, 0x29, 0x8a, 0xcb, 0x04, 0x28, 0x25, 0x1f, 0x57, 0xc4, 0xd8, 0xdb, 0x43, 0x1c, 0x77, 0xc8, 0x41, 0xdc, 0x0c, 0x1c, 0xef, 0xce, 0x82, 0xda, 0x44, 0x1a, 0x5b, 0xd7, 0xd7, 0xd9, 0xfe, 0x1f, 0x90, 0xc1, 0x62, 0xba, 0x35, 0x23, 0x73, 0xc2, 0x0f, 0xb6, 0xcc, 0x25, 0xf9, 0xbf, 0x92, 0xac, 0x35, 0x27, 0xf1, 0xbc, 0x52, 0xa5, 0xb2, 0x2a, 0x6e, 0xb7, 0x54, 0x95, 0x0a, 0x2b, 0x3f, 0xb4, 0x7d, 0x8a, 0x2e, 0x2e, 0x75, 0xcb, 0x25, 0x84, 0xe2, 0x2b, 0xa2, 0xcb, 0xb9, 0x6d, 0x09, 0x26, 0x49, 0xcb, 0x77, 0x55, 0xaa, 0x24, 0x0e, 0xcb, 0x0e, 0x2b, 0x9a, 0x27, 0xb6, 0xca, 0x9f, 0x29, 0x08, 0x29, 0xce, 0xca, 0x5f, 0x27, 0xad, 0x2b, 0x27, 0xca, 0x37, 0x26, 0xd8, 0x1c, 0x86, 0xc0, 0x19, 0xe0, 0x19, 0x1f, 0x75, 0xc5, 0x26, 0xdc, 0x11, 0x1d, 0xb6, 0xc8, 0x63, 0xdb, 0xb9, 0x1c, 0xef, 0xce, 0xc7, 0xda, 0x40, 0x1b, 0x3d, 0xd7, 0x38, 0xd8, 0x1c, 0x20, 0x20, 0xc8, 0x6b, 0xc0, 0x72, 0x1c, 0xc9, 0xd2, 0x94, 0xc5, 0x34, 0x22, 0x63, 0xcd, 0x7b, 0xb5, 0x5e, 0x27, 0x94, 0xca, 0x98, 0xa6, 0x97, 0x29, 0x75, 0xc7, 0x06, 0x95, 0x1d, 0x27, 0xc7, 0xc4, 0x61, 0x7f, 0xca, 0x21, 0x4d, 0xc2, 0xb9, 0x62, 0x6d, 0x19, 0xb6, 0xc4, 0x0e, 0x3e, 0xcc, 0x26, 0x10, 0xc8, 0x24, 0x28, 0xcd, 0x2a, 0x7d, 0xc8, 0xb3, 0x26, 0x77, 0x40, 0x7f, 0xc9, 0x6d, 0x20, 0xfa, 0x4d, 0x64, 0xc9, 0xe9, 0x20, 0xb9, 0x1a, 0xdf, 0xba, 0x6c, 0xe4, 0x54, 0x1c, 0x14, 0xbe, 0xac, 0xe1, 0xe3, 0x1d, 0xd1, 0xc5, 0x57, 0xdf, 0x9a, 0x1e, 0x1c, 0xc9, 0x5c, 0xdc, 0xbe, 0x1b, 0x2f, 0xd1, 0xe3, 0xdb, 0x70, 0x17, 0xd7, 0xd6, 0x0c, 0xd6, 0xb7, 0x26, 0x14, 0xcb, 0xdf, 0xc1, 0x9d, 0x2f, 0xc9, 0xc4, 0x28, 0xaf, 0xbc, 0x35, 0x46, 0xbf, 0x79, 0x9f, 0x46, 0x38, 0x6f, 0xbc, 0x20, 0x8e, 0x27, 0x38, 0x13, 0xb9, 0x6d, 0x78, 0xaf, 0x34, 0x51, 0xb7, 0x1e, 0x5d, 0x28, 0x31, 0x62, 0xb8, 0x45, 0x3a, 0x53, 0x32, 0xdc, 0xb8, 0xb1, 0x1d, 0xa0, 
0x48, 0x0f, 0xc3, 0x08, 0x1c, 0x88, 0x56, 0x34, 0xc5, 0x8d, 0x1a, 0xed, 0x74, 0x76, 0xc7, 0x9a, 0x19, 0xce, 0x1f, 0x42, 0xb3, 0x64, 0xe3, 0xf0, 0x1c, 0xfd, 0xb7, 0xee, 0xe6, 0x05, 0x1d, 0x83, 0xbf, 0x20, 0xe5, 0x27, 0x1e, 0xe3, 0xc5, 0x63, 0xe3, 0x61, 0x1d, 0xd9, 0xce, 0x5c, 0xe3, 0x2e, 0x2a, 0x69, 0xc6, 0xdc, 0xce, 0x14, 0x36, 0xc8, 0xbd, 0xe0, 0xb8, 0xf6, 0x40, 0x9c, 0xb5, 0x7c, 0xa5, 0xfc, 0x47, 0x05, 0xb1, 0x3e, 0x96, 0x78, 0x4c, 0xe1, 0xad, 0x22, 0x86, 0x8d, 0x4d, 0xa1, 0xab, 0x3e, 0x70, 0xc5, 0x4d, 0x63, 0xaa, 0x54, 0x56, 0xea, 0x4e, 0xfa, 0xab, 0x1a, 0x3a, 0x59, 0x54, 0x5e, 0xad, 0xdd, 0x27, 0x8b, 0x5f, 0x7a, 0xb0, 0xfb, 0x16, 0xc9, 0x6b, 0xd6, 0xb2, 0x9e, 0x14, 0x17, 0x80, 0x8e, 0xc4, 0x63, 0x15, 0x82, 0x24, 0x20, 0xa8, 0x52, 0xe1, 0x7a, 0x23, 0xd8, 0xaf, 0x09, 0xe3, 0x63, 0x24, 0xa3, 0xb4, 0xb2, 0xe5, 0x1c, 0x25, 0xb8, 0xbd, 0xbe, 0xe8, 0x2d, 0x2e, 0xef, 0xbf, 0x92, 0xde, 0x40, 0x3c, 0xa2, 0xb7, 0x37, 0xc7, 0xc2, 0x4a, 0x29, 0xad, 0xdd, 0xae, 0xcd, 0x52, 0x3e, 0xa8, 0xa8, 0x9f, 0xa7, 0x59, 0xb1, 0xa4, 0x88, 0x8f, 0x5e, 0x5d, 0x66, 0xa2, 0x28, 0x7e, 0x51, 0x60, 0x63, 0xa0, 0xdf, 0x6a, 0x4d, 0x64, 0x21, 0xa0, 0xf1, 0x54, 0xde, 0x69, 0xba, 0xa2, 0x2a, 0x40, 0x33, 0x70, 0x77, 0xa3, 0xde, 0x30, 0xd7, 0x76, 0x61, 0xa6, 0x86, 0x1f, 0xf3, 0x7d, 0x6b, 0xaf, 0x26, 0x16, 0xb2, 0x91, 0x57, 0xbe, 0xb2, 0x15, 0xc8, 0x26, 0xe7, 0x9c, 0xa9, 0xe1, 0xbf, 0x28, 0x14, 0xa2, 0x54, 0xe2, 0x9d, 0x2a, 0xd9, 0xa8, 0xb9, 0xe3, 0xec, 0x33, 0x38, 0xaf, 0x7a, 0xe4, 0x90, 0x42, 0xf8, 0xae, 0x7b, 0xd6, 0xbb, 0x4f, 0x82, 0xa7, 0x7e, 0xc2, 0x37, 0x5a, 0x39, 0xa1, 0xa0, 0xad, 0x72, 0x62, 0x90, 0x9d, 0xf6, 0x9b, 0x7b, 0x68, 0x9e, 0x9b, 0x0f, 0x8b, 0x82, 0x6d, 0x99, 0x99, 0x27, 0x7a, 0x60, 0x71, 0xdf, 0x98, 0xbf, 0x66, 0xf9, 0x75, 0xd4, 0x99, 0x6e, 0x54, 0xc1, 0x7a, 0xdd, 0x9b, 0x62, 0x43, 0xd3, 0x7f, 0xa6, 0x9e, 0x36, 0x34, 0x4a, 0x84, 0xf9, 0xa0, 0x97, 0x24, 0xb4, 0x8c, 0x98, 0xa9, 0x8c, 0x1a, 0x7d, 0x98, 0xac, 0xb5, 0xdd, 0x1a, 0x20, 0x28, 0xe5, 0x93, 0x0a, 0xe3, 0x4b, 0x2c, 0x0f, 0x97, 
0x15, 0xe3, 0x27, 0x31, 0x25, 0x9c, 0xb1, 0xe4, 0x0e, 0x43, 0x2e, 0xa2, 0xf4, 0xe3, 0xbd, 0x54, 0x0e, 0xa0, 0x27, 0xd3, 0x8f, 0x60, 0x31, 0x99, 0x55, 0xc0, 0x47, 0x69, 0xb5, 0x96, 0xad, 0xad, 0xb1, 0x71, 0xfd, 0x94, 0x66, 0x9b, 0x20, 0x78, 0xc1, 0x92, 0xb3, 0x8a, 0x76, 0x7d, 0xd2, 0x91, 0xb4, 0x78, 0x6c, 0x81, 0xcc, 0x92, 0x14, 0x66, 0xc7, 0x85, 0x3f, 0x93, 0x94, 0x55, 0xc9, 0x88, 0x71, 0x96, 0x05, 0x46, 0x3b, 0x8b, 0xf6, 0x98, 0xf4, 0x36, 0x45, 0x8f, 0x43, 0x9c, 0x0d, 0x27, 0x42, 0x95, 0x71, 0xa3, 0xd2, 0x1e, 0x12, 0xa3, 0x31, 0xae, 0xb9, 0x1e, 0x5e, 0x2b, 0xbe, 0x82, 0xe5, 0xe6, 0x30, 0x30, 0x2a, 0x88, 0xbf, 0xe6, 0x1b, 0x40, 0x20, 0x8f, 0x72, 0xe4, 0x84, 0x56, 0xca, 0x95, 0x47, 0xe2, 0xa3, 0x65, 0xd2, 0x94, 0x05, 0xd4, 0x14, 0x70, 0x95, 0x8f, 0x65, 0xc0, 0x51, 0x7a, 0xa1, 0x8d, 0x0a, 0xad, 0xe7, 0x82, 0xcc, 0x8b, 0xf9, 0x9c, 0x3f, 0x8a, 0x01, 0x8b, 0x0b, 0x8b, 0x40, 0x8d, 0x3d, 0x8a, 0x9c, 0x78, 0xb6, 0x90, 0x32, 0x8b, 0x27, 0x67, 0x3c, 0x93, 0x34, 0x8c, 0xaf, 0x56, 0x91, 0x95, 0x93, 0x8f, 0xca, 0x47, 0xe4, 0x97, 0xf9, 0x93, 0x84, 0x38, 0x24, 0x9a, 0xbf, 0x96, 0x6a, 0x29, 0x43, 0xa0, 0x93, 0x9d, 0x1f, 0x20, 0x39, 0xad, 0xd9, 0xaa, 0xf1, 0x21, 0xc1, 0x30, 0x3d, 0x74, 0x20, 0xe8, 0x5f, 0x38, 0xde, 0x7a, 0x23, 0xe7, 0xfa, 0x54, 0xc7, 0x80, 0xcc, 0xe5, 0xba, 0x69, 0x47, 0x87, 0xfd, 0xe3, 0xcc, 0x78, 0xfe, 0x88, 0xaa, 0xd5, 0x7c, 0x83, 0xc3, 0x85, 0xbb, 0xc1, 0x2b, 0x8c, 0xde, 0x85, 0x1d, 0xaf, 0x41, 0x93, 0x40, 0x84, 0x4b, 0x9d, 0xd5, 0x98, 0x5e, 0x83, 0x6f, 0x8b, 0xc3, 0x9c, 0x0d, 0x82, 0x8f, 0x79, 0x67, 0x9e, 0xa9, 0x82, 0xbd, 0x67, 0xea, 0xa1, 0x2f, 0x84, 0x63, 0x57, 0xb2, 0xa3, 0x7b, 0x87, 0x7e, 0x48, 0x4e, 0xa5, 0x5c, 0x8a, 0xe1, 0x38, 0x83, 0xa7, 0x44, 0x8e, 0x7a, 0x2a, 0x81, 0xac, 0x9c, 0x94, 0x4c, 0x21, 0x27, 0xb5, 0x2e, 0x9e, 0x61, 0x20, 0xbf, 0x36, 0x7c, 0x63, 0x87, 0xea, 0x53, 0x53, 0x98, 0x6d, 0x53, 0xea, 0xd1, 0x69, 0xbe, 0x73, 0xba, 0xe8, 0xdd, 0x7f, 0x93, 0x7b, 0xcf, 0xe6, 0x6f, 0x8d, 0x9e, 0x7f, 0x08, 0xd8, 0xb9, 0x96, 0x9d, 0x7e, 0x46, 0xc5, 0x5e, 
0x9e, 0x16, 0x7d, 0xd2, 0xb3, 0x0e, 0xa2, 0x25, 0x7d, 0x9d, 0xa1, 0xc9, 0xa6, 0x8e, 0x7b, 0x2a, 0x8d, 0x01, 0xaa, 0x2c, 0x7a, 0x03, 0x7a, 0x21, 0xac, 0x4a, 0x79, 0xe8, 0x68, 0xf1, 0xae, 0x64, 0x7b, 0x48, 0x58, 0x8f, 0xb0, 0x01, 0x7d, 0x88, 0x48, 0x4c, 0xb1, 0xd8, 0x80, 0xc9, 0x38, 0x34, 0xb2, 0xec, 0x84, 0x44, 0x29, 0xeb, 0xb7, 0x4a, 0x89, 0xdc, 0x1f, 0xc7, 0xbe, 0x6b, 0x91, 0xf3, 0x1e, 0xaa, 0x2d, 0x0b, 0x2e, 0xa1, 0xd2, 0x87, 0x6b, 0xe2, 0x5e, 0xc4, 0xef, 0xc3, 0x7e, 0x4f, 0x65, 0xae, 0xef, 0x29, 0x93, 0xc3, 0x71, 0x13, 0xeb, 0x91, 0xa2, 0xd1, 0x76, 0x4c, 0xdf, 0x9d, 0xa7, 0xef, 0x76, 0x2b, 0xcc, 0x03, 0xad, 0x41, 0x76, 0x8c, 0xb8, 0x6b, 0xb0, 0xa2, 0x75, 0x3c, 0xa3, 0xfe, 0xb4, 0x86, 0x71, 0xf1, 0x8e, 0x5e, 0xb7, 0x35, 0x70, 0x86, 0x7b, 0xb0, 0xb8, 0xa8, 0x6f, 0xbb, 0x6a, 0x47, 0xba, 0x7c, 0x70, 0xc1, 0x59, 0x75, 0xbc, 0x2b, 0x72, 0xb7, 0x48, 0x13, 0xbd, 0xab, 0x75, 0x95, 0x37, 0x21, 0xbe, 0x6c, 0x78, 0xe2, 0x27, 0x2a, 0xc1, 0xae, 0x7d, 0x7f, 0x1a, 0x9c, 0xc5, 0x7c, 0x87, 0x2c, 0x1b, 0x65, 0x2d, 0x24, 0x2e, 0x8f, 0xd2, 0x8f, 0x82, 0xa6, 0x52, 0x7e, 0xf5, 0x03, 0x90, 0xa6, 0x58, 0x96, 0xf0, 0x98, 0xa5, 0xb1, 0x62, 0xc5, 0xe9, 0x7e, 0xb6, 0x99, 0x6d, 0x86, 0xe7, 0x36, 0xba, 0x57, 0x6d, 0xa6, 0xd3, 0xb1, 0xbc, 0x7f, 0x6d, 0xbb, 0xbf, 0x69, 0xbf, 0x54, 0x6b, 0x2a, 0xa8, 0x6b, 0xc2, 0x00, 0x67, 0x53, 0x91, 0xaf, 0xc3, 0x54, 0x65, 0x4f, 0x7e, 0x02, 0xc4, 0x57, 0x64, 0x4c, 0x6b, 0xa9, 0xc5, 0xd3, 0x63, 0x9a, 0x59, 0x06, 0xc7, 0x99, 0x64, 0x56, 0x45, 0xb6, 0xc9, 0x60, 0x66, 0xc8, 0x32, 0xe4, 0xca, 0xb3, 0x6a, 0x0d, 0x22, 0x20, 0xca, 0xa5, 0x73, 0xd9, 0x1f, 0xc9, 0xca, 0x58, 0x7e, 0xa4, 0x1e, 0xd2, 0x83, 0xae, 0x36, 0x71, 0xf6, 0x24, 0x93, 0x14, 0x3c, 0x43, 0xf4, 0xbd, 0xa7, 0xa9, 0x48, 0xde, 0xed, 0x7c, 0xb4, 0xa3, 0x52, 0x97, 0xec, 0x0e, 0xc4, 0xf5, 0x5d, 0x2b, 0xe9, 0xb9, 0xc9, 0xc5, 0x64, 0x56, 0xdd, 0x2d, 0xcc, 0x24, 0x61, 0x1b, 0xc8, 0x2e, 0xce, 0x58, 0x5d, 0x89, 0xaf, 0xce, 0xcf, 0xbc, 0x59, 0xf2, 0x98, 0x29, 0xd0, 0x9a, 0x56, 0xde, 0x82, 0xed, 0xd1, 0xa1, 0x54, 
0x2b, 0x6e, 0xf5, 0xd2, 0x0f, 0x52, 0xfc, 0x59, 0xfd, 0xd3, 0xe6, 0x53, 0x24, 0x44, 0x12, 0xd6, 0x05, 0x54, 0x36, 0x2f, 0xf1, 0xd0, 0x67, 0x5c, 0x25, 0x27, 0xd7, 0xcd, 0xfd, 0x68, 0xcb, 0x24, 0x5a, 0xcd, 0x19, 0x72, 0x77, 0x22, 0xc2, 0x8b, 0xfc, 0x33, 0xe8, 0xe9, 0xc4, 0xa2, 0x91, 0x33, 0x8a, 0xe9, 0x2b, 0xb0, 0x30, 0x36, 0x8d, 0xec, 0x12, 0xc6, 0x67, 0x40, 0x7a, 0xef, 0x36, 0xd0, 0x5b, 0x4b, 0x61, 0xeb, 0x89, 0xd8, 0x00, 0x4f, 0x42, 0xdf, 0x4f, 0xdc, 0xc5, 0x52, 0xf4, 0xd1, 0x25, 0xde, 0xd4, 0x4f, 0x0d, 0xb7, 0x54, 0xdf, 0x44, 0x4a, 0xd3, 0x9d, 0x32, 0xde, 0x8f, 0x47, 0x0a, 0x86, 0xa9, 0xdf, 0xc9, 0x43, 0x89, 0x73, 0x1a, 0xdd, 0xb1, 0x3a, 0x3c, 0x58, 0x05, 0xdb, 0x33, 0x34, 0xcd, 0x3d, 0xf5, 0xd3, 0xd1, 0x34, 0x16, 0x2f, 0x0f, 0xd1, 0x6b, 0x49, 0x86, 0x2a, 0x4e, 0xd0, 0x71, 0x59, 0xe2, 0x27, 0xbe, 0xce, 0x77, 0x66, 0x50, 0x25, 0x2a, 0x9e, 0x7b, 0x2f, 0x20, 0xe6, 0xa0, 0xa8, 0xf8, 0x2f, 0xd4, 0xe5, 0xbe, 0xb1, 0x94, 0x33, 0x39, 0xe2, 0x26, 0xcf, 0xa3, 0x35, 0x0b, 0xeb, 0xa7, 0xd9, 0x6a, 0x34, 0xfc, 0xe4, 0x1d, 0xd9, 0x0c, 0x38, 0x46, 0xda, 0x78, 0xdb, 0x02, 0x37, 0xbd, 0xc8, 0x38, 0xdb, 0x45, 0x34, 0xd1, 0xad, 0xb5, 0xdb, 0x35, 0x33, 0x37, 0x94, 0x72, 0xda, 0x68, 0x31, 0x2c, 0x7e, 0xfb, 0xd8, 0x5b, 0x2f, 0x21, 0x6a, 0x2c, 0xd7, 0xa7, 0x2d, 0xcf, 0x55, 0x66, 0xd8, 0x4e, 0x2b, 0xe7, 0x3e, 0x2e, 0xce, 0xd2, 0x32, 0x83, 0x31, 0xd9, 0xcf, 0xbc, 0x2e, 0x67, 0x2c, 0x84, 0xce, 0x95, 0x2e, 0xe1, 0x2b, 0x47, 0xd0, 0x35, 0x58, 0x85, 0x27, 0xb1, 0xa6, 0x8e, 0x2d, 0x5a, 0xe2, 0xcd, 0xb3, 0x84, 0x2e, 0x0c, 0xe2, 0x8a, 0xca, 0xc6, 0x2e, 0x41, 0xe7, 0x60, 0xd7, 0xb2, 0x2f, 0x53, 0xe6, 0x80, 0xd7, 0x1e, 0x31, 0xcb, 0xdb, 0xf6, 0xd9, 0x5c, 0x31, 0x7e, 0xd8, 0x58, 0xd9, 0x55, 0x30, 0x26, 0xc4, 0x5d, 0xd9, 0xb1, 0x2e, 0x8a, 0xac, 0x98, 0xd9, 0x60, 0x2d, 0x76, 0x93, 0x5a, 0xd7, 0x68, 0x2c, 0x53, 0x7e, 0xb7, 0xd5, 0xde, 0x2b, 0x91, 0x6a, 0xe7, 0xd6, 0x01, 0x2a, 0xfd, 0x59, 0xbc, 0xd7, 0x0e, 0x28, 0x78, 0x40, 0x83, 0xd1, 0x36, 0x2b, 0xaf, 0x2e, 0xc8, 0xcf, 0xbe, 0x2c, 0xb7, 0x2c, 0xfc, 
0xce, 0xd0, 0x2d, 0x63, 0x2b, 0xdf, 0xce, 0x2c, 0x2d, 0xde, 0x2b, 0x1d, 0x1e, 0x1f, 0xca, 0xd8, 0xdc, 0xe5, 0x1d, 0xc4, 0xd0, 0x7c, 0xdc, 0x4f, 0x1b, 0x2b, 0xdb, 0x10, 0xdd, 0x35, 0x1f, 0xa1, 0xc2, 0x0d, 0xba, 0xe8, 0x23, 0xd0, 0xc1, 0xa4, 0xb8, 0x02, 0x24, 0xce, 0xc2, 0x81, 0xb2, 0x0b, 0x27, 0x59, 0xbc, 0xb4, 0xa8, 0x6d, 0x28, 0x78, 0xbb, 0x71, 0xa2, 0xdf, 0x2c, 0xf2, 0xcd, 0x85, 0x9f, 0xcb, 0x2e, 0xbb, 0xcb, 0x91, 0x94, 0x55, 0x2e, 0x97, 0xcb, 0x47, 0x85, 0xa8, 0x2e, 0x05, 0xcb, 0xa6, 0x76, 0x43, 0x2a, 0x20, 0xcd, 0x21, 0x5d, 0x82, 0x25, 0xc6, 0xcd, 0xf9, 0x2e, 0x12, 0x26, 0xde, 0xcd, 0x8b, 0x2b, 0x08, 0x28, 0xb7, 0xcc, 0xcf, 0x29, 0x86, 0x2a, 0x03, 0xcc, 0x4c, 0x28, 0x83, 0x1f, 0xe9, 0xc6, 0xed, 0xdd, 0x53, 0x1e, 0x6d, 0xcb, 0x9b, 0xdd, 0xa9, 0x1e, 0x09, 0xd1, 0x49, 0xdc, 0xef, 0x1c, 0x25, 0xdb, 0xd6, 0xdc, 0xe7, 0x1a, 0x3f, 0xde, 0x1d, 0xd9, 0x0e, 0x1e, 0xa5, 0xcb, 0xca, 0xc1, 0xb4, 0x22, 0x41, 0xce, 0x93, 0xbb, 0x1b, 0x27, 0x13, 0xc3, 0xdd, 0xab, 0x5b, 0x2c, 0x68, 0xce, 0xbb, 0xa4, 0x85, 0x2e, 0x7d, 0xcb, 0xe6, 0x96, 0x09, 0x2e, 0x91, 0xcb, 0x45, 0x84, 0x58, 0x2c, 0xe4, 0xcc, 0x7f, 0x6d, 0x3c, 0x29, 0xeb, 0xcd, 0x7e, 0x51, 0x19, 0x26, 0x25, 0xce, 0x04, 0x2c, 0xf5, 0x28, 0x94, 0xcd, 0x02, 0x29, 0xb5, 0x2a, 0x7d, 0xcc, 0x39, 0x28, 0x34, 0x2b, 0xb7, 0xcb, 0xb9, 0x27, 0x46, 0x1d, 0xc6, 0xc3, 0xa8, 0xe2, 0x31, 0x20, 0x39, 0xc7, 0xe0, 0xde, 0xc6, 0x1e, 0xf8, 0xcc, 0xdc, 0xde, 0xfb, 0x1c, 0x68, 0xd6, 0x18, 0xdf, 0xce, 0x1c, 0x48, 0xdd, 0x8f, 0xdd, 0xd1, 0x1c, 0xb8, 0xdf, 0xe5, 0xd7, 0x44, 0x22, 0x93, 0xdc, 0x1c, 0xca, 0xfb, 0x27, 0xbf, 0xd7, 0x55, 0xbc, 0x61, 0x2d, 0x53, 0xd1, 0x96, 0xaa, 0x65, 0x2e, 0xb5, 0xcc, 0xb6, 0x98, 0x0e, 0x2e, 0x93, 0xcb, 0x51, 0x82, 0x49, 0x2b, 0x4c, 0xcd, 0x4a, 0x65, 0x27, 0x26, 0x60, 0xce, 0xce, 0x42, 0xfb, 0x1e, 0xb8, 0xcf, 0x5b, 0x21, 0x48, 0x2b, 0xbb, 0xcc, 0x07, 0x27, 0x6a, 0x42, 0xe5, 0xcc, 0xd4, 0x20, 0xc4, 0x4e, 0xff, 0xcc, 0x81, 0x21, 0xe5, 0x1d, 0x3b, 0xbd, 0x76, 0xe5, 0xf0, 0x1e, 0x92, 0xc2, 0x8f, 0xe4, 0x84, 0x21, 0x69, 0xc8, 
0xff, 0xe0, 0xaf, 0x20, 0xb9, 0xce, 0xc2, 0xe1, 0x09, 0x1d, 0xfb, 0xda, 0x42, 0xe2, 0x92, 0x19, 0xce, 0xe6, 0x66, 0xe5, 0xe2, 0x26, 0x3b, 0xdc, 0x9b, 0xd1, 0x94, 0x33, 0x28, 0xd4, 0x38, 0xbd, 0xdd, 0x3a, 0x1c, 0xcf, 0x3c, 0xab, 0xf4, 0x3d, 0xd5, 0xcb, 0xce, 0x97, 0xa8, 0x3e, 0x2c, 0xc9, 0xf3, 0x80, 0xfa, 0x3b, 0xbf, 0xc9, 0x27, 0x61, 0xe5, 0x37, 0x24, 0xca, 0x07, 0x3e, 0x3a, 0x3a, 0x90, 0xca, 0x4a, 0x21, 0xca, 0x4d, 0x81, 0xca, 0x7f, 0x21, 0x11, 0x5f, 0xb3, 0xcb, 0x54, 0x1e, 0x5e, 0x78, 0x36, 0xcb, 0x1b, 0x1b, 0x9f, 0x22, 0x16, 0xb4, 0x7f, 0xe4, 0xb0, 0x20, 0x65, 0xba, 0xde, 0xe7, 0x79, 0x22, 0x5f, 0xc1, 0x24, 0xe5, 0xa1, 0x24, 0x5f, 0xc8, 0x5e, 0xe4, 0x7e, 0x28, 0x1b, 0xd0, 0xe8, 0xe3, 0x61, 0x2e, 0x1f, 0xd6, 0xad, 0xdd, 0x06, 0x3d, 0x04, 0xcc, 0xeb, 0xc7, 0xc4, 0x46, 0xff, 0xc5, 0xe4, 0xb5, 0x3f, 0x50, 0x62, 0xbf, 0xc6, 0xa3, 0x40, 0x54, 0x8d, 0xbd, 0x00, 0x91, 0x0b, 0x56, 0xb5, 0xbb, 0x8a, 0x7a, 0x53, 0x57, 0xb8, 0xba, 0x50, 0x60, 0x03, 0x59, 0xcc, 0xbb, 0x9b, 0x42, 0x65, 0x5d, 0xb6, 0xbd, 0xbb, 0x2b, 0xe9, 0x68, 0xb2, 0xbf, 0xdc, 0x1a, 0x6a, 0x7b, 0x26, 0xc6, 0xec, 0x17, 0xe4, 0x89, 0x3f, 0xc8, 0xd9, 0x13, 0xea, 0x26, 0x7c, 0xaa, 0xdb, 0xe2, 0x83, 0x26, 0xdb, 0xb1, 0xd0, 0xe4, 0xa1, 0x28, 0xe7, 0xb6, 0x6a, 0xe6, 0x3c, 0x2b, 0xdb, 0xc0, 0x17, 0xe8, 0xb1, 0x35, 0x35, 0xc9, 0x5e, 0xe9, 0x38, 0x46, 0x4c, 0xc3, 0x2c, 0xd2, 0x35, 0x52, 0x32, 0xbc, 0x5c, 0xbd, 0x7c, 0x5c, 0x58, 0xb5, 0x48, 0xab, 0x93, 0x63, 0x1c, 0xb1, 0x57, 0x9a, 0x93, 0x67, 0x2c, 0xae, 0xfa, 0x89, 0x28, 0x6a, 0xd2, 0xad, 0xe1, 0x74, 0x01, 0x6e, 0x53, 0xad, 0xd3, 0x5e, 0x11, 0x73, 0xbf, 0xae, 0xcd, 0x48, 0x90, 0x79, 0x2a, 0xb1, 0xb8, 0x35, 0x96, 0x7e, 0x4e, 0xb4, 0xcb, 0x24, 0x23, 0x83, 0xd4, 0xba, 0x0a, 0x14, 0x0d, 0x93, 0x66, 0xc2, 0x5d, 0x15, 0x61, 0x29, 0x71, 0x9e, 0x74, 0xe2, 0x59, 0x2b, 0x19, 0xa4, 0xc2, 0xe3, 0x58, 0x2f, 0xc4, 0xaa, 0xc2, 0xe3, 0xa0, 0x38, 0x46, 0xb3, 0x49, 0xe7, 0x5c, 0x4d, 0xe2, 0xb5, 0x3f, 0xda, 0xc7, 0x58, 0xc0, 0xb2, 0xaf, 0xcb, 0xd9, 0x63, 0x58, 0xac, 0xc3, 0xb8, 0xbd, 
0x6c, 0x1f, 0xa9, 0x0b, 0xa6, 0x6f, 0x72, 0xf1, 0xa6, 0x14, 0x95, 0xd7, 0x78, 0x05, 0xa4, 0x5d, 0x84, 0x36, 0x7c, 0x35, 0xa3, 0xc5, 0x70, 0xb2, 0x80, 0x3e, 0xa4, 0x8c, 0x5e, 0x15, 0x84, 0x46, 0xa6, 0x05, 0x4b, 0xab, 0x87, 0xe1, 0xa9, 0x4e, 0x39, 0x64, 0x8c, 0xa9, 0xac, 0x39, 0x29, 0xf8, 0x91, 0x46, 0xb0, 0x2e, 0x19, 0xeb, 0x9d, 0x82, 0xbc, 0xef, 0x19, 0x86, 0x2b, 0xdf, 0x94, 0x50, 0xe2, 0xff, 0x2e, 0xe1, 0x99, 0xb3, 0xe3, 0x93, 0x34, 0x6b, 0x9e, 0xdc, 0xe4, 0xa5, 0x4b, 0xff, 0xa5, 0xfd, 0xe4, 0x8e, 0x5e, 0x1f, 0xa7, 0x53, 0xd9, 0xe5, 0x68, 0xb1, 0xa2, 0xc1, 0xc8, 0xae, 0x73, 0x20, 0xa0, 0xcc, 0xb8, 0x0e, 0x7b, 0xc9, 0x9e, 0xc3, 0xa5, 0xb7, 0x83, 0x07, 0x9d, 0x41, 0x94, 0x9e, 0x88, 0x18, 0x9c, 0x5b, 0x82, 0x49, 0x8b, 0xda, 0x9c, 0x37, 0x70, 0x42, 0x8e, 0xea, 0x9d, 0x5a, 0x5e, 0x76, 0x91, 0x72, 0x9f, 0x8f, 0x4e, 0x0f, 0x94, 0x44, 0xa2, 0x6d, 0x3d, 0x9c, 0x97, 0x71, 0xa5, 0x7c, 0x2d, 0xd9, 0x99, 0xa5, 0xa9, 0x90, 0x1f, 0x09, 0xa8, 0x7b, 0xb5, 0xc3, 0x1e, 0xb9, 0x2e, 0x65, 0x85, 0xd4, 0xe6, 0x2e, 0x33, 0x7e, 0x8c, 0x65, 0xe4, 0xe3, 0x49, 0x7e, 0x91, 0xc8, 0xe4, 0x16, 0x5d, 0xb2, 0x98, 0xaa, 0xe3, 0xb7, 0x6f, 0x5a, 0x9b, 0x79, 0xda, 0x88, 0x7a, 0xea, 0x99, 0x92, 0xca, 0x2f, 0x84, 0x8d, 0x96, 0xf1, 0xb8, 0x3a, 0x8c, 0xe1, 0x96, 0x3b, 0xa6, 0xc3, 0x94, 0x1b, 0x95, 0x27, 0x95, 0x5c, 0x97, 0x67, 0x95, 0x07, 0x82, 0xbd, 0x9a, 0x3d, 0x94, 0xd1, 0x70, 0xa9, 0x9c, 0x5d, 0x95, 0xac, 0x5f, 0x3f, 0x9e, 0x3e, 0x98, 0x79, 0x4f, 0x84, 0xa0, 0x64, 0x9b, 0xcf, 0x3f, 0xad, 0xa2, 0xa6, 0x9f, 0x19, 0x30, 0x9c, 0xa4, 0xe4, 0xa2, 0x0b, 0x21, 0x79, 0xb0, 0xd7, 0xaf, 0x30, 0x20, 0xee, 0x32, 0xbc, 0x75, 0x25, 0xe6, 0xc1, 0x48, 0xd7, 0x7e, 0xc8, 0xe6, 0xd3, 0x5b, 0x87, 0x84, 0x65, 0xe6, 0x14, 0x70, 0xad, 0x8b, 0x86, 0xe4, 0xae, 0x82, 0xb7, 0x91, 0x85, 0xdf, 0x8c, 0x8d, 0xf0, 0x90, 0x88, 0xcc, 0x5e, 0x97, 0x54, 0x8f, 0x32, 0xba, 0x74, 0x9d, 0x9b, 0x8e, 0xba, 0xa8, 0xb3, 0xa2, 0xcc, 0x8d, 0xdb, 0x96, 0x59, 0xa6, 0x27, 0x8c, 0xc2, 0x83, 0x40, 0xa8, 0x91, 0x8c, 0x43, 0x71, 0x0b, 0xaa, 0x5d, 0x8c, 
0xff, 0x60, 0x2b, 0xac, 0x11, 0x8f, 0x2c, 0x4f, 0xd6, 0xad, 0xc4, 0x92, 0x4c, 0x3f, 0xa3, 0xaf, 0x37, 0x95, 0x66, 0x30, 0xc0, 0xb1, 0x1a, 0x98, 0x84, 0x22, 0x11, 0xb9, 0x8e, 0xa3, 0x12, 0x21, 0x43, 0x46, 0x41, 0x6a, 0x10, 0xec, 0x29, 0x5a, 0x3a, 0x72, 0x0c, 0xea, 0x5c, 0x70, 0x83, 0x77, 0x72, 0xe8, 0xf6, 0x87, 0x35, 0x80, 0xbd, 0xe7, 0xc3, 0x98, 0x32, 0x88, 0x5b, 0xe3, 0xb5, 0xa1, 0x76, 0x88, 0x62, 0xcf, 0xe9, 0xa8, 0xac, 0x88, 0x21, 0xbe, 0x8f, 0xad, 0x48, 0x87, 0xb7, 0xac, 0x8c, 0xb1, 0x37, 0x85, 0x8f, 0x97, 0x91, 0xb3, 0xe1, 0x83, 0xd5, 0x84, 0x08, 0xb5, 0xfb, 0x83, 0x71, 0x71, 0xd7, 0xb7, 0x0b, 0x83, 0xef, 0x61, 0x15, 0xb8, 0x2b, 0x85, 0x57, 0x4f, 0xda, 0xb9, 0x74, 0x87, 0xbc, 0x3e, 0xa6, 0xba, 0xd1, 0x8a, 0x60, 0x2f, 0x63, 0xbb, 0xe3, 0x8c, 0x8b, 0x1f, 0x59, 0xc2, 0xf5, 0x96, 0x2f, 0x1e, 0x97, 0x2d, 0x17, 0x2e, 0xaa, 0xd2, 0x92, 0x72, 0x9d, 0x61, 0xa0, 0xef, 0x5a, 0x88, 0x8a, 0x6b, 0xa5, 0xec, 0xb0, 0x97, 0x91, 0x74, 0xd7, 0xeb, 0x74, 0xad, 0xa0, 0x80, 0x3a, 0xea, 0xef, 0xb3, 0x4b, 0x80, 0xf8, 0xd6, 0x91, 0xb8, 0x2b, 0x80, 0xb2, 0xc3, 0xc1, 0xbb, 0x43, 0x7f, 0x62, 0xaf, 0x2b, 0xbf, 0x13, 0x7c, 0x5b, 0x98, 0xb4, 0xc0, 0x54, 0x7a, 0x5c, 0x85, 0x38, 0xc1, 0x51, 0x79, 0x54, 0x73, 0x50, 0xc2, 0x48, 0x79, 0x9e, 0x61, 0xea, 0xc3, 0x43, 0x7a, 0xaa, 0x4f, 0x9b, 0xc4, 0x2b, 0x7c, 0xc2, 0x3c, 0xf8, 0xc5, 0x7d, 0x7e, 0xb4, 0x2b, 0x76, 0xc6, 0x10, 0x80, 0xc9, 0x1a, 0xd1, 0xc8, 0x3c, 0x8a, 0x99, 0x1b, 0x9e, 0x2d, 0x31, 0x2e, 0x99, 0xd2, 0x9b, 0x86, 0x19, 0x56, 0x95, 0xf3, 0xe1, 0x98, 0x97, 0x5f, 0xfc, 0xec, 0x34, 0xa9, 0x42, 0x66, 0x69, 0xe9, 0x79, 0xba, 0x4e, 0x70, 0xe4, 0xea, 0x9b, 0xc4, 0xa3, 0x78, 0xcb, 0xdf, 0xbf, 0xc6, 0xa4, 0x77, 0x64, 0xca, 0xc5, 0xc8, 0xaa, 0x75, 0x94, 0xb4, 0x33, 0xca, 0xcc, 0x71, 0xd7, 0x9d, 0xdd, 0xcb, 0xd7, 0x6f, 0x52, 0x88, 0xeb, 0xcc, 0xaf, 0x6d, 0x60, 0x75, 0x51, 0xcd, 0x69, 0x6c, 0x72, 0x61, 0xb3, 0xce, 0xa2, 0x6c, 0xc0, 0x4d, 0xd5, 0xcf, 0xe0, 0x6e, 0x98, 0x39, 0xaa, 0xd0, 0xc0, 0x71, 0xea, 0x26, 0xcb, 0xcd, 0x6a, 0x79, 0xf4, 0x22, 0xba, 
0xcc, 0x83, 0x83, 0x18, 0x20, 0xad, 0x8c, 0xa1, 0x3c, 0x5e, 0xf6, 0x48, 0x9a, 0x3f, 0x45, 0x2d, 0xf1, 0x4f, 0xa7, 0x19, 0x4d, 0xdc, 0xec, 0x6f, 0xb7, 0x9a, 0x5b, 0x47, 0xe9, 0x60, 0xc8, 0x06, 0x62, 0xb8, 0xe9, 0x84, 0xd3, 0x35, 0x6e, 0xfb, 0xe8, 0xa4, 0xd4, 0xc9, 0x6d, 0x0a, 0xd3, 0x43, 0xd7, 0x16, 0x69, 0x9c, 0xba, 0xa6, 0xd8, 0x44, 0x65, 0x9f, 0xa2, 0x19, 0xd8, 0xb1, 0x62, 0x4b, 0x8c, 0x07, 0xd8, 0x89, 0x5f, 0xe1, 0x77, 0x53, 0xd8, 0xf8, 0x5d, 0x7d, 0x62, 0x45, 0xda, 0x4c, 0x5d, 0xc5, 0x4b, 0x84, 0xdc, 0x8c, 0x5d, 0x6f, 0x36, 0xbd, 0xd4, 0x0b, 0x62, 0x83, 0x2a, 0x59, 0xd0, 0x0a, 0x6d, 0xef, 0x26, 0x0e, 0xce, 0xc4, 0x76, 0xef, 0x24, 0x16, 0x96, 0x13, 0x32, 0xf3, 0xed, 0x62, 0xa6, 0x35, 0x36, 0x76, 0xec, 0xff, 0xb6, 0x4d, 0x3b, 0x7a, 0xf0, 0x05, 0xc5, 0x34, 0x4a, 0xf6, 0xec, 0xc3, 0xd0, 0x52, 0x53, 0xab, 0xe8, 0x5a, 0xd5, 0xfd, 0x57, 0x7a, 0xdd, 0x5b, 0xdb, 0xb7, 0x59, 0xd5, 0xd3, 0x5e, 0xe0, 0x0b, 0x54, 0x62, 0xb9, 0x68, 0xde, 0xd2, 0x51, 0x2e, 0x9f, 0xa8, 0xe0, 0xf7, 0x4b, 0xcf, 0x88, 0xeb, 0xe2, 0x46, 0x47, 0x14, 0x74, 0x34, 0xe0, 0x52, 0x41, 0x36, 0x5b, 0xcf, 0xdd, 0x46, 0x3a, 0xbc, 0x41, 0x34, 0xd8, 0xa5, 0x41, 0x17, 0x31, 0x80, 0xd3, 0xfd, 0x4f, 0xe2, 0x2b, 0xd3, 0xd1, 0xaa, 0x5d, 0x8e, 0x28, 0xcc, 0xcf, 0xb8, 0x6a, 0xef, 0x26, 0x0a, 0xa2, 0x65, 0x30, 0xad, 0xe6, 0xa2, 0xa7, 0xa1, 0x34, 0x03, 0xdf, 0xe9, 0xb5, 0x56, 0x35, 0xb6, 0xe4, 0xfe, 0xd2, 0xc8, 0x38, 0x65, 0xee, 0x50, 0xd9, 0xf3, 0x37, 0xc1, 0xe2, 0x05, 0xd9, 0xf6, 0x42, 0xc1, 0xda, 0xdc, 0xdd, 0x7b, 0x44, 0x36, 0xce, 0xdd, 0xdc, 0xae, 0x3a, 0xcd, 0xb0, 0x56, 0xdc, 0x11, 0x36, 0x11, 0x95, 0x74, 0xdc, 0x77, 0x34, 0x84, 0x80, 0xd4, 0xda, 0x51, 0x32, 0x15, 0x6b, 0xe8, 0xd9, 0x7f, 0x30, 0xfb, 0x57, 0x52, 0xd9, 0x93, 0x2f, 0x29, 0x40, 0x3b, 0xd3, 0x72, 0x2f, 0x31, 0x30, 0x04, 0xd0, 0xdf, 0x2f, 0x8c, 0x2d, 0x5d, 0xd1, 0xb0, 0x4b, 0x17, 0x2a, 0x57, 0xd1, 0x62, 0x5a, 0xb0, 0x28, 0x78, 0xa9, 0x7f, 0x2f, 0x1c, 0xe3, 0xe2, 0xb5, 0x58, 0x2f, 0x92, 0xe4, 0x36, 0xc8, 0x21, 0x32, 0xed, 0xe3, 0x2c, 0xd5, 0x76, 0x32, 
0xd7, 0xdf, 0x44, 0xd7, 0x87, 0x33, 0xf9, 0xdb, 0xd0, 0xd9, 0x88, 0x33, 0xfd, 0xd8, 0xa3, 0xd9, 0xe0, 0x32, 0x7d, 0xc4, 0xc9, 0xda, 0x51, 0x31, 0x0b, 0xad, 0x4c, 0xda, 0x29, 0x2f, 0xfc, 0x94, 0x2d, 0xd8, 0xea, 0x2e, 0xa8, 0x7f, 0xec, 0xd7, 0x4a, 0x2d, 0xa7, 0x6c, 0x13, 0xd7, 0x73, 0x2d, 0x0f, 0x5a, 0xd1, 0xd8, 0x3b, 0x2a, 0xf3, 0x42, 0x39, 0xd2, 0x5f, 0x2c, 0xcf, 0x2f, 0x9e, 0xd0, 0xa2, 0x2d, 0x96, 0x2d, 0xa1, 0xcf, 0x8a, 0x2e, 0x1a, 0x2c, 0x64, 0xce, 0xc8, 0x2e, 0x7a, 0x2b, 0x8f, 0x1e, 0xd6, 0xce, 0x23, 0xde, 0xc8, 0x1e, 0x9e, 0xd2, 0x98, 0xde, 0x54, 0x1c, 0x9c, 0xdd, 0xf2, 0xdf, 0x18, 0x1c, 0x07, 0xdf, 0x57, 0xdd, 0x0d, 0x24, 0x2d, 0xc5, 0x47, 0xbb, 0x49, 0x20, 0x01, 0xcf, 0x01, 0xbe, 0x19, 0x23, 0xbb, 0xc5, 0x97, 0xb0, 0x40, 0x2b, 0x06, 0xd2, 0x2e, 0xae, 0x0b, 0x2d, 0xab, 0xcd, 0xf9, 0x9f, 0xcd, 0x2f, 0x07, 0xcb, 0xb7, 0x94, 0x00, 0x2e, 0xa3, 0xcb, 0x54, 0x85, 0xac, 0x2e, 0x3c, 0xcb, 0xd6, 0x75, 0xbb, 0x2b, 0x36, 0xcd, 0xad, 0x5e, 0x17, 0x28, 0x04, 0xcf, 0x16, 0x2f, 0x1c, 0x28, 0x5c, 0xce, 0xdf, 0x2c, 0x18, 0x29, 0x45, 0xce, 0x52, 0x29, 0xf5, 0x2a, 0x7c, 0xcd, 0x93, 0x28, 0xe0, 0x20, 0x7c, 0xc9, 0x05, 0xdf, 0x6a, 0x1f, 0x5b, 0xcf, 0x57, 0xe0, 0x08, 0x1c, 0xe0, 0xd7, 0xc0, 0xe1, 0x6d, 0x1c, 0xe0, 0xdf, 0xd0, 0xe0, 0x78, 0x1d, 0x37, 0xe1, 0x62, 0xdb, 0xc8, 0x22, 0xf0, 0xdd, 0x9a, 0xd0, 0xff, 0x24, 0x8c, 0xdb, 0x7a, 0xc6, 0x80, 0x2a, 0x7f, 0xd5, 0x35, 0xb5, 0x5c, 0x2d, 0xad, 0xcf, 0x9b, 0xa4, 0xd3, 0x2e, 0xeb, 0xcc, 0x2f, 0x95, 0xcc, 0x2e, 0x9e, 0xcb, 0x54, 0x83, 0xc4, 0x2d, 0x94, 0xcc, 0xed, 0x6d, 0x25, 0x2c, 0x6a, 0xce, 0xd2, 0x52, 0x36, 0x29, 0x36, 0xcf, 0x8d, 0x2e, 0xa2, 0x29, 0xa6, 0xcf, 0x3b, 0x2a, 0x94, 0x2b, 0x2c, 0xce, 0x12, 0x28, 0xbc, 0x2c, 0x48, 0xcd, 0x3b, 0x27, 0xb4, 0x1f, 0xa1, 0xc6, 0x78, 0xe3, 0x43, 0x21, 0xa2, 0xc9, 0xcb, 0xe0, 0x87, 0x20, 0xfb, 0xd0, 0x65, 0xe1, 0x1c, 0x1e, 0x8f, 0xda, 0x96, 0xe2, 0xc4, 0x1e, 0x82, 0xe2, 0x5e, 0xe2, 0x09, 0x22, 0x4a, 0xe1, 0x92, 0xd8, 0x4e, 0x27, 0xb3, 0xde, 0x5c, 0xcc, 0x79, 0x2b, 0xa4, 0xd9, 0xe7, 0xbe, 0x53, 
0x2f, 0xa5, 0xd2, 0xe6, 0xab, 0x36, 0x2f, 0x42, 0xcc, 0xdc, 0x97, 0xb5, 0x2e, 0xb3, 0xcb, 0x45, 0x81, 0xa2, 0x2d, 0x5b, 0xce, 0x5c, 0x65, 0x9f, 0x2b, 0xb7, 0xd2, 0x11, 0x41, 0xa8, 0x2c, 0x0b, 0xd0, 0x9c, 0x2d, 0x96, 0x2c, 0xfc, 0xcf, 0x5b, 0x28, 0x5f, 0x49, 0xd0, 0xd0, 0x5b, 0x22, 0x0b, 0x53, 0x44, 0xcf, 0x5d, 0x20, 0x25, 0x20, 0x3f, 0xbf, 0x52, 0xe5, 0xe4, 0x21, 0x84, 0xc4, 0xbe, 0xe4, 0xbc, 0x23, 0xfe, 0xca, 0x83, 0xe1, 0xb9, 0x24, 0x85, 0xd1, 0x86, 0xe2, 0x42, 0x23, 0x36, 0xdd, 0x98, 0xe4, 0x48, 0x28, 0x4c, 0xe3, 0x48, 0xe1, 0x21, 0x2e, 0xa4, 0xe1, 0xa4, 0xd4, 0xc5, 0x37, 0xf1, 0xe3, 0x65, 0xcb, 0x4d, 0x3f, 0xbe, 0xdf, 0x22, 0xb8, 0xa6, 0x44, 0xf2, 0xdb, 0x50, 0xa2, 0xa0, 0x45, 0x2f, 0xda, 0x5b, 0x8a, 0x2b, 0x43, 0x84, 0xd9, 0x99, 0x69, 0x3b, 0x3f, 0x18, 0xdb, 0x12, 0x43, 0xd5, 0x44, 0x21, 0xd7, 0x4a, 0x29, 0x6a, 0x53, 0x19, 0xd1, 0xa7, 0x21, 0x42, 0x70, 0x95, 0xd1, 0x0b, 0x1e, 0x80, 0x7a, 0xb1, 0xce, 0x6f, 0x1d, 0x97, 0x24, 0x8c, 0xb5, 0x74, 0xe5, 0x57, 0x23, 0xab, 0xbd, 0x17, 0xe7, 0xf1, 0x25, 0x70, 0xc3, 0x6f, 0xe6, 0xbe, 0x28, 0x39, 0xca, 0xea, 0xe5, 0x16, 0x2f, 0x96, 0xd3, 0x10, 0xe3, 0x4d, 0x3a, 0xe8, 0xdf, 0xe9, 0xe5, 0x24, 0x47, 0x3a, 0xda, 0x4e, 0xd4, 0x2f, 0x50, 0xe4, 0xd3, 0x24, 0xc1, 0xfc, 0x57, 0xdf, 0xce, 0x1f, 0xaf, 0x00, 0x5c, 0xeb, 0xcb, 0x00, 0x9b, 0xb2, 0x5e, 0x7a, 0xca, 0x0b, 0x83, 0xda, 0x5f, 0xe3, 0xc9, 0xa4, 0x69, 0x89, 0x61, 0xb3, 0xca, 0xc3, 0x4c, 0x7e, 0x69, 0xb1, 0xcc, 0xe0, 0x31, 0xa3, 0x72, 0xf6, 0xce, 0x6f, 0x1d, 0xdb, 0x7b, 0x2d, 0xcb, 0xfa, 0x1b, 0xa4, 0x90, 0x6a, 0xcb, 0xc6, 0x17, 0x52, 0x28, 0x83, 0xad, 0x5d, 0xe3, 0x82, 0x29, 0x58, 0xb4, 0x7f, 0xe5, 0xc8, 0x2b, 0x19, 0xb9, 0x7b, 0xe8, 0x0d, 0x2f, 0xd8, 0xc2, 0x24, 0xe8, 0xd7, 0x3f, 0x5a, 0xcb, 0xee, 0xe6, 0xd9, 0x52, 0xfc, 0xcd, 0x42, 0xd9, 0xb9, 0x5b, 0x91, 0xc9, 0x4f, 0xca, 0x7a, 0x65, 0x94, 0xc2, 0xca, 0xb8, 0x6f, 0x6c, 0x71, 0xbf, 0x5d, 0xa7, 0x39, 0x71, 0x43, 0xbc, 0x66, 0x94, 0x8b, 0x74, 0xa8, 0xbb, 0xca, 0x7f, 0x5a, 0x78, 0x49, 0xbb, 0xdd, 0x68, 0x6e, 0x7c, 0xd1, 0xbc, 
0xb2, 0x50, 0xc0, 0x82, 0x1d, 0xbf, 0x14, 0x3a, 0x4a, 0x87, 0x35, 0xc2, 0x07, 0x27, 0x90, 0x89, 0x61, 0xc2, 0x81, 0x15, 0xa5, 0x96, 0x9c, 0xc7, 0x67, 0x12, 0x8b, 0x2b, 0x8d, 0xa0, 0xb9, 0xe2, 0xe6, 0x2d, 0x96, 0xa7, 0x1d, 0xe4, 0x03, 0x31, 0xd4, 0xac, 0xba, 0xe4, 0xdc, 0x40, 0x47, 0xb6, 0xbc, 0xe8, 0x83, 0x57, 0x49, 0xbf, 0x40, 0xe3, 0x9c, 0x63, 0x0b, 0xbc, 0xef, 0xd4, 0x0a, 0x6d, 0x58, 0xb8, 0x9d, 0xc3, 0xc5, 0x76, 0x37, 0xb5, 0x12, 0xb2, 0x5f, 0x7d, 0x81, 0xb2, 0xa8, 0xa1, 0xbc, 0x82, 0x7d, 0xb0, 0xc2, 0x8f, 0xfd, 0x86, 0x9c, 0xaf, 0xa9, 0x7b, 0x8e, 0x8a, 0xa8, 0xb0, 0x1d, 0x67, 0xc1, 0x8d, 0xf3, 0xb1, 0x75, 0x54, 0x05, 0x90, 0xfe, 0xb4, 0x53, 0x40, 0xce, 0x93, 0x98, 0xb7, 0x77, 0x2e, 0x91, 0x97, 0xc2, 0xb9, 0xa1, 0x1c, 0x77, 0xa1, 0x3d, 0xc2, 0x1e, 0x19, 0x80, 0x2d, 0xf3, 0x96, 0xd6, 0xe3, 0x54, 0x31, 0x34, 0x9c, 0x39, 0xe4, 0x01, 0x3c, 0xbd, 0xa0, 0x4f, 0xe4, 0x81, 0x54, 0x5c, 0xa9, 0x27, 0xe5, 0x05, 0x67, 0x34, 0xaf, 0x61, 0xe1, 0xee, 0x73, 0x9e, 0xaf, 0x02, 0xd2, 0xc6, 0x7d, 0x32, 0xab, 0x49, 0xc2, 0x67, 0x86, 0x07, 0xa9, 0xa3, 0xb0, 0xfe, 0x8d, 0x93, 0xa8, 0x5a, 0x9f, 0xa5, 0x92, 0x9d, 0xa7, 0x4c, 0x8c, 0xfb, 0x96, 0x2b, 0xa6, 0xed, 0x7a, 0x2a, 0x98, 0xd6, 0xa7, 0x8d, 0x67, 0x8f, 0x9b, 0x03, 0xa9, 0x4a, 0x55, 0xd6, 0x9d, 0x45, 0xac, 0x22, 0x44, 0xfa, 0x9f, 0xef, 0xaf, 0x26, 0x34, 0x7e, 0xa2, 0x4d, 0xb2, 0x81, 0x24, 0xed, 0xac, 0x37, 0xbb, 0x0a, 0x1e, 0xbd, 0x30, 0xa4, 0x88, 0xac, 0xe6, 0x29, 0x3c, 0x77, 0x8f, 0x61, 0xe5, 0x44, 0x54, 0xa0, 0x93, 0x91, 0xe2, 0x68, 0x63, 0xc3, 0x9b, 0xc5, 0xe4, 0x7e, 0x78, 0xde, 0xa4, 0x7d, 0xe3, 0x1d, 0x85, 0x04, 0xa3, 0x18, 0xd3, 0x17, 0x8e, 0xf1, 0xa1, 0x3f, 0xc2, 0x7f, 0x97, 0xa0, 0xa0, 0xaa, 0xb1, 0x83, 0x9e, 0x7d, 0x9f, 0x8b, 0x9f, 0xc4, 0xa1, 0xc3, 0x9f, 0x85, 0x8c, 0xe3, 0xa4, 0x26, 0x9e, 0xc9, 0x7a, 0x5a, 0xa5, 0xd6, 0x9f, 0x5a, 0x68, 0x05, 0xa7, 0x7e, 0xa1, 0x3d, 0x57, 0x2f, 0xa8, 0xfc, 0xa4, 0x67, 0x47, 0x40, 0xab, 0x08, 0xa7, 0x7a, 0x37, 0x3b, 0xad, 0x69, 0xaa, 0x6a, 0x27, 0xe0, 0xb6, 0x05, 0xb4, 0x55, 0x21, 0xd2, 
0x37, 0x73, 0x77, 0x9b, 0xe3, 0xad, 0x53, 0xd3, 0x80, 0x60, 0xe5, 0xb4, 0x63, 0x51, 0x86, 0x65, 0xe3, 0x24, 0x76, 0xe8, 0x8f, 0x33, 0xe5, 0x6c, 0x8d, 0x0e, 0x99, 0x5b, 0xe5, 0xa0, 0x99, 0x00, 0x99, 0xee, 0xd5, 0x3d, 0xa1, 0x61, 0x98, 0xd6, 0xc4, 0x64, 0xa8, 0x47, 0x98, 0xb0, 0xb3, 0x47, 0xad, 0x78, 0x98, 0x17, 0xa0, 0xce, 0xb0, 0x02, 0x96, 0xcf, 0x8d, 0x2a, 0xb1, 0xf3, 0x95, 0xfc, 0x7a, 0x86, 0xb3, 0x3a, 0x96, 0x1e, 0x68, 0xe0, 0xb4, 0x46, 0x97, 0xb1, 0x57, 0xac, 0xb5, 0xb1, 0x99, 0xf9, 0x47, 0x05, 0xb7, 0x1e, 0x9c, 0x91, 0x36, 0xf2, 0xb8, 0xc0, 0x9f, 0xa8, 0x27, 0xed, 0xbf, 0x2e, 0xa9, 0x37, 0x21, 0x88, 0x54, 0x03, 0x70, 0x02, 0xea, 0x75, 0x63, 0xe0, 0x74, 0xb1, 0xe8, 0x55, 0x78, 0x64, 0x7a, 0x34, 0xe9, 0x2c, 0x8a, 0x2c, 0x83, 0xf8, 0xe7, 0x1d, 0x9e, 0xe0, 0x8e, 0xe9, 0xe6, 0xd3, 0xac, 0xe9, 0x92, 0x6d, 0xd9, 0x5a, 0xb3, 0x4b, 0x92, 0x2e, 0xc9, 0x09, 0xb8, 0x17, 0x92, 0x14, 0xb7, 0x78, 0xbb, 0x0e, 0x8f, 0xf8, 0xa2, 0x20, 0xbd, 0x61, 0x8e, 0x1c, 0x8d, 0xbf, 0xbe, 0x73, 0x8d, 0x04, 0x7b, 0x46, 0xbf, 0x75, 0x8c, 0x9a, 0x69, 0xb1, 0xbf, 0xfc, 0x8d, 0x99, 0x58, 0x33, 0xc0, 0xbb, 0x8e, 0xde, 0x46, 0x8d, 0xc1, 0xa8, 0x90, 0xe9, 0x35, 0x0d, 0xc2, 0xcd, 0x93, 0x77, 0x24, 0xc9, 0xc5, 0xd3, 0x99, 0xea, 0x1e, 0xba, 0x6a, 0x58, 0x61, 0x5c, 0xef, 0x43, 0x7b, 0x6a, 0x67, 0x6a, 0xee, 0xb7, 0x8a, 0xf4, 0x70, 0x18, 0xed, 0xd6, 0x9c, 0x94, 0x78, 0x7b, 0xeb, 0x8d, 0xb1, 0xf1, 0x85, 0x1c, 0xea, 0xe0, 0xbe, 0x7e, 0x8b, 0xb0, 0xdf, 0x3e, 0xc2, 0x24, 0x8a, 0xe1, 0xce, 0xb6, 0xc4, 0xe0, 0x89, 0x9f, 0xba, 0x43, 0xc7, 0x38, 0x86, 0xd3, 0xa3, 0x99, 0xc8, 0x73, 0x84, 0xaa, 0x8f, 0x02, 0xc8, 0xdb, 0x83, 0x64, 0x7c, 0x63, 0xc9, 0x53, 0x82, 0xb0, 0x6a, 0x56, 0xca, 0x28, 0x82, 0xc5, 0x57, 0x62, 0xcb, 0x10, 0x83, 0xbd, 0x44, 0x20, 0xcb, 0xda, 0x85, 0x8d, 0x31, 0x75, 0xcc, 0xb2, 0x87, 0x8b, 0x20, 0xc6, 0xcc, 0x0d, 0x8e, 0xd6, 0x1d, 0xec, 0x76, 0xb3, 0x56, 0xe0, 0xf3, 0x17, 0x8a, 0xb2, 0x5d, 0xf3, 0xf1, 0x2e, 0x9c, 0xe6, 0x62, 0xd6, 0xeb, 0xb5, 0xad, 0x3d, 0x6b, 0x98, 0xe9, 0xb0, 0xbc, 0xcd, 0x77, 
0x3d, 0xe8, 0x92, 0xcf, 0x3e, 0x83, 0x1e, 0xea, 0xd4, 0xd0, 0x13, 0x82, 0xa6, 0xd5, 0x28, 0xd2, 0x37, 0x80, 0x18, 0xbf, 0x56, 0xd3, 0xac, 0x7d, 0x0e, 0xa8, 0x02, 0xd3, 0xfa, 0x7a, 0x5e, 0x91, 0xe0, 0xd4, 0x2b, 0x78, 0x4f, 0x7e, 0x20, 0xd4, 0x13, 0x76, 0xb7, 0x6a, 0x67, 0xd4, 0xc1, 0x76, 0x70, 0x55, 0xcb, 0xd5, 0x8e, 0x77, 0x26, 0x40, 0x6e, 0xd6, 0xe7, 0x79, 0x4c, 0x2c, 0xe0, 0xd0, 0x9b, 0x7e, 0xf9, 0x25, 0xa5, 0xce, 0xc6, 0x87, 0x3d, 0x22, 0x92, 0x8e, 0x2c, 0x44, 0x64, 0xf4, 0xc5, 0x9f, 0x0b, 0x4a, 0xfc, 0xef, 0x82, 0xab, 0xbb, 0x54, 0x1b, 0xeb, 0x26, 0xba, 0x53, 0x61, 0x46, 0xe8, 0xd7, 0xc9, 0xda, 0x68, 0x23, 0xe8, 0xf1, 0xd6, 0xa8, 0x70, 0xeb, 0xe6, 0xfe, 0xd9, 0x46, 0x72, 0x47, 0xd7, 0xdc, 0xdc, 0xe5, 0x70, 0xe8, 0xc1, 0xae, 0xde, 0x0e, 0x6c, 0xb7, 0xa9, 0x5d, 0xdd, 0xd8, 0x69, 0x80, 0x91, 0xe4, 0xdd, 0x2a, 0x66, 0xb6, 0x7d, 0x1d, 0xda, 0xb6, 0x64, 0x9c, 0x66, 0xba, 0xde, 0xb1, 0x60, 0x42, 0x4f, 0x4b, 0xdf, 0x07, 0x63, 0x92, 0x39, 0xe7, 0xd5, 0x7d, 0x69, 0xf0, 0x2c, 0xa1, 0xd1, 0xba, 0x72, 0xa7, 0x28, 0x0d, 0xd0, 0x06, 0x7c, 0x45, 0x25, 0x5b, 0x98, 0xa5, 0x34, 0xe0, 0xef, 0x62, 0xac, 0x87, 0x39, 0xb6, 0xef, 0xd2, 0xbc, 0xba, 0x45, 0xf3, 0xee, 0x22, 0xc8, 0x17, 0x4e, 0x5f, 0xec, 0x6f, 0xd2, 0xcb, 0x5a, 0x5e, 0xe6, 0xb9, 0xd7, 0xab, 0x5d, 0xf5, 0xdd, 0xe2, 0xda, 0xd3, 0x5e, 0xd7, 0xd4, 0xf3, 0xdd, 0xba, 0x5b, 0xf3, 0xbb, 0xe3, 0xde, 0x99, 0x56, 0xab, 0xa1, 0x63, 0xde, 0x4b, 0x53, 0x52, 0x8b, 0x80, 0xde, 0xa4, 0x4f, 0xfc, 0x77, 0x00, 0xe0, 0xbe, 0x4a, 0x02, 0x5f, 0xf2, 0xdf, 0x6a, 0x45, 0x68, 0x46, 0x70, 0xdc, 0x34, 0x48, 0xf7, 0x34, 0x2b, 0xd6, 0x69, 0x59, 0x3c, 0x2d, 0x46, 0xd3, 0x99, 0x63, 0x05, 0x29, 0xe0, 0xd1, 0x03, 0x6f, 0x6d, 0x26, 0xfa, 0xa6, 0x9f, 0x32, 0x19, 0xe7, 0x51, 0xb1, 0xf5, 0x34, 0x07, 0xe9, 0x36, 0xc3, 0x03, 0x39, 0x2b, 0xed, 0xb5, 0xd3, 0xd4, 0x3e, 0x00, 0xee, 0x7e, 0xd8, 0x1b, 0x44, 0xa2, 0xde, 0x3b, 0xda, 0x7d, 0x48, 0x9c, 0xdb, 0x00, 0xde, 0xaa, 0x4a, 0x8a, 0xd1, 0x06, 0xde, 0xa2, 0x44, 0x3c, 0xb4, 0x44, 0xdd, 0xd9, 0x3c, 0x21, 0x98, 0x09, 
0xdd, 0x66, 0x37, 0x35, 0x81, 0xdd, 0xdb, 0xea, 0x34, 0x86, 0x6d, 0x4d, 0xdb, 0x06, 0x33, 0xad, 0x58, 0xea, 0xda, 0x99, 0x31, 0xd2, 0x41, 0xf4, 0xd5, 0x09, 0x30, 0xd3, 0x31, 0x36, 0xd2, 0x00, 0x30, 0xb1, 0x2e, 0x34, 0xd3, 0x63, 0x55, 0x9e, 0x2a, 0xe8, 0xd2, 0x1a, 0x5e, 0x89, 0x29, 0x10, 0xad, 0x7c, 0x30, 0x63, 0xe5, 0x3b, 0xb7, 0x2e, 0x31, 0x19, 0xe5, 0xe2, 0xcf, 0x42, 0x33, 0xdb, 0xea, 0x8e, 0xd6, 0x5a, 0x35, 0x83, 0xdd, 0xf5, 0xd7, 0xda, 0x35, 0xb6, 0xdb, 0xb1, 0xd9, 0xaa, 0x35, 0xeb, 0xd8, 0xdd, 0xda, 0x50, 0x34, 0x5b, 0xc5, 0x04, 0xda, 0xd1, 0x33, 0x0f, 0xad, 0xc4, 0xda, 0xc9, 0x32, 0x05, 0x94, 0xc4, 0xda, 0x3a, 0x30, 0xb9, 0x80, 0xef, 0xd8, 0x89, 0x2f, 0x81, 0x6d, 0x14, 0xd7, 0xe8, 0x2e, 0x87, 0x59, 0xa6, 0xd9, 0x20, 0x2d, 0x2c, 0x43, 0xbb, 0xd3, 0x85, 0x2d, 0xf1, 0x30, 0x74, 0xd1, 0x86, 0x2e, 0x75, 0x2e, 0x45, 0xd0, 0x41, 0x2e, 0xd2, 0x2c, 0xed, 0xcf, 0x63, 0x2f, 0x15, 0x2c, 0x03, 0x1f, 0x9a, 0xd1, 0x6e, 0xe0, 0x9e, 0x1d, 0x1e, 0xd8, 0xaf, 0xe2, 0x46, 0x1d, 0x4a, 0xe0, 0xb1, 0xe1, 0x8b, 0x1d, 0x52, 0xe2, 0x6c, 0xdf, 0xc3, 0x22, 0xe4, 0xde, 0x41, 0xd3, 0xe1, 0x24, 0xe4, 0xdc, 0xde, 0xca, 0x44, 0x24, 0x37, 0xcf, 0xde, 0xba, 0x1a, 0x2c, 0x31, 0xd2, 0xd9, 0xae, 0x68, 0x2e, 0x32, 0xce, 0x45, 0x9f, 0xb3, 0x2f, 0x40, 0xcb, 0xc6, 0x93, 0x9f, 0x2e, 0xa8, 0xcb, 0x5a, 0x85, 0x3a, 0x2e, 0x69, 0xcb, 0xfe, 0x75, 0x2b, 0x2c, 0x00, 0xce, 0x12, 0x5e, 0x83, 0x29, 0xb7, 0xcf, 0xeb, 0x2f, 0xe5, 0x29, 0xf6, 0xcf, 0xb2, 0x2d, 0x37, 0x2a, 0x35, 0xcf, 0x7d, 0x2a, 0xc8, 0x2a, 0xf5, 0xce, 0xda, 0x29, 0x3e, 0x21, 0xbf, 0xca, 0x35, 0xe0, 0x74, 0x21, 0x1f, 0xd1, 0xfc, 0xe1, 0x21, 0x1e, 0xc5, 0xda, 0xcb, 0xe2, 0xd6, 0x1e, 0xf8, 0xe1, 0xe9, 0xe2, 0x33, 0x21, 0x32, 0xe2, 0x33, 0xdc, 0x30, 0x25, 0xc0, 0xde, 0xca, 0xd1, 0xd4, 0x26, 0xd2, 0xdc, 0xf6, 0xc7, 0x9a, 0x2c, 0x31, 0xd6, 0x3a, 0xb5, 0xfd, 0x2e, 0x7e, 0xd0, 0x1b, 0xa4, 0xe2, 0x2f, 0x31, 0xcc, 0x4c, 0x95, 0x74, 0x2e, 0xa4, 0xcb, 0x5e, 0x83, 0x2b, 0x2e, 0x07, 0xcd, 0x39, 0x6c, 0xec, 0x2e, 0x0e, 0xcf, 0xb2, 0x52, 0xda, 0x2b, 0x4b, 0xd0, 
0x94, 0x2f, 0xc4, 0x2b, 0x91, 0xd0, 0x3f, 0x2c, 0x4b, 0x2b, 0xdd, 0xcf, 0xea, 0x29, 0x43, 0x2c, 0xda, 0xce, 0xbd, 0x28, 0x24, 0x21, 0xc8, 0xc8, 0x8f, 0xe3, 0x5d, 0x23, 0x6d, 0xca, 0xcf, 0xe1, 0x47, 0x24, 0x8c, 0xd2, 0x61, 0xe1, 0x2c, 0x21, 0x84, 0xdd, 0x40, 0xe3, 0xc4, 0x22, 0x0d, 0xe4, 0x0f, 0xe3, 0x5e, 0x26, 0x03, 0xe2, 0x90, 0xd8, 0xd3, 0x2a, 0xcb, 0xdf, 0x9d, 0xcd, 0x44, 0x2d, 0xdf, 0xdb, 0x4f, 0xbf, 0x6c, 0x30, 0xba, 0xd3, 0x75, 0xab, 0x7e, 0x30, 0x15, 0xcc, 0x55, 0x96, 0x8a, 0x2e, 0xa8, 0xc9, 0xc5, 0x80, 0x5b, 0x2e, 0xd2, 0xcd, 0xf4, 0x65, 0x93, 0x2e, 0xa2, 0xd3, 0x91, 0x42, 0x44, 0x2e, 0x7b, 0xd1, 0xdf, 0x2f, 0x8b, 0x2e, 0xb2, 0xd1, 0x49, 0x2a, 0x98, 0x4e, 0x9a, 0xd4, 0x1d, 0x22, 0xa7, 0x57, 0xbf, 0xd2, 0x79, 0x20, 0xb0, 0x22, 0xcd, 0xc1, 0x1a, 0xe5, 0xc7, 0x23, 0xf4, 0xc6, 0xde, 0xe4, 0xe0, 0x26, 0x21, 0xcb, 0xbb, 0xe2, 0x8a, 0x29, 0x54, 0xd3, 0x64, 0xe2, 0x04, 0x27, 0x0b, 0xe0, 0x63, 0xe5, 0x75, 0x2d, 0x8d, 0xe3, 0x63, 0xe0, 0xd5, 0x31, 0x82, 0xe2, 0x62, 0xd5, 0x2f, 0x3b, 0x5c, 0xe4, 0x4d, 0xcc, 0x1a, 0x44, 0x45, 0xe0, 0xe1, 0xba, 0x2d, 0x4a, 0xba, 0xdc, 0x16, 0xa3, 0xef, 0x49, 0x9f, 0xdc, 0x88, 0x8b, 0x46, 0x48, 0x7d, 0xdb, 0xc8, 0x6a, 0xa9, 0x45, 0x0f, 0xdd, 0x53, 0x45, 0xe0, 0x4b, 0x26, 0xda, 0x7b, 0x2e, 0x8c, 0x59, 0x69, 0xd9, 0x3c, 0x23, 0x95, 0x75, 0x03, 0xd3, 0x6d, 0x22, 0x64, 0x7d, 0x2f, 0xd1, 0xbf, 0x1f, 0x90, 0x26, 0xb9, 0xb6, 0x4b, 0xe5, 0xe6, 0x26, 0x86, 0xbe, 0xeb, 0xe7, 0xda, 0x28, 0x2c, 0xc5, 0x8f, 0xe6, 0xdd, 0x2b, 0x38, 0xcd, 0x49, 0xe4, 0xac, 0x33, 0x70, 0xd5, 0x59, 0xe3, 0xb3, 0x40, 0x70, 0xe0, 0x0d, 0xe3, 0x54, 0x4d, 0xcd, 0xe3, 0x5a, 0xdc, 0xb9, 0x5a, 0x77, 0xe0, 0xca, 0xce, 0xf2, 0x64, 0xbf, 0xd7, 0xc4, 0xb9, 0xc0, 0x68, 0xba, 0xd5, 0xda, 0xa5, 0x87, 0x6b, 0x57, 0xd4, 0xce, 0x8e, 0xde, 0x6e, 0x00, 0xd4, 0xb4, 0x75, 0x4a, 0x71, 0x39, 0xd5, 0x2f, 0x5a, 0x68, 0x74, 0x41, 0xda, 0xed, 0x37, 0x8c, 0x79, 0x21, 0xd8, 0xdd, 0x25, 0xa6, 0x83, 0x42, 0xd2, 0x16, 0x1f, 0x87, 0x95, 0x15, 0xcf, 0x3e, 0x1a, 0x5a, 0x2a, 0x43, 0xaf, 0xd5, 0xe4, 0x79, 
0x2b, 0xd5, 0xb5, 0xad, 0xe6, 0x61, 0x2d, 0xcc, 0xbc, 0x4f, 0xe9, 0x5a, 0x32, 0x90, 0xc3, 0xe8, 0xe8, 0xd7, 0x4b, 0xab, 0xcf, 0x55, 0xe6, 0xa0, 0x5d, 0xd2, 0xd8, 0xa0, 0xe3, 0xf3, 0x67, 0x3f, 0xd4, 0xd1, 0xd4, 0x30, 0x6f, 0x5c, 0xcf, 0x79, 0xc4, 0x2b, 0x76, 0x3f, 0xcb, 0xa8, 0xb2, 0xf0, 0x7b, 0xa5, 0xc9, 0x01, 0x9f, 0x6c, 0x7f, 0x69, 0xc7, 0xcb, 0x8a, 0xd2, 0x83, 0x53, 0xc7, 0x74, 0x73, 0xe8, 0x86, 0xb5, 0xc7, 0xfe, 0x5b, 0x7c, 0x89, 0x4c, 0xca, 0x41, 0x43, 0x02, 0x8f, 0x1b, 0xcb, 0x89, 0x2b, 0xc8, 0x92, 0xf0, 0xcc, 0xbe, 0x17, 0xab, 0x9b, 0xd2, 0xcc, 0x37, 0x16, 0xe7, 0x2d, 0x5f, 0xa2, 0xf7, 0xe3, 0x6b, 0x30, 0x32, 0xa9, 0x1a, 0xe3, 0xff, 0x34, 0xcb, 0xae, 0xc5, 0xe4, 0x0d, 0x4c, 0xe1, 0xb8, 0xca, 0xe7, 0x77, 0x5d, 0x90, 0xc3, 0x56, 0xe5, 0x1d, 0x6e, 0x35, 0xc8, 0xe9, 0xde, 0x1a, 0x77, 0x78, 0xc5, 0x8d, 0xce, 0xf6, 0x80, 0x4b, 0xc1, 0xc5, 0xbe, 0x55, 0x87, 0xca, 0xbf, 0x1c, 0xad, 0xd1, 0x8c, 0xe3, 0xbd, 0x2a, 0x9b, 0x1a, 0x91, 0x1d, 0xbb, 0xf1, 0x86, 0xea, 0x95, 0x00, 0xbb, 0x95, 0x72, 0x20, 0x97, 0xba, 0xbc, 0x57, 0x5d, 0x79, 0x99, 0xee, 0xbe, 0x94, 0x49, 0x17, 0x9c, 0xf5, 0xc1, 0x22, 0x34, 0xcc, 0x9f, 0xce, 0xc3, 0x82, 0x21, 0x36, 0xa5, 0x06, 0xc7, 0x88, 0x19, 0x92, 0x2f, 0xb8, 0x99, 0x52, 0xe3, 0xa2, 0x33, 0x23, 0x9e, 0x7b, 0xe4, 0x7c, 0x45, 0x81, 0xa3, 0x35, 0xe3, 0xee, 0x5b, 0x41, 0xac, 0x2e, 0xe4, 0xa8, 0x6c, 0xf0, 0xb3, 0x80, 0xe2, 0xe9, 0x7e, 0x2c, 0xb8, 0xee, 0xda, 0x73, 0x87, 0xff, 0xb8, 0x10, 0xcc, 0x25, 0x90, 0x79, 0xb5, 0x2e, 0xbb, 0xaa, 0x98, 0x62, 0xb3, 0xbf, 0xaa, 0xcf, 0x9d, 0x38, 0xb2, 0xb8, 0x98, 0x14, 0xa0, 0xb0, 0xb2, 0x09, 0x84, 0xb5, 0xa3, 0x22, 0xb2, 0x6d, 0x71, 0x6e, 0xa4, 0x98, 0xb3, 0x7d, 0x5e, 0x90, 0xa6, 0x83, 0xb5, 0xe2, 0x4c, 0xc1, 0xa8, 0xc4, 0xb8, 0xa4, 0x3a, 0xb1, 0xaa, 0xf0, 0xbb, 0xc8, 0x29, 0xbd, 0xb1, 0x33, 0xc1, 0x84, 0x1e, 0x47, 0x33, 0x06, 0x8c, 0x4a, 0xe4, 0xdc, 0x44, 0xdf, 0x91, 0x92, 0xe4, 0xca, 0x5c, 0x03, 0x97, 0x13, 0xe3, 0x56, 0x6b, 0x1c, 0x9f, 0x50, 0xe5, 0x13, 0x7e, 0x7b, 0xa7, 0x22, 0xe4, 0x1b, 0x90, 0x29, 0xad, 
0x71, 0xdc, 0x84, 0x99, 0x6a, 0xad, 0x06, 0xcc, 0xeb, 0xa2, 0x8b, 0xab, 0x49, 0xbc, 0x8e, 0xa9, 0x48, 0xaa, 0x58, 0xaa, 0x95, 0xac, 0x48, 0xaa, 0x30, 0x97, 0x82, 0xae, 0x75, 0xa9, 0x65, 0x84, 0x97, 0xaf, 0xd3, 0xa9, 0x4e, 0x71, 0x97, 0xb0, 0xe6, 0xaa, 0xc4, 0x5f, 0xa8, 0xb2, 0x45, 0xad, 0x4b, 0x4f, 0x16, 0xb3, 0xda, 0xb0, 0x50, 0x3e, 0xeb, 0xb5, 0xdd, 0xb3, 0x32, 0x2d, 0xed, 0xbb, 0x07, 0xb9, 0xed, 0x23, 0x7e, 0x47, 0xc9, 0x7f, 0x3c, 0xe6, 0xbb, 0x5a, 0xa5, 0x83, 0xf9, 0xe6, 0x05, 0x6a, 0x14, 0x8a, 0x0c, 0xe3, 0xf0, 0x7f, 0xb1, 0x92, 0xf0, 0xe6, 0x38, 0x93, 0x14, 0x9c, 0xd3, 0xe5, 0x15, 0xa3, 0xba, 0xa3, 0xe2, 0xde, 0xd7, 0xab, 0x62, 0xa3, 0xe7, 0xcf, 0x46, 0xb3, 0x17, 0xa3, 0x2e, 0xbe, 0xb9, 0xb7, 0xf5, 0xa2, 0x5a, 0xab, 0xa5, 0xba, 0x13, 0xa1, 0x44, 0x97, 0xa9, 0xbb, 0x6f, 0xa0, 0x47, 0x84, 0xca, 0xbc, 0x16, 0x9f, 0xf0, 0x72, 0x44, 0xbc, 0xcd, 0xa0, 0x95, 0x60, 0x86, 0xbd, 0xe6, 0xa2, 0x65, 0x4f, 0x38, 0xbf, 0x27, 0xa4, 0xa9, 0x3d, 0xe0, 0xc0, 0x41, 0xa7, 0x6f, 0x2c, 0xed, 0xc4, 0x1c, 0xad, 0xcc, 0x22, 0x2a, 0x59, 0x5e, 0x74, 0x98, 0xe9, 0xfd, 0x6e, 0x94, 0x78, 0x22, 0xe8, 0xa0, 0x7f, 0xcc, 0x7e, 0x53, 0xe6, 0xa2, 0x8d, 0x92, 0x87, 0x52, 0xe6, 0xd4, 0xa3, 0x4b, 0x92, 0xa6, 0xe7, 0x25, 0xb7, 0xcb, 0x9c, 0x83, 0xe3, 0xff, 0xbc, 0x31, 0x9c, 0x6b, 0xd3, 0x58, 0xc1, 0xc3, 0x9c, 0x5d, 0xc2, 0x1f, 0xc4, 0x2f, 0x9a, 0x43, 0xac, 0xc7, 0xc5, 0xc0, 0x98, 0x16, 0x98, 0x5e, 0xc6, 0x95, 0x96, 0xde, 0x85, 0x75, 0xc6, 0xdf, 0x96, 0x1e, 0x73, 0x1b, 0xc7, 0x29, 0x96, 0x3c, 0x61, 0x14, 0xc7, 0xc2, 0x97, 0x4b, 0x4e, 0xd9, 0xc8, 0x8d, 0x98, 0x68, 0x3c, 0x03, 0xc9, 0x98, 0x9a, 0x5c, 0x2a, 0x24, 0xcb, 0x40, 0x9f, 0xd7, 0x1f, 0xfc, 0x71, 0x7a, 0x64, 0x01, 0xee, 0xca, 0x82, 0x77, 0x6b, 0xf7, 0xeb, 0xaf, 0x8f, 0xff, 0x74, 0xd4, 0xea, 0xed, 0xa0, 0x1f, 0x7d, 0x82, 0xe9, 0xb9, 0xb6, 0x0d, 0x89, 0x48, 0xeb, 0x35, 0xc9, 0x3c, 0x95, 0xfe, 0xe9, 0xd0, 0xcc, 0x77, 0x95, 0x45, 0xd8, 0x65, 0xce, 0x52, 0x93, 0xc9, 0xc4, 0xe0, 0xd0, 0x0f, 0x91, 0x59, 0xae, 0xaa, 0xd0, 0xe5, 0x8f, 0x0a, 0x99, 0xe7, 
0xd1, 0x12, 0x8d, 0x55, 0x86, 0x39, 0xd1, 0x31, 0x8c, 0x41, 0x73, 0x8e, 0xd1, 0x27, 0x8b, 0xa7, 0x60, 0x83, 0xd1, 0x97, 0x8c, 0x00, 0x4c, 0xc3, 0xd2, 0x1b, 0x8c, 0xa7, 0x38, 0x92, 0xd3, 0x1d, 0x8e, 0x40, 0x26, 0xa0, 0xd0, 0x08, 0x94, 0x24, 0x21, 0x56, 0x83, 0x42, 0x5b, 0xe6, 0xf2, 0xb6, 0x8e, 0xb3, 0x61, 0xc4, 0xed, 0x7a, 0x9e, 0x51, 0x67, 0x29, 0xe9, 0xc7, 0xb0, 0x08, 0x6f, 0x56, 0xea, 0xe8, 0xc0, 0x32, 0x7b, 0x7f, 0xe9, 0x30, 0xd0, 0x89, 0x85, 0xc3, 0xe7, 0xb2, 0xd7, 0x46, 0x89, 0xb0, 0xdc, 0x55, 0xdc, 0x07, 0x8a, 0xc2, 0xc9, 0xe1, 0xdd, 0x33, 0x87, 0x30, 0xb2, 0xe3, 0xdd, 0x18, 0x83, 0xd1, 0x9b, 0x72, 0xdb, 0x6d, 0x80, 0xe1, 0x86, 0x75, 0xda, 0x54, 0x7d, 0xbf, 0x71, 0x85, 0xd8, 0x63, 0x7c, 0xdd, 0x5b, 0x64, 0xd7, 0x50, 0x7c, 0xe2, 0x45, 0xd3, 0xd6, 0x5c, 0x7e, 0xf1, 0x32, 0x7c, 0xd3, 0xdb, 0x84, 0x06, 0x28, 0xa7, 0xd1, 0x1c, 0x8b, 0x6a, 0x24, 0x8e, 0x91, 0xff, 0x4b, 0x00, 0xf1, 0xd8, 0xa2, 0x88, 0x51, 0x25, 0xec, 0x97, 0xb0, 0x62, 0x5e, 0x8d, 0xe9, 0x18, 0xbd, 0x28, 0x63, 0x63, 0xe8, 0xc7, 0xcd, 0x72, 0x6d, 0xc3, 0xe9, 0x3b, 0xd7, 0xef, 0x74, 0x41, 0xe3, 0x19, 0xd8, 0xf9, 0x76, 0x7d, 0xd8, 0x5f, 0xdc, 0xa2, 0x75, 0xfe, 0xc4, 0x16, 0xdd, 0xda, 0x71, 0xf4, 0xab, 0x49, 0xdd, 0x7a, 0x6f, 0x08, 0x94, 0x11, 0xdb, 0xcc, 0x6c, 0xf4, 0x7e, 0xc1, 0xda, 0x5b, 0x69, 0xea, 0x69, 0x2c, 0xda, 0x55, 0x6a, 0x01, 0x53, 0x57, 0xdd, 0x51, 0x69, 0x59, 0x3c, 0x8c, 0xd8, 0x96, 0x6f, 0xfb, 0x2f, 0x75, 0xd3, 0xf2, 0x77, 0x8f, 0x29, 0xfc, 0xd1, 0xb5, 0x80, 0x4c, 0x26, 0xb7, 0x9d, 0x01, 0x37, 0x83, 0xf2, 0x09, 0xb1, 0x9b, 0x44, 0x4b, 0xec, 0x3f, 0xc0, 0x69, 0x4d, 0x52, 0xec, 0xd2, 0xcb, 0xe3, 0x57, 0x06, 0xeb, 0x06, 0xd5, 0x37, 0x60, 0x0f, 0xe5, 0xc5, 0xd8, 0x44, 0x60, 0x2f, 0xdd, 0xc2, 0xd9, 0xa5, 0x65, 0x5d, 0xd7, 0x23, 0xdd, 0x8d, 0x60, 0x15, 0xbd, 0x72, 0xde, 0xc3, 0x5c, 0x58, 0xa4, 0xed, 0xde, 0x1f, 0x58, 0xde, 0x8d, 0x99, 0xde, 0xc1, 0x56, 0x15, 0x79, 0x7d, 0xe0, 0x3b, 0x50, 0xab, 0x62, 0x75, 0xe1, 0x3a, 0x4d, 0xd1, 0x4a, 0xcb, 0xde, 0xf9, 0x52, 0x51, 0x36, 0xd0, 0xd8, 0xec, 0x5e, 
0xa3, 0x2e, 0xf4, 0xd3, 0xa6, 0x69, 0xa2, 0x2a, 0x91, 0xd1, 0xeb, 0x73, 0x13, 0x28, 0x38, 0xac, 0x5d, 0x34, 0x22, 0xe9, 0x8b, 0xb6, 0x79, 0x35, 0xe6, 0xeb, 0x24, 0xcc, 0x34, 0x40, 0xa1, 0xef, 0xb8, 0xd1, 0x22, 0x46, 0xe3, 0xe9, 0x34, 0xd8, 0xb3, 0x4a, 0x49, 0xdd, 0xdd, 0xdb, 0x14, 0x50, 0x67, 0xdb, 0x7c, 0xdc, 0x64, 0x55, 0x17, 0xd2, 0x17, 0xe0, 0x33, 0x4b, 0xd7, 0xb8, 0x7c, 0xe0, 0xa4, 0x46, 0x39, 0x9c, 0xd1, 0xdf, 0x51, 0x3d, 0xbc, 0x84, 0xc0, 0xdd, 0x3e, 0x36, 0x97, 0x6e, 0x6e, 0xdc, 0x4d, 0x35, 0xfe, 0x5a, 0x3e, 0xdb, 0x6f, 0x34, 0x07, 0x43, 0x70, 0xd6, 0x9b, 0x32, 0x76, 0x32, 0x68, 0xd6, 0x8b, 0x45, 0x7f, 0x2e, 0xf0, 0xd5, 0x26, 0x5b, 0x74, 0x2b, 0xcc, 0xd3, 0x80, 0x63, 0xb8, 0x29, 0xb4, 0xab, 0xe9, 0x33, 0x81, 0xe2, 0x8d, 0xb4, 0x2b, 0x34, 0x7f, 0xe3, 0xdd, 0xd1, 0xaa, 0x36, 0x0a, 0xec, 0xd0, 0xd6, 0xb9, 0x36, 0xd5, 0xdd, 0xaf, 0xd8, 0x1e, 0x37, 0x21, 0xdb, 0x9a, 0xda, 0x0b, 0x39, 0xff, 0xd9, 0x3a, 0xda, 0xaf, 0x35, 0xf3, 0xc5, 0x36, 0xdb, 0x3b, 0x34, 0xb8, 0xae, 0x10, 0xdb, 0x4f, 0x33, 0xb1, 0x95, 0x2e, 0xdb, 0x5f, 0x32, 0x92, 0x81, 0xcb, 0xd9, 0xa4, 0x31, 0x27, 0x6d, 0xf3, 0xd8, 0xf2, 0x30, 0x50, 0x5a, 0xcd, 0xd9, 0xe5, 0x2f, 0x1c, 0x45, 0x15, 0xd4, 0xa9, 0x2f, 0x13, 0x31, 0x49, 0xd2, 0x66, 0x2f, 0x59, 0x2e, 0xee, 0xd0, 0xf9, 0x2f, 0x8b, 0x2d, 0x76, 0xd3, 0x6d, 0x58, 0x90, 0x2a, 0x92, 0x22, 0x02, 0xd3, 0x39, 0xe0, 0x7e, 0x1e, 0xe1, 0xda, 0xfa, 0xe2, 0xe0, 0x1f, 0x2f, 0xe1, 0xb3, 0xe2, 0x48, 0x20, 0x43, 0xe2, 0xfb, 0xe0, 0x02, 0x24, 0xdc, 0xdf, 0x0e, 0xd4, 0x66, 0x26, 0xb7, 0xdd, 0xa9, 0xca, 0xc3, 0x28, 0x47, 0xda, 0x8d, 0xc2, 0x0d, 0x2d, 0x1a, 0xd3, 0x58, 0xae, 0xa5, 0x2e, 0x95, 0xce, 0x75, 0x9f, 0x89, 0x2f, 0x6c, 0xcb, 0xc8, 0x93, 0x34, 0x2e, 0xaa, 0xcb, 0x60, 0x84, 0xa0, 0x2e, 0x8f, 0xcc, 0x22, 0x74, 0x99, 0x2c, 0x99, 0xce, 0x5e, 0x5e, 0xd5, 0x2b, 0x0c, 0xd0, 0x91, 0x30, 0x83, 0x2b, 0x3b, 0xd0, 0x56, 0x2e, 0x1a, 0x2b, 0x69, 0xd0, 0x22, 0x2b, 0xe3, 0x2b, 0x9a, 0xcf, 0xef, 0x29, 0xd6, 0x23, 0x1d, 0xca, 0xf9, 0xe1, 0x0b, 0x24, 0x18, 0xd3, 0xbb, 0xe0, 0xde, 
0x20, 0xd6, 0xdd, 0x30, 0xe3, 0x8d, 0x21, 0x43, 0xe3, 0x0c, 0xe3, 0x0f, 0x24, 0x49, 0xe2, 0xd2, 0xdc, 0x79, 0x27, 0xe9, 0xdf, 0xa8, 0xd2, 0x6a, 0x28, 0x8f, 0xde, 0x0c, 0xc8, 0x67, 0x2d, 0x70, 0xd6, 0xee, 0xb6, 0x62, 0x2f, 0x0d, 0xd0, 0x69, 0xa4, 0xcf, 0x2f, 0x63, 0xcc, 0x53, 0x95, 0x0c, 0x2e, 0xa6, 0xcb, 0x63, 0x82, 0x90, 0x2e, 0x5a, 0xcd, 0x70, 0x6c, 0xa2, 0x2f, 0x3c, 0xd0, 0x52, 0x53, 0x3b, 0x2c, 0xcb, 0xd1, 0x4f, 0x30, 0x94, 0x2c, 0xf8, 0xd0, 0xfd, 0x2d, 0x8b, 0x2d, 0x26, 0xd0, 0xb3, 0x2a, 0xcd, 0x44, 0xb6, 0xd4, 0x35, 0x23, 0xbb, 0x24, 0x79, 0xca, 0x41, 0xe2, 0x1e, 0x24, 0xfe, 0xcb, 0xaf, 0xe1, 0xea, 0x27, 0x31, 0xd4, 0x70, 0xe1, 0x6b, 0x24, 0x07, 0xdf, 0xb4, 0xe4, 0x9d, 0x25, 0x52, 0xe4, 0x51, 0xe3, 0x69, 0x28, 0xa9, 0xe3, 0x36, 0xd9, 0x1f, 0x2c, 0xdc, 0xe0, 0x69, 0xcd, 0xbe, 0x2f, 0x51, 0xdc, 0x35, 0xc0, 0x2a, 0x30, 0xb6, 0xd3, 0x54, 0xaa, 0x91, 0x30, 0x1f, 0xcc, 0x20, 0x95, 0xdb, 0x2e, 0xdf, 0xc8, 0xff, 0x7f, 0x93, 0x2f, 0x31, 0xcd, 0xff, 0x65, 0x2b, 0x30, 0x6b, 0xd4, 0x79, 0x42, 0x5f, 0x2f, 0xf7, 0xd2, 0xa1, 0x30, 0xbc, 0x40, 0x96, 0xd6, 0x8f, 0x2a, 0x63, 0x52, 0x60, 0xd6, 0xd6, 0x27, 0x81, 0x77, 0x69, 0xd5, 0x0a, 0x23, 0x1f, 0x24, 0xce, 0xc1, 0xb6, 0xe6, 0x15, 0x26, 0x01, 0xc8, 0xee, 0xe4, 0xee, 0x27, 0xef, 0xcc, 0xbb, 0xe3, 0x30, 0x2c, 0x42, 0xd5, 0x85, 0xe2, 0x50, 0x29, 0xf1, 0xe2, 0xea, 0xe6, 0x54, 0x30, 0xab, 0xe3, 0x81, 0xe0, 0xbe, 0x33, 0x7e, 0xe3, 0x2e, 0xd5, 0xc7, 0x42, 0x9e, 0xe4, 0xb0, 0xcc, 0xa1, 0x4d, 0x60, 0xe0, 0x78, 0xba, 0x3b, 0x4f, 0x88, 0xdd, 0xd1, 0xa5, 0x83, 0x4e, 0x13, 0xde, 0x89, 0x8c, 0x70, 0x4d, 0xb0, 0xdd, 0xfe, 0x6c, 0x5b, 0x4e, 0x52, 0xdc, 0x81, 0x4c, 0x36, 0x52, 0xc9, 0xdc, 0x4b, 0x31, 0x8c, 0x5f, 0x25, 0xdc, 0x96, 0x27, 0xf9, 0x7a, 0x4b, 0xd8, 0x06, 0x24, 0xcf, 0x7f, 0xb0, 0xd5, 0x0b, 0x21, 0x8c, 0x27, 0x5f, 0xb8, 0x75, 0xe7, 0x4f, 0x28, 0xdf, 0xc0, 0xaf, 0xe7, 0xae, 0x2a, 0x56, 0xc7, 0x9c, 0xe6, 0xe8, 0x2d, 0x2d, 0xce, 0x69, 0xe5, 0x37, 0x36, 0x84, 0xd7, 0x83, 0xe3, 0xfa, 0x43, 0x1f, 0xe3, 0x4c, 0xe5, 0x75, 0x56, 0xc2, 0xe2, 
0x07, 0xdb, 0x3f, 0x63, 0x1e, 0xe2, 0xea, 0xd0, 0x89, 0x6f, 0xc1, 0xe3, 0x0b, 0xc4, 0xba, 0x74, 0x43, 0xe1, 0x1c, 0xb0, 0x39, 0x76, 0x7c, 0xdf, 0xed, 0x9a, 0x30, 0x79, 0xd7, 0xdf, 0x9f, 0x80, 0x95, 0x7c, 0x9e, 0xdf, 0x96, 0x66, 0x35, 0x7d, 0x99, 0xe4, 0x4b, 0x3d, 0x2a, 0x81, 0xc4, 0xde, 0x8d, 0x2d, 0xca, 0x89, 0xa1, 0xd7, 0xef, 0x24, 0x1c, 0x98, 0xff, 0xd4, 0xb2, 0x20, 0xa0, 0x2b, 0xc8, 0xb2, 0x49, 0xe5, 0x69, 0x2d, 0xd7, 0xb6, 0x97, 0xe6, 0xed, 0x30, 0x0c, 0xbe, 0xc6, 0xea, 0x29, 0x36, 0x49, 0xc7, 0x18, 0xe9, 0xf8, 0x53, 0xfb, 0xd1, 0xcf, 0xe7, 0x14, 0x5f, 0x40, 0xdb, 0x1b, 0xe5, 0x9b, 0x6e, 0xe4, 0xdd, 0x35, 0xdd, 0x74, 0x79, 0xc4, 0xdb, 0x72, 0xcf, 0x7d, 0x81, 0x54, 0xd6, 0xda, 0xbe, 0xde, 0x85, 0x66, 0xd4, 0xfe, 0xab, 0x03, 0x89, 0x5b, 0xd3, 0x91, 0x95, 0x80, 0x8c, 0x2d, 0xd2, 0xe6, 0x7e, 0x06, 0x8f, 0x8f, 0xd2, 0xb4, 0x65, 0xfe, 0x94, 0x87, 0xd0, 0xc2, 0x4d, 0xa7, 0x99, 0x0d, 0xd0, 0xd0, 0x35, 0x5b, 0x9b, 0xe3, 0xd4, 0x75, 0x1f, 0xe2, 0xa1, 0x70, 0xd1, 0x57, 0x1a, 0xff, 0x2e, 0xf0, 0xa5, 0x2e, 0xe3, 0xea, 0x31, 0xa7, 0xaa, 0x3f, 0xe4, 0x77, 0x38, 0xdf, 0xb2, 0x69, 0xe7, 0x29, 0x55, 0x33, 0xbb, 0x6d, 0xe6, 0xdb, 0x61, 0x97, 0xc4, 0xd4, 0xe3, 0x59, 0x72, 0x93, 0xc9, 0x44, 0xdd, 0x6c, 0x82, 0x30, 0xcf, 0x75, 0xd7, 0xeb, 0x8a, 0xcc, 0xcd, 0x37, 0xc8, 0xdf, 0x91, 0x4e, 0xca, 0xca, 0xb8, 0x8c, 0x97, 0x7f, 0xc8, 0x47, 0xa6, 0x1c, 0x9b, 0xa7, 0xc7, 0x44, 0x91, 0xc2, 0x9f, 0x11, 0xc6, 0x7c, 0x7c, 0xa4, 0xa1, 0x75, 0xc6, 0x93, 0x67, 0x45, 0xa3, 0x1e, 0xc6, 0xb7, 0x52, 0x21, 0xa5, 0x93, 0xc9, 0x62, 0x3c, 0x7c, 0xa8, 0x71, 0xcc, 0xbb, 0x28, 0x8d, 0xab, 0x73, 0xcf, 0xbc, 0x17, 0x4e, 0x31, 0x3e, 0x9b, 0xc3, 0xe3, 0xf5, 0x36, 0x05, 0x9f, 0x37, 0xe4, 0xb4, 0x53, 0x53, 0xa6, 0xa6, 0xe5, 0x8c, 0x5f, 0xda, 0xae, 0x55, 0xe3, 0x94, 0x72, 0x42, 0xb4, 0xb2, 0xe0, 0x93, 0x82, 0xec, 0xbc, 0x53, 0xda, 0x48, 0x92, 0xa4, 0xc2, 0xf5, 0xd5, 0xd3, 0x9b, 0x08, 0xc0, 0xd5, 0xc5, 0xea, 0xa3, 0xed, 0xbf, 0x28, 0xb6, 0x34, 0xa7, 0xef, 0xbe, 0x31, 0xa3, 0x9b, 0xab, 0x48, 0xbd, 0x5e, 0x8f, 0x9e, 
0xad, 0xa4, 0xbd, 0x65, 0x7b, 0xd2, 0xae, 0xef, 0xbe, 0x3a, 0x68, 0x24, 0xb0, 0x18, 0xbf, 0xff, 0x55, 0x79, 0xb2, 0x00, 0xc2, 0x94, 0x42, 0xa3, 0xb4, 0x05, 0xc5, 0x72, 0x30, 0x61, 0xb6, 0x05, 0xc8, 0x93, 0x1f, 0xa5, 0x39, 0x90, 0x8e, 0xc7, 0xe5, 0x20, 0x53, 0x2a, 0x92, 0x51, 0xe2, 0x16, 0x5e, 0x3d, 0x99, 0xe4, 0xe3, 0xf0, 0x73, 0x85, 0xa2, 0x21, 0xe5, 0x1e, 0x84, 0xb5, 0xa9, 0xb0, 0xe2, 0xc1, 0x93, 0x92, 0xb0, 0x62, 0xdc, 0xe1, 0xa4, 0xad, 0xb7, 0x7b, 0xd6, 0x8a, 0xac, 0xd1, 0xb5, 0xc8, 0xc6, 0x5c, 0xb3, 0xc1, 0xb4, 0xd5, 0xb5, 0x10, 0xb6, 0xd2, 0xb4, 0xcf, 0xa2, 0xea, 0xb8, 0xd3, 0xb4, 0x70, 0x8f, 0x2c, 0xba, 0x15, 0xb4, 0x73, 0x7b, 0xb4, 0xba, 0xfb, 0xb5, 0x8b, 0x69, 0x34, 0xbc, 0x14, 0xb7, 0x94, 0x57, 0x88, 0xbd, 0x56, 0xba, 0x9a, 0x47, 0x4d, 0xbe, 0xae, 0xbd, 0xf5, 0x36, 0x53, 0xbf, 0xe6, 0xbf, 0x81, 0x24, 0x37, 0x52, 0xe1, 0x80, 0x07, 0xe5, 0xbc, 0x5f, 0x04, 0x87, 0x3b, 0xe6, 0x43, 0x71, 0x0c, 0x8d, 0x31, 0xe5, 0xab, 0x84, 0xea, 0x95, 0x76, 0xe5, 0xdc, 0x97, 0x31, 0x9f, 0xa6, 0xe4, 0x59, 0xa7, 0x5c, 0xa6, 0x70, 0xde, 0xf6, 0xb6, 0xda, 0xae, 0xa9, 0xda, 0x09, 0xbd, 0x06, 0xae, 0x25, 0xc8, 0x9e, 0xc1, 0x60, 0xad, 0x32, 0xb6, 0x0f, 0xc3, 0x2c, 0xab, 0x99, 0xa2, 0x9c, 0xc4, 0x59, 0xaa, 0xdb, 0x8f, 0x37, 0xc5, 0x02, 0xaa, 0xa7, 0x7b, 0xe0, 0xc5, 0x98, 0xaa, 0xce, 0x69, 0x7c, 0xc6, 0x50, 0xac, 0x00, 0x57, 0x6c, 0xc7, 0x28, 0xad, 0xdb, 0x45, 0x42, 0xc8, 0x1d, 0xaf, 0xc2, 0x33, 0x3f, 0xc9, 0x53, 0xb2, 0x13, 0x22, 0x55, 0x5f, 0x25, 0x74, 0xa0, 0xe7, 0xe9, 0x71, 0x59, 0x7a, 0x67, 0xe8, 0xc2, 0x87, 0x63, 0x82, 0xf2, 0xe7, 0x16, 0x93, 0x1a, 0x8b, 0x16, 0xe6, 0x7e, 0xa7, 0xca, 0x96, 0x6a, 0xe7, 0x75, 0xb8, 0xb2, 0x9f, 0x0e, 0xe3, 0x92, 0xc6, 0x27, 0xa6, 0xf5, 0xdd, 0xc2, 0xca, 0x31, 0xa5, 0xed, 0xcb, 0x6b, 0xcc, 0xb6, 0xa4, 0x54, 0xb7, 0x30, 0xce, 0x93, 0xa2, 0x96, 0xa3, 0x1e, 0xcf, 0x3e, 0xa1, 0x85, 0x8f, 0xd5, 0xcf, 0x66, 0xa0, 0x8e, 0x7c, 0xa7, 0xcf, 0xbe, 0xa0, 0x5e, 0x69, 0xf9, 0xcf, 0xf8, 0xa0, 0x86, 0x57, 0x19, 0xd0, 0xcc, 0xa1, 0xda, 0x44, 0x03, 0xd0, 0xe3, 0xa1, 
0xf5, 0x2f, 0xdc, 0xd1, 0xfc, 0xa3, 0x60, 0x1e, 0x1b, 0x75, 0x4a, 0x69, 0x28, 0xed, 0xfc, 0x88, 0x97, 0x71, 0x02, 0xed, 0x0f, 0x90, 0xa4, 0x76, 0xe8, 0xea, 0xbf, 0xa5, 0x01, 0x81, 0xea, 0xe9, 0x99, 0xb8, 0xee, 0x8d, 0x1e, 0xeb, 0x56, 0xca, 0xfb, 0x98, 0xe9, 0xea, 0xcd, 0xd3, 0x27, 0x9d, 0x0c, 0xe0, 0x33, 0xd8, 0x0d, 0x9e, 0x64, 0xcf, 0xf1, 0xd9, 0x93, 0x9b, 0xc3, 0xb9, 0x23, 0xd9, 0xa5, 0x99, 0x33, 0xa4, 0x04, 0xd9, 0x49, 0x97, 0x4c, 0x8f, 0xf3, 0xd8, 0xff, 0x95, 0x6f, 0x7c, 0xca, 0xd6, 0x51, 0x92, 0x61, 0x66, 0xf6, 0xd4, 0xe6, 0x91, 0x75, 0x51, 0x54, 0xd3, 0xb5, 0x91, 0x51, 0x3b, 0xb1, 0xd3, 0xc6, 0x94, 0x9f, 0x2e, 0xd0, 0xd3, 0x0a, 0x99, 0x52, 0x26, 0x1e, 0x89, 0x3b, 0x60, 0xf3, 0xf1, 0x16, 0x93, 0x7d, 0x63, 0xe4, 0xec, 0x5c, 0xa0, 0x8b, 0x6a, 0xcd, 0xea, 0x13, 0xb4, 0x28, 0x74, 0x4a, 0xeb, 0x06, 0xc4, 0x03, 0x7f, 0xff, 0xe9, 0xfc, 0xd1, 0xb4, 0x89, 0x33, 0xe5, 0x6a, 0xd7, 0xbc, 0x8c, 0xb9, 0xdb, 0x91, 0xdb, 0xc0, 0x8e, 0x9b, 0xcc, 0x03, 0xdd, 0x13, 0x8b, 0xc2, 0xb5, 0x06, 0xdd, 0x0d, 0x88, 0x4b, 0x9d, 0x69, 0xdb, 0x49, 0x85, 0x97, 0x88, 0x99, 0xda, 0x22, 0x82, 0x97, 0x73, 0xfb, 0xd7, 0xd4, 0x81, 0xa7, 0x5e, 0x1d, 0xd6, 0xae, 0x81, 0xd9, 0x48, 0xec, 0xd5, 0x4c, 0x82, 0x9f, 0x34, 0xb2, 0xd4, 0x16, 0x88, 0xb4, 0x2c, 0xae, 0xd3, 0x26, 0x8e, 0x8a, 0x27, 0x1a, 0x96, 0xa5, 0x4c, 0x41, 0xf0, 0xad, 0xa2, 0x59, 0x5a, 0x4b, 0xeb, 0xe7, 0xb3, 0xd2, 0x62, 0x56, 0xe8, 0xd9, 0xc0, 0x19, 0x68, 0x76, 0xe8, 0xee, 0xd0, 0x7a, 0x72, 0xc1, 0xe9, 0x69, 0xd7, 0x0e, 0x77, 0x3e, 0xdf, 0x00, 0xd8, 0xb6, 0x7a, 0xaa, 0xd8, 0xb5, 0xdc, 0x55, 0x7b, 0x31, 0xc6, 0x38, 0xdc, 0xde, 0x77, 0xf6, 0xad, 0xf0, 0xdd, 0x4e, 0x74, 0x4d, 0x95, 0xfb, 0xdb, 0xa4, 0x72, 0x14, 0x80, 0xf4, 0xda, 0x05, 0x6f, 0x5e, 0x6b, 0xef, 0xd9, 0xf3, 0x6f, 0x2b, 0x56, 0x06, 0xdc, 0x53, 0x6e, 0xde, 0x3f, 0x1e, 0xd8, 0x59, 0x75, 0x30, 0x31, 0xe5, 0xd6, 0x3e, 0x7c, 0x86, 0x2c, 0x05, 0xd3, 0x72, 0x84, 0x56, 0x28, 0x1f, 0xa8, 0x09, 0x45, 0x01, 0xed, 0xa1, 0xaf, 0x39, 0x4b, 0xc9, 0xeb, 0xd7, 0xc3, 0x12, 0x50, 0xcd, 0xec, 0x34, 
0xcc, 0x77, 0x5c, 0x00, 0xea, 0xa7, 0xd6, 0xce, 0x61, 0x17, 0xe3, 0xf8, 0xd8, 0x3b, 0x64, 0xe4, 0xdc, 0xfe, 0xd9, 0x32, 0x69, 0xb8, 0xd7, 0x7d, 0xdd, 0x4c, 0x65, 0x64, 0xc0, 0x30, 0xde, 0x9e, 0x60, 0xed, 0xa7, 0x21, 0xde, 0x95, 0x5e, 0x4c, 0x8f, 0xff, 0xde, 0x7e, 0x5b, 0x85, 0x7b, 0x88, 0xdf, 0x79, 0x58, 0xfe, 0x65, 0x83, 0xe0, 0x26, 0x58, 0x76, 0x50, 0xd5, 0xe0, 0x5f, 0x5a, 0xdc, 0x39, 0xc8, 0xda, 0x9d, 0x63, 0x17, 0x30, 0x98, 0xd5, 0x43, 0x6e, 0x9c, 0x2b, 0xd8, 0xd3, 0x60, 0x77, 0x6d, 0x29, 0x55, 0xae, 0x34, 0x35, 0xa9, 0xeb, 0x2a, 0xbb, 0x03, 0x37, 0xcd, 0xed, 0x13, 0xca, 0xee, 0x48, 0x80, 0xed, 0x0d, 0xd5, 0x41, 0x4c, 0x02, 0xe4, 0x71, 0xd9, 0xa8, 0x51, 0xfc, 0xde, 0x25, 0xd8, 0x08, 0x57, 0x48, 0xda, 0x5d, 0xdb, 0x38, 0x59, 0xc0, 0xd5, 0x46, 0xe0, 0x08, 0x52, 0x7e, 0xbb, 0xd8, 0xe1, 0x50, 0x4b, 0x46, 0x9f, 0x37, 0xe1, 0x3c, 0x47, 0x60, 0x88, 0xaa, 0xe2, 0x70, 0x3f, 0x0e, 0x74, 0x36, 0xdd, 0x85, 0x39, 0x05, 0x5b, 0xde, 0xdc, 0x23, 0x35, 0xe4, 0x44, 0xbb, 0xd8, 0x2a, 0x34, 0x18, 0x33, 0x9a, 0xd8, 0x5f, 0x4f, 0x4b, 0x30, 0x28, 0xd6, 0x80, 0x5d, 0x97, 0x2c, 0xc9, 0xd3, 0x1b, 0x6a, 0x3a, 0x29, 0xe5, 0xb0, 0x12, 0x34, 0xd4, 0xe4, 0x3e, 0xbf, 0xec, 0x35, 0xa5, 0xea, 0x4c, 0xd3, 0xce, 0x36, 0xc1, 0xee, 0x24, 0xd7, 0x09, 0x37, 0xf1, 0xdd, 0x74, 0xd8, 0x9e, 0x3a, 0xee, 0xdb, 0xb7, 0xdb, 0x06, 0x47, 0x77, 0xd9, 0xd6, 0xde, 0x21, 0x47, 0x5c, 0xd0, 0x6f, 0xdb, 0x94, 0x36, 0x1b, 0xae, 0x3c, 0xdb, 0xc0, 0x35, 0x1a, 0x95, 0x74, 0xdc, 0x34, 0x34, 0x2f, 0x82, 0x69, 0xda, 0x9f, 0x32, 0xa3, 0x6e, 0xb5, 0xd9, 0xe1, 0x31, 0xec, 0x5b, 0xd5, 0xda, 0x94, 0x30, 0xd1, 0x46, 0x4e, 0xd5, 0xc9, 0x30, 0x39, 0x32, 0x22, 0xd3, 0x45, 0x30, 0x3b, 0x2f, 0x95, 0xd1, 0xae, 0x30, 0x44, 0x2d, 0xfd, 0xd4, 0x9b, 0x5c, 0x25, 0x2b, 0x36, 0x22, 0xb1, 0xd4, 0xc0, 0xe1, 0x8a, 0x20, 0x76, 0xdd, 0x37, 0xe3, 0x71, 0x20, 0xdf, 0xe2, 0x8e, 0xe2, 0xea, 0x22, 0xc7, 0xe3, 0x58, 0xe0, 0x1d, 0x26, 0x82, 0xdf, 0xb0, 0xd4, 0xcf, 0x28, 0x35, 0xde, 0x49, 0xcb, 0x24, 0x29, 0x6d, 0xdb, 0x5e, 0xc2, 0xc2, 0x2d, 0xd5, 0xd3, 
0xb7, 0xae, 0xcb, 0x2e, 0xdf, 0xce, 0x92, 0x9f, 0x52, 0x2f, 0x90, 0xcb, 0xc2, 0x92, 0xc4, 0x2e, 0xaa, 0xcb, 0x63, 0x84, 0x06, 0x2e, 0x09, 0xcc, 0xe9, 0x71, 0xd1, 0x2d, 0x11, 0xce, 0x9b, 0x5f, 0x17, 0x2c, 0x1f, 0xd1, 0x14, 0x31, 0x01, 0x2c, 0x42, 0xd0, 0xdd, 0x2e, 0xd0, 0x2c, 0x64, 0xd0, 0xa8, 0x2c, 0xc9, 0x2c, 0x88, 0xd0, 0x76, 0x2a, 0xe4, 0x24, 0x03, 0xcc, 0x5f, 0xe2, 0x7f, 0x24, 0x9d, 0xd5, 0x5a, 0xe2, 0x1e, 0x22, 0xb2, 0xdf, 0x75, 0xe4, 0x38, 0x23, 0x3f, 0xe3, 0xfd, 0xe3, 0xc1, 0x26, 0xc1, 0xe3, 0x4d, 0xdc, 0xae, 0x29, 0x9d, 0xe0, 0x4e, 0xd2, 0xd9, 0x29, 0xf0, 0xde, 0xdf, 0xc9, 0x03, 0x2e, 0x63, 0xd7, 0x6f, 0xb6, 0xa2, 0x2f, 0x74, 0xd0, 0x98, 0xa4, 0xa5, 0x2f, 0x88, 0xcc, 0x4f, 0x94, 0x9e, 0x2e, 0x86, 0xca, 0xe8, 0x81, 0xcf, 0x2e, 0x9c, 0xcd, 0x9e, 0x6c, 0x50, 0x30, 0x21, 0xd0, 0xca, 0x53, 0x74, 0x2d, 0xed, 0xd1, 0xdc, 0x31, 0x32, 0x2e, 0x0a, 0xd1, 0x8e, 0x2e, 0x7e, 0x2e, 0x29, 0xd1, 0x46, 0x2c, 0x09, 0x4c, 0x88, 0xd6, 0xc8, 0x24, 0x62, 0x25, 0xbe, 0xca, 0xff, 0xe2, 0x96, 0x26, 0x67, 0xcd, 0x19, 0xe3, 0x43, 0x27, 0x6a, 0xd6, 0x32, 0xe2, 0xee, 0x26, 0x25, 0xe2, 0x09, 0xe5, 0x54, 0x27, 0xdc, 0xe4, 0x5c, 0xe3, 0x47, 0x2a, 0xa4, 0xe3, 0xaa, 0xd9, 0x4b, 0x2e, 0x7b, 0xe0, 0xbb, 0xcd, 0xbd, 0x30, 0x57, 0xdc, 0xd6, 0xc0, 0xb5, 0x31, 0xe3, 0xd2, 0xe8, 0xaa, 0x74, 0x2f, 0x23, 0xc9, 0x77, 0x94, 0x00, 0x2e, 0xd9, 0xc8, 0xe6, 0x7e, 0xd9, 0x31, 0x19, 0xcc, 0x57, 0x65, 0x66, 0x31, 0xa5, 0xd5, 0x1a, 0x42, 0x3c, 0x30, 0xf6, 0xd3, 0x23, 0x31, 0x88, 0x44, 0xd9, 0xd8, 0x00, 0x2b, 0xe1, 0x54, 0x7d, 0xd8, 0xd1, 0x29, 0xb0, 0x7b, 0xdc, 0xd7, 0xfb, 0x24, 0x7d, 0x25, 0xff, 0xc3, 0xae, 0xe6, 0xd3, 0x28, 0x41, 0xcb, 0xab, 0xe3, 0xbd, 0x29, 0xfa, 0xce, 0x19, 0xe4, 0x5a, 0x2b, 0xe4, 0xd7, 0x7d, 0xe4, 0x23, 0x2c, 0x4a, 0xe3, 0xe8, 0xe5, 0x44, 0x32, 0xcb, 0xe3, 0x88, 0xe0, 0x95, 0x39, 0x18, 0xe5, 0x33, 0xd8, 0xa5, 0x47, 0x0e, 0xe5, 0x65, 0xcd, 0x76, 0x51, 0x7c, 0xe1, 0xc1, 0xbb, 0xb1, 0x57, 0xe3, 0xde, 0xb9, 0xa8, 0x79, 0x57, 0x08, 0xdd, 0xe0, 0x8e, 0x35, 0x55, 0x3e, 0xde, 0x80, 0x70, 0x3e, 
0x53, 0xa4, 0xde, 0x32, 0x4e, 0x93, 0x57, 0xc1, 0xdd, 0xe2, 0x32, 0xc0, 0x67, 0xe1, 0xdd, 0x77, 0x2c, 0x48, 0x7f, 0x99, 0xdc, 0x90, 0x27, 0x46, 0x85, 0x82, 0xd7, 0x6c, 0x24, 0x90, 0x28, 0xf0, 0xbb, 0x25, 0xe8, 0x76, 0x2a, 0xcb, 0xc2, 0x69, 0xe7, 0x77, 0x2c, 0x11, 0xc9, 0x99, 0xe6, 0xdb, 0x2f, 0xf1, 0xcf, 0x98, 0xe6, 0x03, 0x3b, 0xe1, 0xda, 0x89, 0xe5, 0xbc, 0x4f, 0xb2, 0xe1, 0x52, 0xe2, 0x8a, 0x5c, 0x6f, 0xe1, 0x8e, 0xdb, 0x33, 0x68, 0x94, 0xe2, 0xbf, 0xd0, 0x84, 0x72, 0xeb, 0xe6, 0x14, 0xc6, 0x0c, 0x75, 0xb4, 0xe4, 0x51, 0xb2, 0x45, 0x78, 0x75, 0xe3, 0x78, 0x9a, 0x9e, 0x7d, 0xd0, 0xde, 0xf6, 0x81, 0x1d, 0x7c, 0x31, 0xe3, 0xf3, 0x62, 0x3a, 0x81, 0xea, 0xe1, 0x8e, 0x46, 0x26, 0x88, 0x14, 0xe0, 0x3d, 0x32, 0xed, 0x95, 0x45, 0xda, 0x81, 0x25, 0x45, 0x9d, 0xfd, 0xd8, 0xf4, 0x23, 0x30, 0x2d, 0x1b, 0xb4, 0xb8, 0xe6, 0x54, 0x2f, 0x97, 0xb7, 0x64, 0xe7, 0x66, 0x31, 0xd2, 0xc0, 0x78, 0xe9, 0xe5, 0x52, 0x14, 0xca, 0xba, 0xe5, 0x52, 0x58, 0xd7, 0xd3, 0x73, 0xe7, 0x8c, 0x64, 0x7e, 0xdb, 0x03, 0xe3, 0xce, 0x75, 0x40, 0xdb, 0x69, 0xdb, 0x1d, 0x80, 0x77, 0xde, 0xea, 0xd2, 0xc2, 0x8c, 0x08, 0xe3, 0x3a, 0xca, 0x20, 0x91, 0x6e, 0xdd, 0xf9, 0xb4, 0xab, 0x95, 0x45, 0xdc, 0xf7, 0x9f, 0x9c, 0x98, 0xce, 0xdb, 0x87, 0x88, 0x71, 0x9b, 0xbc, 0xd9, 0xe3, 0x6f, 0xf2, 0x9e, 0x59, 0xd8, 0x34, 0x57, 0x7a, 0xa3, 0x27, 0xd6, 0x66, 0x3e, 0xbf, 0xa4, 0xf3, 0xdb, 0xf8, 0x27, 0xda, 0xa7, 0x1f, 0xd6, 0x8a, 0x1f, 0x1b, 0x30, 0xc0, 0xa7, 0x1e, 0xe3, 0xc2, 0x32, 0xab, 0xac, 0x53, 0xe4, 0x10, 0x4e, 0x85, 0xb7, 0x22, 0xe6, 0xee, 0x5c, 0x7e, 0xbd, 0xdf, 0xe6, 0x2e, 0x64, 0xf8, 0xc5, 0xcb, 0xe2, 0x11, 0x77, 0x56, 0xca, 0x1a, 0xdb, 0xe4, 0x85, 0xef, 0xd1, 0x91, 0xd7, 0xd7, 0x95, 0x32, 0xd8, 0x7e, 0xd3, 0x16, 0x9d, 0xe8, 0xd3, 0xa0, 0xc3, 0x19, 0xa1, 0xb3, 0xd3, 0x6b, 0xb1, 0x29, 0xa4, 0xf7, 0xd2, 0xc0, 0x9d, 0x1f, 0xa8, 0x27, 0xd1, 0xe0, 0x87, 0x8f, 0xaa, 0x61, 0xd1, 0xd8, 0x71, 0x5b, 0xac, 0x81, 0xd1, 0x3c, 0x5b, 0x4a, 0xae, 0xcc, 0xd1, 0xda, 0x44, 0xc7, 0xb1, 0x57, 0xd5, 0xb1, 0x2f, 0x8c, 0xb3, 0x0c, 0xd5, 
0x3e, 0x25, 0x04, 0x34, 0x2e, 0x9b, 0x84, 0xe2, 0x14, 0x40, 0x57, 0xa0, 0x6b, 0xe3, 0x1b, 0x5a, 0x6b, 0xaa, 0x35, 0xe5, 0x98, 0x66, 0x0e, 0xb2, 0x5f, 0xe5, 0x44, 0x75, 0xa6, 0xb5, 0xc6, 0xde, 0x52, 0x86, 0xeb, 0xbe, 0x9b, 0xd8, 0x97, 0x96, 0x49, 0xc4, 0xae, 0xd5, 0xc4, 0xa5, 0xd0, 0xcb, 0x4c, 0xd0, 0x6c, 0xac, 0xc4, 0xc9, 0xf8, 0xc0, 0x37, 0xb1, 0xf8, 0xc9, 0x1a, 0xae, 0xaa, 0xb5, 0x21, 0xc8, 0x5c, 0x9b, 0x1a, 0xb7, 0x2a, 0xc8, 0x4e, 0x87, 0x43, 0xb8, 0x82, 0xc9, 0x16, 0x72, 0xc3, 0xb9, 0x5a, 0xca, 0xf9, 0x5e, 0x8d, 0xbb, 0x12, 0xcc, 0x92, 0x4a, 0x96, 0xbc, 0xad, 0xcf, 0x51, 0x38, 0x1e, 0xbe, 0x81, 0xd2, 0x2a, 0x26, 0xb9, 0x41, 0x97, 0x90, 0xce, 0xe4, 0x92, 0x5a, 0xf6, 0x95, 0x93, 0xe3, 0x0a, 0x62, 0x9b, 0x9c, 0xc6, 0xe4, 0x8e, 0x77, 0xb5, 0xa5, 0x95, 0xe6, 0x13, 0x87, 0xc2, 0xaa, 0xc2, 0xdf, 0xed, 0x98, 0x67, 0xb2, 0x69, 0xda, 0x09, 0xa7, 0x79, 0xb9, 0xa9, 0xd5, 0xd1, 0xb6, 0xa9, 0xc0, 0x88, 0xd0, 0x34, 0xbe, 0x5c, 0xbf, 0x7f, 0xbf, 0xb0, 0xc0, 0xe4, 0xc0, 0x0c, 0xad, 0x47, 0xc2, 0x74, 0xbf, 0xd4, 0x99, 0xd9, 0xc3, 0x71, 0xc0, 0x0a, 0x86, 0x59, 0xc4, 0x60, 0xc1, 0x66, 0x73, 0x63, 0xc5, 0x59, 0xc3, 0x52, 0x60, 0xc9, 0xc6, 0x49, 0xc5, 0xe5, 0x4f, 0xfa, 0xc7, 0x15, 0xc7, 0xb5, 0x3d, 0xb6, 0xc8, 0x3f, 0xc8, 0xfc, 0x2b, 0x11, 0x5a, 0x44, 0x83, 0x9d, 0xe5, 0xfe, 0x64, 0xeb, 0x88, 0x97, 0xe3, 0x69, 0x77, 0x14, 0x8f, 0xf0, 0xe5, 0x74, 0x8a, 0x7e, 0x99, 0x44, 0xe5, 0xa5, 0x9a, 0x9c, 0xa1, 0x99, 0xe2, 0x90, 0xaa, 0x89, 0xa8, 0xd8, 0xdc, 0x8f, 0xb8, 0xd5, 0xb0, 0x55, 0xd8, 0x4e, 0xc4, 0xb4, 0xb7, 0x3d, 0xd0, 0xec, 0xc9, 0x83, 0xb7, 0x33, 0xbf, 0xa5, 0xcb, 0xc3, 0xb6, 0x70, 0xac, 0xc0, 0xcd, 0x54, 0xb6, 0x07, 0x99, 0x89, 0xce, 0x1b, 0xb6, 0x40, 0x86, 0x7a, 0xce, 0xc8, 0xb6, 0x9b, 0x73, 0x66, 0xcf, 0x74, 0xb7, 0xc3, 0x60, 0xc6, 0xd0, 0x1d, 0xb9, 0x51, 0x4e, 0x52, 0xd0, 0xce, 0xba, 0xcc, 0x3b, 0xe1, 0xd2, 0x05, 0xbd, 0x15, 0x2a, 0x88, 0x6c, 0x08, 0x78, 0xa3, 0xe9, 0x1f, 0x75, 0xd2, 0x7d, 0xfc, 0xe8, 0xca, 0x89, 0x28, 0x86, 0x0b, 0xe6, 0xb9, 0x98, 0x5e, 0x8e, 0xb8, 0xe6, 0x5a, 
0xae, 0x99, 0x9a, 0x74, 0xe6, 0xa2, 0xba, 0x37, 0xa1, 0x18, 0xe1, 0xee, 0xc8, 0xaa, 0xa9, 0x0b, 0xdb, 0xab, 0xd3, 0x5a, 0xaf, 0xd7, 0xd4, 0xed, 0xd4, 0xe2, 0xad, 0xf6, 0xc0, 0xa0, 0xd7, 0xa1, 0xac, 0xfa, 0xad, 0xb1, 0xd8, 0x28, 0xab, 0xd6, 0x9a, 0x01, 0xd7, 0xea, 0xaa, 0xc8, 0x86, 0x20, 0xd8, 0x43, 0xaa, 0x2a, 0x72, 0xfe, 0xd6, 0x2e, 0xa8, 0x61, 0x5d, 0xeb, 0xd4, 0xe3, 0xa8, 0x32, 0x48, 0xfb, 0xd3, 0x4a, 0xa7, 0xde, 0x34, 0xda, 0xd3, 0x47, 0xad, 0x56, 0x2b, 0xfd, 0x7d, 0x27, 0x6c, 0x75, 0xec, 0x2b, 0x89, 0xf7, 0x75, 0x1e, 0xec, 0x66, 0x94, 0xf7, 0x7a, 0x73, 0xea, 0x75, 0xa9, 0x4b, 0x85, 0xd0, 0xe9, 0xa6, 0xbc, 0xef, 0x91, 0x01, 0xeb, 0x33, 0xcb, 0xcc, 0x9b, 0x01, 0xe8, 0x7a, 0xd4, 0xd1, 0x9f, 0x43, 0xdc, 0xf6, 0xda, 0xa7, 0xa4, 0x06, 0xd4, 0xb7, 0xde, 0xb5, 0xa3, 0x14, 0xc0, 0x52, 0xdd, 0xdd, 0x9f, 0xd9, 0xa9, 0xf8, 0xdb, 0x62, 0x9c, 0x63, 0x93, 0xdc, 0xd8, 0xec, 0x98, 0xe5, 0x7e, 0x9b, 0xd6, 0x2a, 0x96, 0x27, 0x69, 0x27, 0xd4, 0x9b, 0x95, 0x53, 0x53, 0xd7, 0xd3, 0x80, 0x94, 0xdd, 0x3e, 0x1f, 0xd3, 0xaa, 0x98, 0x94, 0x31, 0xc7, 0xd3, 0x50, 0x9d, 0x68, 0x2b, 0x7a, 0x8a, 0xd8, 0x62, 0x1e, 0xf0, 0x9a, 0x98, 0x6a, 0x6a, 0x23, 0xeb, 0x04, 0xa4, 0xbe, 0x6e, 0x9c, 0xea, 0x44, 0xb6, 0x2d, 0x7a, 0x25, 0xe8, 0xff, 0xc7, 0x6c, 0x84, 0x3f, 0xea, 0xa5, 0xd3, 0x8f, 0x8b, 0xba, 0xe2, 0x84, 0xd7, 0xd8, 0x8f, 0xd0, 0xdb, 0x24, 0xdb, 0x56, 0x92, 0xfc, 0xcd, 0x81, 0xdd, 0x3b, 0x90, 0x0d, 0xb7, 0x69, 0xdc, 0xc7, 0x8c, 0xfd, 0x9f, 0xfd, 0xda, 0xe2, 0x89, 0xee, 0x8a, 0x5e, 0xd9, 0x79, 0x87, 0x2e, 0x75, 0xce, 0xd7, 0x7b, 0x85, 0x1d, 0x5f, 0xfc, 0xd5, 0xf9, 0x84, 0xe6, 0x4a, 0xe2, 0xd4, 0xcd, 0x86, 0x14, 0x35, 0xb9, 0xd3, 0x9e, 0x8c, 0x92, 0x2f, 0x3b, 0xd3, 0xdd, 0x91, 0x2c, 0x2b, 0x82, 0x9a, 0xe5, 0x52, 0xb2, 0xee, 0xc8, 0xa6, 0x74, 0x61, 0xf6, 0xe9, 0x4f, 0xb6, 0x97, 0x63, 0x8d, 0xe8, 0xc4, 0xc2, 0x6a, 0x6d, 0xd1, 0xe9, 0x4e, 0xd3, 0x80, 0x78, 0x18, 0xe9, 0xaa, 0xd7, 0x3c, 0x7a, 0xf2, 0xdd, 0xbc, 0xd8, 0x13, 0x7f, 0x06, 0xd8, 0x5e, 0xdb, 0xe2, 0x80, 0x03, 0xc8, 0xa0, 0xdc, 0xe5, 0x7d, 
0x16, 0xb0, 0xe1, 0xdd, 0x70, 0x78, 0x9f, 0x99, 0x35, 0xdb, 0x86, 0x75, 0x96, 0x82, 0x5c, 0xda, 0xd1, 0x73, 0x88, 0x6d, 0xcd, 0xd9, 0xbd, 0x74, 0x01, 0x59, 0x2e, 0xd8, 0xb6, 0x75, 0x1b, 0x43, 0xe6, 0xd7, 0x50, 0x79, 0x52, 0x33, 0xe3, 0xd5, 0x74, 0x80, 0x87, 0x2e, 0x68, 0xd4, 0x13, 0x88, 0x4b, 0x2a, 0x35, 0xa7, 0x9e, 0x4b, 0x1c, 0xed, 0x59, 0xaf, 0x4a, 0x4e, 0xac, 0xec, 0xbb, 0xc0, 0x56, 0x5c, 0xf8, 0xe9, 0x54, 0xce, 0xf4, 0x62, 0x57, 0xe9, 0x91, 0xd8, 0x43, 0x63, 0x90, 0xe2, 0x6c, 0xd8, 0x33, 0x68, 0xd2, 0xdc, 0x43, 0xd8, 0xe6, 0x6e, 0x72, 0xd7, 0x9b, 0xdc, 0xf2, 0x6b, 0x79, 0xc3, 0x45, 0xde, 0x11, 0x64, 0x7c, 0xa6, 0x5c, 0xde, 0x80, 0x61, 0xe2, 0x91, 0x44, 0xde, 0x43, 0x60, 0x6e, 0x7d, 0x22, 0xdb, 0x17, 0x60, 0x8d, 0x67, 0x6d, 0xde, 0xbb, 0x5e, 0x5c, 0x52, 0x8e, 0xdf, 0x09, 0x63, 0x9e, 0x3c, 0xbd, 0xdd, 0x2e, 0x69, 0x06, 0x32, 0x60, 0xd6, 0xef, 0x73, 0x9c, 0x2d, 0x31, 0xd4, 0xe3, 0x7b, 0xd1, 0x2a, 0x85, 0xb0, 0x0a, 0x37, 0x31, 0xec, 0xc9, 0xb9, 0xb6, 0x3f, 0xc3, 0xeb, 0x26, 0xcd, 0xdd, 0x4d, 0x2d, 0xec, 0x60, 0xd7, 0x4b, 0x53, 0xf5, 0xe1, 0xf2, 0xd6, 0x5e, 0x57, 0x10, 0xdc, 0x9f, 0xd8, 0xfd, 0x5d, 0x83, 0xdb, 0x50, 0xda, 0x44, 0x5d, 0x48, 0xd7, 0xc2, 0xdd, 0xad, 0x58, 0xd7, 0xbd, 0x7c, 0xde, 0xb7, 0x52, 0xed, 0xa2, 0x06, 0xe0, 0xce, 0x4c, 0x1f, 0x8b, 0x2b, 0xe2, 0x26, 0x48, 0x4a, 0x77, 0x56, 0xdf, 0xb8, 0x3e, 0xbb, 0x5e, 0x96, 0xdd, 0x94, 0x3d, 0xd1, 0x4a, 0x29, 0xdb, 0x00, 0x38, 0x41, 0x35, 0x5d, 0xda, 0x81, 0x59, 0xf5, 0x31, 0x27, 0xd7, 0x6c, 0x61, 0xf0, 0x2d, 0x9c, 0xd4, 0x40, 0x6e, 0xdc, 0x2a, 0xaf, 0xba, 0x26, 0x35, 0x7b, 0xe9, 0x58, 0xd1, 0x0d, 0x3a, 0x4f, 0xf0, 0x53, 0xd8, 0x69, 0x38, 0x94, 0xe6, 0x34, 0xd7, 0x99, 0x3b, 0x91, 0xdd, 0x74, 0xd9, 0xce, 0x48, 0x77, 0xdc, 0x0d, 0xdb, 0x2c, 0x4b, 0x3d, 0xda, 0x19, 0xde, 0xe9, 0x4b, 0xb7, 0xd1, 0x6f, 0xde, 0x2e, 0x42, 0xe2, 0xb5, 0x11, 0xdc, 0x21, 0x36, 0x4c, 0x95, 0x9f, 0xdc, 0xaa, 0x35, 0x85, 0x82, 0xbb, 0xdb, 0x81, 0x33, 0xfc, 0x6f, 0x5e, 0xda, 0xb8, 0x33, 0x62, 0x5c, 0xc6, 0xdb, 0x2e, 0x32, 0x55, 0x47, 0x6d, 
0xd6, 0xe5, 0x31, 0x62, 0x32, 0xfb, 0xd4, 0x23, 0x31, 0x20, 0x30, 0x3b, 0xd6, 0x4b, 0x4f, 0x75, 0x2e, 0x17, 0xd5, 0x82, 0x5d, 0x2d, 0x2b, 0xe9, 0x24, 0x01, 0xd5, 0x53, 0xe1, 0xed, 0x21, 0xf1, 0xdf, 0x60, 0xe3, 0xff, 0x22, 0x65, 0xe3, 0x4d, 0xe3, 0x74, 0x25, 0x00, 0xe3, 0x6d, 0xdf, 0xfd, 0x27, 0xe8, 0xe0, 0x37, 0xd5, 0x24, 0x29, 0x73, 0xde, 0xcc, 0xcb, 0x6f, 0x2a, 0x64, 0xdc, 0x0c, 0xc3, 0x5c, 0x2e, 0x6d, 0xd4, 0x01, 0xae, 0xe2, 0x2f, 0x1a, 0xce, 0xa2, 0x9f, 0x13, 0x2f, 0x28, 0xcc, 0x20, 0x91, 0x08, 0x2e, 0xaa, 0xcb, 0x65, 0x83, 0x6a, 0x2e, 0x26, 0xcc, 0xfb, 0x71, 0xde, 0x2d, 0x74, 0xce, 0xcb, 0x5f, 0x4b, 0x2d, 0x00, 0xd1, 0x81, 0x31, 0x6a, 0x2d, 0x1b, 0xd1, 0x49, 0x2f, 0x68, 0x2d, 0x35, 0xd1, 0x16, 0x2d, 0x87, 0x2d, 0x52, 0xd0, 0xe6, 0x2b, 0xc6, 0x25, 0x44, 0xce, 0x53, 0xe2, 0x2d, 0x26, 0x0e, 0xd5, 0xfb, 0xe2, 0x87, 0x24, 0x57, 0xe1, 0xaa, 0xe4, 0xd0, 0x25, 0x32, 0xe4, 0x41, 0xe3, 0xd4, 0x28, 0xc6, 0xe3, 0xae, 0xdc, 0xd6, 0x2b, 0x01, 0xe0, 0xd0, 0xd3, 0x2c, 0x2b, 0x0e, 0xdf, 0x86, 0xc9, 0x7f, 0x2f, 0x22, 0xd7, 0xcd, 0xb6, 0xc9, 0x2f, 0xf3, 0xd0, 0xd9, 0xa4, 0x54, 0x2f, 0x9b, 0xcc, 0x3e, 0x94, 0x28, 0x2e, 0xb1, 0xcb, 0x45, 0x81, 0x3b, 0x2d, 0x91, 0xcd, 0xa2, 0x6b, 0xb1, 0x30, 0xd5, 0xd1, 0x2b, 0x53, 0x93, 0x2e, 0xd0, 0xd2, 0x48, 0x31, 0xac, 0x2e, 0xe3, 0xd1, 0xfe, 0x2f, 0x3e, 0x2e, 0xf6, 0xd1, 0xba, 0x2d, 0x02, 0x51, 0x0e, 0xd9, 0x16, 0x24, 0xf7, 0x26, 0xe1, 0xcb, 0xa8, 0xe2, 0xfe, 0x27, 0xfd, 0xce, 0xfc, 0xe3, 0x60, 0x28, 0xfd, 0xd6, 0xe3, 0xe3, 0x58, 0x28, 0x06, 0xe3, 0xc4, 0xe5, 0x29, 0x29, 0xd8, 0xe4, 0x60, 0xe3, 0x28, 0x2c, 0x2b, 0xe3, 0xff, 0xd9, 0x65, 0x2f, 0x97, 0xe1, 0x1d, 0xcd, 0xef, 0x31, 0x19, 0xdd, 0x52, 0xc1, 0x26, 0x32, 0x8e, 0xd3, 0x96, 0xab, 0x52, 0x31, 0xba, 0xcf, 0x73, 0x96, 0x1a, 0x2e, 0xd2, 0xc8, 0xd6, 0x7e, 0x22, 0x31, 0x69, 0xcc, 0x62, 0x64, 0xfb, 0x32, 0x8d, 0xd5, 0x92, 0x41, 0xf4, 0x31, 0xe7, 0xd2, 0x26, 0x32, 0x7d, 0x49, 0x28, 0xd9, 0x84, 0x2c, 0xc3, 0x57, 0x59, 0xda, 0xa3, 0x28, 0x3b, 0x80, 0x07, 0xda, 0xde, 0x25, 0xea, 0x27, 0x8a, 0xc5, 
0x8d, 0xe6, 0xa8, 0x29, 0x7f, 0xcc, 0x66, 0xe4, 0x1f, 0x2b, 0xa6, 0xcf, 0xfe, 0xe4, 0x6f, 0x2b, 0x87, 0xdb, 0x50, 0xe6, 0x73, 0x2e, 0x04, 0xe3, 0xf3, 0xe4, 0xd2, 0x34, 0x53, 0xe3, 0x8b, 0xe0, 0x72, 0x3d, 0x1c, 0xe5, 0xa5, 0xd8, 0xdb, 0x53, 0x3b, 0xe4, 0x35, 0xcd, 0x4e, 0x5b, 0x40, 0xe2, 0xd3, 0xbd, 0xf7, 0x5d, 0x3e, 0xe0, 0x39, 0xaa, 0x26, 0x5f, 0x78, 0xdf, 0x82, 0x90, 0xd0, 0x5a, 0x70, 0xe0, 0x09, 0x72, 0xd0, 0x5c, 0xe3, 0xdf, 0x57, 0x53, 0x9c, 0x5b, 0xc7, 0xdc, 0xcd, 0x38, 0x0b, 0x72, 0xac, 0xe0, 0x3f, 0x2e, 0x69, 0x83, 0xba, 0xde, 0x8b, 0x2a, 0x8f, 0x8a, 0x73, 0xda, 0xcd, 0x26, 0xf4, 0x2a, 0x6a, 0xbd, 0xa9, 0xe9, 0x5e, 0x2b, 0x71, 0xc4, 0x03, 0xe8, 0x70, 0x2d, 0x79, 0xcb, 0x8a, 0xe6, 0xbf, 0x34, 0x75, 0xd0, 0xae, 0xe4, 0x88, 0x4b, 0xb3, 0xdd, 0x93, 0xe6, 0x8f, 0x58, 0x35, 0xdf, 0xc2, 0xe0, 0x5b, 0x63, 0x09, 0xdf, 0x6f, 0xd9, 0xb2, 0x71, 0x1c, 0xe0, 0xc2, 0xcf, 0x7f, 0x77, 0x6f, 0xe3, 0xd9, 0xc5, 0x0f, 0x79, 0xf3, 0xe3, 0xfc, 0xb2, 0x7f, 0x7f, 0xa6, 0xdf, 0x81, 0x9b, 0xac, 0x82, 0x9a, 0xde, 0xb9, 0x82, 0xc8, 0x82, 0x67, 0xe3, 0x21, 0x65, 0x05, 0x8a, 0x9e, 0xe0, 0x26, 0x4d, 0x94, 0x8e, 0x16, 0xdf, 0x14, 0x38, 0xc0, 0x9a, 0x8f, 0xdb, 0xc3, 0x2a, 0xcd, 0xa3, 0x06, 0xdd, 0x22, 0x25, 0xd3, 0x2e, 0x9c, 0xb6, 0x07, 0xe6, 0xd7, 0x30, 0x37, 0xb9, 0x2d, 0xe8, 0x63, 0x33, 0x22, 0xc2, 0x2e, 0xe9, 0x9b, 0x59, 0x93, 0xcc, 0x5c, 0xe5, 0xa2, 0x5c, 0xce, 0xd3, 0x84, 0xe6, 0x2b, 0x6c, 0x54, 0xda, 0x51, 0xe1, 0x73, 0x7a, 0x58, 0xdb, 0x82, 0xdb, 0x17, 0x84, 0xfb, 0xde, 0xcc, 0xd3, 0x3a, 0x8f, 0x43, 0xe2, 0x94, 0xc8, 0xe3, 0x94, 0x6e, 0xe0, 0x08, 0xb6, 0x6e, 0x98, 0x67, 0xde, 0x49, 0xa1, 0xcf, 0x9a, 0x8a, 0xdd, 0x9f, 0x89, 0xf4, 0x9e, 0x5c, 0xdc, 0x6f, 0x72, 0x47, 0xa2, 0x01, 0xdb, 0xb3, 0x5a, 0x40, 0xa5, 0xbf, 0xd9, 0x21, 0x41, 0xc5, 0xa9, 0xe9, 0xda, 0xeb, 0x2f, 0x77, 0xab, 0xef, 0xd8, 0xfb, 0x25, 0x21, 0x31, 0x94, 0xa9, 0x22, 0xe4, 0x56, 0x35, 0xf5, 0xaf, 0xa7, 0xe6, 0x0e, 0x53, 0x58, 0xb7, 0xdf, 0xe6, 0xed, 0x5e, 0x71, 0xbf, 0x59, 0xe5, 0x27, 0x6b, 0xf5, 0xc6, 0x23, 0xdf, 0xb4, 
0x7d, 0xaa, 0xca, 0x78, 0xda, 0x20, 0x8b, 0x0d, 0xd2, 0xc1, 0xd6, 0x78, 0x99, 0x04, 0xd6, 0x42, 0xd3, 0x4b, 0xa7, 0xf3, 0xde, 0x1f, 0xcd, 0xb6, 0xac, 0x92, 0xdd, 0x33, 0xbb, 0x25, 0xb0, 0x0e, 0xdc, 0x44, 0xa7, 0xf7, 0xb2, 0x54, 0xdb, 0xd4, 0x93, 0x0d, 0xb4, 0x63, 0xdc, 0x16, 0x7d, 0xc7, 0xb4, 0x68, 0xd7, 0xf7, 0x62, 0xd8, 0xb5, 0x27, 0xd5, 0xb6, 0x4a, 0xa1, 0xb6, 0x67, 0xd8, 0xf5, 0x35, 0x1b, 0xb8, 0x30, 0xd7, 0xed, 0x2b, 0xca, 0x33, 0xe2, 0x9e, 0xd4, 0xe4, 0x99, 0x4e, 0x06, 0xa4, 0x8b, 0xe4, 0x57, 0x5c, 0x6b, 0xac, 0xfd, 0xe4, 0xd2, 0x6d, 0x0a, 0xb3, 0xc3, 0xe2, 0xcb, 0x79, 0x5d, 0xb7, 0xad, 0xdd, 0x08, 0x89, 0xd5, 0xc0, 0x8f, 0xd7, 0x57, 0x9a, 0x3b, 0xc5, 0xea, 0xd4, 0x8c, 0xa9, 0xf1, 0xcd, 0x26, 0xce, 0xc1, 0xb6, 0x99, 0xd3, 0xae, 0xca, 0x0f, 0xbb, 0xb2, 0xd3, 0xdd, 0xb9, 0x5f, 0xbd, 0xee, 0xd3, 0xce, 0xa6, 0x34, 0xc0, 0x54, 0xd3, 0x6b, 0x92, 0x5c, 0xc1, 0x7b, 0xd4, 0x60, 0x7d, 0xa7, 0xc3, 0x3f, 0xd5, 0x9c, 0x68, 0x05, 0xc4, 0xde, 0xd7, 0x24, 0x53, 0xdb, 0xc4, 0x40, 0xd7, 0x79, 0x3f, 0x1e, 0xc4, 0x8e, 0xd5, 0xc0, 0x2f, 0x12, 0x4c, 0x3d, 0x93, 0x7b, 0xe4, 0x75, 0x5d, 0x03, 0x98, 0x5c, 0xe3, 0x9c, 0x6b, 0x7d, 0xa0, 0x60, 0xe5, 0x37, 0x7d, 0xb8, 0xa7, 0x50, 0xe4, 0x31, 0x8b, 0x64, 0xac, 0xa9, 0xde, 0x54, 0x9b, 0x2d, 0xb4, 0x96, 0xd9, 0x53, 0xa9, 0xd8, 0xbb, 0xc2, 0xd4, 0xcb, 0xbb, 0x0e, 0xc2, 0xcf, 0xce, 0x4d, 0xc9, 0xae, 0xc9, 0xae, 0xca, 0x3d, 0xc9, 0x6c, 0xca, 0x9c, 0xb7, 0xbe, 0xca, 0xe8, 0xca, 0xf5, 0xa4, 0x9d, 0xcc, 0x61, 0xcb, 0xa2, 0x91, 0x50, 0xcd, 0x59, 0xcc, 0xe2, 0x7d, 0xf4, 0xce, 0x7c, 0xce, 0xb4, 0x6a, 0x57, 0xce, 0xe3, 0xd0, 0x78, 0x58, 0x0d, 0xcf, 0xb1, 0xd2, 0x35, 0x45, 0xf1, 0xd0, 0xa4, 0xd3, 0x7a, 0x33, 0x80, 0x5b, 0x98, 0x86, 0x8f, 0xe6, 0x24, 0x6a, 0x92, 0x8a, 0xe6, 0xe4, 0x05, 0x7f, 0xe1, 0x93, 0x5f, 0xe6, 0x2e, 0x8d, 0xd8, 0x9c, 0x6d, 0xe5, 0xe4, 0x9e, 0x3b, 0xa3, 0xe7, 0xe1, 0x1e, 0xad, 0xb3, 0xab, 0x34, 0xdb, 0x25, 0xba, 0xb5, 0xb2, 0xce, 0xd6, 0x47, 0xc8, 0x2e, 0xb9, 0xa5, 0xce, 0x64, 0xd0, 0x5e, 0xbf, 0xb5, 0xc7, 0x72, 0xd5, 0xab, 0xc1, 
0x31, 0xb7, 0x20, 0xd7, 0x44, 0xc1, 0x80, 0xa4, 0x18, 0xd7, 0x82, 0xc1, 0xa1, 0x90, 0xc5, 0xd7, 0xfe, 0xc2, 0x42, 0x7d, 0x36, 0xd6, 0x4e, 0xc1, 0x0d, 0x68, 0x20, 0xd4, 0x81, 0xc0, 0xc3, 0x53, 0xc7, 0xd2, 0xd7, 0xbf, 0xfc, 0x3f, 0x1f, 0xd1, 0xe5, 0xc2, 0x6a, 0x30, 0xd1, 0x6d, 0xdc, 0x7a, 0x75, 0xe8, 0x41, 0x82, 0x73, 0x81, 0x8d, 0xe7, 0x22, 0x8a, 0x88, 0x89, 0x3b, 0xe6, 0xa4, 0x9d, 0xad, 0x92, 0xb5, 0xe6, 0xb5, 0xb1, 0x31, 0x9d, 0x17, 0xe4, 0xb9, 0xbc, 0x79, 0xa3, 0x6e, 0xdf, 0x64, 0xcb, 0x6f, 0xab, 0x6d, 0xd9, 0xd0, 0xd2, 0x76, 0xb2, 0x15, 0xd3, 0x73, 0xdc, 0x44, 0xb7, 0x05, 0xc8, 0xc4, 0xde, 0xda, 0xb5, 0xe8, 0xb5, 0x93, 0xdd, 0x2e, 0xb3, 0x84, 0xa0, 0x21, 0xd9, 0xf0, 0xaf, 0xdd, 0x89, 0x94, 0xd8, 0x17, 0xad, 0x65, 0x74, 0x78, 0xd6, 0x15, 0xab, 0xb3, 0x5f, 0x82, 0xd4, 0xeb, 0xab, 0xed, 0x4a, 0xd5, 0xd3, 0xdf, 0xab, 0xd2, 0x36, 0x9a, 0xd3, 0x0a, 0xb2, 0x5d, 0x30, 0x06, 0x81, 0xc0, 0x71, 0x59, 0xeb, 0x69, 0x8b, 0x07, 0x77, 0xe6, 0xeb, 0x84, 0x98, 0x1e, 0x7e, 0x4c, 0xea, 0x1c, 0xae, 0x51, 0x8a, 0x80, 0xe9, 0xa1, 0xc0, 0x8f, 0x95, 0x2f, 0xeb, 0x6d, 0xce, 0x25, 0x9d, 0xad, 0xe5, 0xff, 0xd5, 0xa0, 0xa1, 0xcd, 0xdc, 0x36, 0xd9, 0x64, 0xa7, 0x39, 0xd6, 0x0e, 0xde, 0xd0, 0xa7, 0x02, 0xc3, 0x0a, 0xde, 0x1b, 0xa3, 0x88, 0xac, 0x31, 0xdc, 0x40, 0xa0, 0x3f, 0x96, 0x51, 0xd9, 0x5a, 0x9c, 0x6b, 0x80, 0x51, 0xd7, 0xcb, 0x9a, 0xa0, 0x6b, 0x92, 0xd5, 0x50, 0x98, 0xa4, 0x56, 0x5a, 0xd4, 0x68, 0x99, 0x5d, 0x42, 0x57, 0xd3, 0x91, 0x9b, 0xef, 0x33, 0x50, 0xd3, 0x5a, 0xa1, 0x2f, 0x2e, 0x61, 0x8d, 0xfc, 0x63, 0xf7, 0xed, 0x65, 0x99, 0x7b, 0x6d, 0x10, 0xeb, 0x3f, 0xa8, 0x5c, 0x71, 0x5e, 0xea, 0x35, 0xb9, 0x9b, 0x7e, 0x8d, 0xe9, 0xbb, 0xcb, 0x95, 0x89, 0x06, 0xeb, 0x13, 0xd5, 0x41, 0x8e, 0x8e, 0xdf, 0x9f, 0xd7, 0x82, 0x93, 0x85, 0xd9, 0xfc, 0xdb, 0x08, 0x96, 0xed, 0xd0, 0x2c, 0xdc, 0xb0, 0x94, 0x9f, 0xb9, 0x42, 0xdc, 0xfa, 0x91, 0x3b, 0xa2, 0xc8, 0xdb, 0x03, 0x8d, 0xac, 0x8c, 0x4a, 0xd9, 0x5c, 0x8b, 0xd2, 0x78, 0x47, 0xd6, 0xd7, 0x89, 0x26, 0x62, 0x75, 0xd5, 0x8c, 0x88, 0x99, 0x4c, 0xcd, 
0xd4, 0x3e, 0x8a, 0x28, 0x38, 0x4a, 0xd3, 0x47, 0x8f, 0xf5, 0x30, 0xc8, 0xd3, 0xb7, 0x94, 0xd3, 0x2d, 0xcd, 0x9e, 0xbb, 0x5f, 0x2a, 0xec, 0x73, 0xa9, 0x83, 0x63, 0x02, 0xe9, 0x21, 0xb7, 0x30, 0x65, 0x6a, 0xe9, 0x08, 0xc4, 0xcb, 0x72, 0x04, 0xe8, 0x67, 0xd3, 0xae, 0x7b, 0xa3, 0xe8, 0x1a, 0xd6, 0xd0, 0x7e, 0x79, 0xdc, 0x60, 0xd8, 0x1d, 0x82, 0xce, 0xd8, 0xc4, 0xdb, 0xd2, 0x84, 0xdd, 0xcb, 0x41, 0xdc, 0xed, 0x80, 0x03, 0xb2, 0x13, 0xdd, 0x8a, 0x7d, 0xa3, 0x9b, 0xbf, 0xdb, 0xe7, 0x7a, 0x1e, 0x84, 0xb7, 0xda, 0xa3, 0x78, 0x60, 0x70, 0x2a, 0xd9, 0x46, 0x78, 0x1c, 0x5a, 0xed, 0xd7, 0xbe, 0x7a, 0xcc, 0x47, 0x86, 0xd6, 0xb1, 0x7d, 0x36, 0x34, 0xd4, 0xd4, 0xed, 0x84, 0x38, 0x30, 0x00, 0xd3, 0x9e, 0x8b, 0xf1, 0x2c, 0x49, 0xa8, 0x66, 0x4c, 0x4d, 0xed, 0x18, 0xb6, 0xd2, 0x5c, 0xd3, 0xe9, 0x42, 0xc2, 0xf5, 0x61, 0x73, 0xe9, 0xac, 0xd1, 0x4d, 0x63, 0xa5, 0xe9, 0xa6, 0xd7, 0x6a, 0x67, 0xdb, 0xdf, 0x89, 0xd7, 0xff, 0x6d, 0x07, 0xdb, 0xae, 0xd8, 0xd1, 0x71, 0x63, 0xd7, 0xbd, 0xdc, 0xb2, 0x70, 0xf1, 0xc5, 0xb6, 0xdd, 0xf7, 0x69, 0x57, 0xa8, 0x91, 0xdd, 0xdb, 0x67, 0x21, 0x92, 0x69, 0xdd, 0x56, 0x66, 0x32, 0x7f, 0x47, 0xda, 0xb2, 0x66, 0x6b, 0x69, 0xff, 0xde, 0x43, 0x63, 0xa9, 0x54, 0x36, 0xdd, 0xbe, 0x67, 0x5a, 0x3e, 0xb9, 0xd9, 0x7c, 0x6e, 0xcf, 0x33, 0x58, 0xd7, 0x76, 0x77, 0xc1, 0x2f, 0x09, 0xd5, 0x76, 0x80, 0x05, 0x2b, 0xca, 0xb2, 0xb5, 0x39, 0xca, 0xef, 0x82, 0xc7, 0x7f, 0x4b, 0xcc, 0xed, 0x75, 0xcd, 0x74, 0x50, 0x8b, 0xeb, 0x72, 0xd7, 0x00, 0x55, 0xab, 0xde, 0x93, 0xd7, 0xc7, 0x5d, 0xc2, 0xdd, 0xa1, 0xd9, 0x2f, 0x5e, 0x8b, 0xdb, 0x78, 0xd9, 0xbd, 0x61, 0x96, 0xd7, 0xc4, 0xdd, 0x72, 0x5e, 0x91, 0xc1, 0x07, 0xde, 0xdd, 0x57, 0x2c, 0xa4, 0xa7, 0xde, 0x30, 0x53, 0xb7, 0x8d, 0xae, 0xdf, 0x0e, 0x50, 0xa9, 0x79, 0xee, 0xe1, 0xfa, 0x4a, 0xc2, 0x64, 0x54, 0xe0, 0x65, 0x49, 0xc3, 0x50, 0x35, 0xde, 0x52, 0x4a, 0xef, 0x37, 0xaf, 0xdc, 0x99, 0x60, 0x09, 0x32, 0x62, 0xd9, 0x1d, 0x67, 0x51, 0x2e, 0x8d, 0xd5, 0x63, 0x72, 0xac, 0x2b, 0x8b, 0xbb, 0xbd, 0x36, 0xe3, 0xea, 0xb6, 0xd2, 0x11, 0x3e, 
0x58, 0xf0, 0x52, 0xd8, 0x72, 0x3b, 0xd7, 0xe5, 0x11, 0xd8, 0xea, 0x49, 0x2d, 0xdd, 0xa3, 0xda, 0x19, 0x4c, 0xa0, 0xdc, 0x2e, 0xdb, 0xb7, 0x51, 0x5a, 0xda, 0xab, 0xdc, 0x0e, 0x56, 0x45, 0xd3, 0x15, 0xdf, 0x8e, 0x4a, 0x1c, 0xb9, 0xfe, 0xdd, 0xf1, 0x3c, 0x7b, 0x98, 0x44, 0xdd, 0x0f, 0x36, 0xb1, 0x82, 0xf8, 0xdc, 0x4d, 0x35, 0x33, 0x6f, 0xf0, 0xdb, 0x7a, 0x34, 0xba, 0x5d, 0xa1, 0xda, 0xee, 0x35, 0x8a, 0x4b, 0x5a, 0xd8, 0x00, 0x32, 0x8a, 0x33, 0xd3, 0xd4, 0xfe, 0x32, 0x03, 0x30, 0xe3, 0xd8, 0x10, 0x5b, 0x72, 0x2e, 0x8e, 0xd6, 0x29, 0x61, 0xf6, 0x2c, 0x64, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0xa8, 0x80, 0xff, 0xff, 0xb1, 0x8f, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0xa8, 0x80, 0xff, 0xff, 0xb1, 0x8f, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0xa8, 0x80, 0xff, 0xff, 0xb1, 0x8f, 0x6d, 0x42, 0x41, 0x20, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x98, 0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x01, 0x40, 0x00, 0x00, 0x01, 0x84, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x97, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xfe, 0x85, 0x60, 0xff, 0xff, 0xb3, 0xfa, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbe, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0xad, 0x0c, 0x00, 0x00, 0x1b, 0xb0, 0x00, 0x00, 0x0d, 0xaa, 0x00, 0x00, 0x14, 0x7b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0xaf, 0x2a, 0x00, 0x00, 0x1c, 0x07, 0x00, 0x00, 0x0e, 0x2c, 0x00, 0x00, 0x14, 0x7b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x47, 0x00, 0x00, 0x1a, 0x49, 0x00, 0x00, 0x0b, 0xb0, 0x00, 0x00, 0x14, 0x7b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x66, 0xf0, 0x55, 0x90, 0x22, 0x59, 0x4e, 0xfa, 0x58, 0x7c, 0xf5, 0x33, 0x17, 0xf7, 0xfd, 0x13, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd2, 0xdb, 0xff, 0xff, 0x00, 0x00, 0x2d, 0x24, 0xe8, 0x08, 0x02, 0xec, 0xff, 0xff, 0xb1, 0x05, 0xa7, 0x83, 0x0a, 0xcb, 0x99, 0x0f, 0xaa, 0x6f, 0xdd, 0xa6, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xab, 0x00, 0x0c, 0x12, 0x9c, 0xff, 0xfb, 0x21, 0x8d, 0x00, 0x89, 0x34, 0x39, 0x00, 0x00, 0x67, 0x52, 0xff, 0xff, 0xf1, 0xec, 0xff, 0xc8, 0xaa, 0x3d, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xab, 0x00, 0x06, 0xbe, 0x75, 0xff, 0xfd, 0xbb, 0x45, 0x00, 0x4c, 0xa4, 0xf7, 0x00, 0x00, 0x56, 0x3e, 0xff, 0xff, 0xf1, 0xec, 0xff, 0xe6, 0x38, 0x17, 0x70, 0x61, 0x72, 0x61, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xab, 0x00, 0x03, 0xee, 0x3c, 0xff, 0xff, 0x75, 0x4e, 0x00, 0x2c, 0xab, 0xba, 0x00, 0x00, 0x23, 
0x83, 0xff, 0xff, 0xf1, 0xec, 0xff, 0xf9, 0xd7, 0xc3, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x72, 0x6d, 0x67, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x6d, 0x6c, 0x75, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x0c, 0x65, 0x6e, 0x55, 0x53, 0x00, 0x00, 0x00, 0x5a, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x43, 0x00, 0x6f, 0x00, 0x70, 0x00, 0x79, 0x00, 0x72, 0x00, 0x69, 0x00, 0x67, 0x00, 0x68, 0x00, 0x74, 0x00, 0x20, 0x00, 0x32, 0x00, 0x30, 0x00, 0x30, 0x00, 0x37, 0x00, 0x20, 0x00, 0x49, 0x00, 0x6e, 0x00, 0x74, 0x00, 0x65, 0x00, 0x72, 0x00, 0x6e, 0x00, 0x61, 0x00, 0x74, 0x00, 0x69, 0x00, 0x6f, 0x00, 0x6e, 0x00, 0x61, 0x00, 0x6c, 0x00, 0x20, 0x00, 0x43, 0x00, 0x6f, 0x00, 0x6c, 0x00, 0x6f, 0x00, 0x72, 0x00, 0x20, 0x00, 0x43, 0x00, 0x6f, 0x00, 0x6e, 0x00, 0x73, 0x00, 0x6f, 0x00, 0x72, 0x00, 0x74, 0x00, 0x69, 0x00, 0x75, 0x00, 0x6d, 0x00, 0x00, 0x73, 0x66, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x0c, 0x4b, 0x00, 0x00, 0x05, 0xe4, 0xff, 0xff, 0xf3, 0x28, 0x00, 0x00, 0x07, 0x9c, 0x00, 0x00, 0xfd, 0x87, 0xff, 0xff, 0xfb, 0xa1, 0xff, 0xff, 0xfd, 0xa3, 0x00, 0x00, 0x02, 0xa2, 0x00, 0x00, 0xc0, 0x8c }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (GetImageProfile(image,"icm") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(sRGBProfile)); SetStringInfoDatum(profile,sRGBProfile); status=SetImageProfile(image,"icm",profile); profile=DestroyStringInfo(profile); return(status); } #if defined(MAGICKCORE_LCMS_DELEGATE) #if defined(LCMS_VERSION) && (LCMS_VERSION >= 2000) static void LCMSExceptionHandler(cmsContext context,cmsUInt32Number severity, const char *message) { Image *image; (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s", severity,message != (char *) NULL ? 
message : "no message"); image=(Image *) context; if (image != (Image *) NULL) (void) ThrowMagickException(&image->exception,GetMagickModule(), ImageWarning,"UnableToTransformColorspace","`%s'",image->filename); } #else static int LCMSExceptionHandler(int severity,const char *message) { (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%d, %s", severity,message != (char *) NULL ? message : "no message"); return(1); } #endif #endif MagickExport MagickBooleanType ProfileImage(Image *image,const char *name, const void *datum,const size_t length, const MagickBooleanType magick_unused(clone)) { #define ProfileImageTag "Profile/Image" #define ThrowProfileException(severity,tag,context) \ { \ if (source_profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(source_profile); \ if (target_profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(target_profile); \ ThrowBinaryException(severity,tag,context); \ } MagickBooleanType status; StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(name != (const char *) NULL); if ((datum == (const void *) NULL) || (length == 0)) { char **arguments, *names; int number_arguments; register ssize_t i; /* Delete image profile(s). 
*/ names=ConstantString(name); (void) SubstituteString(&names,","," "); arguments=StringToArgv(names,&number_arguments); names=DestroyString(names); if (arguments == (char **) NULL) return(MagickTrue); ResetImageProfileIterator(image); for (name=GetNextImageProfile(image); name != (const char *) NULL; ) { for (i=1; i < (ssize_t) number_arguments; i++) { if ((*arguments[i] == '!') && (LocaleCompare(name,arguments[i]+1) == 0)) break; if (GlobExpression(name,arguments[i],MagickTrue) != MagickFalse) { (void) DeleteImageProfile(image,name); ResetImageProfileIterator(image); break; } } name=GetNextImageProfile(image); } for (i=0; i < (ssize_t) number_arguments; i++) arguments[i]=DestroyString(arguments[i]); arguments=(char **) RelinquishMagickMemory(arguments); return(MagickTrue); } /* Add a ICC, IPTC, or generic profile to the image. */ status=MagickTrue; profile=AcquireStringInfo((size_t) length); SetStringInfoDatum(profile,(unsigned char *) datum); if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0)) status=SetImageProfile(image,name,profile); else { const StringInfo *icc_profile; icc_profile=GetImageProfile(image,"icc"); if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { const char *value; value=GetImageProperty(image,"exif:ColorSpace"); if (LocaleCompare(value,"1") != 0) (void) SetsRGBImageProfile(image); value=GetImageProperty(image,"exif:InteroperabilityIndex"); if (LocaleCompare(value,"R98.") != 0) (void) SetsRGBImageProfile(image); value=GetImageProperty(image,"exif:InteroperabilityIndex"); if (LocaleCompare(value,"R03.") != 0) (void) SetAdobeRGB1998ImageProfile(image); icc_profile=GetImageProfile(image,"icc"); } if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { profile=DestroyStringInfo(profile); return(MagickTrue); } #if !defined(MAGICKCORE_LCMS_DELEGATE) (void) ThrowMagickException(&image->exception,GetMagickModule(), 
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (LCMS)", image->filename); #else { cmsHPROFILE source_profile; /* Transform pixel colors as defined by the color profiles. */ cmsSetLogErrorHandler(LCMSExceptionHandler); source_profile=cmsOpenProfileFromMemTHR(image, GetStringInfoDatum(profile),(cmsUInt32Number) GetStringInfoLength(profile)); if (source_profile == (cmsHPROFILE) NULL) ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); if ((cmsGetDeviceClass(source_profile) != cmsSigLinkClass) && (icc_profile == (StringInfo *) NULL)) status=SetImageProfile(image,name,profile); else { CacheView *image_view; ColorspaceType source_colorspace, target_colorspace; cmsColorSpaceSignature signature; cmsHPROFILE target_profile; cmsHTRANSFORM *restrict transform; cmsUInt32Number flags, source_type, target_type; ExceptionInfo *exception; int intent; MagickBooleanType status; MagickOffsetType progress; size_t source_channels, target_channels; ssize_t y; unsigned short **restrict source_pixels, **restrict target_pixels; exception=(&image->exception); target_profile=(cmsHPROFILE) NULL; if (icc_profile != (StringInfo *) NULL) { target_profile=source_profile; source_profile=cmsOpenProfileFromMemTHR(image, GetStringInfoDatum(icc_profile),(cmsUInt32Number) GetStringInfoLength(icc_profile)); if (source_profile == (cmsHPROFILE) NULL) ThrowProfileException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } switch (cmsGetColorSpace(source_profile)) { case cmsSigCmykData: { source_colorspace=CMYKColorspace; source_type=(cmsUInt32Number) TYPE_CMYK_16; source_channels=4; break; } case cmsSigGrayData: { source_colorspace=GRAYColorspace; source_type=(cmsUInt32Number) TYPE_GRAY_16; source_channels=1; break; } case cmsSigLabData: { source_colorspace=LabColorspace; source_type=(cmsUInt32Number) TYPE_Lab_16; source_channels=3; break; } case cmsSigLuvData: { source_colorspace=YUVColorspace; source_type=(cmsUInt32Number) TYPE_YUV_16; 
source_channels=3; break; } case cmsSigRgbData: { source_colorspace=RGBColorspace; source_type=(cmsUInt32Number) TYPE_RGB_16; source_channels=3; break; } case cmsSigXYZData: { source_colorspace=XYZColorspace; source_type=(cmsUInt32Number) TYPE_XYZ_16; source_channels=3; break; } case cmsSigYCbCrData: { source_colorspace=YCbCrColorspace; source_type=(cmsUInt32Number) TYPE_YCbCr_16; source_channels=3; break; } default: { source_colorspace=UndefinedColorspace; source_type=(cmsUInt32Number) TYPE_RGB_16; source_channels=3; break; } } signature=cmsGetPCS(source_profile); if (target_profile != (cmsHPROFILE) NULL) signature=cmsGetColorSpace(target_profile); switch (signature) { case cmsSigCmykData: { target_colorspace=CMYKColorspace; target_type=(cmsUInt32Number) TYPE_CMYK_16; target_channels=4; break; } case cmsSigLabData: { target_colorspace=LabColorspace; target_type=(cmsUInt32Number) TYPE_Lab_16; target_channels=3; break; } case cmsSigGrayData: { target_colorspace=GRAYColorspace; target_type=(cmsUInt32Number) TYPE_GRAY_16; target_channels=1; break; } case cmsSigLuvData: { target_colorspace=YUVColorspace; target_type=(cmsUInt32Number) TYPE_YUV_16; target_channels=3; break; } case cmsSigRgbData: { target_colorspace=RGBColorspace; target_type=(cmsUInt32Number) TYPE_RGB_16; target_channels=3; break; } case cmsSigXYZData: { target_colorspace=XYZColorspace; target_type=(cmsUInt32Number) TYPE_XYZ_16; target_channels=3; break; } case cmsSigYCbCrData: { target_colorspace=YCbCrColorspace; target_type=(cmsUInt32Number) TYPE_YCbCr_16; target_channels=3; break; } default: { target_colorspace=UndefinedColorspace; target_type=(cmsUInt32Number) TYPE_RGB_16; target_channels=3; break; } } if ((source_colorspace == UndefinedColorspace) || (target_colorspace == UndefinedColorspace)) ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); if ((source_colorspace == GRAYColorspace) && (IsGrayImage(image,exception) == MagickFalse)) 
ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); if ((source_colorspace == CMYKColorspace) && (image->colorspace != CMYKColorspace)) ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); if ((source_colorspace == XYZColorspace) && (image->colorspace != XYZColorspace)) ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); if ((source_colorspace == YCbCrColorspace) && (image->colorspace != YCbCrColorspace)) ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); if ((source_colorspace != CMYKColorspace) && (source_colorspace != GRAYColorspace) && (source_colorspace != LabColorspace) && (source_colorspace != XYZColorspace) && (source_colorspace != YCbCrColorspace) && (IsRGBColorspace(image->colorspace) == MagickFalse)) ThrowProfileException(ImageError,"ColorspaceColorProfileMismatch", name); switch (image->rendering_intent) { case AbsoluteIntent: intent=INTENT_ABSOLUTE_COLORIMETRIC; break; case PerceptualIntent: intent=INTENT_PERCEPTUAL; break; case RelativeIntent: intent=INTENT_RELATIVE_COLORIMETRIC; break; case SaturationIntent: intent=INTENT_SATURATION; break; default: intent=INTENT_PERCEPTUAL; break; } flags=cmsFLAGS_HIGHRESPRECALC; #if defined(cmsFLAGS_BLACKPOINTCOMPENSATION) if (image->black_point_compensation != MagickFalse) flags|=cmsFLAGS_BLACKPOINTCOMPENSATION; #endif transform=AcquireTransformThreadSet(image,source_profile, source_type,target_profile,target_type,intent,flags); if (transform == (cmsHTRANSFORM *) NULL) ThrowProfileException(ImageError,"UnableToCreateColorTransform", name); /* Transform image as dictated by the source & target image profiles. 
*/ source_pixels=AcquirePixelThreadSet(image->columns,source_channels); target_pixels=AcquirePixelThreadSet(image->columns,target_channels); if ((source_pixels == (unsigned short **) NULL) || (target_pixels == (unsigned short **) NULL)) { transform=DestroyTransformThreadSet(transform); ThrowProfileException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } if (SetImageStorageClass(image,DirectClass) == MagickFalse) { target_pixels=DestroyPixelThreadSet(target_pixels); source_pixels=DestroyPixelThreadSet(source_pixels); transform=DestroyTransformThreadSet(transform); if (source_profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(source_profile); if (target_profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_profile); return(MagickFalse); } if (target_colorspace == CMYKColorspace) (void) SetImageColorspace(image,target_colorspace); status=MagickTrue; progress=0; image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; register unsigned short *p; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); p=source_pixels[id]; for (x=0; x < (ssize_t) image->columns; x++) { *p++=ScaleQuantumToShort(GetPixelRed(q)); if (source_channels > 1) { *p++=ScaleQuantumToShort(GetPixelGreen(q)); *p++=ScaleQuantumToShort(GetPixelBlue(q)); } if (source_channels > 3) *p++=ScaleQuantumToShort(GetPixelIndex(indexes+x)); q++; } cmsDoTransform(transform[id],source_pixels[id],target_pixels[id], (unsigned int) image->columns); p=target_pixels[id]; q-=image->columns; for (x=0; x < (ssize_t) image->columns; x++) { 
SetPixelRed(q,ScaleShortToQuantum(*p)); SetPixelGreen(q,GetPixelRed(q)); SetPixelBlue(q,GetPixelRed(q)); p++; if (target_channels > 1) { SetPixelGreen(q,ScaleShortToQuantum(*p)); p++; SetPixelBlue(q,ScaleShortToQuantum(*p)); p++; } if (target_channels > 3) { SetPixelIndex(indexes+x,ScaleShortToQuantum(*p)); p++; } q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ProfileImage) #endif proceed=SetImageProgress(image,ProfileImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) SetImageColorspace(image,target_colorspace); switch (signature) { case cmsSigRgbData: { image->type=image->matte == MagickFalse ? TrueColorType : TrueColorMatteType; break; } case cmsSigCmykData: { image->type=image->matte == MagickFalse ? ColorSeparationType : ColorSeparationMatteType; break; } case cmsSigGrayData: { image->type=image->matte == MagickFalse ? GrayscaleType : GrayscaleMatteType; break; } default: break; } target_pixels=DestroyPixelThreadSet(target_pixels); source_pixels=DestroyPixelThreadSet(source_pixels); transform=DestroyTransformThreadSet(transform); if (cmsGetDeviceClass(source_profile) != cmsSigLinkClass) status=SetImageProfile(image,name,profile); if (target_profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_profile); } (void) cmsCloseProfile(source_profile); } #endif } profile=DestroyStringInfo(profile); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m o v e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemoveImageProfile() removes a named profile from the image and returns its % value. 
%
%  The format of the RemoveImageProfile method is:
%
%      void *RemoveImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* No profile tree was ever attached: nothing to remove. */
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  if (LocaleCompare(name,"icc") == 0)
    {
      /*
        Continue to support deprecated color profile for now.
        Only the deprecated struct view is cleared here; the profile itself
        is detached from the splay-tree below.
      */
      image->color_profile.length=0;
      image->color_profile.info=(unsigned char *) NULL;
    }
  if (LocaleCompare(name,"iptc") == 0)
    {
      /*
        Continue to support deprecated IPTC profile for now.
      */
      image->iptc_profile.length=0;
      image->iptc_profile.info=(unsigned char *) NULL;
    }
  /*
    Detach the named profile from the tree; ownership of the returned
    StringInfo passes to the caller (RemoveNodeFromSplayTree does not run
    the tree's value destructor).
  */
  profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s e t P r o f i l e I t e r a t o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResetImageProfileIterator() resets the image profile iterator.  Use it in
%  conjunction with GetNextImageProfile() to iterate over all the profiles
%  associated with an image.
%
%  The format of the ResetImageProfileIterator method is:
%
%      ResetImageProfileIterator(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  /*
    Rewind the profile splay-tree iterator so the next call to
    GetNextImageProfile() starts from the first profile again.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* An image without a profile tree has nothing to iterate. */
  if (image->profiles != (SplayTreeInfo *) NULL)
    ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageProfile() adds a named profile to the image.  If a profile with the
%  same name already exists, it is replaced.  This method differs from the
%  ProfileImage() method in that it does not apply CMS color profiles.
%
%  The format of the SetImageProfile method is:
%
%      MagickBooleanType SetImageProfile(Image *image,const char *name,
%        const StringInfo *profile)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name, for example icc, exif, and 8bim (8bim is the
%      Photoshop wrapper for iptc profiles).
%
%    o profile: A StringInfo structure that contains the named profile.
%
*/

/* Splay-tree value destructor: frees a profile stored in image->profiles. */
static void *DestroyProfile(void *profile)
{
  return((void *) DestroyStringInfo((StringInfo *) profile));
}

/* Read one byte from a big-endian 8BIM resource stream; advances p. */
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  *quantum=(*p++);
  return(p);
}

/* Copy count raw bytes from the resource stream; advances p. */
static inline const unsigned char *ReadResourceBytes(const unsigned char *p,
  const ssize_t count,unsigned char *quantum)
{
  register ssize_t
    i;

  for (i=0; i < count; i++)
    *quantum++=(*p++);
  return(p);
}

/* Read a 32-bit big-endian value; advances p by 4. */
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  size_t *quantum)
{
  *quantum=(size_t) (*p++ << 24);
  *quantum|=(size_t) (*p++ << 16);
  *quantum|=(size_t) (*p++ << 8);
  *quantum|=(size_t) (*p++ << 0);
  return(p);
}

/* Read a 16-bit big-endian value; advances p by 2. */
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  *quantum=(unsigned short) (*p++ << 8);
  *quantum|=(unsigned short) (*p++ << 0);
  return(p);
}

/*
  Walk a Photoshop 8BIM resource block and extract embedded sub-profiles
  (IPTC, ICC, EXIF, XMP) and the resolution resource, registering each via
  SetImageProfile().  Each record is: "8BIM" signature, 16-bit id, Pascal
  name (padded to even length), 32-bit payload length, payload (padded to
  even length).
*/
static MagickBooleanType GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block)
{
  const unsigned char
    *datum;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  size_t
    count;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  /* Stop 16 bytes short of the end: minimum room for one record header. */
  for (p=datum; p < (datum+length-16); )
  {
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    /* Skip the Pascal-string resource name, padded to an even total. */
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&count);
    /*
      NOTE(review): bounds check is done with pointer arithmetic on
      untrusted `count`; `datum+length-count` can wrap if count is huge —
      the `(count > length)` clause appears intended to prevent that, but
      confirm against upstream hardening.
    */
    if ((p > (datum+length-count)) || (count > length))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned short
          resolution;

        /*
          Resolution.
        */
        p=ReadResourceShort(p,&resolution)+6;
        image->x_resolution=(double) resolution;
        p=ReadResourceShort(p,&resolution)+6;
        image->y_resolution=(double) resolution;
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"iptc",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"icc",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"exif",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfile(image,"xmp",profile);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    /* Payloads are padded to an even byte count. */
    if ((count & 0x01) != 0)
      p++;
  }
  return(MagickTrue);
}

/*
  Attach a named profile to the image's profile splay-tree (creating the
  tree on first use).  The profile is cloned, so the caller keeps ownership
  of its argument.  For icc/icm and iptc/8bim names the deprecated
  color_profile/iptc_profile struct members are kept in sync, and 8BIM
  blocks are additionally scanned for embedded sub-profiles.
*/
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  char
    key[MaxTextExtent],
    property[MaxTextExtent];

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
      DestroyProfile);
  (void) CopyMagickString(key,name,MaxTextExtent);
  /* The tree takes ownership of both the key copy and the profile clone. */
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(key),CloneStringInfo(profile));
  if ((status != MagickFalse) &&
      ((LocaleCompare(name,"icc") == 0) || (LocaleCompare(name,"icm") == 0)))
    {
      const StringInfo
        *icc_profile;

      /*
        Continue to support deprecated color profile member.
      */
      icc_profile=GetImageProfile(image,name);
      if (icc_profile != (const StringInfo *) NULL)
        {
          image->color_profile.length=GetStringInfoLength(icc_profile);
          image->color_profile.info=GetStringInfoDatum(icc_profile);
        }
    }
  if ((status != MagickFalse) &&
      ((LocaleCompare(name,"iptc") == 0) || (LocaleCompare(name,"8bim") == 0)))
    {
      const StringInfo
        *iptc_profile;

      /*
        Continue to support deprecated IPTC profile member.
      */
      iptc_profile=GetImageProfile(image,name);
      if (iptc_profile != (const StringInfo *) NULL)
        {
          image->iptc_profile.length=GetStringInfoLength(iptc_profile);
          image->iptc_profile.info=GetStringInfoDatum(iptc_profile);
        }
      /* 8BIM blocks can embed iptc/icc/exif/xmp sub-profiles; extract them. */
      (void) GetProfilesFromResourceBlock(image,profile);
    }
  /*
    Inject profile into image properties.
  */
  (void) FormatLocaleString(property,MaxTextExtent,"%s:sans",name);
  (void) GetImageProperty(image,property);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e P r o f i l e s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageProfiles() synchronizes image properties with the image profiles.
%  Currently we only support updating the EXIF resolution and orientation.
%
%  The format of the SyncImageProfiles method is:
%
%      MagickBooleanType SyncImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/

/* Consume one byte from the stream, decrementing *length; EOF when empty. */
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  int
    c;

  if (*length < 1)
    return(EOF);
  c=(int) (*(*p)++);
  (*length)--;
  return(c);
}

/* Read a 16-bit value from buffer honoring the TIFF byte order. */
static inline unsigned short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  unsigned short
    value;

  if (endian == MSBEndian)
    {
      value=(unsigned short) ((((unsigned char *) buffer)[0] << 8) |
        ((unsigned char *) buffer)[1]);
      return((unsigned short) (value & 0xffff));
    }
  value=(unsigned short) ((buffer[1] << 8) | buffer[0]);
  return((unsigned short) (value & 0xffff));
}

/* Read a 32-bit value from buffer honoring the TIFF byte order. */
static inline size_t ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  size_t
    value;

  if (endian == MSBEndian)
    {
      value=(size_t) ((buffer[0] << 24) | (buffer[1] << 16) |
        (buffer[2] << 8) | buffer[3]);
      return((size_t) (value & 0xffffffff));
    }
  value=(size_t) ((buffer[3] << 24) | (buffer[2] << 16) |
    (buffer[1] << 8 ) | (buffer[0]));
  return((size_t) (value & 0xffffffff));
}

/* Write a 32-bit value to p in the requested byte order. */
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  unsigned char
    buffer[4];

  if (endian == MSBEndian)
    {
      buffer[0]=(unsigned char) (value >> 24);
      buffer[1]=(unsigned char) (value >> 16);
      buffer[2]=(unsigned char) (value >> 8);
      buffer[3]=(unsigned char) value;
      (void) CopyMagickMemory(p,buffer,4);
      return;
    }
  buffer[0]=(unsigned char) value;
  buffer[1]=(unsigned char) (value >> 8);
  buffer[2]=(unsigned char) (value >> 16);
  buffer[3]=(unsigned char) (value >> 24);
  (void) CopyMagickMemory(p,buffer,4);
}

/* Write a 16-bit value to p in the requested byte order. */
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  unsigned char
    buffer[2];

  if (endian == MSBEndian)
    {
      buffer[0]=(unsigned char) (value >> 8);
      buffer[1]=(unsigned char) value;
      (void) CopyMagickMemory(p,buffer,2);
      return;
    }
  buffer[0]=(unsigned char) value;
  buffer[1]=(unsigned char) (value >> 8);
  (void) CopyMagickMemory(p,buffer,2);
}

/*
  Walk the EXIF profile's TIFF IFD structure in place and overwrite the
  resolution (0x011a/0x011b), orientation (0x0112) and resolution-unit
  (0x0128) entries with the image's current values.  Sub-IFDs reachable via
  the EXIF/interoperability offset tags are visited with a bounded manual
  stack; a splay-tree of visited entry addresses guards against cyclic
  offset chains in hostile files.
*/
MagickExport MagickBooleanType SyncImageProfiles(Image *image)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_INTEROP_OFFSET 0xa005

  typedef struct _DirectoryInfo
  {
    unsigned char
      *directory;

    size_t
      entry;
  } DirectoryInfo;

  DirectoryInfo
    directory_stack[MaxDirectoryStack];

  EndianType
    endian;

  size_t
    entry,
    length,
    number_entries;

  SplayTreeInfo
    *exif_resources;

  ssize_t
    id,
    level,
    offset;

  /* Bytes per element for each TIFF data format code (index 0 unused). */
  static int
    format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};

  StringInfo
    *profile;

  unsigned char
    *directory,
    *exif;

  /*
    Set EXIF resolution tag.
  */
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if (profile == (StringInfo *) NULL)
    return(MagickTrue);
  length=GetStringInfoLength(profile);
  exif=GetStringInfoDatum(profile);
  /* Scan forward for the "Exif\0\0" preamble; exif then points past it. */
  while (length != 0)
  {
    if (ReadProfileByte(&exif,&length) != 0x45)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x78)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x69)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x66)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x00)
      continue;
    if (ReadProfileByte(&exif,&length) != 0x00)
      continue;
    break;
  }
  if (length < 16)
    return(MagickFalse);
  /* TIFF header: "II" (little-endian) or "MM" (big-endian), then 0x002a. */
  id=(ssize_t) ReadProfileShort(LSBEndian,exif);
  endian=LSBEndian;
  if (id == 0x4949)
    endian=LSBEndian;
  else
    if (id == 0x4D4D)
      endian=MSBEndian;
    else
      return(MagickFalse);
  if (ReadProfileShort(endian,exif+2) != 0x002a)
    return(MagickFalse);
  /*
    This the offset to the first IFD.
  */
  offset=(ssize_t) ((int) ReadProfileLong(endian,exif+4));
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  /* Keyed by entry address only; NULL compare/destructors (identity set). */
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    /* Pop the next pending IFD (and resume entry index) off the stack. */
    if (level > 0)
      {
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      int
        components;

      register unsigned char
        *p,
        *q;

      size_t
        number_bytes;

      ssize_t
        format,
        tag_value;

      /* Each IFD entry is 12 bytes, after the 2-byte entry count. */
      q=(unsigned char *) (directory+2+(12*entry));
      /* Already visited: offset cycle detected — abandon this IFD. */
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format-1) >= EXIF_NUM_FORMATS)
        break;
      components=(int) ReadProfileLong(endian,q+4);
      number_bytes=(size_t) components*format_bytes[format];
      if (number_bytes <= 4)
        p=q+8;
      else
        {
          ssize_t
            offset;

          /*
            The directory entry contains an offset.
          */
          offset=(ssize_t) ((int) ReadProfileLong(endian,q+8));
          if ((offset+number_bytes) < offset)
            continue;  /* prevent overflow */
          if ((size_t) (offset+number_bytes) > length)
            continue;
          p=(unsigned char *) (exif+offset);
        }
      switch (tag_value)
      {
        case 0x011a:
        {
          /* XResolution: rational numerator := rounded x_resolution, denom 1. */
          (void) WriteProfileLong(endian,(size_t) (image->x_resolution+0.5),p);
          (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x011b:
        {
          /* YResolution. */
          (void) WriteProfileLong(endian,(size_t) (image->y_resolution+0.5),p);
          (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x0112:
        {
          /* Orientation. */
          (void) WriteProfileShort(endian,(unsigned short) image->orientation,
            p);
          break;
        }
        case 0x0128:
        {
          /* ResolutionUnit: EXIF values are offset by one from image->units. */
          (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
          break;
        }
        default:
          break;
      }
      if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
        {
          ssize_t
            offset;

          /* Descend into the sub-IFD; push current position for resumption. */
          offset=(ssize_t) ((int) ReadProfileLong(endian,p));
          if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
            {
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              /* Next-IFD link lives right after the last entry; bounds-check. */
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              offset=(ssize_t) ((int) ReadProfileLong(endian,directory+2+(12*
                number_entries)));
              if ((offset != 0) && ((size_t) offset < length) &&
                  (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}
/* ===== distance.h ===== */
#pragma once
#include <utils.h>
#include <limits.h>

#ifdef _WINDOWS
#include <immintrin.h>
#include <smmintrin.h>
#include <tmmintrin.h>
#include <intrin.h>
#else
#include <immintrin.h>
#endif

#include <cosine_similarity.h>
#include <iostream>

// Internal SSE/AVX helpers: widen signed 8-bit lanes to 16 bits, multiply,
// and accumulate into 32-bit lanes converted to float.
namespace {
  // Sum of squares of the HIGH 8 int8 lanes of X, as 4 packed floats.
  static inline __m128 _mm_mulhi_epi8(__m128i X) {
    __m128i zero = _mm_setzero_si128();
    __m128i sign_x = _mm_cmplt_epi8(X, zero);  // sign mask for widening
    __m128i xhi = _mm_unpackhi_epi8(X, sign_x);
    return _mm_cvtepi32_ps(
        _mm_add_epi32(_mm_setzero_si128(), _mm_madd_epi16(xhi, xhi)));
  }

  // As above, but X is first shifted right by 32 bits per 64-bit lane
  // (used for a 4-byte tail).
  static inline __m128 _mm_mulhi_epi8_shift32(__m128i X) {
    __m128i zero = _mm_setzero_si128();
    X = _mm_srli_epi64(X, 32);
    __m128i sign_x = _mm_cmplt_epi8(X, zero);
    __m128i xhi = _mm_unpackhi_epi8(X, sign_x);
    return _mm_cvtepi32_ps(
        _mm_add_epi32(_mm_setzero_si128(), _mm_madd_epi16(xhi, xhi)));
  }

  // Dot product of 16 int8 lanes of X and Y, as 4 packed float partial sums.
  static inline __m128 _mm_mul_epi8(__m128i X, __m128i Y) {
    __m128i zero = _mm_setzero_si128();
    __m128i sign_x = _mm_cmplt_epi8(X, zero);
    __m128i sign_y = _mm_cmplt_epi8(Y, zero);
    __m128i xlo = _mm_unpacklo_epi8(X, sign_x);
    __m128i xhi = _mm_unpackhi_epi8(X, sign_x);
    __m128i ylo = _mm_unpacklo_epi8(Y, sign_y);
    __m128i yhi = _mm_unpackhi_epi8(Y, sign_y);
    return _mm_cvtepi32_ps(
        _mm_add_epi32(_mm_madd_epi16(xlo, ylo), _mm_madd_epi16(xhi, yhi)));
  }

  // Sum of squares of all 16 int8 lanes of X, as 4 packed float partial sums.
  static inline __m128 _mm_mul_epi8(__m128i X) {
    __m128i zero = _mm_setzero_si128();
    __m128i sign_x = _mm_cmplt_epi8(X, zero);
    __m128i xlo = _mm_unpacklo_epi8(X, sign_x);
    __m128i xhi = _mm_unpackhi_epi8(X, sign_x);
    return _mm_cvtepi32_ps(
        _mm_add_epi32(_mm_madd_epi16(xlo, xlo), _mm_madd_epi16(xhi, xhi)));
  }

  // Dot product of the low 8 int8 lanes of X and Y; only lanes 0-1 of the
  // result carry data (upper lanes zeroed via unpack with zero).
  static inline __m128 _mm_mul32_pi8(__m128i X, __m128i Y) {
    __m128i xlo = _mm_cvtepi8_epi16(X), ylo = _mm_cvtepi8_epi16(Y);
    return _mm_cvtepi32_ps(
        _mm_unpacklo_epi32(_mm_madd_epi16(xlo, ylo), _mm_setzero_si128()));
  }

  // 256-bit variant: dot product of 32 int8 lanes, 8 float partial sums.
  static inline __m256 _mm256_mul_epi8(__m256i X, __m256i Y) {
    __m256i zero = _mm256_setzero_si256();
    __m256i sign_x = _mm256_cmpgt_epi8(zero, X);
    __m256i sign_y = _mm256_cmpgt_epi8(zero, Y);
    __m256i xlo = _mm256_unpacklo_epi8(X, sign_x);
    __m256i xhi = _mm256_unpackhi_epi8(X, sign_x);
    __m256i ylo = _mm256_unpacklo_epi8(Y, sign_y);
    __m256i yhi = _mm256_unpackhi_epi8(Y, sign_y);
    return _mm256_cvtepi32_ps(_mm256_add_epi32(_mm256_madd_epi16(xlo, ylo),
                                               _mm256_madd_epi16(xhi, yhi)));
  }

  // Widen 16 int8 lanes, madd, then blend mask 252 (0b11111100) keeps only
  // the low 2 float lanes of the product, zeroing the rest.
  static inline __m256 _mm256_mul32_pi8(__m128i X, __m128i Y) {
    __m256i xlo = _mm256_cvtepi8_epi16(X), ylo = _mm256_cvtepi8_epi16(Y);
    return _mm256_blend_ps(_mm256_cvtepi32_ps(_mm256_madd_epi16(xlo, ylo)),
                           _mm256_setzero_ps(), 252);
  }

  // Horizontal sum of the 8 float lanes of x.
  static inline float _mm256_reduce_add_ps(__m256 x) {
    /* ( x3+x7, x2+x6, x1+x5, x0+x4 ) */
    const __m128 x128 =
        _mm_add_ps(_mm256_extractf128_ps(x, 1), _mm256_castps256_ps128(x));
    /* ( -, -, x1+x3+x5+x7, x0+x2+x4+x6 ) */
    const __m128 x64 = _mm_add_ps(x128, _mm_movehl_ps(x128, x128));
    /* ( -, -, -, x0+x1+x2+x3+x4+x5+x6+x7 ) */
    const __m128 x32 = _mm_add_ss(x64, _mm_shuffle_ps(x64, x64, 0x55));
    /* Conversion to float is a no-op on x86-64 */
    return _mm_cvtss_f32(x32);
  }
}  // namespace

namespace diskann {
  // enum Metric { L2 = 0, INNER_PRODUCT = 1, FAST_L2 = 2, PQ = 3 };

  // Abstract distance functor: returns the distance between two vectors of
  // `length` elements.  Concrete subclasses select metric and ISA path.
  template<typename T>
  class Distance {
   public:
    virtual float compare(const T *a, const T *b, unsigned length) const = 0;
    virtual ~Distance() {
    }
  };

  // Cosine similarity, delegated to cosine_similarity.h.
  template<typename T>
  class DistanceCosine : public Distance<T> {
    float compare(const T *a, const T *b, unsigned length) const {
      return diskann::compute_cosine_similarity<T>(a, b, length);
    }
  };

  // Squared L2 distance over int8 vectors (AVX2 on Windows, scalar SIMD
  // pragma elsewhere).
  class DistanceL2Int8 : public Distance<int8_t> {
   public:
    float compare(const int8_t *a, const int8_t *b, unsigned size) const {
      int32_t result = 0;
#ifdef _WINDOWS
#ifdef USE_AVX2
      __m256 r = _mm256_setzero_ps();
      char * pX = (char *) a, *pY = (char *) b;
      // Main loop: 32 int8 elements per iteration.
      while (size >= 32) {
        __m256i r1 = _mm256_subs_epi8(_mm256_loadu_si256((__m256i *) pX),
                                      _mm256_loadu_si256((__m256i *) pY));
        r = _mm256_add_ps(r, _mm256_mul_epi8(r1, r1));
        pX += 32;
        pY += 32;
        size -= 32;
      }
      // Tail: 4 elements at a time.
      // NOTE(review): `size` is unsigned, so if size is not a multiple of 4
      // `size -= 4` wraps, and the 16-byte loadu reads past the buffer even
      // when <16 bytes remain — confirm callers always pass multiples of 32.
      while (size > 0) {
        __m128i r2 = _mm_subs_epi8(_mm_loadu_si128((__m128i *) pX),
                                   _mm_loadu_si128((__m128i *) pY));
        r = _mm256_add_ps(r, _mm256_mul32_pi8(r2, r2));
        pX += 4;
        pY += 4;
        size -= 4;
      }
      r = _mm256_hadd_ps(_mm256_hadd_ps(r, r), r);
      return r.m256_f32[0] + r.m256_f32[4];  // MSVC-only union access
#else
#pragma omp simd reduction(+ : result) aligned(a, b : 8)
      for (_s32 i = 0; i < (_s32) size; i++) {
        result += ((int32_t)((int16_t) a[i] - (int16_t) b[i])) *
                  ((int32_t)((int16_t) a[i] - (int16_t) b[i]));
      }
      return (float) result;
#endif
#else
#pragma omp simd reduction(+ : result) aligned(a, b : 8)
      for (_s32 i = 0; i < (_s32) size; i++) {
        result += ((int32_t)((int16_t) a[i] - (int16_t) b[i])) *
                  ((int32_t)((int16_t) a[i] - (int16_t) b[i]));
      }
      return (float) result;
#endif
    }
  };

  // Squared L2 distance over uint8 vectors (scalar, OpenMP simd reduction).
  class DistanceL2UInt8 : public Distance<uint8_t> {
   public:
    float compare(const uint8_t *a, const uint8_t *b, unsigned size) const {
      uint32_t result = 0;
#ifndef _WINDOWS
#pragma omp simd reduction(+ : result) aligned(a, b : 8)
#endif
      for (_s32 i = 0; i < (_s32) size; i++) {
        result += ((int32_t)((int16_t) a[i] - (int16_t) b[i])) *
                  ((int32_t)((int16_t) a[i] - (int16_t) b[i]));
      }
      return (float) result;
    }
  };

  // Squared L2 distance over float vectors using AVX-512 FMA.
  class AVX512DistanceL2Float : public Distance<float> {
   public:
#ifndef _WINDOWS
    float compare(const float *a, const float *b, unsigned size) const
        __attribute__((hot)) {
      // Callers must supply 32-byte-aligned buffers on this path.
      a = (const float *) __builtin_assume_aligned(a, 32);
      b = (const float *) __builtin_assume_aligned(b, 32);
#else
    float compare(const float *a, const float *b, unsigned size) const {
#endif
      float result = 0;
#ifdef USE_AVX512
      // assume size is divisible by 16
      _u16 niters = size / 16;
      __m512 sum = _mm512_setzero_ps();
      for (_u16 j = 0; j < niters; j++) {
        // scope is a[16j:16j+15], b[16j:16j+15]
        // load a_vec
        if (j < (niters - 1)) {
          // Prefetch the next 16-float chunk of each input.
          _mm_prefetch((char *) (a + 16 * (j + 1)), _MM_HINT_T0);
          _mm_prefetch((char *) (b + 16 * (j + 1)), _MM_HINT_T0);
        }
        __m512 a_vec = _mm512_load_ps(a + 16 * j);
        // load b_vec
        __m512 b_vec = _mm512_load_ps(b + 16 * j);
        // a_vec - b_vec
        __m512 tmp_vec = _mm512_sub_ps(a_vec, b_vec);
        // sum = (tmp_vec**2) + sum
        sum = _mm512_fmadd_ps(tmp_vec, tmp_vec, sum);
      }
      // horizontal add sum
      result = _mm512_reduce_add_ps(sum);
#else
#ifndef _WINDOWS
#pragma omp simd reduction(+ : result) aligned(a, b : 32)
#endif
      for (_s32 i = 0; i < (_s32) size; i++) {
        result += (a[i] - b[i]) * (a[i] - b[i]);
      }
#endif
      return result;
    }
  };

  // Squared L2 distance over float vectors using AVX2 FMA.
  class AVX2DistanceL2Float : public Distance<float> {
   public:
#ifndef _WINDOWS
    float compare(const float *a, const float *b, unsigned size) const
        __attribute__((hot)) {
      a = (const float *) __builtin_assume_aligned(a, 32);
      b = (const float *) __builtin_assume_aligned(b, 32);
#else
    float compare(const float *a, const float *b, unsigned size) const {
#endif
      float result = 0;
#ifdef USE_AVX2
      // assume size is divisible by 8
      _u16 niters = size / 8;
      __m256 sum = _mm256_setzero_ps();
      for (_u16 j = 0; j < niters; j++) {
        // scope is a[8j:8j+7], b[8j:8j+7]
        // load a_vec
        if (j < (niters - 1)) {
          _mm_prefetch((char *) (a + 8 * (j + 1)), _MM_HINT_T0);
          _mm_prefetch((char *) (b + 8 * (j + 1)), _MM_HINT_T0);
        }
        __m256 a_vec = _mm256_load_ps(a + 8 * j);
        // load b_vec
        __m256 b_vec = _mm256_load_ps(b + 8 * j);
        // a_vec - b_vec
        __m256 tmp_vec = _mm256_sub_ps(a_vec, b_vec);
        /*
        // (a_vec - b_vec)**2
        __m256 tmp_vec2 = _mm256_mul_ps(tmp_vec, tmp_vec);
        // accumulate sum
        sum = _mm256_add_ps(sum, tmp_vec2);
        */
        // sum = (tmp_vec**2) + sum
        sum = _mm256_fmadd_ps(tmp_vec, tmp_vec, sum);
      }
      // horizontal add sum
      result = _mm256_reduce_add_ps(sum);
#else
#ifndef _WINDOWS
#pragma omp simd reduction(+ : result) aligned(a, b : 32)
#endif
      for (_s32 i = 0; i < (_s32) size; i++) {
        result += (a[i] - b[i]) * (a[i] - b[i]);
      }
#endif
      return result;
    }
  };

  // Slow implementations of the distance functions for machines without AVX2
  // Portable scalar squared-L2 over small integer types.
  template<typename T>
  class SlowDistanceL2Int : public Distance<T> {
    virtual float compare(const T *a, const T *b, unsigned length) const {
      uint32_t result = 0;
      for (_u32 i = 0; i < length; i++) {
        result += ((int32_t)((int16_t) a[i] - (int16_t) b[i])) *
                  ((int32_t)((int16_t) a[i] - (int16_t) b[i]));
      }
      return (float) result;
    }
  };
  // Portable scalar squared-L2 over float vectors (no SIMD assumptions).
  class SlowDistanceL2Float : public Distance<float> {
    virtual float compare(const float *a, const float *b,
                          unsigned length) const {
      float result = 0.0f;
      for (_u32 i = 0; i < length; i++) {
        result += (a[i] - b[i]) * (a[i] - b[i]);
      }
      return result;
    }
  };

  // Squared L2 over int8 using 128-bit AVX/SSE; only compiled usefully on
  // the Windows build (non-Windows path just warns and returns 0).
  class AVXDistanceL2Int8 : public Distance<int8_t> {
   public:
    virtual float compare(const int8_t *a, const int8_t *b,
                          unsigned int length) const {
#ifndef _WINDOWS
      std::cout << "AVX only supported in Windows build.";
      return 0;
    }
#else
      __m128 r = _mm_setzero_ps();
      __m128i r1;
      // Main loop: 16 int8 elements per iteration.
      // NOTE(review): _mm_load_si128 requires 16-byte-aligned input —
      // confirm callers guarantee alignment, else this faults.
      while (length >= 16) {
        r1 = _mm_subs_epi8(_mm_load_si128((__m128i *) a),
                           _mm_load_si128((__m128i *) b));
        r = _mm_add_ps(r, _mm_mul_epi8(r1));
        a += 16;
        b += 16;
        length -= 16;
      }
      r = _mm_hadd_ps(_mm_hadd_ps(r, r), r);
      float res = r.m128_f32[0];  // MSVC-only union access

      if (length >= 8) {
        // Re-load the 16 bytes ending at the current tail and use only the
        // high 8 lanes (a-8 backs up into already-consumed data).
        __m128 r2 = _mm_setzero_ps();
        __m128i r3 = _mm_subs_epi8(_mm_load_si128((__m128i *) (a - 8)),
                                   _mm_load_si128((__m128i *) (b - 8)));
        r2 = _mm_add_ps(r2, _mm_mulhi_epi8(r3));
        a += 8;
        b += 8;
        length -= 8;
        r2 = _mm_hadd_ps(_mm_hadd_ps(r2, r2), r2);
        res += r2.m128_f32[0];
      }

      if (length >= 4) {
        // Final 4-element tail via the shift32 helper.
        __m128 r2 = _mm_setzero_ps();
        __m128i r3 = _mm_subs_epi8(_mm_load_si128((__m128i *) (a - 12)),
                                   _mm_load_si128((__m128i *) (b - 12)));
        r2 = _mm_add_ps(r2, _mm_mulhi_epi8_shift32(r3));
        res += r2.m128_f32[0] + r2.m128_f32[1];
      }
      return res;
    }
#endif
  };

  // Squared L2 over float using 128-bit SSE; Windows-only like the class
  // above.  Assumes length is a multiple of 4 (tail elements are dropped).
  class AVXDistanceL2Float : public Distance<float> {
   public:
    virtual float compare(const float *a, const float *b,
                          unsigned int length) const {
#ifndef _WINDOWS
      std::cout << "AVX only supported in Windows build.";
      return 0;
    }
#else
      __m128 diff, v1, v2;
      __m128 sum = _mm_set1_ps(0);
      while (length >= 4) {
        v1 = _mm_loadu_ps(a);
        a += 4;
        v2 = _mm_loadu_ps(b);
        b += 4;
        diff = _mm_sub_ps(v1, v2);
        sum = _mm_add_ps(sum, _mm_mul_ps(diff, diff));
        length -= 4;
      }
      return sum.m128_f32[0] + sum.m128_f32[1] + sum.m128_f32[2] +
             sum.m128_f32[3];  // MSVC-only union access
    }
#endif
  };

  template<typename T>
  class DistanceInnerProduct : public Distance<T> {
   public:
    virtual float norm(const T *a, unsigned size) const {
float result = 0; #ifdef __GNUC__ #ifdef __AVX512F__ #define AVX512_L2NORM(addr, dest, tmp) \ tmp = _mm512_loadu_ps(addr); \ dest = _mm512_fmadd_ps(tmp, tmp, dest); __m512 sum; __m512 l0, l1; unsigned D = (size + 15) & ~15U; unsigned DR = D % 32; unsigned DD = D - DR; const float *l = (float *) a; const float *e_l = l + DD; sum = _mm512_setzero_ps(); if (DR) { AVX512_L2NORM(e_l, sum, l0); } for (unsigned i = 0; i < DD; i += 32, l += 32) { AVX512_L2NORM(l, sum, l0); AVX512_L2NORM(l + 16, sum, l1); } result = _mm512_reduce_add_ps(sum); #elif defined(__AVX__) #define AVX_L2NORM(addr, dest, tmp) \ tmp = _mm256_loadu_ps(addr); \ dest = _mm256_fmadd_ps(tmp, tmp, dest); __m256 sum; __m256 l0, l1; unsigned D = (size + 7) & ~7U; unsigned DR = D % 16; unsigned DD = D - DR; const float *l = (float *) a; const float *e_l = l + DD; sum = _mm256_setzero_ps(); if (DR) { AVX_L2NORM(e_l, sum, l0); } for (unsigned i = 0; i < DD; i += 16, l += 16) { AVX_L2NORM(l, sum, l0); AVX_L2NORM(l + 8, sum, l1); } result = _mm256_reduce_add_ps(sum); #else #ifdef __SSE2__ #define SSE_L2NORM(addr, dest, tmp) \ tmp = _mm128_loadu_ps(addr); \ tmp = _mm128_mul_ps(tmp, tmp); \ dest = _mm128_add_ps(dest, tmp); __m128 sum; __m128 l0, l1, l2, l3; unsigned D = (size + 3) & ~3U; unsigned DR = D % 16; unsigned DD = D - DR; const float *l = a; const float *e_l = l + DD; float unpack[4] __attribute__((aligned(16))) = {0, 0, 0, 0}; sum = _mm_load_ps(unpack); switch (DR) { case 12: SSE_L2NORM(e_l + 8, sum, l2); case 8: SSE_L2NORM(e_l + 4, sum, l1); case 4: SSE_L2NORM(e_l, sum, l0); default: break; } for (unsigned i = 0; i < DD; i += 16, l += 16) { SSE_L2NORM(l, sum, l0); SSE_L2NORM(l + 4, sum, l1); SSE_L2NORM(l + 8, sum, l2); SSE_L2NORM(l + 12, sum, l3); } _mm_storeu_ps(unpack, sum); result += unpack[0] + unpack[1] + unpack[2] + unpack[3]; #else float dot0, dot1, dot2, dot3; const float *last = a + size; const float *unroll_group = last - 3; /* Process 4 items with each loop for efficiency. 
*/ while (a < unroll_group) { dot0 = a[0] * a[0]; dot1 = a[1] * a[1]; dot2 = a[2] * a[2]; dot3 = a[3] * a[3]; result += dot0 + dot1 + dot2 + dot3; a += 4; } /* Process last 0-3 pixels. Not needed for standard vector lengths. */ while (a < last) { result += (*a) * (*a); a++; } #endif #endif #endif return result; } virtual float compare(const T *a, const T *b, float norm, unsigned size) const = 0; #ifndef _WINDOWS virtual float compare(const T *a, const T *b, unsigned size) const __attribute__((hot)) { const float *l = (const float *) __builtin_assume_aligned(a, 32); const float *r = (const float *) __builtin_assume_aligned(b, 32); #else virtual float compare(const T *a, const T *b, unsigned size) const { const float *l = (float *) a; const float *r = (float *) b; #endif float result = 0; #ifdef __GNUC__ #ifdef __AVX512F__ #define AVX512_DOT(addr1, addr2, dest, tmp1, tmp2) \ tmp1 = _mm512_loadu_ps(addr1); \ tmp2 = _mm512_loadu_ps(addr2); \ dest = _mm512_fmadd_ps(tmp1, tmp2, dest); __m512 sum; __m512 l0, l1; __m512 r0, r1; unsigned D = (size + 15) & ~15U; unsigned DR = D % 32; unsigned DD = D - DR; const float *e_l = l + DD; const float *e_r = r + DD; sum = _mm512_setzero_ps(); if (DR) { AVX512_DOT(e_l, e_r, sum, l0, r0); } for (unsigned i = 0; i < DD; i += 32, l += 32, r += 32) { AVX512_DOT(l, r, sum, l0, r0); AVX512_DOT(l + 16, r + 16, sum, l1, r1); } result = _mm512_reduce_add_ps(sum); #elif defined(__AVX__) #define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \ tmp1 = _mm256_loadu_ps(addr1); \ tmp2 = _mm256_loadu_ps(addr2); \ dest = _mm256_fmadd_ps(tmp1, tmp2, dest); __m256 sum; __m256 l0, l1; __m256 r0, r1; unsigned D = (size + 7) & ~7U; unsigned DR = D % 16; unsigned DD = D - DR; const float *e_l = l + DD; const float *e_r = r + DD; sum = _mm256_setzero_ps(); if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); } for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) { AVX_DOT(l, r, sum, l0, r0); AVX_DOT(l + 8, r + 8, sum, l1, r1); } result = _mm256_reduce_add_ps(sum); #else 
#ifdef __SSE2__ #define SSE_DOT(addr1, addr2, dest, tmp1, tmp2) \ tmp1 = _mm128_loadu_ps(addr1); \ tmp2 = _mm128_loadu_ps(addr2); \ tmp1 = _mm128_mul_ps(tmp1, tmp2); \ dest = _mm128_add_ps(dest, tmp1); __m128 sum; __m128 l0, l1, l2, l3; __m128 r0, r1, r2, r3; unsigned D = (size + 3) & ~3U; unsigned DR = D % 16; unsigned DD = D - DR; const float *e_l = l + DD; const float *e_r = r + DD; float unpack[4] __attribute__((aligned(16))) = {0, 0, 0, 0}; sum = _mm_load_ps(unpack); switch (DR) { case 12: SSE_DOT(e_l + 8, e_r + 8, sum, l2, r2); case 8: SSE_DOT(e_l + 4, e_r + 4, sum, l1, r1); case 4: SSE_DOT(e_l, e_r, sum, l0, r0); default: break; } for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) { SSE_DOT(l, r, sum, l0, r0); SSE_DOT(l + 4, r + 4, sum, l1, r1); SSE_DOT(l + 8, r + 8, sum, l2, r2); SSE_DOT(l + 12, r + 12, sum, l3, r3); } _mm_storeu_ps(unpack, sum); result += unpack[0] + unpack[1] + unpack[2] + unpack[3]; #else float dot0, dot1, dot2, dot3; const float *last = a + size; const float *unroll_group = last - 3; /* Process 4 items with each loop for efficiency. */ while (a < unroll_group) { dot0 = a[0] * b[0]; dot1 = a[1] * b[1]; dot2 = a[2] * b[2]; dot3 = a[3] * b[3]; result += dot0 + dot1 + dot2 + dot3; a += 4; b += 4; } /* Process last 0-3 pixels. Not needed for standard vector lengths. 
*/ while (a < last) { result += *a++ * *b++; } #endif #endif #endif return result; } }; template<typename T> class DistanceFastL2 : public DistanceInnerProduct<T> { public: float norm(const T *a, unsigned size) const { float norm = DistanceInnerProduct<T>::norm(a, size); if (norm == 0.0) { return std::numeric_limits<float>::max(); } return norm; } float compare(const T *a, const T *b, unsigned size) const { float norm_a = DistanceInnerProduct<T>::norm(a, size); float norm_b = DistanceInnerProduct<T>::norm(b, size); if (norm_a == 0.0 || norm_b == 0.0) { return std::numeric_limits<float>::max(); } float result = norm_a + norm_b - (2 * DistanceInnerProduct<T>::compare(a, b, size)); return result; } float compare(const T *a, const T *b, float norm, unsigned size) const { float result = norm - (2 * DistanceInnerProduct<T>::compare(a, b, size)); return result; } }; template<typename T> class DistanceFastInnerProduct : public DistanceInnerProduct<T> { public: float norm(const T *a, unsigned size) const { float norm = std::sqrt(DistanceInnerProduct<T>::norm(a, size)); if (norm == 0.0) { return std::numeric_limits<float>::max(); } return 1 / norm; } float compare(const T *a, const T *b, unsigned size) const { float norm_a = std::sqrt(DistanceInnerProduct<T>::norm(a, size)); float norm_b = std::sqrt(DistanceInnerProduct<T>::norm(b, size)); if (norm_a == 0.0 || norm_b == 0.0) { return std::numeric_limits<float>::max(); } float result = DistanceInnerProduct<T>::compare(a, b, size) / (norm_a * norm_b); return 1 - result; } float compare(const T *a, const T *b, float norm, unsigned size) const { float result = -DistanceInnerProduct<T>::compare(a, b, size) * norm; return result; } }; } // namespace diskann
/* ==== ellipticSEMFEMSetup.c ==== */
/*

The MIT License (MIT)

Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

*/

#include "elliptic.h"

// Vertex record used when sorting/matching FEM vertices (2D coordinates plus
// local/global ids).
typedef struct{

  dfloat VX;
  dfloat VY;

  dlong localId;
  hlong globalId;

}FEMverts_t;

// Node record carrying local id, global id, and the MPI rank that owns it.
typedef struct {

  dlong localId;
  hlong globalId;
  int   ownerRank;

}parallelNode_t;

// compare on global owners
int parallelCompareOwnersAndGlobalId(const void *a, const void *b);

// compare on global indices
int parallelCompareGlobalId(const void *a, const void *b);

// compare xy coordinates
// qsort comparator: orders FEMverts_t lexicographically by (VX, VY) with a
// tolerance band so nearly-coincident vertices compare equal.
int parallelCompareFEMvertsLocation(const void *a, const void *b){

  dfloat NODETOL = 1e-6;

  FEMverts_t *fa = (FEMverts_t*) a;
  FEMverts_t *fb = (FEMverts_t*) b;

  if(fa->VX < fb->VX - NODETOL) return -1;
  if(fa->VX > fb->VX + NODETOL) return +1;

  if(fa->VY < fb->VY - NODETOL) return -1;
  if(fa->VY > fb->VY + NODETOL) return +1;

  return 0;
}

// compare local id
// qsort comparator: orders FEMverts_t by localId only.
int parallelCompareFEMvertsLocalId(const void *a, const void *b){

  FEMverts_t *fa = (FEMverts_t*) a;
  FEMverts_t *fb = (FEMverts_t*) b;

  if(fa->localId < fb->localId) return -1;
  if(fa->localId > fb->localId) return +1;

  return 0;
}

int parallelCompareRowColumn(const void *a, const void *b);

// Per-element-type builders for the unassembled FEM stiffness non-zeros
// (defined below); each appends entries to A and advances *cnt.
void BuildFEMMatrixTri2D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixQuad2D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixTet3D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixHex3D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);

// Builds the SEMFEM preconditioner: constructs a degree-1 FEM mesh on the SEM
// element nodes, assembles the corresponding global stiffness matrix
// (distributed over MPI ranks), and hands it to parAlmond's AMG setup.
// Requires DISCRETIZATION=CONTINUOUS; otherwise prints a message and exits.
//   elliptic - solver context (mesh, BC types, mask, options)
//   precon   - preconditioner struct populated here (femMesh, FEMogs,
//              parAlmond hierarchy, device buffers)
//   lambda   - Helmholtz shift: the assembled operator is (stiffness + lambda*mass)
void ellipticSEMFEMSetup(elliptic_t *elliptic, precon_t* precon, dfloat lambda) {

  setupAide options = elliptic->options;

  if (!(options.compareArgs("DISCRETIZATION", "CONTINUOUS"))) {
    printf("SEMFEM is supported for CONTINUOUS only\n");
    MPI_Barrier(elliptic->mesh->comm);
    MPI_Finalize();
    exit(0);
  }

  mesh_t* mesh = elliptic->mesh; //original mesh

  // mesh_t* pmesh = (mesh_t*) calloc (1,sizeof(mesh_t));
  //partially assembled fem mesh (result of projecting sem element to larger space)
  mesh_t* pmesh = new mesh_t[1];

  // precon->femMesh = (mesh_t*) calloc (1,sizeof(mesh_t));
  //full fem mesh
  precon->femMesh = new mesh_t[1];
  mesh_t *femMesh = precon->femMesh;

  // Shallow copies of the SEM mesh: pmesh/femMesh share pointer members with
  // mesh until overwritten below.
  memcpy(pmesh ,mesh,sizeof(mesh_t));
  memcpy(femMesh,mesh,sizeof(mesh_t));

  if (elliptic->elementType==TRIANGLES) {
    //set semfem nodes as the grid points
    pmesh->Np = mesh->NpFEM;
    pmesh->r  = mesh->rFEM;
    pmesh->s  = mesh->sFEM;

    //count number of face nodes in the semfem element
    dfloat NODETOL = 1e-6;
    pmesh->Nfp=0;
    for (int n=0;n<pmesh->Np;n++)
      if (fabs(pmesh->s[n]+1)<NODETOL) pmesh->Nfp++;

    //remake the faceNodes array
    // Face 0: s=-1, face 1: r+s=0, face 2: r=-1 (reference triangle edges).
    pmesh->faceNodes = (int *) calloc(pmesh->Nfaces*pmesh->Nfp,sizeof(int));
    int f0=0, f1=0, f2=0;
    for (int n=0;n<pmesh->Np;n++) {
      if (fabs(pmesh->s[n]+1)<NODETOL)           pmesh->faceNodes[0*pmesh->Nfp+f0++] = n;
      if (fabs(pmesh->r[n]+pmesh->s[n])<NODETOL) pmesh->faceNodes[1*pmesh->Nfp+f1++] = n;
      if (fabs(pmesh->r[n]+1)<NODETOL)           pmesh->faceNodes[2*pmesh->Nfp+f2++] = n;
    }

    //remake vertexNodes array
    // Vertices located by squared distance to the reference corners.
    pmesh->vertexNodes = (int*) calloc(pmesh->Nverts, sizeof(int));
    for(int n=0;n<pmesh->Np;++n){
      if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)<NODETOL)
        pmesh->vertexNodes[0] = n;
      if( (pmesh->r[n]-1)*(pmesh->r[n]-1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)<NODETOL)
        pmesh->vertexNodes[1] = n;
      if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]-1)*(pmesh->s[n]-1)<NODETOL)
        pmesh->vertexNodes[2] = n;
    }

    // connect elements using parallel sort
    meshParallelConnect(pmesh);

    // compute physical (x,y) locations of the element nodes
    meshPhysicalNodesTri2D(pmesh);

    // free(sendBuffer);
    meshHaloSetup(pmesh);

    // connect face nodes (find trace indices)
    meshConnectFaceNodes2D(pmesh);

    // global nodes
    meshParallelConnectNodes(pmesh);
    //pmesh->globalIds is now populated

  } else if (elliptic->elementType==TETRAHEDRA) {
    //set semfem nodes as the grid points
    pmesh->Np = mesh->NpFEM;
    pmesh->r  = mesh->rFEM;
    pmesh->s  = mesh->sFEM;
    pmesh->t  = mesh->tFEM;

    //count number of face nodes in the semfem element
    dfloat NODETOL = 1e-6;
    pmesh->Nfp=0;
    for (int n=0;n<pmesh->Np;n++)
      if (fabs(pmesh->t[n]+1)<NODETOL) pmesh->Nfp++;

    //remake the faceNodes array
    // Face 0: t=-1, face 1: s=-1, face 2: r+s+t=-1, face 3: r=-1.
    pmesh->faceNodes = (int *) calloc(pmesh->Nfaces*pmesh->Nfp,sizeof(int));
    int f0=0, f1=0, f2=0, f3=0;
    for (int n=0;n<pmesh->Np;n++) {
      if (fabs(pmesh->t[n]+1)<NODETOL)
        pmesh->faceNodes[0*pmesh->Nfp+f0++] = n;
      if (fabs(pmesh->s[n]+1)<NODETOL)
        pmesh->faceNodes[1*pmesh->Nfp+f1++] = n;
      if (fabs(pmesh->r[n]+pmesh->s[n]+ pmesh->t[n]+1.0)<NODETOL)
        pmesh->faceNodes[2*pmesh->Nfp+f2++] = n;
      if (fabs(pmesh->r[n]+1)<NODETOL)
        pmesh->faceNodes[3*pmesh->Nfp+f3++] = n;
    }

    //remake vertexNodes array
    pmesh->vertexNodes = (int*) calloc(pmesh->Nverts, sizeof(int));
    for(int n=0;n<pmesh->Np;++n){
      if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL)
        pmesh->vertexNodes[0] = n;
      if( (pmesh->r[n]-1)*(pmesh->r[n]-1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL)
        pmesh->vertexNodes[1] = n;
      if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]-1)*(pmesh->s[n]-1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL)
        pmesh->vertexNodes[2] = n;
      if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]-1)*(pmesh->t[n]-1)<NODETOL)
        pmesh->vertexNodes[3] = n;
    }

    // connect elements using parallel sort
    meshParallelConnect(pmesh);

    // compute physical (x,y) locations of the element nodes
    meshPhysicalNodesTet3D(pmesh);

    // free(sendBuffer);
    meshHaloSetup(pmesh);

    // connect face nodes (find trace indices)
    meshConnectFaceNodes3D(pmesh);

    // global nodes
    meshParallelConnectNodes(pmesh);
    //pmesh->globalIds is now populated
  }

  //now build the full degree 1 fem mesh
  int femN = 1; //degree of fem approximation

  /* allocate space for node coordinates */
  femMesh->Nelements = mesh->NelFEM*mesh->Nelements;
  femMesh->EToV = (hlong*) calloc(femMesh->Nelements*femMesh->Nverts, sizeof(hlong));
  femMesh->EX = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat));
  femMesh->EY = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat));
  if (elliptic->dim==3)
    femMesh->EZ = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat));

  // localIds maps each FEM sub-element vertex back to its node in the SEM
  // (pmesh) node numbering.
  dlong *localIds = (dlong *) calloc(femMesh->Nverts*femMesh->Nelements,sizeof(dlong));

  // dlong NFEMverts = mesh->Nelements*mesh->NpFEM;
  for(dlong e=0;e<mesh->Nelements;++e){
    for (int n=0;n<mesh->NelFEM;n++) {
      // NOTE: VLA (GCC extension) sized by the runtime vertex count.
      dlong id[femMesh->Nverts];
      dlong femId = e*mesh->NelFEM*mesh->Nverts+n*mesh->Nverts;

      for (int i=0;i<femMesh->Nverts;i++) {
        //local ids in the subelement fem grid
        id[i] = e*mesh->NpFEM + mesh->FEMEToV[n*mesh->Nverts+i];

        /* read vertex triplet for triangle */
        femMesh->EToV[femId+i] = pmesh->globalIds[id[i]];

        femMesh->EX[femId+i] = pmesh->x[id[i]];
        femMesh->EY[femId+i] = pmesh->y[id[i]];
        if (elliptic->dim==3)
          femMesh->EZ[femId+i] = pmesh->z[id[i]];
      }

      switch(elliptic->elementType){
        case TRIANGLES:
          localIds[femId+0] = id[0];
          localIds[femId+1] = id[1];
          localIds[femId+2] = id[2];
          break;
        case QUADRILATERALS:
          localIds[femId+0] = id[0];
          localIds[femId+1] = id[1];
          localIds[femId+2] = id[3]; //need to swap this as the Np nodes are ordered [0,1,3,2] in a degree 1 element
          localIds[femId+3] = id[2];
          break;
        case TETRAHEDRA:
          localIds[femId+0] = id[0];
          localIds[femId+1] = id[1];
          localIds[femId+2] = id[2];
          localIds[femId+3] = id[3];
          break;
        case HEXAHEDRA:
          localIds[femId+0] = id[0];
          localIds[femId+1] = id[1];
          localIds[femId+2] = id[3]; //need to swap this as the Np nodes are ordered [0,1,3,2,4,5,7,6] in a degree 1 element
          localIds[femId+3] = id[2];
          localIds[femId+4] = id[4];
          localIds[femId+5] = id[5];
          localIds[femId+6] = id[7];
          localIds[femId+7] = id[6];
          break;
      }
    }
  }

  // connect elements using parallel sort
  meshParallelConnect(femMesh);

  // Load degree-1 reference operators (Dr/Ds/Dt or D, MM, ...) for femMesh.
  switch(elliptic->elementType){
    case TRIANGLES:
      meshLoadReferenceNodesTri2D(femMesh, femN);
      break;
    case QUADRILATERALS:
      meshLoadReferenceNodesQuad2D(femMesh, femN);
      break;
    case TETRAHEDRA:
      meshLoadReferenceNodesTet3D(femMesh, femN);
      break;
    case HEXAHEDRA:
      meshLoadReferenceNodesHex3D(femMesh, femN);
      break;
  }

  // Flag which pmesh nodes lie on each macro-element face.
  int *faceFlag = (int*) calloc(pmesh->Np*pmesh->Nfaces,sizeof(int));
  for (int f=0;f<pmesh->Nfaces;f++) {
    for (int n=0;n<pmesh->Nfp;n++) {
      int id = pmesh->faceNodes[f*pmesh->Nfp+n];
      faceFlag[f*pmesh->Np + id] = 1; //flag the nodes on this face
    }
  }

  //map from faces of fem sub-elements to the macro element face number
  int *femFaceMap = (int*) calloc(mesh->NelFEM*femMesh->Nfaces,sizeof(int));
  for (int n=0;n<mesh->NelFEM*femMesh->Nfaces;n++) femFaceMap[n] = -1;

  for (int n=0;n<mesh->NelFEM;n++) {
    for (int f=0;f<femMesh->Nfaces;f++) {
      for (int face=0; face<pmesh->Nfaces;face++) {

        //count the nodes on this face which are on a macro face
        int NvertsOnFace = 0;
        for (int i=0;i<femMesh->Nfp;i++){
          int id = femMesh->faceNodes[f*femMesh->Nfp+i];
          int v  = mesh->FEMEToV[n*pmesh->Nverts+id];
          NvertsOnFace += faceFlag[face*pmesh->Np + v];
        }
        if (NvertsOnFace == femMesh->Nfp)
          femFaceMap[n*femMesh->Nfaces+f] = face; //on macro face
      }
    }
  }

  //fill the boundary flag array
  // Sub-element faces inherit the macro element's boundary condition code.
  femMesh->EToB = (int*) calloc(femMesh->Nelements*femMesh->Nfaces, sizeof(int));
  for (dlong e=0;e<mesh->Nelements;e++) {
    for (int n=0;n<mesh->NelFEM;n++) {
      for (int f=0;f<femMesh->Nfaces;f++) {
        int face = femFaceMap[n*femMesh->Nfaces+f];
        if (face>-1) {
          femMesh->EToB[(e*mesh->NelFEM +n)*femMesh->Nfaces +f] = mesh->EToB[e*mesh->Nfaces + face];
        }
      }
    }
  }
  free(faceFlag);
  free(femFaceMap);

  // Finish femMesh setup: nodes, geometric factors, halo, trace connectivity.
  switch(elliptic->elementType){
    case TRIANGLES:
      meshPhysicalNodesTri2D(femMesh);
      meshGeometricFactorsTri2D(femMesh);
      meshHaloSetup(femMesh);
      meshConnectFaceNodes2D(femMesh);
      meshSurfaceGeometricFactorsTri2D(femMesh);
      break;
    case QUADRILATERALS:
      meshPhysicalNodesQuad2D(femMesh);
      meshGeometricFactorsQuad2D(femMesh);
      meshHaloSetup(femMesh);
      meshConnectFaceNodes2D(femMesh);
      meshSurfaceGeometricFactorsQuad2D(femMesh);
      break;
    case TETRAHEDRA:
      meshPhysicalNodesTet3D(femMesh);
      meshGeometricFactorsTet3D(femMesh);
      meshHaloSetup(femMesh);
      meshConnectFaceNodes3D(femMesh);
      meshSurfaceGeometricFactorsTet3D(femMesh);
      break;
    case HEXAHEDRA:
      meshPhysicalNodesHex3D(femMesh);
      meshGeometricFactorsHex3D(femMesh);
      meshHaloSetup(femMesh);
      meshConnectFaceNodes3D(femMesh);
      meshSurfaceGeometricFactorsHex3D(femMesh);
      break;
  }

  // global nodes
  meshParallelConnectNodes(femMesh);

  dlong Ntotal = pmesh->Np*pmesh->Nelements;
  int verbose = options.compareArgs("VERBOSE","TRUE") ? 1:0;

  // Masked copy of the global ids: Dirichlet nodes get id 0 so the gather-
  // scatter drops them from the assembled system.
  pmesh->maskedGlobalIds = (hlong *) calloc(Ntotal,sizeof(hlong));
  memcpy(pmesh->maskedGlobalIds, pmesh->globalIds, Ntotal*sizeof(hlong));

  if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) {
    //build a new mask for NpFEM>Np node sets

    // gather-scatter
    pmesh->ogs = ogsSetup(Ntotal, pmesh->globalIds, mesh->comm, verbose, mesh->device);

    //make a node-wise bc flag using the gsop (prioritize Dirichlet boundaries over Neumann)
    int *mapB = (int *) calloc(Ntotal,sizeof(int));
    for (dlong e=0;e<pmesh->Nelements;e++) {
      for (int n=0;n<pmesh->Np;n++) mapB[n+e*pmesh->Np] = 1E9;
      for (int f=0;f<pmesh->Nfaces;f++) {
        int bc = pmesh->EToB[f+e*pmesh->Nfaces];
        if (bc>0) {
          for (int n=0;n<pmesh->Nfp;n++) {
            int BCFlag = elliptic->BCType[bc];
            int fid = pmesh->faceNodes[n+f*pmesh->Nfp];
            mapB[fid+e*pmesh->Np] = mymin(BCFlag,mapB[fid+e*pmesh->Np]);
          }
        }
      }
    }
    // ogsMin keeps the strongest (lowest) BC flag at shared nodes.
    ogsGatherScatter(mapB, ogsInt, ogsMin, pmesh->ogs);

    //use the bc flags to find masked ids
    for (dlong n=0;n<pmesh->Nelements*pmesh->Np;n++) {
      if (mapB[n] == 1) { //Dirichlet boundary
        pmesh->maskedGlobalIds[n] = 0;
      }
    }
    free(mapB);
  } else {
    //mask using the original mask
    for (dlong n=0;n<elliptic->Nmasked;n++)
      pmesh->maskedGlobalIds[elliptic->maskIds[n]] = 0;
  }

  //build masked gs handle
  precon->FEMogs = ogsSetup(Ntotal, pmesh->maskedGlobalIds, mesh->comm, verbose, mesh->device);

  // number of degrees of freedom on this rank (after gathering)
  hlong Ngather = precon->FEMogs->Ngather;

  // create a global numbering system
  hlong *globalIds = (hlong *) calloc(Ngather,sizeof(hlong));
  int   *owner     = (int *) calloc(Ngather,sizeof(int));

  // every gathered degree of freedom has its own global id
  hlong *globalStarts = (hlong *) calloc(mesh->size+1,sizeof(hlong));
  MPI_Allgather(&Ngather, 1, MPI_HLONG, globalStarts+1, 1, MPI_HLONG, mesh->comm);
  // Prefix-sum the per-rank counts into global DOF offsets.
  for(int r=0;r<mesh->size;++r)
    globalStarts[r+1] = globalStarts[r]+globalStarts[r+1];

  //use the offsets to set a consecutive global numbering
  for (dlong n =0;n<precon->FEMogs->Ngather;n++) {
    globalIds[n] = n + globalStarts[mesh->rank];
    owner[n] = mesh->rank;
  }

  //scatter this numbering to the original nodes
  // Masked nodes keep the -1 sentinel (they receive no scattered value).
  hlong *globalNumbering = (hlong *) calloc(Ntotal,sizeof(hlong));
  int *globalOwners = (int *) calloc(Ntotal,sizeof(int));
  for (dlong n=0;n<Ntotal;n++) globalNumbering[n] = -1;
  ogsScatter(globalNumbering, globalIds, ogsHlong, ogsAdd, precon->FEMogs);
  ogsScatter(globalOwners, owner, ogsInt, ogsAdd, precon->FEMogs);

  free(globalIds); free(owner);

  if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) {
    //dont need these anymore
    free(pmesh->vmapM);
    free(pmesh->vmapP);
    free(pmesh->mapP);

    //maybe more cleanup can go here
  }

  if (elliptic->elementType==TRIANGLES) {
    //build stiffness matrices
    // S_ab[n][m] = sum_{k,l} Da[l][n] * MM[l][k] * Db[k][m]  (a,b in {r,s}).
    femMesh->Srr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Srs = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Ssr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Sss = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));

    for (int n=0;n<femMesh->Np;n++) {
      for (int m=0;m<femMesh->Np;m++) {
        for (int k=0;k<femMesh->Np;k++) {
          for (int l=0;l<femMesh->Np;l++) {
            femMesh->Srr[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
            femMesh->Srs[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
            femMesh->Ssr[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
            femMesh->Sss[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
          }
        }
      }
    }
  } else if (elliptic->elementType==TETRAHEDRA) {
    //build stiffness matrices
    // Same construction as the triangle case, for all nine (r,s,t) pairs.
    femMesh->Srr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Srs = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Srt = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Ssr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Sss = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Sst = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Str = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Sts = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
    femMesh->Stt = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));

    for (int n=0;n<femMesh->Np;n++) {
      for (int m=0;m<femMesh->Np;m++) {
        for (int k=0;k<femMesh->Np;k++) {
          for (int l=0;l<femMesh->Np;l++) {
            femMesh->Srr[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
            femMesh->Srs[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
            femMesh->Srt[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np];
            femMesh->Ssr[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
            femMesh->Sss[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
            femMesh->Sst[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np];
            femMesh->Str[m+n*femMesh->Np] += femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
            femMesh->Sts[m+n*femMesh->Np] += femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
            femMesh->Stt[m+n*femMesh->Np] += femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np];
          }
        }
      }
    }
  }

  if (mesh->rank==0) printf("Building full SEMFEM matrix...");fflush(stdout);

  // Build non-zeros of stiffness matrix (unassembled)
  dlong nnzLocal = femMesh->Np*femMesh->Np*femMesh->Nelements;

  dlong cnt =0;
  nonZero_t *sendNonZeros = (nonZero_t*) calloc(nnzLocal, sizeof(nonZero_t));
  int *AsendCounts  = (int*) calloc(mesh->size, sizeof(int));
  int *ArecvCounts  = (int*) calloc(mesh->size, sizeof(int));
  int *AsendOffsets = (int*) calloc(mesh->size+1, sizeof(int));
  int *ArecvOffsets = (int*) calloc(mesh->size+1, sizeof(int));

  //Build unassembed non-zeros
  switch(elliptic->elementType){
    case TRIANGLES:
      BuildFEMMatrixTri2D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break;
    case QUADRILATERALS:
      BuildFEMMatrixQuad2D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break;
    case TETRAHEDRA:
      BuildFEMMatrixTet3D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break;
    case HEXAHEDRA:
      BuildFEMMatrixHex3D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break;
  }

  // Make the MPI_NONZERO_T data type
  // Field offsets are measured from a live instance so padding is respected.
  MPI_Datatype MPI_NONZERO_T;
  MPI_Datatype dtype[4] = {MPI_HLONG, MPI_HLONG, MPI_INT, MPI_DFLOAT};
  int blength[4] = {1, 1, 1, 1};
  MPI_Aint addr[4], displ[4];
  MPI_Get_address ( &(sendNonZeros[0]          ), addr+0);
  MPI_Get_address ( &(sendNonZeros[0].col      ), addr+1);
  MPI_Get_address ( &(sendNonZeros[0].ownerRank), addr+2);
  MPI_Get_address ( &(sendNonZeros[0].val      ), addr+3);
  displ[0] = 0;
  displ[1] = addr[1] - addr[0];
  displ[2] = addr[2] - addr[0];
  displ[3] = addr[3] - addr[0];
  MPI_Type_create_struct (4, blength, displ, dtype, &MPI_NONZERO_T);
  MPI_Type_commit (&MPI_NONZERO_T);

  // count how many non-zeros to send to each process
  for(dlong n=0;n<cnt;++n)
    AsendCounts[sendNonZeros[n].ownerRank]++;

  // sort by row ordering
  qsort(sendNonZeros, cnt, sizeof(nonZero_t), parallelCompareRowColumn);

  // find how many nodes to expect (should use sparse version)
  MPI_Alltoall(AsendCounts, 1, MPI_INT, ArecvCounts, 1, MPI_INT, mesh->comm);

  // find send and recv offsets for gather
  dlong nnz = 0;
  for(int r=0;r<mesh->size;++r){
    AsendOffsets[r+1] = AsendOffsets[r] + AsendCounts[r];
    ArecvOffsets[r+1] = ArecvOffsets[r] + ArecvCounts[r];
    nnz += ArecvCounts[r];
  }

  nonZero_t *A = (nonZero_t*) calloc(nnz, sizeof(nonZero_t));

  // determine number to receive
  MPI_Alltoallv(sendNonZeros, AsendCounts, AsendOffsets, MPI_NONZERO_T,
                A, ArecvCounts, ArecvOffsets, MPI_NONZERO_T, mesh->comm);

  // sort received non-zero entries by row block (may need to switch compareRowColumn tests)
  qsort(A, nnz, sizeof(nonZero_t), parallelCompareRowColumn);

  // compress duplicates
  // In-place accumulation of entries with identical (row, col).
  cnt = 0;
  for(dlong n=1;n<nnz;++n){
    if(A[n].row == A[cnt].row && A[n].col == A[cnt].col){
      A[cnt].val += A[n].val;
    } else{
      ++cnt;
      A[cnt] = A[n];
    }
  }
  if (nnz) cnt++;
  nnz = cnt;

  if(mesh->rank==0) printf("done.\n");

  MPI_Barrier(mesh->comm);
  MPI_Type_free(&MPI_NONZERO_T);

  // Split the AoS non-zeros into the COO arrays parAlmond expects.
  hlong *Rows = (hlong *) calloc(nnz, sizeof(hlong));
  hlong *Cols = (hlong *) calloc(nnz, sizeof(hlong));
  dfloat *Vals = (dfloat*) calloc(nnz,sizeof(dfloat));

  for (dlong n=0;n<nnz;n++) {
    Rows[n] = A[n].row;
    Cols[n] = A[n].col;
    Vals[n] = A[n].val;
  }
  free(A);

  precon->parAlmond = parAlmond::Init(mesh->device, mesh->comm, options);
  parAlmond::AMGSetup(precon->parAlmond,
                      globalStarts,
                      nnz,
                      Rows,
                      Cols,
                      Vals,
                      elliptic->allNeumann,
                      elliptic->allNeumannPenalty);
  free(Rows); free(Cols); free(Vals);

  if (options.compareArgs("VERBOSE", "TRUE"))
    parAlmond::Report(precon->parAlmond);

  if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) {
    // //tell parAlmond not to gather this level (its done manually)
    // agmgLevel *baseLevel = precon->parAlmond->levels[0];
    // baseLevel->gatherLevel = false;
    // baseLevel->weightedInnerProds = false;

    // build interp and anterp
    // SEMFEMAnterp is the transpose of SEMFEMInterp (column-major swap).
    dfloat *SEMFEMAnterp = (dfloat*) calloc(mesh->NpFEM*mesh->Np, sizeof(dfloat));
    for(int n=0;n<mesh->NpFEM;++n){
      for(int m=0;m<mesh->Np;++m){
        SEMFEMAnterp[n+m*mesh->NpFEM] = mesh->SEMFEMInterp[n*mesh->Np+m];
      }
    }

    mesh->o_SEMFEMInterp = mesh->device.malloc(mesh->NpFEM*mesh->Np*sizeof(dfloat),mesh->SEMFEMInterp);
    mesh->o_SEMFEMAnterp = mesh->device.malloc(mesh->NpFEM*mesh->Np*sizeof(dfloat),SEMFEMAnterp);

    free(SEMFEMAnterp);

    // Device scratch vectors for applying the preconditioner.
    precon->o_rFEM = mesh->device.malloc(mesh->Nelements*mesh->NpFEM*sizeof(dfloat));
    precon->o_zFEM = mesh->device.malloc(mesh->Nelements*mesh->NpFEM*sizeof(dfloat));

    precon->o_GrFEM = mesh->device.malloc(precon->FEMogs->Ngather*sizeof(dfloat));
    precon->o_GzFEM = mesh->device.malloc(precon->FEMogs->Ngather*sizeof(dfloat));
  } else {
    // //tell parAlmond to gather this level
    // agmgLevel *baseLevel = precon->parAlmond->levels[0];
    // baseLevel->gatherLevel = true;

    parAlmond::multigridLevel *baseLevel = precon->parAlmond->levels[0];
    precon->rhsG = (dfloat*) calloc(baseLevel->Ncols,sizeof(dfloat));
    precon->xG   = (dfloat*) calloc(baseLevel->Ncols,sizeof(dfloat));
    precon->o_rhsG = mesh->device.malloc(baseLevel->Ncols*sizeof(dfloat));
    precon->o_xG   = mesh->device.malloc(baseLevel->Ncols*sizeof(dfloat));

    // baseLevel->Srhs = (dfloat*) calloc(mesh->Np*mesh->Nelements,sizeof(dfloat));
    // baseLevel->Sx   = (dfloat*) calloc(mesh->Np*mesh->Nelements,sizeof(dfloat));
    // baseLevel->o_Srhs = mesh->device.malloc(mesh->Np*mesh->Nelements*sizeof(dfloat));
    // baseLevel->o_Sx   = mesh->device.malloc(mesh->Np*mesh->Nelements*sizeof(dfloat));

    // baseLevel->weightedInnerProds = false;

    // baseLevel->gatherArgs = (void **) calloc(3,sizeof(void*));
    // baseLevel->gatherArgs[0] = (void *) elliptic;
    // baseLevel->gatherArgs[1] = (void *) precon->FEMogs; //use the gs made from the partial gathered femgrid
    // baseLevel->gatherArgs[2] = (void *) &(baseLevel->o_Sx);
    // baseLevel->scatterArgs = baseLevel->gatherArgs;

    // baseLevel->device_gather  = ellipticGather;
    // baseLevel->device_scatter = ellipticScatter;
  }
}

// Appends the unassembled FEM (stiffness + lambda*mass) non-zeros for all
// triangle elements to A, using the global numbering/owner arrays produced in
// ellipticSEMFEMSetup.  Entries below 1e-7 in magnitude are dropped; masked
// nodes (negative global number) are skipped.  The omp critical section
// serializes the append so *cnt stays consistent across threads.
void BuildFEMMatrixTri2D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering, int *globalOwners, dlong *cnt, nonZero_t *A) {

  #pragma omp parallel for
  for (dlong e=0;e<femMesh->Nelements;e++) {
    for (int n=0;n<femMesh->Np;n++) {
      dlong idn = localIds[e*femMesh->Np + n];
      if (globalNumbering[idn]<0) continue; //skip masked nodes
      for (int m=0;m<femMesh->Np;m++) {
        dlong idm = localIds[e*femMesh->Np + m];
        if (globalNumbering[idm]<0) continue; //skip masked nodes

        dfloat val = 0.;

        // Constant geometric factors per affine triangle.
        dfloat Grr = femMesh->ggeo[e*femMesh->Nggeo + G00ID];
        dfloat Grs = femMesh->ggeo[e*femMesh->Nggeo + G01ID];
        dfloat Gss = femMesh->ggeo[e*femMesh->Nggeo + G11ID];
        dfloat J   = femMesh->ggeo[e*femMesh->Nggeo + GWJID];

        val += Grr*femMesh->Srr[m+n*femMesh->Np];
        val += Grs*femMesh->Srs[m+n*femMesh->Np];
        val += Grs*femMesh->Ssr[m+n*femMesh->Np];
        val += Gss*femMesh->Sss[m+n*femMesh->Np];
        val += J*lambda*femMesh->MM[m+n*femMesh->Np];

        dfloat nonZeroThreshold = 1e-7;
        if (fabs(val)>nonZeroThreshold) {
          #pragma omp critical
          {
            // pack non-zero
            A[*cnt].val = val;
            A[*cnt].row = globalNumbering[idn];
            A[*cnt].col = globalNumbering[idm];
            A[*cnt].ownerRank = globalOwners[idn];
            (*cnt)++;
          }
        }
      }
    }
  }
}

// Quad analogue of BuildFEMMatrixTri2D.  Geometric factors vary per node, so
// the stiffness entries are formed from the 1D derivative matrix D with the
// standard tensor-product sparsity (terms only when node lines coincide).
void BuildFEMMatrixQuad2D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering, int *globalOwners, dlong *cnt, nonZero_t *A) {

  #pragma omp parallel for
  for (dlong e=0;e<femMesh->Nelements;e++) {
    for (int ny=0;ny<femMesh->Nq;ny++) {
      for (int nx=0;nx<femMesh->Nq;nx++) {
        dlong idn = localIds[e*femMesh->Np + nx+ny*femMesh->Nq];
        if (globalNumbering[idn]<0) continue; //skip masked nodes
        for (int my=0;my<femMesh->Nq;my++) {
          for (int mx=0;mx<femMesh->Nq;mx++) {
            dlong idm = localIds[e*femMesh->Np + mx+my*femMesh->Nq];
            if (globalNumbering[idm]<0) continue; //skip masked nodes

            int id;
            dfloat val = 0.;

            // Grr term: only couples nodes on the same y-line.
            if (ny==my) {
              for (int k=0;k<femMesh->Nq;k++) {
                id = k+ny*femMesh->Nq;
                dfloat Grr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G00ID*femMesh->Np];
                val += Grr*femMesh->D[nx+k*femMesh->Nq]*femMesh->D[mx+k*femMesh->Nq];
              }
            }

            // Cross terms (Grs evaluated at the two mixed node locations).
            id = mx+ny*femMesh->Nq;
            dfloat Grs = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
            val += Grs*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[my+ny*femMesh->Nq];

            id = nx+my*femMesh->Nq;
            dfloat Gsr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
            val += Gsr*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[ny+my*femMesh->Nq];

            // Gss term: only couples nodes on the same x-line.
            if (nx==mx) {
              for (int k=0;k<femMesh->Nq;k++) {
                id = nx+k*femMesh->Nq;
                dfloat Gss = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G11ID*femMesh->Np];
                val += Gss*femMesh->D[ny+k*femMesh->Nq]*femMesh->D[my+k*femMesh->Nq];
              }
            }

            // Diagonal mass contribution (lambda shift).
            if ((nx==mx)&&(ny==my)) {
              id = nx + ny*femMesh->Nq;
              dfloat JW = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + GWJID*femMesh->Np];
              val += JW*lambda;
            }

            dfloat nonZeroThreshold = 1e-7;
            if (fabs(val)>nonZeroThreshold) {
              #pragma omp critical
              {
                // pack non-zero
                A[*cnt].val = val;
                A[*cnt].row = globalNumbering[idn];
                A[*cnt].col = globalNumbering[idm];
                A[*cnt].ownerRank = globalOwners[idn];
                (*cnt)++;
              }
            }
          }
        }
      }
    }
  }
}

// Tet analogue of BuildFEMMatrixTri2D: constant geometric factors per affine
// tet, all nine reference stiffness blocks plus the lambda-scaled mass term.
void BuildFEMMatrixTet3D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering, int *globalOwners, dlong *cnt, nonZero_t *A) {

  #pragma omp parallel for
  for (dlong e=0;e<femMesh->Nelements;e++) {

    dfloat Grr = femMesh->ggeo[e*femMesh->Nggeo + G00ID];
    dfloat Grs = femMesh->ggeo[e*femMesh->Nggeo + G01ID];
    dfloat Grt = femMesh->ggeo[e*femMesh->Nggeo + G02ID];
    dfloat Gss = femMesh->ggeo[e*femMesh->Nggeo + G11ID];
    dfloat Gst = femMesh->ggeo[e*femMesh->Nggeo + G12ID];
    dfloat Gtt = femMesh->ggeo[e*femMesh->Nggeo + G22ID];
    dfloat J   = femMesh->ggeo[e*femMesh->Nggeo + GWJID];

    for (int n=0;n<femMesh->Np;n++) {
      dlong idn = localIds[e*femMesh->Np + n];
      if (globalNumbering[idn]<0) continue; //skip masked nodes
      for (int m=0;m<femMesh->Np;m++) {
        dlong idm = localIds[e*femMesh->Np + m];
        if (globalNumbering[idm]<0) continue; //skip masked nodes

        dfloat val = 0.;
        val += Grr*femMesh->Srr[m+n*femMesh->Np];
        val += Grs*femMesh->Srs[m+n*femMesh->Np];
        val += Grt*femMesh->Srt[m+n*femMesh->Np];
        val += Grs*femMesh->Ssr[m+n*femMesh->Np];
        val += Gss*femMesh->Sss[m+n*femMesh->Np];
        val += Gst*femMesh->Sst[m+n*femMesh->Np];
        val += Grt*femMesh->Str[m+n*femMesh->Np];
        val += Gst*femMesh->Sts[m+n*femMesh->Np];
        val += Gtt*femMesh->Stt[m+n*femMesh->Np];
        val += J*lambda*femMesh->MM[m+n*femMesh->Np];

        dfloat nonZeroThreshold = 1e-7;
        if (fabs(val)>nonZeroThreshold) {
          #pragma omp critical
          {
            // pack non-zero
            A[*cnt].val = val;
            A[*cnt].row = globalNumbering[idn];
            A[*cnt].col = globalNumbering[idm];
            A[*cnt].ownerRank = globalOwners[idn];
            (*cnt)++;
          }
        }
      }
    }
  }
}

// Hex analogue of BuildFEMMatrixQuad2D (tensor-product sparsity in 3D).
// NOTE: this function continues beyond the end of this chunk.
void BuildFEMMatrixHex3D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering, int *globalOwners, dlong *cnt, nonZero_t *A) {

  #pragma omp parallel for
  for (dlong e=0;e<femMesh->Nelements;e++) {
    for (int nz=0;nz<femMesh->Nq;nz++) {
      for (int ny=0;ny<femMesh->Nq;ny++) {
        for (int nx=0;nx<femMesh->Nq;nx++) {
          dlong nn = nx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
          dlong idn = localIds[e*femMesh->Np + nn];
          if (globalNumbering[idn]<0) continue; //skip masked nodes
          for (int mz=0;mz<femMesh->Nq;mz++) {
            for (int my=0;my<femMesh->Nq;my++) {
              for (int mx=0;mx<femMesh->Nq;mx++) {
                dlong mm = mx+my*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq;
                dlong idm = localIds[e*femMesh->Np + mm];
                if (globalNumbering[idm]<0) continue; //skip masked nodes

                int id;
                dfloat val = 0.;

                if ((ny==my)&&(nz==mz)) {
                  for (int k=0;k<femMesh->Nq;k++) {
                    id = k+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                    dfloat Grr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G00ID*femMesh->Np];
                    val += Grr*femMesh->D[nx+k*femMesh->Nq]*femMesh->D[mx+k*femMesh->Nq];
                  }
                }

                if (nz==mz) {
                  id = mx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
                  dfloat Grs = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
                  val += Grs*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[my+ny*femMesh->Nq];

                  id =
nx+my*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq; dfloat Gsr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np]; val += Gsr*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[ny+my*femMesh->Nq]; } if (ny==my) { id = mx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq; dfloat Grt = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G02ID*femMesh->Np]; val += Grt*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[mz+nz*femMesh->Nq]; id = nx+ny*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq; dfloat Gst = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G02ID*femMesh->Np]; val += Gst*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[nz+mz*femMesh->Nq]; } if ((nx==mx)&&(nz==mz)) { for (int k=0;k<femMesh->Nq;k++) { id = nx+k*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq; dfloat Gss = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G11ID*femMesh->Np]; val += Gss*femMesh->D[ny+k*femMesh->Nq]*femMesh->D[my+k*femMesh->Nq]; } } if (nx==mx) { id = nx+my*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq; dfloat Gst = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G12ID*femMesh->Np]; val += Gst*femMesh->D[ny+my*femMesh->Nq]*femMesh->D[mz+nz*femMesh->Nq]; id = nx+ny*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq; dfloat Gts = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G12ID*femMesh->Np]; val += Gts*femMesh->D[my+ny*femMesh->Nq]*femMesh->D[nz+mz*femMesh->Nq]; } if ((nx==mx)&&(ny==my)) { for (int k=0;k<femMesh->Nq;k++) { id = nx+ny*femMesh->Nq+k*femMesh->Nq*femMesh->Nq; dfloat Gtt = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G22ID*femMesh->Np]; val += Gtt*femMesh->D[nz+k*femMesh->Nq]*femMesh->D[mz+k*femMesh->Nq]; } } if ((nx==mx)&&(ny==my)&&(nz==mz)) { id = nx + ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq; dfloat JW = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + GWJID*femMesh->Np]; val += JW*lambda; } // pack non-zero dfloat nonZeroThreshold = 1e-7; if (fabs(val) >= nonZeroThreshold) { #pragma omp critical { A[*cnt].val = val; A[*cnt].row = globalNumbering[idn]; A[*cnt].col = globalNumbering[idm]; 
A[*cnt].ownerRank = globalOwners[idn]; (*cnt)++; } } } } } } } } } }
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,8);t1++) { lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16)); ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(16*t2-Nz-4,8)),t1);t3<=min(min(min(floord(Nt+Ny-4,8),floord(8*t1+Ny+13,8)),floord(16*t2+Ny+12,8)),floord(16*t1-16*t2+Nz+Ny+11,8));t3++) { for (t4=max(max(max(0,ceild(t1-255,256)),ceild(16*t2-Nz-2044,2048)),ceild(8*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(8*t1+Nx+13,2048)),floord(16*t2+Nx+12,2048)),floord(8*t3+Nx+4,2048)),floord(16*t1-16*t2+Nz+Nx+11,2048));t4++) { for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),8*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),8*t3+6),2048*t4+2046),16*t1-16*t2+Nz+13);t5++) { for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(2048*t4,t5+1); ubv=min(2048*t4+2047,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") 
#ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
dftcommon.c
// Copyright Naoki Shibata 2010 - 2019.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

// Common DFT plan utilities: parsing/applying butterfly "path" strings,
// plan object teardown, and a process-global, file-backed plan cache
// (planMap) keyed by packed 64-bit descriptors and guarded by an OpenMP lock.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <ctype.h>
#include <inttypes.h>
#include <assert.h>

#if defined(POWER64_UNDEF_USE_EXTERN_INLINES)
// This is a workaround required to cross compile for PPC64 binaries
#include <features.h>
#ifdef __USE_EXTERN_INLINES
#undef __USE_EXTERN_INLINES
#endif
#endif

#include <math.h>

#ifdef _OPENMP
#include <omp.h>
#endif

#include "misc.h"
#include "sleef.h"

#define IMPORT_IS_EXPORT
#include "sleefdft.h"

#include "dispatchparam.h"
#include "dftcommon.h"
#include "common.h"
#include "arraymap.h"

// Magic tags identifying live 1D plan objects (per base type)...
#define MAGIC_FLOAT 0x31415926
#define MAGIC_DOUBLE 0x27182818
#define MAGIC_LONGDOUBLE 0x14142135
#define MAGIC_QUAD 0x33166247

// ...and 2D plan objects.
#define MAGIC2D_FLOAT 0x22360679
#define MAGIC2D_DOUBLE 0x17320508
#define MAGIC2D_LONGDOUBLE 0x26457513
#define MAGIC2D_QUAD 0x36055512

// Human-readable names for the four butterfly execution configurations
// (single/multi-threaded, with/without streaming stores). Index = config id.
const char *configStr[] = { "ST", "ST stream", "MT", "MT stream" };

/* Parse a path string such as "4(MT) 3 2(ST stream)" into parallel arrays
 * path[] (butterfly widths) and config[] (indices into configStr, 0 if no
 * parenthesized config is given).
 *
 * Returns the number of entries parsed, or a negative error code:
 *  -1 non-digit where a width was expected, -2 too many entries,
 *  -3 unknown config name, -4 missing ')', -5 widths do not sum to log2len,
 *  -6 width exceeds MAXBUTWIDTH.
 */
static int parsePathStr(char *p, int *path, int *config, int pathLenMax, int log2len) {
  int pathLen = 0, l2l = 0;
  for(;;) {
    while(*p == ' ') p++;
    if (*p == '\0') break;
    if (!isdigit(*p)) return -1;
    pathLen++;
    if (pathLen >= pathLenMax) return -2;
    int n = 0;
    while(isdigit(*p)) n = n * 10 + *p++ - '0';
    if (n > MAXBUTWIDTH) return -6;
    path[pathLen-1] = n;
    l2l += n;
    config[pathLen-1] = 0;
    if (*p != '(') continue;
    // longest names ("MT stream") have higher indices, so scan from 3 down
    // to avoid matching "MT" as a prefix of "MT stream"... NOTE(review):
    // "ST" (index 0) is still a prefix of "ST stream" (index 1), but the
    // downward scan tries index 1 first, so the longer name wins.
    int c;
    for(c=3;c>=0;c--) if (strncmp(p+1, configStr[c], strlen(configStr[c])) == 0) break;
    if (c == -1) return -3;
    p += strlen(configStr[c]) + 1;
    if (*p != ')') return -4;
    p++;
    config[pathLen-1] = c;
  }
  if (l2l != log2len) return -5;
  return pathLen;
}

/* Apply a user-supplied path string to an existing 1D plan, overwriting
 * bestPath/bestPathConfig/pathLen. On parse failure the plan is left
 * unchanged (an error is printed only in verbose mode).
 */
EXPORT void SleefDFT_setPath(SleefDFT *p, char *pathStr) {
  assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD));

  int path[32], config[32];
  int pathLen = parsePathStr(pathStr, path, config, 31, p->log2len);

  if (pathLen < 0) {
    if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("Error %d in parsing path string : %s\n", pathLen, pathStr);
    return;
  }

  for(uint32_t j = 0;j <= p->log2len;j++) p->bestPath[j] = 0;

  // bestPath is indexed by the remaining level; each entry consumes its
  // width from the current level.
  for(int level = p->log2len, j=0;level > 0 && j < pathLen;) {
    p->bestPath[level] = path[j];
    p->bestPathConfig[level] = config[j];
    level -= path[j];
    j++;
  }

  p->pathLen = 0;
  for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) p->pathLen++;

  if ((p->mode & SLEEF_MODE_VERBOSE) != 0) {
    printf("Set path : ");
    for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) printf("%d(%s) ", p->bestPath[j], configStr[p->bestPathConfig[j]]);
    printf("\n");
  }
}

/* Release every twiddle-factor table owned by the plan and null the
 * per-width table arrays. */
void freeTables(SleefDFT *p) {
  for(int N=1;N<=MAXBUTWIDTH;N++) {
    for(uint32_t level=N;level<=p->log2len;level++) {
      Sleef_free(p->tbl[N][level]);
    }
    free(p->tbl[N]);
    p->tbl[N] = NULL;
  }
}

/* Destroy a plan (1D or 2D). A 2D plan recursively disposes its horizontal
 * and vertical 1D sub-plans (the vertical one only if it is distinct).
 * The magic field is zeroed before free() so a double dispose trips the
 * assert instead of double-freeing.
 */
EXPORT void SleefDFT_dispose(SleefDFT *p) {
  if (p != NULL && (p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE || p->magic == MAGIC2D_LONGDOUBLE || p->magic == MAGIC2D_QUAD)) {
    Sleef_free(p->tBuf);
    SleefDFT_dispose(p->instH);
    if (p->hlen != p->vlen) SleefDFT_dispose(p->instV);
    p->magic = 0;
    free(p);
    return;
  }

  assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD));

  // trivially small plans allocate none of the buffers below
  if (p->log2len <= 1) {
    p->magic = 0;
    free(p);
    return;
  }

  if ((p->mode & SLEEF_MODE_REAL) != 0) {
    Sleef_free(p->rtCoef1);
    Sleef_free(p->rtCoef0);
    p->rtCoef0 = p->rtCoef1 = NULL;
  }

  for(int level = p->log2len;level >= 1;level--) {
    Sleef_free(p->perm[level]);
  }
  free(p->perm);
  p->perm = NULL;

  freeTables(p);

  p->magic = 0;
  free(p);
}

/* Integer log2 via a 16-entry nibble table; returns floor(log2(q)) - ...
 * precisely, position of the highest set bit (0-based). Assumes q != 0. */
uint32_t ilog2(uint32_t q) {
  static const uint32_t tab[] = {0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4};
  uint32_t r = 0,qq;

  if (q & 0xffff0000) r = 16;

  q >>= r;
  qq = q | (q >> 1);
  qq |= (qq >> 2);
  qq = ((qq & 0x10) >> 4) | ((qq & 0x100) >> 7) | ((qq & 0x1000) >> 10);

  return r + tab[qq] * 4 + tab[q >> (tab[qq] * 4)] - 1;
}

// Process-global plan cache state. planMap persists measurement results to
// the plan file (dftPlanFilePath) keyed by CPU architecture (archID).
char *dftPlanFilePath = NULL;
char *archID = NULL;
uint64_t planMode = SLEEF_PLAN_REFERTOENVVAR;
ArrayMap *planMap = NULL;
int planFilePathSet = 0, planFileLoaded = 0;

#ifdef _OPENMP
// Lock serializing all planMap access; lazily created by initPlanMapLock().
omp_lock_t planMapLock;
int planMapLockInitialized = 0;
#endif

// Lazily initialize planMapLock exactly once (the critical section makes
// the check-and-init atomic across OpenMP threads).
static void initPlanMapLock() {
#ifdef _OPENMP
#pragma omp critical
  {
    if (!planMapLockInitialized) {
      planMapLockInitialized = 1;
      omp_init_lock(&planMapLock);
    }
  }
#endif
}

// Drop the in-memory plan cache (does not touch the on-disk file).
static void planMap_clear() {
  if (planMap != NULL) ArrayMap_dispose(planMap);
  planMap = NULL;
}

/* Configure the plan file location, architecture id, and mode flags.
 * path == NULL disables the file; arch == NULL falls back to the CPU id
 * string. SLEEF_PLAN_RESET additionally clears any cached/loaded state.
 * NOTE(review): callers are expected to invoke this before concurrent plan
 * use; this function itself does not take planMapLock — confirm.
 */
EXPORT void SleefDFT_setPlanFilePath(const char *path, const char *arch, uint64_t mode) {
  initPlanMapLock();

  if ((mode & SLEEF_PLAN_RESET) != 0) {
    planMap_clear();
    planFileLoaded = 0;
    planFilePathSet = 0;
  }

  if (dftPlanFilePath != NULL) free(dftPlanFilePath);
  if (path != NULL) {
    dftPlanFilePath = malloc(strlen(path)+10);
    strcpy(dftPlanFilePath, path);
  } else {
    dftPlanFilePath = NULL;
  }

  if (archID != NULL) free(archID);
  if (arch == NULL) arch = Sleef_getCpuIdString();
  archID = malloc(strlen(arch)+10);
  strcpy(archID, arch);

  planMode = mode;
  planFilePathSet = 1;
}

/* Populate planMap, honoring the ENVVAR override and the RESET/NOLOCK mode
 * bits; falls back to an empty map when no file is available.
 * Caller must hold planMapLock. */
static void loadPlanFromFile() {
  if (planFilePathSet == 0 && (planMode & SLEEF_PLAN_REFERTOENVVAR) != 0) {
    char *s = getenv(ENVVAR);
    if (s != NULL) SleefDFT_setPlanFilePath(s, NULL, planMode);
  }

  if (planMap != NULL) ArrayMap_dispose(planMap);

  if (dftPlanFilePath != NULL && (planMode & SLEEF_PLAN_RESET) == 0) {
    planMap = ArrayMap_load(dftPlanFilePath, archID, PLANFILEID, (planMode & SLEEF_PLAN_NOLOCK) == 0);
  }

  if (planMap == NULL) planMap = initArrayMap();
  planFileLoaded = 1;
}

/* Write planMap back to disk unless read-only or no path is configured.
 * Caller must hold planMapLock. */
static void savePlanToFile() {
  assert(planFileLoaded);
  if ((planMode & SLEEF_PLAN_READONLY) == 0 && dftPlanFilePath != NULL) {
    ArrayMap_save(planMap, dftPlanFilePath, archID, PLANFILEID);
  }
}

// Bit widths of the fields packed into 64-bit cache keys (low bits = category).
#define CATBIT 8
#define BASETYPEIDBIT 2
#define LOG2LENBIT 8
#define DIRBIT 1
#define BUTSTATBIT 16

/* Key for a "measurement done" marker (category 0), packing butStat,
 * log2len, direction and base type. */
static uint64_t keyButStat(int baseTypeID, int log2len, int dir, int butStat) {
  dir = (dir & SLEEF_MODE_BACKWARD) == 0; // normalize to 1 = forward
  int cat = 0;
  uint64_t k = 0;
  k = (k << BUTSTATBIT) | (butStat & ~(~(uint64_t)0 << BUTSTATBIT));
  k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT));
  // NOTE(review): the mask below uses LOG2LENBIT where DIRBIT looks intended;
  // harmless since dir is 0/1 after normalization, but worth confirming.
  k = (k << DIRBIT) | (dir & ~(~(uint64_t)0 << LOG2LENBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}

#define LEVELBIT LOG2LENBIT
#define BUTCONFIGBIT 8
#define TRANSCONFIGBIT 8

/* Key for a 2D transpose timing entry (category 2). hlen/vlen are stored as
 * (max, min) so the key is orientation-independent. */
static uint64_t keyTrans(int baseTypeID, int hlen, int vlen, int transConfig) {
  int max = MAX(hlen, vlen), min = MIN(hlen, vlen);
  int cat = 2;
  uint64_t k = 0;
  k = (k << TRANSCONFIGBIT) | (transConfig & ~(~(uint64_t)0 << TRANSCONFIGBIT));
  k = (k << LOG2LENBIT) | (max & ~(~(uint64_t)0 << LOG2LENBIT));
  k = (k << LOG2LENBIT) | (min & ~(~(uint64_t)0 << LOG2LENBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}

/* Key for a per-level best butterfly width entry (category 3). */
static uint64_t keyPath(int baseTypeID, int log2len, int dir, int level, int config) {
  dir = (dir & SLEEF_MODE_BACKWARD) == 0;
  int cat = 3;
  uint64_t k = 0;
  k = (k << BUTCONFIGBIT) | (config & ~(~(uint64_t)0 << BUTCONFIGBIT));
  k = (k << LEVELBIT) | (level & ~(~(uint64_t)0 << LEVELBIT));
  k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT));
  // NOTE(review): same LOG2LENBIT-vs-DIRBIT mask quirk as keyButStat.
  k = (k << DIRBIT) | (dir & ~(~(uint64_t)0 << LOG2LENBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}

/* Key for a per-level best execution config entry (category 4). */
static uint64_t keyPathConfig(int baseTypeID, int log2len, int dir, int level, int config) {
  dir = (dir & SLEEF_MODE_BACKWARD) == 0;
  int cat = 4;
  uint64_t k = 0;
  k = (k << BUTCONFIGBIT) | (config & ~(~(uint64_t)0 << BUTCONFIGBIT));
  k = (k << LEVELBIT) | (level & ~(~(uint64_t)0 << LEVELBIT));
  k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT));
  // NOTE(review): same LOG2LENBIT-vs-DIRBIT mask quirk as keyButStat.
  k = (k << DIRBIT) | (dir & ~(~(uint64_t)0 << LOG2LENBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}

// Read a uint64 stored as a hex string in planMap; 0 means "absent"
// (values of 0 are therefore indistinguishable from missing entries).
static uint64_t planMap_getU64(uint64_t key) {
  char *s = ArrayMap_get(planMap, key);
  if (s == NULL) return 0;
  uint64_t ret;
  if (sscanf(s, "%" SCNx64, &ret) != 1) return 0;
  return ret;
}

// Store a uint64 as a hex string in planMap; frees any value it replaces.
static void planMap_putU64(uint64_t key, uint64_t value) {
  char *s = malloc(100);
  sprintf(s, "%" PRIx64, value);
  s = ArrayMap_put(planMap, key, s);
  if (s != NULL) free(s);
}

/* Restore a previously measured best path for a 1D plan from the plan cache.
 * Returns 1 on success, 0 if nothing was cached (or a cached width was out
 * of range). Thread-safe via planMapLock. */
int PlanManager_loadMeasurementResultsP(SleefDFT *p, int pathCat) {
  assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD));

  initPlanMapLock();

#ifdef _OPENMP
  omp_set_lock(&planMapLock);
#endif
  if (!planFileLoaded) loadPlanFromFile();

  // pathCat+10 is the "results present" marker written by the saver below
  int stat = planMap_getU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10));
  if (stat == 0) {
#ifdef _OPENMP
    omp_unset_lock(&planMapLock);
#endif
    return 0;
  }

  int ret = 1;

  for(int j = p->log2len;j >= 0;j--) {
    p->bestPath[j] = planMap_getU64(keyPath(p->baseTypeID, p->log2len, p->mode, j, pathCat));
    p->bestPathConfig[j] = planMap_getU64(keyPathConfig(p->baseTypeID, p->log2len, p->mode, j, pathCat));
    if (p->bestPath[j] > MAXBUTWIDTH) ret = 0; // reject corrupt entries
  }

  p->pathLen = 0;
  for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) p->pathLen++;

#ifdef _OPENMP
  omp_unset_lock(&planMapLock);
#endif
  return ret;
}

/* Persist the current best path of a 1D plan into the plan cache (no-op if
 * results for this category were already saved). Thread-safe via
 * planMapLock. */
void PlanManager_saveMeasurementResultsP(SleefDFT *p, int pathCat) {
  assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD));

  initPlanMapLock();

#ifdef _OPENMP
  omp_set_lock(&planMapLock);
#endif
  if (!planFileLoaded) loadPlanFromFile();

  // already saved for this category -> keep the existing entries
  if (planMap_getU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10)) != 0) {
#ifdef _OPENMP
    omp_unset_lock(&planMapLock);
#endif
    return;
  }

  for(int j = p->log2len;j >= 0;j--) {
    planMap_putU64(keyPath(p->baseTypeID, p->log2len, p->mode, j, pathCat), p->bestPath[j]);
    planMap_putU64(keyPathConfig(p->baseTypeID, p->log2len, p->mode, j, pathCat), p->bestPathConfig[j]);
  }

  // write the marker last so readers only see complete entries
  planMap_putU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10), 1);

  if ((planMode & SLEEF_PLAN_READONLY) == 0) savePlanToFile();

#ifdef _OPENMP
  omp_unset_lock(&planMapLock);
#endif
}

/* Restore cached transpose timings (no-MT and MT) for a 2D plan. Returns
 * nonzero if a no-MT timing was found. NOTE(review): local 'ret' is unused;
 * the return value is computed directly from p->tmNoMT. */
int PlanManager_loadMeasurementResultsT(SleefDFT *p) {
  assert(p != NULL && (p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE || p->magic == MAGIC2D_LONGDOUBLE || p->magic == MAGIC2D_QUAD));

  initPlanMapLock();

  int ret = 0;

#ifdef _OPENMP
  omp_set_lock(&planMapLock);
#endif
  if (!planFileLoaded) loadPlanFromFile();

  p->tmNoMT = planMap_getU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 0));
  p->tmMT = planMap_getU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 1));

#ifdef _OPENMP
  omp_unset_lock(&planMapLock);
#endif
  return p->tmNoMT != 0;
}

/* Persist the transpose timings of a 2D plan into the plan cache.
 * NOTE(review): local 'ret' is unused. */
void PlanManager_saveMeasurementResultsT(SleefDFT *p) {
  assert(p != NULL && (p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE || p->magic == MAGIC2D_LONGDOUBLE || p->magic == MAGIC2D_QUAD));

  initPlanMapLock();

  int ret = 0;

#ifdef _OPENMP
  omp_set_lock(&planMapLock);
#endif
  if (!planFileLoaded) loadPlanFromFile();

  planMap_putU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 0), p->tmNoMT);
  planMap_putU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 1), p->tmMT );

  if ((planMode & SLEEF_PLAN_READONLY) == 0) savePlanToFile();

#ifdef _OPENMP
  omp_unset_lock(&planMapLock);
#endif
}
distributed_block_vector.h
/* Copyright (c) 2020, VSB - Technical University of Ostrava and Graz University of Technology All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the names of VSB - Technical University of Ostrava and Graz University of Technology nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL VSB - TECHNICAL UNIVERSITY OF OSTRAVA AND GRAZ UNIVERSITY OF TECHNOLOGY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** @file distributed_block_vector.h * @brief Contains a class representing a block vector, i.e. a vector of scalars * partitioned into blocks. The block vector is distributed among MPI ranks. 
* @note updated documentation */ #ifndef INCLUDE_BESTHEA_DISTRIBUTED_BLOCK_VECTOR_H_ #define INCLUDE_BESTHEA_DISTRIBUTED_BLOCK_VECTOR_H_ #include "besthea/full_matrix.h" #include "besthea/settings.h" #include "besthea/vector.h" #include <algorithm> #include <cmath> #include <iostream> #include <limits> #include <mpi.h> #include <vector> namespace besthea { namespace linear_algebra { class distributed_block_vector; } } namespace besthea { namespace mesh { class general_spacetime_cluster; } } namespace besthea { namespace bem { template< class basis_type > class distributed_fast_spacetime_be_space; } } /** * Class representing a distributed block vector, i.e. a vector of scalars * partitioned into blocks, which are distributed among MPI ranks. */ class besthea::linear_algebra::distributed_block_vector { public: using vector_type = besthea::linear_algebra::vector; //!< Vector type. /** * Constructor. */ distributed_block_vector( ); /** * Copy constructor. * @param[in] that Vector to be copied. */ distributed_block_vector( const distributed_block_vector & that ); /** * Constructs a distributed block vector with an initializer list. * All @p n_blocks have the same size and entries as the provided list. The * block vector is duplicated on all MPI ranks of the given communicator. * @param[in] n_blocks Number of blocks. * @param[in] list Initializer list for vector. * @param[in] comm MPI communicator associated with the vector. */ distributed_block_vector( lo n_blocks, std::initializer_list< sc > list, MPI_Comm comm = MPI_COMM_WORLD ); /** * Constructs a distributed block vector with a given number of blocks of * given size. The block vector is duplicated on all MPI ranks of the given * communicator. * @param[in] n_blocks Number of blocks. * @param[in] size Size of each block. * @param[in] zero Initialize to 0 if true. * @param[in] comm MPI communicator associated with the vector. 
*/ distributed_block_vector( lo n_blocks, lo size, bool zero = true, MPI_Comm comm = MPI_COMM_WORLD ); /** * Constructs a distributed block vector with a given number of blocks of * given size. The vector is distributed according to the information in the * vector @p my_blocks. * @param[in] my_blocks Indices of blocks, which are owned by the executing * process. * @param[in] n_blocks Number of blocks. * @param[in] size Size of each block. * @param[in] zero Initialize to 0 if true. * @param[in] comm MPI communicator associated with the vector. */ distributed_block_vector( std::vector< lo > & my_blocks, lo n_blocks, lo size, bool zero = true, MPI_Comm comm = MPI_COMM_WORLD ); ~distributed_block_vector( ); /** * Returns a reference to a single block. * @param[in] d Index of the block. * @warning If the executing process does not own the d-th block the returned * vector is empty. */ vector_type & get_block( lo d ) { return _data[ d ]; } /** * Returns a constant reference to a single block. * @param[in] d Index of the block. * @warning If the executing process does not own the d-th block the returned * vector is empty. */ const vector_type & get_block( lo d ) const { return _data[ d ]; } /** * @brief Returns the i-th element of the d-th block. * @param[in] d Block index. * @param[in] i Element index. * @warning Returns NaN if the d-th block is not owned by the executing * process. */ sc get( lo d, lo i ) const { sc val; if ( _owners[ d ][ 0 ] == _rank ) { val = _data[ d ][ i ]; } else { val = std::numeric_limits< double >::quiet_NaN( ); } return val; } /** * Returns the number of blocks. */ lo get_n_blocks( ) const { return _n_blocks; } /** * Returns the size of a single block */ lo get_size_of_block( ) const { return _size; } /** * Returns the size of the whole block vector, i.e. the total number of * elements. */ lo size( ) const { return _n_blocks * _size; } /** * Resizes the block vector by changing the number of blocks. 
The resulting * block vector is duplicated on all MPI ranks of its communicator. * @param[in] n_blocks New number of blocks. * @warning The existing data is not communicated between the MPI ranks. The * duplication is understood in the sense that all ranks are listed as owners * after resizing the block vector. */ void resize( lo n_blocks ); /** * Resizes the block vector by changing the number of blocks. The owners are * reset using the information in @p my_blocks. * @param[in] my_blocks Indices of blocks, which are owned by the executing * process. The member @p _my_blocks is overwritten with * this vector. * @param[in] n_blocks New number of blocks. */ void resize( std::vector< lo > & my_blocks, lo n_blocks ); /** * Resizes all blocks of the block vector, which are owned by the executing * process. * @param[in] size New size of each block. * @param[in] zero If true, all blocks are filled with zeros. */ void resize_blocks( lo size, bool zero = true ) { lo i = 0; for ( vector_type & v : _data ) { if ( _owners[ i ][ 0 ] == _rank ) { v.resize( size, zero ); } ++i; } _size = size; } /** * @brief Sets the i-th element of the d-th block * @param[in] d Block index. * @param[in] i Element index. * @param[in] value Value to be set. * @warning If the executing process does not own the block nothing happens. */ void set( lo d, lo i, sc value ) { if ( _owners[ d ][ 0 ] == _rank ) { _data[ d ][ i ] = value; } } /** * @brief Adds a value atomically(!) to a single element of a single block. * @param[in] d Block index. * @param[in] i Element index. * @param[in] value Value to be added. * @warning If the executing process does not own the block nothing happens. */ void add_atomic( lo d, lo i, sc value ) { if ( _owners[ d ][ 0 ] == _rank ) { #pragma omp atomic update _data[ d ][ i ] += value; } } /*! * @brief Adds a value to a single element of a single block. * @param[in] d Block index. * @param[in] i Element index. * @param[in] value Value to be added. 
* @warning If the executing process does not own the block nothing happens. */ void add( lo d, lo i, sc value ) { if ( _owners[ d ][ 0 ] == _rank ) { _data[ d ][ i ] += value; } } /*! * Scales all blocks owned by the executing process by a scalar alpha. * @param[in] alpha Scaling factor. */ void scale( sc alpha ) { for ( auto & it : _my_blocks ) { _data[ it ].scale( alpha ); } } /** * Copies data from another distributed block vector. * @param[in] that Vector to be copied. */ void copy( const distributed_block_vector & that ); /*! * @brief Copies data from a raw array. The resulting block vector is * duplicated on all MPI ranks of its communicator. * @param[in] n_blocks Number of blocks. * @param[in] size Size of each block. * @param[in] data Array to copy from. Contains all elements, block by block. * @note If @p n_blocks and @p size are different from the member variables * @p _n_blocks and @p _size, respectively, the block vector is resized * appropriately. * @warning The source array has to contain at least @p n_blocks * @p size * elements. */ void copy_from_raw( lo n_blocks, lo size, const sc * data ); /*! * @brief Fills the distributed vector by copying data from a raw array. Only * the blocks owned by the executing process are filled with the associated * values of the array. * @param[in] my_blocks Indices of blocks, which are owned by the executing * process. The member @p _my_blocks is overwritten with * this vector. * @param[in] n_blocks Number of blocks. * @param[in] size Size of each block. * @param[in] data Array to copy from. Contains all elements, block by block. * @note If @p n_blocks and @p size are different from the member variables * @p _n_blocks and @p _size, respectively, the block vector is resized * appropriately. * @warning The source array has to contain at least @p n_blocks * @p size * elements. * @warning If @p _n_blocks == @p n_blocks then it is assumed that * @p _my_blocks == @p my_blocks, i.e. this information is not updated. 
*/ void copy_from_raw( std::vector< lo > & my_blocks, lo n_blocks, lo size, const sc * data ); /*! * @brief Copies the whole distributed block vector to a raw array. If the * block vector is not duplicated, all blocks are broadcasted by the * respective primary owners and then written to (a local copy of) data by all * ranks. * @param[in,out] data Array to copy to. Is filled with all elements, block by * block. * @warning The array's size has to be at least @p _n_blocks * @p _size. */ void copy_to_raw( sc * data ) const; /*! * @brief Copies data from a raw vector. The resulting block vector is * duplicated on all MPI ranks of its communicator. * @param[in] n_blocks Number of blocks. * @param[in] size Size of each block. * @param[in] data Vector to copy from. Contains all elements, block by block. * @note If @p n_blocks and @p size are different from the member variables * @p _n_blocks and @p _size, respectively, the block vector is resized * appropriately. * @warning The source vector has to contain at least @p n_blocks * @p size * elements. */ void copy_from_vector( lo n_blocks, lo size, const vector_type & data ); /*! * @brief Fills the distributed vector by copying data from a raw vector. Only * the blocks owned by the executing process are filled with the associated * values of the vector. * @param[in] my_blocks Indices of blocks, which are owned by the executing * process. The member @p _my_blocks is overwritten with * this vector. * @param[in] n_blocks Number of blocks. * @param[in] size Size of each block. * @param[in] data Vector to copy from. Contains all elements, block by block. * @note If @p n_blocks and @p size are different from the member variables * @p _n_blocks and @p _size, respectively, the block vector is resized * appropriately. * @warning The source array has to contain at least @p n_blocks * @p size * elements. * @warning If @p _n_blocks == @p n_blocks then it is assumed that * @p _my_blocks == @p my_blocks, i.e. 
this information is not updated. */ void copy_from_vector( std::vector< lo > & my_blocks, lo n_blocks, lo size, const vector_type & data ); /*! * @brief Copies the whole distributed block vector to a raw vector. If the * block vector is not duplicated, all blocks are broadcasted by the * respective primary owners and then written to (a local copy of) data by all * ranks. * @param[in,out] data Vector to copy to. Is filled with all elements, block * by block. * @note The vector is resized. New size is @p _n_blocks * @p _size. */ void copy_to_vector( vector_type & data ) const; /*! * @brief Vector addition: this += alpha * v. * @param[in] v Block vector with the same number and size of blocks. * @param[in] alpha Scaling factor. * @warning The executing process applies the vector addition for a block only * if it owns this block and the corresponding block of @p v. In particular, * only vectors with the same distribution across MPI processes will be summed * up correctly. */ void add( distributed_block_vector const & v, sc alpha = 1.0 ); /*! * @brief Fills all blocks which are owned by the executing process with the * given value. * @param[in] value Value to fill the owned blocks with. */ void fill( sc value ) { for ( lo i = 0; i < _n_blocks; ++i ) { if ( am_i_owner( i ) ) { _data[ i ].fill( value ); } } } /*! * @brief Returns the euclidean dot product. * @param[in] v Second distributed block vector for dot product. * @warning @p v has to have the same dimensions and distribution across MPI * processes, otherwise the result is wrong and the behavior is undefined. */ sc dot( distributed_block_vector const & v ) const; /*! * @brief Returns the Euclidean norm of the vector. * @return Euclidean norm of the vector. */ sc norm( ) const { return std::sqrt( this->dot( *this ) ); } /*! * Synchronizes the distributed block vector via MPI communication. For each * block, its primary owner sends the data to all other owners. */ void synchronize_shared_parts( ); /*! 
* Gets the local part of a distributed block vector corresponding to the dofs * in a spacetime cluster. * @param[in] cluster Cluster determining the local dofs. * @param[in,out] local_vector Local part of block vector. * @tparam space_type @ref besthea::bem::distributed_fast_spacetime_be_space * representing either p0 or p1 basis functions. It * determines the DOFs. * @warning The local vector must have the correct size. * @note The local vector is not a block vector anymore, but a contiguous * vector. * @warning The executing process has to own the blocks corresponding to the * dofs in the spacetime cluster, otherwise the local vector is not filled * correctly. */ template< class space_type > void get_local_part( besthea::mesh::general_spacetime_cluster * cluster, besthea::linear_algebra::vector & local_vector ) const; /*! * Gets the local part of a distributed block vector corresponding to the dofs * in a spacetime cluster and stores it in full matrix format. * @param[in] cluster Cluster determining the local dofs. * @param[in,out] local_part Local part of block vector. * @tparam space_type @ref besthea::bem::distributed_fast_spacetime_be_space * representing either p0 or p1 basis functions. It * determines the DOFs. * @note Rows of the output matrix correspond to time, columns to space. */ template< class space_type > void get_local_part( besthea::mesh::general_spacetime_cluster * cluster, besthea::linear_algebra::full_matrix & local_part ) const; /*! * Adds a local vector to the appropriate positions of a distributed block * vector. The positions are determined by the dofs in a spacetime cluster. * @param[in] cluster Cluster determining the positions in the distributed * blockvector to which the local vector is added. * @param[in] local_vector Local part of block vector to be added. * @tparam space_type @ref besthea::bem::distributed_fast_spacetime_be_space * representing either p0 or p1 basis functions. It * determines the DOFs. 
* @note The entries in the local vector are ordered according to the ordering * of the time elements and spatial dofs in the spacetime cluster (time * step after time step). * @warning The executing process has to own the blocks corresponding to the * dofs in the spacetime cluster, otherwise nothing is added. */ template< class space_type > void add_local_part( const besthea::mesh::general_spacetime_cluster * cluster, const besthea::linear_algebra::vector & local_vector ); /*! * Adds local part stored in full matrix format to the appropriate positions * of a distributed block vector. The positions are determined by the dofs in * a spacetime cluster. * @param[in] cluster Cluster determining the positions in the distributed * blockvector to which the local vector is added. * @param[in] local_part Local part of block vector to be added. It is stored in matrix format, where rows correspond to time and columns to space. * @tparam space_type @ref besthea::bem::distributed_fast_spacetime_be_space * representing either p0 or p1 basis functions. It * determines the DOFs. */ template< class space_type > void add_local_part( const besthea::mesh::general_spacetime_cluster * cluster, const besthea::linear_algebra::full_matrix & local_part ); /*! * @brief Returns reference to the vector of vector of MPI ranks owning * individual blocks. Outer vector corresponds to vector's blocks. * @return std::vector of std::vectors of MPI ranks owning individual blocks. */ const std::vector< std::vector< int > > & get_owners( ) const { return _owners; } /*! * @brief Returns a reference to @ref _my_blocks. */ std::vector< lo > get_my_blocks( ) const { return _my_blocks; } /*! * Indicates if the executing process owns the given block. * @param[in] block_idx Block index. * @return True if the calling process owns the given block. */ bool am_i_owner( lo block_idx ) const { return ( _owners[ block_idx ][ 0 ] == _rank ); } /*! 
* Indicates if the executing process is the primary owner of the given block. * @param[in] block_idx Block index. * @return True if the calling process is the primary owner of the block. */ bool am_i_primary_owner( lo block_idx ) const { return ( get_primary_owner( block_idx ) == _rank ); } /*! * Returns the rank of the primary owner of a block. The primary owner is the * one with the lowest rank. * @param[in] block_idx Block index. * @returns The rank of the primary owner of the block. */ int get_primary_owner( lo block_idx ) const { if ( _owners[ block_idx ].size( ) == 1 ) { return _owners[ block_idx ][ 0 ]; } else { return _owners[ block_idx ][ 0 ] < _owners[ block_idx ][ 1 ] ? _owners[ block_idx ][ 0 ] : _owners[ block_idx ][ 1 ]; } } /*! * Realizes the communication of a block between two processes. Sender and * receiver both call this method. The sender is uniquely defined as the * primary owner of the communicated block. * @param[in] block_idx Index of the communicated block. * @param[in] rank MPI rank of receiver. * @param[out] data Vector to store the received data. */ void communicate_block( lo block_idx, int rank, vector_type & data ) const; /*! * @brief Prints the vector. * @param[in] stream Stream into which the vector is printed. */ void print( std::ostream & stream = std::cout ) const; /*! * @brief Returns the value of @ref _comm. * @todo Can we return a reference instead? */ MPI_Comm get_comm( ) const { return _comm; } /*! * @brief Returns the value of @ref _duplicated. */ bool is_duplicated( ) const { return _duplicated; } protected: /*! * @brief Collects the information about the owners of all blocks. For this * purpose, the information in @p my_blocks is distributed among all MPI * processes. * @param[in] my_blocks Blocks owned by the executing process. * @note Each process updates its copy of @p _owners. */ void communicate_owners( std::vector< lo > & my_blocks ); lo _n_blocks; //!< number of blocks. lo _size; //!< size of each block. 
std::vector< vector_type > _data; //!< raw data std::vector< std::vector< int > > _owners; //!< Structure to identify the owners of the blocks of the vector. //!< @p _owners [i] is a vector containing those MPI ranks who own //!< block i. If the executing rank owns a block, the rank itself //!< is listed at the first position in the vector corresponding //!< to this block. All other ranks are sorted in ascending order. //!< The primary owner is the owner with the lowest rank. std::vector< lo > _my_blocks; //!< List of blocks the rank owns. MPI_Comm _comm; //!< MPI communicator associated with the block vector. int _rank; //!< MPI rank of the executing process. bool _duplicated; //!< Indicates if the vector is duplicated on all MPI //!< processes. }; #endif
GB_unaryop__ainv_int16_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int16_uint64
// op(A') function:  GB_tran__ainv_int16_uint64

// C type:   int16_t
// A type:   uint64_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = -aij

// The GB_* macros below specialize the generic kernel templates (such as
// GB_unaryop_transpose.c, included further down) for this particular
// type/operator combination.  They form the contract with those templates
// and must not be renamed.

// type of the A matrix entries
#define GB_ATYPE \
    uint64_t

// type of the C matrix entries
#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint64_t aij = Ax [pA]

// access the p-th entry of the output array
#define GB_CX(p) Cx [p]

// unary operator: additive inverse (negation)
#define GB_OP(z, x) \
    z = -x ;

// casting: uint64_t -> int16_t
// NOTE(review): for aij values outside the range of int16_t this conversion
// is implementation-defined in C — presumed two's-complement truncation on
// all supported platforms.
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij)): load, cast, negate, store
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = -(int16_t) Ax [p] for p = 0 .. anz-1, elementwise and in
// parallel over nthreads OpenMP threads.  Returns GrB_NO_VALUE when this
// specialization is compiled out via GB_DISABLE, so the caller falls back to
// the generic kernel.
GrB_Info GB_unop__ainv_int16_uint64
(
    int16_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the included template
// GB_unaryop_transpose.c, which is specialized through the GB_* macros
// defined above (GB_PHASE_2_OF_2 selects the numerical phase).
GrB_Info GB_tran__ainv_int16_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
nsgaii.h
#if defined(__posix) || defined(__unix) || defined(__linux) || defined(__APPLE__) // #pragma GCC diagnostic ignored "-Wreorder" // #pragma GCC diagnostic ignored "-Wunused-variable" #pragma GCC diagnostic ignored "-Wformat=" #pragma GCC diagnostic ignored "-Wsign-compare" #endif #ifndef __NSGAII_H__ #define __NSGAII_H__ #include <numeric> #include <utility> #include <limits> #include <algorithm> #include "utils.h" #include "base_ga.h" namespace algorithms { /* Multi-objective genetic algorithm based on NSGA-II Deb, K., Pratap, A., Agarwal, S. and Meyarivan, T.A.M.T., 2002. A fast and elitist multiobjective genetic algorithm: NSGA-II. IEEE transactions on evolutionary computation, 6(2), pp.182-197. http://ieeexplore.ieee.org/document/996017/?reload=true */ template<class Chromosome, class FitnessFunction> class NSGAII : public BaseGA<Chromosome, FitnessFunction> { using BaseGA<Chromosome, FitnessFunction>::BaseGA; using BaseGA<Chromosome, FitnessFunction>::Select; using BaseGA<Chromosome, FitnessFunction>::Reproduce; using BaseGA<Chromosome, FitnessFunction>::fitness_function; using BaseGA<Chromosome, FitnessFunction>::indices; using BaseGA<Chromosome, FitnessFunction>::parents; using BaseGA<Chromosome, FitnessFunction>::offspring; typedef typename BaseGA<Chromosome, FitnessFunction>::Population Population; Population top_front; /* Checks the dominance. 1 if p dominates q -1 if q dominates p 0 if both are non-dominated */ static inline int CheckDominance(const Chromosome &p, const Chromosome &q) { // If either p or q is infeasible if (p.constraints != utils::Approx(q.constraints)) { // Checks for floating point 'equality' return (p.constraints < q.constraints) ? 
1 : -1; } bool p_dominates = false, q_dominates = false; for (int m = 0; m != p.objectives.size(); ++m) { if (p.objectives[m] < q.objectives[m]) { p_dominates = true; } if (p.objectives[m] > q.objectives[m]) { q_dominates = true; } } if (p_dominates && !q_dominates) { return 1; } else if (!p_dominates && q_dominates) { return -1; } return 0; } /* Returns true if p wins the tournament against q, false otherwise. */ inline bool Tournament(const Chromosome &p, const Chromosome &q) override { int domination_flag = CheckDominance(p, q); // If p is better than q in all objectives or has better constraint satisfaction if (domination_flag == 1) { return true; } else if (domination_flag == -1) { return false; } if (p.d > q.d) { return true; } else if (p.d < q.d) { return false; } return utils::random() < 0.5; } void NonDominatedSort(Population &R, std::vector<Population> &F) { for (auto &i : R) { i.S.resize(0); i.S.reserve(R.size() / 2); i.n = 0; } // First front F.resize(1); for (int p = 0; p < R.size(); ++p) { for (int q = p + 1; q != R.size(); ++q) { auto domination_flag = CheckDominance(R[p], R[q]); // If p dominates q if (domination_flag == 1) { R[p].S.push_back(q); ++R[q].n; } // If q dominates p else if (domination_flag == -1) { R[q].S.push_back(p); ++R[p].n; } } if (R[p].n == 0) { R[p].rank = 1; F[0].push_back(std::move(R[p])); } } int i = 0; Population Q; while (1) { Q.resize(0); for (auto &p : F[i]) { for (int q : p.S) { --R[q].n; if (R[q].n == 0) { R[q].rank = i + 2; // +2 because i starts at 0 Q.push_back(R[q]); } } } if (Q.empty()) { break; } F.push_back(std::move(Q)); ++i; } } void CalculateCrowdingDistance(Population &I) { for (auto &i : I) { i.d = 0; } I[0].d = std::numeric_limits<int>::infinity(); I.back().d = std::numeric_limits<int>::infinity(); if (I.size() > 2) { for (int m = 0; m < I[0].objectives.size(); ++m) { std::sort(I.begin(), I.end(), [&m](const auto &i1, const auto &i2) { return i1.objectives[m] < i2.objectives[m]; }); double min = 
I[0].objectives[m], max = I.back().objectives[m], abs_max_min = std::fabs(max - min); if (abs_max_min != utils::Approx(0.0)) { for (int k = 1; k < I.size() - 1; ++k) { I[k].d = (std::fabs(I[k + 1].objectives[m] - I[k - 1].objectives[m]) / abs_max_min); } } } } } void Rank() { int popsize = parents.size(); parents.insert( parents.end(), std::make_move_iterator(offspring.begin()), std::make_move_iterator(offspring.end()) ); std::vector<Population> F; NonDominatedSort(parents, F); parents.resize(0); int i = 0; for (; i < F.size(); ++i) { CalculateCrowdingDistance(F[i]); if (parents.size() + F[i].size() > popsize) { break; } else { parents.insert(parents.end(), F[i].begin(), F[i].end()); } } if (parents.size() < popsize) { std::sort(F[i].begin(), F[i].end(), [](const auto& i1, const auto &i2){ return i1.d > i2.d; }); parents.insert(parents.end(), F[i].begin(), F[i].begin() + (popsize - parents.size())); } top_front = std::move(F[0]); } public: template<class... ChromosomeParams> void Init( int popsize, ChromosomeParams... 
params ) { indices.resize(popsize); std::iota(indices.begin(), indices.end(), 0); parents.reserve(popsize); offspring.reserve(popsize); parents.resize(0); while (popsize-- > 0) { parents.push_back(std::move(Chromosome(params...))); } #pragma omp parallel for for (int i = 0; i < parents.size(); ++i) { fitness_function(parents[i]); } } void Update() { Rank(); Select(); Reproduce(); #pragma omp parallel for for (int i = 0; i < offspring.size(); ++i) { fitness_function(offspring[i]); } } // TODO: Review performance Population TopFront() { std::sort( top_front.begin(), top_front.end(), [](const Chromosome &i1, const Chromosome &i2) { return i1.objectives[0] > i2.objectives[0]; } ); auto duplicates_begin = unique( top_front.begin(), top_front.end(), [](const Chromosome &i1, const Chromosome &i2) { for (int m = 0; m < i1.objectives.size(); ++m) { if (i1.objectives[m] != utils::Approx(i2.objectives[m])) { return false; } } return true; } ); if (duplicates_begin != top_front.end()) { top_front.erase(duplicates_begin, top_front.end()); } return std::move(top_front); } Population TopFront(Population R) { std::vector<Population> F; NonDominatedSort(R, F); top_front = std::move(F[0]); return std::move(TopFront()); } }; } #endif
VerletClusterLists.h
/** * @file VerletClusterLists.h * @author nguyen * @date 14.10.18 */ #pragma once #include <cmath> #include "autopas/cells/FullParticleCell.h" #include "autopas/containers/CompatibleTraversals.h" #include "autopas/containers/ParticleContainer.h" #include "autopas/containers/verletClusterLists/VerletClusterMaths.h" #include "autopas/iterators/ParticleIterator.h" #include "autopas/utils/ArrayMath.h" #include "autopas/utils/inBox.h" namespace autopas { template <class Particle> class VerletClustersTraversalInterface; /** * Particles are divided into clusters. * The VerletClusterLists class uses neighborhood lists for each cluster * to calculate pairwise interactions of particles. * It is optimized for a constant, i.e. particle independent, cutoff radius of * the interaction. * @tparam Particle */ template <class Particle> class VerletClusterLists : public ParticleContainer<Particle, FullParticleCell<Particle>> { /** * the index type to access the particle cells */ typedef VerletClusterMaths::index_t index_t; public: /** * Constructor of the VerletClusterLists class. * The neighbor lists are build using a estimated density. * The box is divided into cuboids with roughly the * same side length. 
* @param boxMin the lower corner of the domain * @param boxMax the upper corner of the domain * @param cutoff the cutoff radius of the interaction * @param skin the skin radius * @param clusterSize size of clusters */ VerletClusterLists(const std::array<double, 3> boxMin, const std::array<double, 3> boxMax, double cutoff, double skin = 0, int clusterSize = 4) : ParticleContainer<Particle, FullParticleCell<Particle>>(boxMin, boxMax, cutoff, skin), _clusterSize(clusterSize), _numClusters(0), _boxMin(boxMin), _boxMax(boxMax), _skin(skin), _cutoff(cutoff), _neighborListIsNewton3(false), _interactionLengthSqr((cutoff + skin) * (cutoff + skin)) { rebuild(false); } ContainerOption getContainerType() override { return ContainerOption::verletClusterLists; } void iteratePairwise(TraversalInterface *traversal) override { AutoPasLog(debug, "Using traversal {}.", utils::StringUtils::to_string(traversal->getTraversalType())); auto *traversalInterface = dynamic_cast<VerletClustersTraversalInterface<Particle> *>(traversal); if (traversalInterface) { traversalInterface->setClusterLists(*this); } else { autopas::utils::ExceptionHandler::exception( "Trying to use a traversal of wrong type in VerletClusterLists::iteratePairwise. 
TraversalID: {}", traversal->getTraversalType()); } traversal->initTraversal(); traversal->traverseParticlePairs(); traversal->endTraversal(); } /** * @copydoc VerletLists::addParticle() */ void addParticle(Particle &p) override { // add particle somewhere, because lists will be rebuild anyways _clusters[0].addParticle(p); } /** * @copydoc VerletLists::addHaloParticle() */ void addHaloParticle(Particle &haloParticle) override { autopas::utils::ExceptionHandler::exception("VerletClusterLists.addHaloParticle not yet implemented."); } bool updateHaloParticle(Particle &haloParticle) override { throw std::runtime_error("not yet implemented"); } /** * @copydoc VerletLists::deleteHaloParticles */ void deleteHaloParticles() override { // quick and dirty: iterate over all particles and delete halo particles // @todo: make this proper for (auto iter = this->begin(IteratorBehavior::haloOnly); iter.isValid(); ++iter) { if (not iter->isOwned()) { iter.deleteCurrentParticle(); } } } /** * @copydoc VerletLists::updateContainer() */ AUTOPAS_WARN_UNUSED_RESULT std::vector<Particle> updateContainer() override { AutoPasLog(debug, "updating container"); // first delete all particles this->deleteHaloParticles(); // next find invalid particles std::vector<Particle> invalidParticles; /// @todo: parallelize for (auto iter = this->begin(IteratorBehavior::ownedOnly); iter.isValid(); ++iter) { if (not utils::inBox(iter->getR(), _boxMin, _boxMax)) { invalidParticles.push_back(*iter); iter.deleteCurrentParticle(); } } return invalidParticles; } bool isContainerUpdateNeeded() override { autopas::utils::ExceptionHandler::exception("VerletClusterLists.isContainerUpdateNeeded not yet implemented"); return false; } TraversalSelectorInfo getTraversalSelectorInfo() override { return TraversalSelectorInfo(_cellsPerDim); } ParticleIteratorWrapper<Particle> begin(IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override { return ParticleIteratorWrapper<Particle>( new 
internal::ParticleIterator<Particle, FullParticleCell<Particle>>(&this->_clusters)); } ParticleIteratorWrapper<Particle> getRegionIterator( const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner, IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override { // @todo implement this if bounding boxes are here autopas::utils::ExceptionHandler::exception("VerletClusterLists.getRegionIterator not yet implemented."); return ParticleIteratorWrapper<Particle>(); } void rebuildNeighborLists(TraversalInterface *traversal) override { rebuild(traversal->getUseNewton3()); } /** * Helper method to iterate over all clusters. * @tparam LoopBody The type of the lambda to execute for all clusters. * @tparam inParallel If the iteration should be executed in parallel or sequential. See traverseClustersParallel() * for thread safety. * @param loopBody The lambda to execute for all clusters. Parameters given are Particle* clusterStart, int * clusterSize, std::vector<Particle*> clusterNeighborList. */ template <bool inParallel, class LoopBody> void traverseClusters(LoopBody &&loopBody) { if (inParallel) { traverseClustersParallel<LoopBody>(std::forward<LoopBody>(loopBody)); } else { traverseClustersSequential<LoopBody>(std::forward<LoopBody>(loopBody)); } } unsigned long getNumParticles() override { unsigned long sum = 0; for (size_t index = 0; index < _clusters.size(); index++) { sum += _clusters[index].numParticles(); } return sum; } /** * Returns the ClusterIndexMap for usage in the traversals of this container. * @return the ClusterIndexMap. */ const auto &getClusterIndexMap() const { return _clusterIndexMap; } /** * Returns the number of clusters in this container. * @return The number of clusters in this container. */ auto getNumClusters() const { return _numClusters; } /** * Returns the neighbor lists of this container. * @return the neighbor lists of this container. 
*/ const auto &getNeighborLists() const { return _neighborLists; } /** * Returns the grid side length of the grids in the container. * @return the grid side length of the grids in the container. */ auto getGridSideLength() const { return _gridSideLength; } /** * Returns the number of grids per dimension on the container. * @return the number of grids per dimension on the container. */ auto getCellsPerDimension() const { return _cellsPerDim; } /** * Returns the 2D grid for the XY-plane of this container that defines the cluster towers. * @return the grids of this container for usage in traversals. */ auto &getGrids() { return _clusters; } /** * Returns the number of particles in each cluster. * @return the number of particles in each cluster. */ auto getClusterSize() const { return _clusterSize; } protected: /** * Helper method to sequentially iterate over all clusters. * @tparam LoopBody The type of the lambda to execute for all clusters. * @param loopBody The lambda to execute for all clusters. Parameters given are Particle* clusterStart, index_t * clusterSize, std::vector<Particle*> clusterNeighborList. */ template <class LoopBody> void traverseClustersSequential(LoopBody &&loopBody) { for (index_t x = 0; x < _cellsPerDim[0]; x++) { for (index_t y = 0; y < _cellsPerDim[1]; y++) { index_t index = VerletClusterMaths::index1D(x, y, _cellsPerDim); auto &grid = _clusters[index]; auto &gridNeighborList = _neighborLists[index]; const index_t numClustersInGrid = grid.numParticles() / _clusterSize; for (index_t clusterInGrid = 0; clusterInGrid < numClustersInGrid; clusterInGrid++) { Particle *iClusterStart = &grid[clusterInGrid * _clusterSize]; auto &clusterNeighborList = gridNeighborList[clusterInGrid]; loopBody(iClusterStart, _clusterSize, clusterNeighborList); } } } } /** * Helper method to iterate over all clusters in parallel. * * It is alwys safe to modify the particles in the cluster that is passed to the given loop body. 
However, when * modifying particles from other clusters, the caller has to make sure that no data races occur. Particles must not * be added or removed during the traversal. * @tparam LoopBody The type of the lambda to execute for all clusters. * @param loopBody The lambda to execute for all clusters. Parameters given are Particle* clusterStart, index_t * clusterSize, std::vector<Particle*> clusterNeighborList. */ template <class LoopBody> void traverseClustersParallel(LoopBody &&loopBody) { const index_t endX = _cellsPerDim[0]; const index_t endY = _cellsPerDim[1]; #if defined(AUTOPAS_OPENMP) // @todo: find sensible chunksize #pragma omp parallel for schedule(dynamic) collapse(2) #endif for (index_t x = 0; x < endX; x++) { for (index_t y = 0; y < endY; y++) { index_t index = VerletClusterMaths::index1D(x, y, _cellsPerDim); auto &grid = _clusters[index]; auto &gridNeighborList = _neighborLists[index]; const index_t numClustersInGrid = grid.numParticles() / _clusterSize; for (index_t clusterInGrid = 0; clusterInGrid < numClustersInGrid; clusterInGrid++) { Particle *iClusterStart = &grid[clusterInGrid * _clusterSize]; auto &clusterNeighborList = gridNeighborList[clusterInGrid]; loopBody(iClusterStart, _clusterSize, clusterNeighborList); } } } } /** * Recalculate grids and clusters, build verlet lists and pad clusters. * @param useNewton3 If the everything should be build using newton 3 or not. 
*/ void rebuild(bool useNewton3) { std::vector<Particle> invalidParticles = collectParticlesAndClearClusters(); auto boxSize = ArrayMath::sub(_boxMax, _boxMin); _gridSideLength = estimateOptimalGridSideLength(invalidParticles.size(), boxSize); _gridSideLengthReciprocal = 1 / _gridSideLength; _cellsPerDim = calculateCellsPerDim(boxSize); // _cellsPerDim[2] is always 1 index_t numCells = _cellsPerDim[0] * _cellsPerDim[1]; // resize to number of grids _clusters.resize(numCells); _neighborLists.resize(numCells); sortParticlesIntoClusters(invalidParticles); // sort by last dimension and reserve space for dummy particles for (auto &cluster : _clusters) { cluster.sortByDim(2); size_t size = cluster.numParticles(); size_t rest = size % _clusterSize; if (rest > 0) cluster.reserve(size + (_clusterSize - rest)); } clearNeighborLists(); _numClusters = buildClusterIndexMap(); updateVerletLists(useNewton3); // fill last cluster with dummy particles, such that each cluster is a multiple of _clusterSize padClusters(); } /** * Takes all particles from all clusters and returns them. Clusters are cleared. * @return All particles in the container. */ std::vector<Particle> collectParticlesAndClearClusters() { std::vector<Particle> invalidParticles; for (auto &cluster : _clusters) { for (auto it = cluster.begin(); it.isValid(); ++it) { invalidParticles.push_back(*it); } cluster.clear(); } return invalidParticles; } /** * Estimates the optimal grid side length. * @param numParticles The number of particles in the container. * @param boxSize The size of the domain. * @return an estimated optimal grid side length. 
*/ virtual double estimateOptimalGridSideLength(size_t numParticles, std::array<double, 3> boxSize) const { double volume = boxSize[0] * boxSize[1] * boxSize[2]; if (numParticles > 0) { // estimate particle density double density = numParticles / volume; return std::cbrt(_clusterSize / density); } else { return std::max(boxSize[0], boxSize[1]); } } /** * Calculates the cells per dimension in the container using the _gridSideLengthReciprocal. * @param boxSize the size of the domain. * @return the cells per dimension in the container. */ std::array<index_t, 3> calculateCellsPerDim(std::array<double, 3> boxSize) const { std::array<index_t, 3> cellsPerDim{}; for (int d = 0; d < 2; d++) { cellsPerDim[d] = static_cast<index_t>(std::ceil(boxSize[d] * _gridSideLengthReciprocal)); // at least one cell cellsPerDim[d] = std::max(cellsPerDim[d], 1ul); } cellsPerDim[2] = 1ul; return cellsPerDim; } /** * Sorts all passed particles in the appropriate clusters. * @param particles The particles to sort in the clusters. */ void sortParticlesIntoClusters(std::vector<Particle> &particles) { for (auto &particle : particles) { if (utils::inBox(particle.getR(), _boxMin, _boxMax)) { auto index = get1DIndexOfPosition(particle.getR()); _clusters[index].addParticle(particle); } } } /** * Clears all neighbor lists. */ void clearNeighborLists() { for (auto &verlet : _neighborLists) { verlet.clear(); } } /** * Update the verlet lists. * * @param useNewton3 If newton 3 should be used to build the neighbor lists or not. If true, only saves neighbor * clusters that have a higher index that the current cluster. 
   * (@see buildClusterIndexMap())
   */
  void updateVerletLists(bool useNewton3) {
    _neighborListIsNewton3 = useNewton3;

    // Number of grid columns that have to be searched in each direction so that
    // the interaction length (_cutoff + _skin) is covered.
    const int boxRange = static_cast<int>(std::ceil((_cutoff + _skin) * _gridSideLengthReciprocal));

    const int gridMaxX = _cellsPerDim[0] - 1;
    const int gridMaxY = _cellsPerDim[1] - 1;
    // for all grids
    for (int yi = 0; yi <= gridMaxY; yi++) {
      for (int xi = 0; xi <= gridMaxX; xi++) {
        auto &iGrid = _clusters[VerletClusterMaths::index1D(xi, yi, _cellsPerDim)];
        // calculate number of full clusters and rest
        index_t iSize = iGrid.numParticles() / _clusterSize;
        int iRest = iGrid.numParticles() % _clusterSize;

        // Clamp the search window to the grid bounds.
        const int minX = std::max(xi - boxRange, 0);
        const int minY = std::max(yi - boxRange, 0);
        const int maxX = std::min(xi + boxRange, gridMaxX);
        const int maxY = std::min(yi + boxRange, gridMaxY);

        auto &iNeighbors = _neighborLists[VerletClusterMaths::index1D(xi, yi, _cellsPerDim)];
        // One neighbor list per cluster; the partially filled last cluster (if any)
        // gets its own entry.
        if (iRest > 0)
          iNeighbors.resize(iSize + 1);
        else
          iNeighbors.resize(iSize);

        addClustersOfNeighborGridsAsNeighborsIfInRange(iGrid, iSize, iRest, iNeighbors, minX, maxX, minY, maxY, xi, yi);
      }
    }
  }

  /**
   * Iterates over neighbor grids of the i-th grid and adds all clusters in them that are within the cutoff radius to
   * the neighbor list of the clusters in the i-th grid.
   * @param iGrid The i-th grid.
   * @param iSize The number of full clusters in the i-th grid.
   * @param iRest If the last cluster is not full: The number of particles in the last cluster. 0 otherwise.
   * @param iNeighbors The neighbor list of the i-th grid.
   * @param minX
   * @param maxX
   * @param minY
   * @param maxY
   * @param xi The x-index of the i-th grid.
   * @param yi the y-index of the i-th grid.
   */
  void addClustersOfNeighborGridsAsNeighborsIfInRange(FullParticleCell<Particle> &iGrid, index_t iSize, int iRest,
                                                      std::vector<std::vector<Particle *>> &iNeighbors, const int minX,
                                                      const int maxX, const int minY, const int maxY, const int xi,
                                                      const int yi) {
    // for all neighbor grids
    for (int yj = minY; yj <= maxY; yj++) {
      // Lower bound on the distance between the two grid columns: zero for the
      // same or adjacent columns, otherwise (columns between them) * side length.
      double distY = std::max(0, std::abs(yi - yj) - 1) * _gridSideLength;
      for (int xj = minX; xj <= maxX; xj++) {
        double distX = std::max(0, std::abs(xi - xj) - 1) * _gridSideLength;

        // calculate distance in xy-plane and skip if already longer than cutoff
        double distXYsqr = distX * distX + distY * distY;
        if (distXYsqr <= _interactionLengthSqr) {
          auto &jGrid = _clusters[VerletClusterMaths::index1D(xj, yj, _cellsPerDim)];
          // calculate number of full clusters and rest
          const index_t jSize = jGrid.numParticles() / _clusterSize;
          const int jRest = jGrid.numParticles() % _clusterSize;

          // for all clusters in the i-th grid
          for (index_t zi = 0; zi < iSize; zi++) {
            addAllJClustersAsNeighborIfInRange(iGrid, zi, _clusterSize, iNeighbors, jGrid, jSize, jRest, distXYsqr);
          }

          // special case: last cluster of iGrid not full
          if (iRest > 0) {
            addAllJClustersAsNeighborIfInRange(iGrid, iSize, iRest, iNeighbors, jGrid, jSize, jRest, distXYsqr);
          }
        }
      }
    }
  }

  /**
   * Adds all clusters in jGrid that are within the cutoff radius to the neighbor list of the given cluster in iGrid
   * (iClusterIndex).
   * @param iGrid The i-th grid.
   * @param iClusterIndex The index of the cluster to work on in the i-th grid.
   * @param iClusterSize The size of the cluster with index iClusterIndex in the i-th grid.
   * @param iNeighbors The neighbor list of the i-th grid.
   * @param jGrid The j-th grid.
   * @param jSize The number of full clusters in the j-th grid.
   * @param jRest If the last cluster is not full: The number of particles in the last cluster. 0 otherwise.
   * @param distXYsqr The squared distance between the i-th grid and the j-th grid in the xy-plane.
   */
  void addAllJClustersAsNeighborIfInRange(FullParticleCell<Particle> &iGrid, index_t iClusterIndex, int iClusterSize,
                                          std::vector<std::vector<Particle *>> &iNeighbors,
                                          FullParticleCell<Particle> &jGrid, index_t jSize, int jRest,
                                          double distXYsqr) {
    // bbox in z of iGrid; the cluster is sorted by z (see rebuild), so the first
    // and last particle bound the cluster in z.
    double iBBoxBot = iGrid[iClusterIndex * _clusterSize].getR()[2];
    double iBBoxTop = iGrid[iClusterIndex * _clusterSize + iClusterSize - 1].getR()[2];
    auto &iClusterNeighborList = iNeighbors[iClusterIndex];
    Particle *iClusterStart = &iGrid[iClusterIndex * _clusterSize];

    // iterate over full clusters of j-th grid.
    for (index_t jClusterIndex = 0; jClusterIndex < jSize; jClusterIndex++) {
      Particle *jClusterStart = &jGrid[jClusterIndex * _clusterSize];
      // If newton 3 is used, only add clusters as neighbors that have an equal or higher index. Skip otherwise.
      if (_neighborListIsNewton3 and _clusterIndexMap.at(iClusterStart) > _clusterIndexMap.at(jClusterStart)) continue;
      addJClusterAsNeighborIfInRange(jGrid, jClusterStart, _clusterSize, iClusterNeighborList, distXYsqr, iBBoxBot,
                                     iBBoxTop);
    }

    // special case: last cluster not full
    if (jRest > 0) {
      Particle *jClusterStart = &jGrid[jSize * _clusterSize];
      // If newton 3 is used, only add clusters as neighbors that have an equal or higher index. Skip otherwise.
      if (not(_neighborListIsNewton3 and _clusterIndexMap.at(iClusterStart) > _clusterIndexMap.at(jClusterStart))) {
        addJClusterAsNeighborIfInRange(jGrid, jClusterStart, jRest, iClusterNeighborList, distXYsqr, iBBoxBot,
                                       iBBoxTop);
      }
    }
  }

  /**
   * Adds the given cluster in jGrid to the given neighbor list (iClusterNeighborList), if it is within the cutoff
   * radius.
   * @param jGrid The j-th grid.
   * @param jClusterStart A pointer to the start of the cluster to work on in the j-th grid.
   * @param jClusterSize The size of the cluster to work on in the j-th grid.
   * @param iClusterNeighborList The neighbor list of the cluster in the i-th grid to fill the neighbors for.
   * @param distXYsqr The squared distance between the i-th grid and the j-th grid in the xy-plane.
   * @param iBBoxBot The bottom z-coordinate of the cluster in the i-th grid.
   * @param iBBoxTop The top z-coordinate of the cluster in the i-th grid.
   */
  void addJClusterAsNeighborIfInRange(FullParticleCell<Particle> &jGrid, Particle *jClusterStart, int jClusterSize,
                                      std::vector<Particle *> &iClusterNeighborList, double distXYsqr, double iBBoxBot,
                                      double iBBoxTop) {
    // bbox in z of jGrid; particles in a cluster are sorted by z, so the first
    // and last particle bound the cluster.
    double jBBoxBot = jClusterStart->getR()[2];
    double jBBoxTop = (jClusterStart + (jClusterSize - 1))->getR()[2];

    double distZ = bboxDistance(iBBoxBot, iBBoxTop, jBBoxBot, jBBoxTop);
    if (distXYsqr + distZ * distZ <= _interactionLengthSqr) {
      iClusterNeighborList.push_back(jClusterStart);
    }
  }

  /**
   * Pad clusters with dummy particles until each cluster is a multiple of _clusterSize.
   * Useful for SIMD vectorization.
   */
  void padClusters() {
    for (index_t x = 0; x < _cellsPerDim[0]; x++) {
      for (index_t y = 0; y < _cellsPerDim[1]; y++) {
        auto &grid = _clusters[VerletClusterMaths::index1D(x, y, _cellsPerDim)];
        index_t rest = grid.numParticles() % _clusterSize;
        if (rest > 0) {
          for (int i = rest; i < _clusterSize; i++) {
            Particle p = Particle();
            // Dummies are placed above _boxMax[2] and spread out so they are
            // outside the domain and do not coincide with each other.
            p.setR({2 * x * _cutoff, 2 * y * _cutoff, 2 * _boxMax[2] + 2 * i * _cutoff});
            grid.addParticle(p);
          }
        }
      }
    }
  }

  /**
   * Calculates the distance of two bounding boxes in one dimension.
   * Returns 0 if the boxes overlap.
   * @param min1 minimum coordinate of first bbox in tested dimension
   * @param max1 maximum coordinate of first bbox in tested dimension
   * @param min2 minimum coordinate of second bbox in tested dimension
   * @param max2 maximum coordinate of second bbox in tested dimension
   * @return distance
   */
  inline double bboxDistance(const double min1, const double max1, const double min2, const double max2) const {
    if (max1 < min2) {
      return min2 - max1;
    } else if (min1 > max2) {
      return min1 - max2;
    } else {
      return 0;
    }
  }

  /**
   * Gets the 1d grid index containing a particle in given position.
   * @param pos the position of the particle
   * @return the index of the grid
   */
  inline index_t get1DIndexOfPosition(const std::array<double, 3> &pos) const {
    std::array<index_t, 2> cellIndex{};

    for (int dim = 0; dim < 2; dim++) {
      // NOTE(review): the "+ 1l" shifts every index one cell in this dimension
      // before clamping — presumably a halo-cell offset; confirm it matches
      // VerletClusterMaths::index1D's expectations.
      const long int value = (static_cast<long int>(floor((pos[dim] - _boxMin[dim]) * _gridSideLengthReciprocal))) + 1l;
      const index_t nonnegativeValue = static_cast<index_t>(std::max(value, 0l));
      const index_t nonLargerValue = std::min(nonnegativeValue, _cellsPerDim[dim] - 1);
      cellIndex[dim] = nonLargerValue;
      /// @todo this is a sanity check to prevent doubling of particles, but
      /// could be done better! e.g. by border and flag manager
      if (pos[dim] >= _boxMax[dim]) {
        cellIndex[dim] = _cellsPerDim[dim] - 1;
      } else if (pos[dim] < _boxMin[dim]) {
        cellIndex[dim] = 0;
      }
    }

    return VerletClusterMaths::index1D(cellIndex[0], cellIndex[1], _cellsPerDim);
  }

  /**
   * Builds the _clusterIndexMap to be up to date with _clusters.
   *
   * Every cluster gets an index assigned. The indices are given in a way so that the VerletClustersColoringTraversal
   * works as easy as possible with newton 3. The newton 3 neighbor list just has to only save neighbors with a higher
   * index, and there will be no data races.
   *
   * For each cluster now holds (with x-axis as left <=> right, y-axis <=> as top <=> bottom):
   * - The indices of all clusters of the three color cells above and the color cell to the left are lower.
   * - The indices of all clusters of the three color cells below and the color cell to the right are higher.
   * - For all grids of the same color cell holds:
   *   - The indices of all clusters of the three grids above and the grids to the left are lower.
   *   - The indices of all clusters of the three grids below and the grids to the right are higher.
   *   - For all clusters in the same grid holds:
   *     - The indices of all clusters with a lower z-coordinate than the current cluster are lower.
   *     - The indices of all clusters with a higher z-coordinate than the current cluster are higher.
   *
   * @return The number of clusters in the container.
   */
  index_t buildClusterIndexMap() {
    index_t nextFreeMapIndex = 0;

    // A coloring cell spans enough grids to cover the interaction length, so
    // same-colored cells never interact with each other.
    int gridsPerColoringCell = static_cast<int>(std::ceil((_cutoff + _skin) / _gridSideLength));
    std::array<unsigned long, 3> coloringCellsPerDim{};
    for (int i = 0; i < 3; i++) {
      coloringCellsPerDim[i] =
          static_cast<unsigned long>(std::ceil(_cellsPerDim[i] / static_cast<double>(gridsPerColoringCell)));
    }

    // Assign indices color cell by color cell, row-major (see the ordering
    // guarantees documented above).
    for (unsigned long yColorCell = 0; yColorCell < coloringCellsPerDim[1]; yColorCell++) {
      for (unsigned long xColorCell = 0; xColorCell < coloringCellsPerDim[0]; xColorCell++) {
        nextFreeMapIndex = indexColorCell(xColorCell, yColorCell, gridsPerColoringCell, nextFreeMapIndex);
      }
    }

    return nextFreeMapIndex;
  }

 private:
  /**
   * Indexes all clusters of one color cell (inserts value into _clusterIndexMap) starting with currentMapIndex.
   *
   * The scheme follows the documentation from buildClusterIndexMap().
   * @param xColorCell The x coordinate of the color cell.
   * @param yColorCell The y coordinate of the color cell.
   * @param gridsPerColoringCell The number of grids in x and y dimension of this color cell.
   * @param currentMapIndex The first index to use.
   * @return The next available index after this cell.
   */
  index_t indexColorCell(unsigned long xColorCell, unsigned long yColorCell, int gridsPerColoringCell,
                         index_t currentMapIndex) {
    for (int yInner = 0; yInner < gridsPerColoringCell; yInner++) {
      for (int xInner = 0; xInner < gridsPerColoringCell; xInner++) {
        unsigned long y = yColorCell * gridsPerColoringCell + yInner;
        unsigned long x = xColorCell * gridsPerColoringCell + xInner;

        // Not every coloring cell has to have gridsPerColoringCell grids in every direction.
        if (x >= _cellsPerDim[0] or y >= _cellsPerDim[1]) {
          continue;
        }
        unsigned long gridIndex1D = VerletClusterMaths::index1D(x, y, _cellsPerDim);

        auto &currentGrid = _clusters[gridIndex1D];
        // Count clusters including a possibly partially filled last one.
        auto numClusters = currentGrid.numParticles() / _clusterSize;
        int rest = currentGrid.numParticles() % _clusterSize;
        if (rest > 0) numClusters++;
        for (unsigned long currentCluster = 0; currentCluster < numClusters; currentCluster++) {
          // Clusters are keyed by the address of their first particle.
          Particle *clusterStart = &currentGrid[currentCluster * _clusterSize];
          _clusterIndexMap[clusterStart] = currentMapIndex++;
        }
      }
    }
    return currentMapIndex;
  }

 private:
  /**
   * Neighbors of clusters for each grid. If it uses newton 3 is saved in _neighborListIsNewton3.
   * If it uses newton 3: Only the neighbor clusters that have a higher index are saved. (@see _clusterIndexMap)
   */
  std::vector<std::vector<std::vector<Particle *>>> _neighborLists;

  /**
   * internal storage, particles are split into a grid in xy-dimension
   */
  std::vector<FullParticleCell<Particle>> _clusters;

  /**
   * The number of particles in a full cluster.
   */
  int _clusterSize;

  /**
   * The number of clusters. This is not equal to _clusters.size(), as every grid might contain multiple clusters.
   */
  index_t _numClusters;

  /**
   * Box min of the domain.
   */
  std::array<double, 3> _boxMin;

  /**
   * Box max of the domain.
   */
  std::array<double, 3> _boxMax;

  /**
   * Side length of xy-grid.
   */
  double _gridSideLength{0.};

  /**
   * Reciprocal of _gridSideLength.
   */
  double _gridSideLengthReciprocal{0.};

  /**
   * Dimensions of the 2D xy-grid.
   */
  std::array<index_t, 3> _cellsPerDim{};

  /**
   * The skin radius.
   */
  double _skin;

  /**
   * The cutoff.
   */
  double _cutoff;

  /**
   * Specifies if the neighbor list uses newton 3 or not.
   */
  bool _neighborListIsNewton3;

  /**
   * Maps indices to the starting pointers for each cluster. For the idea behind the assignment, @see
   * buildClusterIndexMap().
   */
  std::unordered_map<Particle *, index_t> _clusterIndexMap;

  /**
   * (_cutoff + _skin)^2.
   */
  double _interactionLengthSqr;
};

}  // namespace autopas
3302.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"


/* Array initialization.
   Fills A with the deterministic pattern (i + j) / nj so runs are
   reproducible and results can be compared across variants. */
static
void init_array (int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
	A[i][j] = ((DATA_TYPE) (i + j) / nj);
      }
}


/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++) {
      fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
      /* Line break every 20 elements to keep the dump readable. */
      if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
    }
  fprintf(stderr, "\n");
}


/* Main computational kernel: 3x3 stencil over the interior of A, writing B.
   The whole function will be timed, including the call and return.
   NOTE(review): num_threads(1) pins the parallel region to a single thread,
   and schedule(static, 1) with collapse(2) interleaves iterations —
   presumably a deliberate baseline/benchmark configuration; confirm before
   changing. */
static
void kernel_conv2d(int ni, int nj,
		   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
		   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

#pragma scop
  #pragma omp parallel for private(j) collapse(2) schedule(static, 1) num_threads(1)
  for (i = 1; i < _PB_NI - 1; ++i) {
    for (j = 1; j < _PB_NJ - 1; ++j) {
      /* Fixed 3x3 convolution coefficients; border rows/columns are left
         untouched. */
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
	      + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
	      + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
}


int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
main.c
#include <omp.h>
#include <stdio.h>
#include <stdint.h>

#define MAX_N 16

/*
 * Board blockages, row-major: BLOCK[row*N + col] == '*' marks a square no
 * queen may occupy. One extra byte so the terminating NUL that scanf("%s")
 * writes after the last row does not overflow the array when N == MAX_N.
 */
char BLOCK[MAX_N*MAX_N + 1];

/*
 * Branch-free absolute value (two's complement).
 * Renamed from `abs` (and made static) so it cannot clash with the C standard
 * library function of the same name, and so a definition is always emitted
 * regardless of the C99 `inline` linkage rules.
 */
static inline int iabs(int x) {
    // mask of sign bit: all ones if x < 0, all zeros otherwise
    uint32_t y = x >> 31;
    // toggle all bits if x is negative (one's complement)
    x ^= y;
    // add 1 if x was negative (completes the two's-complement negation)
    x += y & 1;
    return x;
}

/*
 * Returns 1 if a queen may be placed at (row, col), given the queens already
 * placed in rows 0..row-1 (board[i] holds the column of the queen in row i);
 * 0 on a blockage, column conflict, or diagonal conflict.
 */
int place(int board[], int N, int row, int col) {
    if (BLOCK[row*N + col] == '*') {
        // blocked square
        return 0;
    }
    for (int i = 0; i < row; i++) {
        if (board[i] == col) {
            // column conflict
            return 0;
        } else if (iabs(board[i] - col) == iabs(i - row)) {
            // diagonal conflict
            return 0;
        }
    }
    return 1;
}

/*
 * Sequential backtracking from row r downward.
 * Returns `count` plus the number of complete placements found.
 */
int _queen(int board[], int r, int N, int count) {
    for (int c = 0; c < N; c++) {
        if (place(board, N, r, c)) {
            board[r] = c;
            if (r == N-1) {
                // all N queens placed
                count++;
            } else {
                count = _queen(board, r+1, N, count);
            }
        }
    }
    return count;
}

/*
 * Counts the placements of N non-attacking queens on the N x N board
 * described by BLOCK. The first three rows are enumerated by a collapsed
 * OpenMP loop to expose parallelism; deeper rows use sequential backtracking.
 */
int queen(int N) {
    int board[MAX_N] = {0};

    // Fix: the parallel version below fixes queens in rows 0..2, so boards
    // smaller than 3x3 must fall back to the plain sequential search
    // (the collapsed loop reported 0 for N == 1 and N == 2).
    if (N < 3) {
        return _queen(board, 0, N, 0);
    }

    int count = 0;
    #pragma omp parallel for collapse(3) \
        firstprivate(board) \
        reduction(+ : count) \
        schedule(dynamic)
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            for (int k = 0; k < N; k++) {
                // Re-validate from row 0: each (i, j, k) triple starts from a
                // fresh thread-private copy of the board.
                if (!place(board, N, 0, i)) { continue; }
                board[0] = i;
                if (!place(board, N, 1, j)) { continue; }
                board[1] = j;
                if (!place(board, N, 2, k)) { continue; }
                board[2] = k;
                count += _queen(board, 3, N, 0);
            }
        }
    }
    return count;
}

int main(void) {
    int N, n_case = 0;
    // Fix: check for a successful conversion (== 1) instead of != EOF, which
    // would loop forever on non-numeric input.
    while (scanf("%d", &N) == 1) {
        if (N < 0 || N > MAX_N) {
            // malformed board size; stop rather than overflow BLOCK
            break;
        }
        n_case++;
        for (int i = 0; i < N*N; i += N) {
            if (scanf("%s", &BLOCK[i]) != 1) {
                // truncated input
                return 1;
            }
        }
        printf("Case %d: %d\n", n_case, queen(N));
    }
    return 0;
}
zgels.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> s d c * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" #include "plasma_workspace.h" /***************************************************************************//** * * @ingroup plasma_gels * * Solves overdetermined or underdetermined linear systems involving an m-by-n * matrix A, or its conjugate-transpose, using a QR or LQ factorization of A. * It is assumed that A has full rank. The following options are provided: * * # trans = PlasmaNoTrans and m >= n: find the least squares solution of an * overdetermined system, i.e., solve the least squares problem: * minimize || B - A*X ||. * * # trans = PlasmaNoTrans and m < n: find the minimum norm solution of an * underdetermined system A * X = B. * * # trans = Plasma_ConjTrans and m >= n: find the minimum norm solution of an * underdetermined system A^H * X = B. * * # trans = Plasma_ConjTrans and m < n: find the least squares solution of an * overdetermined system, i.e., solve the least squares problem: * minimize || B - A^H*X ||. * * Several right-hand side vectors B and solution vectors X can be handled in a * single call; they are stored as the columns of the m-by-nrhs right-hand side * matrix B and the n-by-nrhs solution matrix X. * ******************************************************************************* * * @param[in] trans * - PlasmaNoTrans: the linear system involves A * - Plasma_ConjTrans: the linear system involves A^H * * @param[in] m * The number of rows of the matrix A. m >= 0. * * @param[in] n * The number of columns of the matrix A. n >= 0. * * @param[in] nrhs * The number of right hand sides, i.e., the number of columns of the * matrices B and X. nrhs >= 0. 
* * @param[in,out] pA * On entry, pointer to the m-by-n matrix A. * On exit, * if m >= n, A is overwritten by details of its QR factorization as * returned by plasma_zgeqrf; * if m < n, A is overwritten by details of its LQ factorization as * returned by plasma_zgelqf. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,m). * * @param[out] T * On exit, auxiliary factorization data. * Matrix of T is allocated inside this function and needs to be * destroyed by plasma_desc_destroy. * * @param[in,out] pB * On entry, pointer to the m-by-nrhs matrix B of right-hand side * vectors, stored columnwise; * On exit, if return value = 0, B is overwritten by the solution * vectors, stored columnwise: * if trans = PlasmaNoTrans and m >= n, rows 1 to n of B contain the * least squares solution vectors; the residual sum of squares * for the solution in each column is given by the sum of * squares of the modulus of elements n+1 to m in that column; * if trans = PlasmaNoTrans and m < n, rows 1 to n of B contain the * minimum norm solution vectors; * if trans = Plasma_ConjTrans and m >= n, rows 1 to m of B contain the * minimum norm solution vectors; * if trans = Plasma_ConjTrans and m < n, rows 1 to m of B contain the * least squares solution vectors; the residual sum of squares * for the solution in each column is given by the sum of * squares of the modulus of elements M+1 to N in that column. * * @param[in] ldb * The leading dimension of the array B. ldb >= max(1,m,n). 
* ******************************************************************************* * * @retval PlasmaSuccess successful exit * @retval < 0 if -i, the i-th argument had an illegal value * ******************************************************************************* * * @sa plasma_omp_zgels * @sa plasma_cgels * @sa plasma_dgels * @sa plasma_sgels * @sa plasma_zgeqrf * @sa plasma_zgeqrs * ******************************************************************************/ int plasma_zgels(plasma_enum_t trans, int m, int n, int nrhs, plasma_complex64_t *pA, int lda, plasma_desc_t *T, plasma_complex64_t *pB, int ldb) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if ((trans != PlasmaNoTrans) && (trans != Plasma_ConjTrans)) { plasma_error("illegal value of trans"); return PlasmaErrorIllegalValue; } if (m < 0) { plasma_error("illegal value of m"); return -2; } if (n < 0) { plasma_error("illegal value of n"); return -3; } if (nrhs < 0) { plasma_error("illegal value of nrhs"); return -4; } if (lda < imax(1, m)) { plasma_error("illegal value of lda"); return -6; } if (ldb < imax(1, imax(m, n))) { plasma_error("illegal value of ldb"); return -9; } // quick return if (imin(m, imin(n, nrhs)) == 0) { for (int i = 0; i < imax(m, n); i++) for (int j = 0; j < nrhs; j++) pB[j*ldb+i] = 0.0; return PlasmaSuccess; } // Tune parameters. if (plasma->tuning) { if (m < n) plasma_tune_gelqf(plasma, PlasmaComplexDouble, m, n); else plasma_tune_geqrf(plasma, PlasmaComplexDouble, m, n); } // Set tiling parameters. int ib = plasma->ib; int nb = plasma->nb; plasma_enum_t householder_mode = plasma->householder_mode; // Create tile matrices. 
plasma_desc_t A; plasma_desc_t B; int retval; retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, m, n, 0, 0, m, n, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, imax(m, n), nrhs, 0, 0, imax(m, n), nrhs, &B); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); return retval; } // Prepare descriptor T. retval = plasma_descT_create(A, ib, householder_mode, T); if (retval != PlasmaSuccess) { plasma_error("plasma_descT_create() failed"); return retval; } // Allocate workspace. plasma_workspace_t work; size_t lwork = nb + ib*nb; // geqrt/gelqt: tau + work retval = plasma_workspace_create(&work, lwork, PlasmaComplexDouble); if (retval != PlasmaSuccess) { plasma_error("plasma_workspace_create() failed"); return retval; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_zge2desc(pA, lda, A, &sequence, &request); plasma_omp_zge2desc(pB, ldb, B, &sequence, &request); // Call the tile async function. plasma_omp_zgels(trans, A, *T, B, work, &sequence, &request); // Translate back to LAPACK layout. plasma_omp_zdesc2ge(A, pA, lda, &sequence, &request); plasma_omp_zdesc2ge(B, pB, ldb, &sequence, &request); } // implicit synchronization plasma_workspace_destroy(&work); // Free matrices in tile layout. plasma_desc_destroy(&A); plasma_desc_destroy(&B); // Return status. int status = sequence.status; return status; } /***************************************************************************//** * * @ingroup plasma_gels * * Solves overdetermined or underdetermined linear * system of equations using the tile QR or the tile LQ factorization. 
* May return before the computation is finished. * Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in] trans * - PlasmaNoTrans: the linear system involves A * - Plasma_ConjTrans: the linear system involves A^H * * @param[in,out] A * Descriptor of matrix A stored in the tile layout. * On exit, * if m >= n, A is overwritten by details of its QR factorization * as returned by plasma_zgeqrf; * if m < n, A is overwritten by details of its LQ factorization * as returned by plasma_zgelqf. * * @param[out] T * Descriptor of matrix T. * Auxiliary factorization data, computed by * plasma_zgeqrf or plasma_zgelqf. * * @param[in,out] B * Descriptor of matrix B. * On entry, right-hand side matrix B in the tile layout. * On exit, solution matrix X in the tile layout. * * @param[in] work * Workspace for the auxiliary arrays needed by some coreblas kernels. * For QR/LQ factorizations used in GELS, it contains preallocated * space for tau and work arrays. * Allocated by the plasma_workspace_create function. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. 
* ******************************************************************************* * * @sa plasma_zgels * @sa plasma_omp_cgels * @sa plasma_omp_dgels * @sa plasma_omp_sgels * ******************************************************************************/ void plasma_omp_zgels(plasma_enum_t trans, plasma_desc_t A, plasma_desc_t T, plasma_desc_t B, plasma_workspace_t work, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. if ((trans != PlasmaNoTrans) && (trans != Plasma_ConjTrans)) { plasma_error("illegal value of trans"); plasma_request_fail(sequence, request, PlasmaErrorNotSupported); return; } if (plasma_desc_check(A) != PlasmaSuccess) { plasma_error("invalid descriptor A"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(T) != PlasmaSuccess) { plasma_error("invalid descriptor T"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(B) != PlasmaSuccess) { plasma_error("invalid descriptor B"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (A.m == 0 || A.n == 0 || B.n == 0) { // Zero matrix B. plasma_pzlaset(PlasmaGeneral, 0.0, 0.0, B, sequence, request); return; } //=============================== // Solve using QR factorization. //=============================== if (A.m >= A.n) { // Compute QR factorization of A. 
if (plasma->householder_mode == PlasmaTreeHouseholder) { plasma_pzgeqrf_tree(A, T, work, sequence, request); } else { plasma_pzgeqrf(A, T, work, sequence, request); } if (trans == PlasmaNoTrans) { // Find Y = Q^H * B. if (plasma->householder_mode == PlasmaTreeHouseholder) { plasma_pzunmqr_tree(PlasmaLeft, Plasma_ConjTrans, A, T, B, work, sequence, request); } else { plasma_pzunmqr(PlasmaLeft, Plasma_ConjTrans, A, T, B, work, sequence, request); } // Solve R * X = Y. plasma_pztrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit, 1.0, plasma_desc_view(A, 0, 0, A.n, A.n), plasma_desc_view(B, 0, 0, A.n, B.n), sequence, request); } else { // trans == Plasma_ConjTrans // Zero the trailing block of the right-hand-side matrix. // B has less rows than X. plasma_pzlaset(PlasmaGeneral, 0.0, 0.0, plasma_desc_view(B, A.n, 0, A.m-A.n, B.n), sequence, request); // Solve R^H * Y = B. plasma_pztrsm( PlasmaLeft, PlasmaUpper, Plasma_ConjTrans, PlasmaNonUnit, 1.0, plasma_desc_view(A, 0, 0, A.n, A.n), plasma_desc_view(B, 0, 0, A.n, B.n), sequence, request); // Find X = Q * Y. if (plasma->householder_mode == PlasmaTreeHouseholder) { plasma_pzunmqr_tree(PlasmaLeft, PlasmaNoTrans, A, T, B, work, sequence, request); } else { plasma_pzunmqr(PlasmaLeft, PlasmaNoTrans, A, T, B, work, sequence, request); } } } //=============================== // Solve using LQ factorization. //=============================== else { // Compute LQ factorization of A. if (plasma->householder_mode == PlasmaTreeHouseholder) { plasma_pzgelqf_tree(A, T, work, sequence, request); } else { plasma_pzgelqf(A, T, work, sequence, request); } if (trans == PlasmaNoTrans) { // Zero the trailing block of the right-hand-side matrix. // B has less rows than X. plasma_pzlaset(PlasmaGeneral, 0.0, 0.0, plasma_desc_view(B, A.m, 0, A.n-A.m, B.n), sequence, request); // Solve L * Y = B. 
plasma_pztrsm( PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaNonUnit, 1.0, plasma_desc_view(A, 0, 0, A.m, A.m), plasma_desc_view(B, 0, 0, A.m, B.n), sequence, request); // Find X = Q^H * Y. if (plasma->householder_mode == PlasmaTreeHouseholder) { plasma_pzunmlq_tree(PlasmaLeft, Plasma_ConjTrans, A, T, B, work, sequence, request); } else { plasma_pzunmlq(PlasmaLeft, Plasma_ConjTrans, A, T, B, work, sequence, request); } } else { // trans == Plasma_ConjTrans // Find Y = Q * B. if (plasma->householder_mode == PlasmaTreeHouseholder) { plasma_pzunmlq_tree(PlasmaLeft, PlasmaNoTrans, A, T, B, work, sequence, request); } else { plasma_pzunmlq(PlasmaLeft, PlasmaNoTrans, A, T, B, work, sequence, request); } // Solve L^H * X = Y. plasma_pztrsm( PlasmaLeft, PlasmaLower, Plasma_ConjTrans, PlasmaNonUnit, 1.0, plasma_desc_view(A, 0, 0, A.m, A.m), plasma_desc_view(B, 0, 0, A.m, B.n), sequence, request); } } }
GB_unaryop__minv_int32_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_int32_int16
// op(A') function:  GB_tran__minv_int32_int16

// C type:   int32_t
// A type:   int16_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = GB_IMINV_SIGNED (aij, 32)

// NOTE: the macros below are consumed verbatim by the shared template
// GB_unaryop_transpose.c (included at the bottom of this file), so their
// names and expansions form a contract with that template — do not rename.

// type of the A matrix entries
#define GB_ATYPE \
    int16_t

// type of the C matrix entries
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// access the pC-th entry of C
#define GB_CX(p) Cx [p]

// unary operator: z = 1/x in signed 32-bit integer arithmetic
// (GB_IMINV_SIGNED handles the x == 0 and x == -1 special cases)
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 32) ;

// casting: typecast int16_t input up to the int32_t computation type
#define GB_CASTING(z, x) \
    int32_t z = (int32_t) x ;

// cij = op (cast (aij)): first typecast, then apply the operator
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT32 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = minv ((int32_t) aij) elementwise over the anz entries of Ax,
// writing into Cx.  Entries are independent, so the loop is a flat
// statically-scheduled OpenMP parallel for over nthreads threads.
// Returns GrB_NO_VALUE when this operator/type combination is compiled out
// (GB_DISABLE), so the caller falls back to the generic kernel.

GrB_Info GB_unop__minv_int32_int16
(
    int32_t *restrict Cx,
    const int16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose variant delegates entirely to the shared template
// GB_unaryop_transpose.c, which expands the GB_* macros defined above.
// GB_PHASE_2_OF_2 selects the phase that fills in the output values
// (phase 1, the counting pass, is done by the caller).

GrB_Info GB_tran__minv_int32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 24; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
ZQ_CNN_MTCNN_NCHWC.h
#ifndef _ZQ_CNN_MTCNN_NCHWC_H_ #define _ZQ_CNN_MTCNN_NCHWC_H_ #pragma once #include "ZQ_CNN_Net_NCHWC.h" #include "ZQ_CNN_BBoxUtils.h" #include <omp.h> namespace ZQ { class ZQ_CNN_MTCNN_NCHWC { public: using string = std::string; ZQ_CNN_MTCNN_NCHWC() { min_size = 60; thresh[0] = 0.6; thresh[1] = 0.7; thresh[2] = 0.7; nms_thresh[0] = 0.6; nms_thresh[1] = 0.7; nms_thresh[2] = 0.7; width = 0; height = 0; factor = 0.709; pnet_overlap_thresh_count = 4; pnet_size = 12; pnet_stride = 2; special_handle_very_big_face = false; force_run_pnet_multithread = false; show_debug_info = false; limit_r_num = 0; limit_o_num = 0; limit_l_num = 0; } ~ZQ_CNN_MTCNN_NCHWC() { } private: #if __ARM_NEON const int BATCH_SIZE = 16; #else const int BATCH_SIZE = 64; #endif std::vector<ZQ_CNN_Net_NCHWC<ZQ_CNN_Tensor4D_NCHWC4>> pnet, rnet, onet, lnet; bool has_lnet; int thread_num; float thresh[3], nms_thresh[3]; int min_size; int width, height; float factor; int pnet_overlap_thresh_count; int pnet_size; int pnet_stride; int rnet_size; int onet_size; int lnet_size; bool special_handle_very_big_face; bool do_landmark; float early_accept_thresh; float nms_thresh_per_scale; bool force_run_pnet_multithread; std::vector<float> scales; std::vector<ZQ_CNN_Tensor4D_NCHWC4> pnet_images; ZQ_CNN_Tensor4D_NCHWC4 input, rnet_image, onet_image; bool show_debug_info; int limit_r_num; int limit_o_num; int limit_l_num; public: void TurnOnShowDebugInfo() { show_debug_info = true; } void TurnOffShowDebugInfo() { show_debug_info = false; } void SetLimit(int limit_r = 0, int limit_o = 0, int limit_l = 0) { limit_r_num = limit_r; limit_o_num = limit_o; limit_l_num = limit_l; } bool Init(const string& pnet_param, const string& pnet_model, const string& rnet_param, const string& rnet_model, const string& onet_param, const string& onet_model, int thread_num = 1, bool has_lnet = false, const string& lnet_param = "", const std::string& lnet_model = "") { if (thread_num < 1) force_run_pnet_multithread = true; else 
force_run_pnet_multithread = false; thread_num = __max(1, thread_num); pnet.resize(thread_num); rnet.resize(thread_num); onet.resize(thread_num); this->has_lnet = has_lnet; if (has_lnet) { lnet.resize(thread_num); } bool ret = true; for (int i = 0; i < thread_num; i++) { ret = pnet[i].LoadFrom(pnet_param, pnet_model, true, 1e-9, true) && rnet[i].LoadFrom(rnet_param, rnet_model, true, 1e-9, true) && onet[i].LoadFrom(onet_param, onet_model, true, 1e-9, true); if (has_lnet && ret) ret = lnet[i].LoadFrom(lnet_param, lnet_model, true, 1e-9, true); if (!ret) break; } if (!ret) { pnet.clear(); rnet.clear(); onet.clear(); if (has_lnet) lnet.clear(); this->thread_num = 0; } else this->thread_num = thread_num; if (show_debug_info) { printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0), onet[0].GetNumOfMulAdd() / (1024.0*1024.0)); if (has_lnet) printf("lnet = %.1f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0)); } int C, H, W; rnet[0].GetInputDim(C, H, W); rnet_size = H; onet[0].GetInputDim(C, H, W); onet_size = H; if (has_lnet) { lnet[0].GetInputDim(C, H, W); lnet_size = H; } return ret; } bool InitFromBuffer( const char* pnet_param, __int64 pnet_param_len, const char* pnet_model, __int64 pnet_model_len, const char* rnet_param, __int64 rnet_param_len, const char* rnet_model, __int64 rnet_model_len, const char* onet_param, __int64 onet_param_len, const char* onet_model, __int64 onet_model_len, int thread_num = 1, bool has_lnet = false, const char* lnet_param = 0, __int64 lnet_param_len = 0, const char* lnet_model = 0, __int64 lnet_model_len = 0) { if (thread_num < 1) force_run_pnet_multithread = true; else force_run_pnet_multithread = false; thread_num = __max(1, thread_num); pnet.resize(thread_num); rnet.resize(thread_num); onet.resize(thread_num); this->has_lnet = has_lnet; if (has_lnet) lnet.resize(thread_num); bool ret = true; for (int i = 0; i < thread_num; i++) { ret = pnet[i].LoadFromBuffer(pnet_param, pnet_param_len, pnet_model, 
pnet_model_len, true, 1e-9, true) && rnet[i].LoadFromBuffer(rnet_param, rnet_param_len, rnet_model, rnet_model_len, true, 1e-9, true) && onet[i].LoadFromBuffer(onet_param, onet_param_len, onet_model, onet_model_len, true, 1e-9, true); if (has_lnet && ret) ret = lnet[i].LoadFromBuffer(lnet_param, lnet_param_len, lnet_model, lnet_model_len, true, 1e-9, true); if (!ret) break; } if (!ret) { pnet.clear(); rnet.clear(); onet.clear(); if (has_lnet) lnet.clear(); this->thread_num = 0; } else this->thread_num = thread_num; if (show_debug_info) { printf("rnet = %.1f M, onet = %.1f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0), onet[0].GetNumOfMulAdd() / (1024.0*1024.0)); if (has_lnet) printf("lnet = %.1f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0)); } int C, H, W; rnet[0].GetInputDim(C, H, W); rnet_size = H; onet[0].GetInputDim(C, H, W); onet_size = H; return ret; } void SetPara(int w, int h, int min_face_size = 60, float pthresh = 0.6, float rthresh = 0.7, float othresh = 0.7, float nms_pthresh = 0.6, float nms_rthresh = 0.7, float nms_othresh = 0.7, float scale_factor = 0.709, int pnet_overlap_thresh_count = 4, int pnet_size = 12, int pnet_stride = 2, bool special_handle_very_big_face = false, bool do_landmark = true, float early_accept_thresh = 1.00) { min_size = __max(pnet_size, min_face_size); thresh[0] = __max(0.1, pthresh); thresh[1] = __max(0.1, rthresh); thresh[2] = __max(0.1, othresh); nms_thresh[0] = __max(0.1, nms_pthresh); nms_thresh[1] = __max(0.1, nms_rthresh); nms_thresh[2] = __max(0.1, nms_othresh); scale_factor = __max(0.5, __min(0.97, scale_factor)); this->pnet_overlap_thresh_count = __max(0, pnet_overlap_thresh_count); this->pnet_size = pnet_size; this->pnet_stride = pnet_stride; this->special_handle_very_big_face = special_handle_very_big_face; this->do_landmark = do_landmark; this->early_accept_thresh = early_accept_thresh; if (pnet_size == 20 && pnet_stride == 4) nms_thresh_per_scale = 0.45; else nms_thresh_per_scale = 0.495; if (width != w || 
height != h || factor != scale_factor) { scales.clear(); pnet_images.clear(); width = w; height = h; float minside = __min(width, height); int MIN_DET_SIZE = pnet_size; float m = (float)MIN_DET_SIZE / min_size; minside *= m; while (minside > MIN_DET_SIZE) { scales.push_back(m); minside *= factor; m *= factor; } minside = __min(width, height); int count = scales.size(); for (int i = scales.size() - 1; i >= 0; i--) { if (ceil(scales[i] * minside) <= pnet_size) { count--; } } if (special_handle_very_big_face) { if (count > 2) count--; scales.resize(count); if (count > 0) { float last_size = ceil(scales[count - 1] * minside); for (int tmp_size = last_size - 1; tmp_size >= pnet_size + 1; tmp_size -= 2) { scales.push_back((float)tmp_size / minside); count++; } } scales.push_back((float)pnet_size / minside); count++; } else { scales.push_back((float)pnet_size / minside); count++; } pnet_images.resize(count); } } bool Find(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& results) { double t1 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox; if (!_Pnet_stage(bgr_img, _width, _height, _widthStep, firstBbox)) return false; //results = firstBbox; //return true; if (limit_r_num > 0) { _select(firstBbox, limit_r_num, _width, _height); } double t2 = omp_get_wtime(); if (!_Rnet_stage(firstBbox, secondBbox)) return false; //results = secondBbox; //return true; if (limit_o_num > 0) { _select(secondBbox, limit_o_num, _width, _height); } if (!has_lnet || !do_landmark) { double t3 = omp_get_wtime(); if (!_Onet_stage(secondBbox, results)) return false; double t4 = omp_get_wtime(); if (show_debug_info) { printf("final found num: %d\n", (int)results.size()); printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms)\n", 1000 * (t4 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3)); } } else { double t3 = omp_get_wtime(); if (!_Onet_stage(secondBbox, thirdBbox)) return false; if (limit_l_num > 0) { 
_select(thirdBbox, limit_l_num, _width, _height); } double t4 = omp_get_wtime(); if (!_Lnet_stage(thirdBbox, results)) return false; double t5 = omp_get_wtime(); if (show_debug_info) { printf("final found num: %d\n", (int)results.size()); printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n", 1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4)); } } return true; } bool Find106(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox106>& results) { double t1 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox; if (!_Pnet_stage(bgr_img, _width, _height, _widthStep, firstBbox)) return false; //results = firstBbox; //return true; if (limit_r_num > 0) { _select(firstBbox, limit_r_num, _width, _height); } double t2 = omp_get_wtime(); if (!_Rnet_stage(firstBbox, secondBbox)) return false; //results = secondBbox; //return true; if (limit_o_num > 0) { _select(secondBbox, limit_o_num, _width, _height); } if (!has_lnet || !do_landmark) { return false; } double t3 = omp_get_wtime(); if (!_Onet_stage(secondBbox, thirdBbox)) return false; if (limit_l_num > 0) { _select(thirdBbox, limit_l_num, _width, _height); } double t4 = omp_get_wtime(); if (!_Lnet106_stage(thirdBbox, results)) return false; double t5 = omp_get_wtime(); if (show_debug_info) { printf("final found num: %d\n", (int)results.size()); printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n", 1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4)); } return true; } private: void _compute_Pnet_single_thread(std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW) { int scale_num = 0; for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; scale_num++; mapH.push_back((changedH 
- pnet_size) / pnet_stride + 1); mapW.push_back((changedW - pnet_size) / pnet_stride + 1); } maps.resize(scale_num); for (int i = 0; i < scale_num; i++) { maps[i].resize(mapH[i] * mapW[i]); } for (int i = 0; i < scale_num; i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); float cur_scale_x = (float)width / changedW; float cur_scale_y = (float)height / changedH; double t10 = omp_get_wtime(); if (scales[i] != 1) { input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0); } double t11 = omp_get_wtime(); if (scales[i] != 1) pnet[0].Forward(pnet_images[i]); else pnet[0].Forward(input); double t12 = omp_get_wtime(); if (show_debug_info) printf("Pnet [%d]: resolution [%dx%d], resize:%.3f ms, cost:%.3f ms\n", i, changedW, changedH, 1000 * (t11 - t10), 1000 * (t12 - t11)); const ZQ_CNN_Tensor4D_NCHWC4* score = pnet[0].GetBlobByName("prob1"); //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetAlignSize(); const float *p = score->GetFirstPixelPtr() + 1; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { if (row < mapH[i] && col < mapW[i]) maps[i][row*mapW[i] + col] = *p; p += scorePixStep; } } } } void _compute_Pnet_multi_thread(std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW) { if (thread_num <= 1) { for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; if (scales[i] != 1) { input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0); } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic, 1) for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; if (scales[i] != 1) { input.ResizeBilinear(pnet_images[i], changedW, changedH, 
0, 0); } } } int scale_num = 0; for (int i = 0; i < scales.size(); i++) { int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; scale_num++; mapH.push_back((changedH - pnet_size) / pnet_stride + 1); mapW.push_back((changedW - pnet_size) / pnet_stride + 1); } maps.resize(scale_num); for (int i = 0; i < scale_num; i++) { maps[i].resize(mapH[i] * mapW[i]); } std::vector<int> task_rect_off_x; std::vector<int> task_rect_off_y; std::vector<int> task_rect_width; std::vector<int> task_rect_height; std::vector<float> task_scale; std::vector<int> task_scale_id; int stride = pnet_stride; const int block_size = 64 * stride; int cellsize = pnet_size; int border_size = cellsize - stride; int overlap_border_size = cellsize / stride; int jump_size = block_size - border_size; for (int i = 0; i < scales.size(); i++) { int changeH = (int)ceil(height*scales[i]); int changeW = (int)ceil(width*scales[i]); if (changeH < pnet_size || changeW < pnet_size) continue; int block_H_num = 0; int block_W_num = 0; int start = 0; while (start < changeH) { block_H_num++; if (start + block_size >= changeH) break; start += jump_size; } start = 0; while (start < changeW) { block_W_num++; if (start + block_size >= changeW) break; start += jump_size; } for (int s = 0; s < block_H_num; s++) { for (int t = 0; t < block_W_num; t++) { int rect_off_x = t * jump_size; int rect_off_y = s * jump_size; int rect_width = __min(changeW, rect_off_x + block_size) - rect_off_x; int rect_height = __min(changeH, rect_off_y + block_size) - rect_off_y; if (rect_width >= cellsize && rect_height >= cellsize) { task_rect_off_x.push_back(rect_off_x); task_rect_off_y.push_back(rect_off_y); task_rect_width.push_back(rect_width); task_rect_height.push_back(rect_height); task_scale.push_back(scales[i]); task_scale_id.push_back(i); } } } } // int task_num = task_scale.size(); std::vector<ZQ_CNN_Tensor4D_NCHWC4> task_pnet_images(thread_num); 
if (thread_num <= 1) { for (int i = 0; i < task_num; i++) { int thread_id = omp_get_thread_num(); int scale_id = task_scale_id[i]; float cur_scale = task_scale[i]; int i_rect_off_x = task_rect_off_x[i]; int i_rect_off_y = task_rect_off_y[i]; int i_rect_width = task_rect_width[i]; int i_rect_height = task_rect_height[i]; if (scale_id == 0 && scales[0] == 1) { if (!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } else { if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } if (!pnet[thread_id].Forward(task_pnet_images[thread_id])) continue; const ZQ_CNN_Tensor4D_NCHWC4* score = pnet[thread_id].GetBlobByName("prob1"); int task_count = 0; //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetAlignSize(); const float *p = score->GetFirstPixelPtr() + 1; ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { int real_row = row + i_rect_off_y / stride; int real_col = col + i_rect_off_x / stride; if (real_row < mapH[scale_id] && real_col < mapW[scale_id]) maps[scale_id][real_row*mapW[scale_id] + real_col] = *p; p += scorePixStep; } } } } else { #pragma omp parallel for num_threads(thread_num) for (int i = 0; i < task_num; i++) { int thread_id = omp_get_thread_num(); int scale_id = task_scale_id[i]; float cur_scale = task_scale[i]; int i_rect_off_x = task_rect_off_x[i]; int i_rect_off_y = task_rect_off_y[i]; int i_rect_width = task_rect_width[i]; int i_rect_height = task_rect_height[i]; if (scale_id == 0 && scales[0] == 1) { if (!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } else { if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height, 0, 0)) continue; } if 
(!pnet[thread_id].Forward(task_pnet_images[thread_id])) continue; const ZQ_CNN_Tensor4D_NCHWC4* score = pnet[thread_id].GetBlobByName("prob1"); int task_count = 0; //score p int scoreH = score->GetH(); int scoreW = score->GetW(); int scorePixStep = score->GetAlignSize(); const float *p = score->GetFirstPixelPtr() + 1; ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { int real_row = row + i_rect_off_y / stride; int real_col = col + i_rect_off_x / stride; if (real_row < mapH[scale_id] && real_col < mapW[scale_id]) maps[scale_id][real_row*mapW[scale_id] + real_col] = *p; p += scorePixStep; } } } } } bool _Pnet_stage(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& firstBbox) { if (thread_num <= 0) return false; double t1 = omp_get_wtime(); firstBbox.clear(); if (width != _width || height != _height) return false; if (!input.ConvertFromBGR(bgr_img, width, height, _widthStep)) return false; double t2 = omp_get_wtime(); if (show_debug_info) printf("convert cost: %.3f ms\n", 1000 * (t2 - t1)); std::vector<std::vector<float> > maps; std::vector<int> mapH; std::vector<int> mapW; if (thread_num == 1 && !force_run_pnet_multithread) { pnet[0].TurnOffShowDebugInfo(); //pnet[0].TurnOnShowDebugInfo(); _compute_Pnet_single_thread(maps, mapH, mapW); } else { _compute_Pnet_multi_thread(maps, mapH, mapW); } ZQ_CNN_OrderScore order; std::vector<std::vector<ZQ_CNN_BBox> > bounding_boxes(scales.size()); std::vector<std::vector<ZQ_CNN_OrderScore> > bounding_scores(scales.size()); const int block_size = 32; int stride = pnet_stride; int cellsize = pnet_size; int border_size = cellsize / stride; for (int i = 0; i < maps.size(); i++) { double t13 = omp_get_wtime(); int changedH = (int)ceil(height*scales[i]); int changedW = (int)ceil(width*scales[i]); if (changedH < pnet_size || changedW < pnet_size) continue; float cur_scale_x = (float)width / changedW; float 
cur_scale_y = (float)height / changedH; int count = 0; //score p int scoreH = mapH[i]; int scoreW = mapW[i]; const float *p = &maps[i][0]; if (scoreW <= block_size && scoreH < block_size) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; for (int row = 0; row < scoreH; row++) { for (int col = 0; col < scoreW; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bounding_boxes[i].push_back(bbox); bounding_scores[i].push_back(order); count++; } p++; } } int before_count = bounding_boxes[i].size(); ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int after_count = bounding_boxes[i].size(); for (int j = 0; j < after_count; j++) { ZQ_CNN_BBox& bbox = bounding_boxes[i][j]; bbox.row1 = round(bbox.row1 *cur_scale_y); bbox.col1 = round(bbox.col1 *cur_scale_x); bbox.row2 = round(bbox.row2 *cur_scale_y); bbox.col2 = round(bbox.col2 *cur_scale_x); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); } double t14 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count); } else { int before_count = 0, after_count = 0; int block_H_num = __max(1, scoreH / block_size); int block_W_num = __max(1, scoreW / block_size); int block_num = block_H_num*block_W_num; int width_per_block = scoreW / block_W_num; int height_per_block = scoreH / block_H_num; std::vector<std::vector<ZQ_CNN_BBox> > tmp_bounding_boxes(block_num); std::vector<std::vector<ZQ_CNN_OrderScore> > tmp_bounding_scores(block_num); std::vector<int> block_start_w(block_num), block_end_w(block_num); std::vector<int> 
block_start_h(block_num), block_end_h(block_num); for (int bh = 0; bh < block_H_num; bh++) { for (int bw = 0; bw < block_W_num; bw++) { int bb = bh * block_W_num + bw; block_start_w[bb] = (bw == 0) ? 0 : (bw*width_per_block - border_size); block_end_w[bb] = (bw == block_num - 1) ? scoreW : ((bw + 1)*width_per_block); block_start_h[bb] = (bh == 0) ? 0 : (bh*height_per_block - border_size); block_end_h[bb] = (bh == block_num - 1) ? scoreH : ((bh + 1)*height_per_block); } } int chunk_size = 1;// ceil((float)block_num / thread_num); if (thread_num <= 1) { for (int bb = 0; bb < block_num; bb++) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; int count = 0; for (int row = block_start_h[bb]; row < block_end_h[bb]; row++) { p = &maps[i][0] + row*scoreW + block_start_w[bb]; for (int col = block_start_w[bb]; col < block_end_w[bb]; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); tmp_bounding_boxes[bb].push_back(bbox); tmp_bounding_scores[bb].push_back(order); count++; } p++; } } int tmp_before_count = tmp_bounding_boxes[bb].size(); ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int tmp_after_count = tmp_bounding_boxes[bb].size(); before_count += tmp_before_count; after_count += tmp_after_count; } } else { #pragma omp parallel for schedule(dynamic, chunk_size) num_threads(thread_num) for (int bb = 0; bb < block_num; bb++) { ZQ_CNN_BBox bbox; ZQ_CNN_OrderScore order; int count = 0; for (int row = block_start_h[bb]; row < block_end_h[bb]; row++) { const float* p = &maps[i][0] + row*scoreW + block_start_w[bb]; for (int col = 
block_start_w[bb]; col < block_end_w[bb]; col++) { if (*p > thresh[0]) { bbox.score = *p; order.score = *p; order.oriOrder = count; bbox.row1 = stride*row; bbox.col1 = stride*col; bbox.row2 = stride*row + cellsize; bbox.col2 = stride*col + cellsize; bbox.exist = true; bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); tmp_bounding_boxes[bb].push_back(bbox); tmp_bounding_scores[bb].push_back(order); count++; } p++; } } int tmp_before_count = tmp_bounding_boxes[bb].size(); ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int tmp_after_count = tmp_bounding_boxes[bb].size(); before_count += tmp_before_count; after_count += tmp_after_count; } } count = 0; for (int bb = 0; bb < block_num; bb++) { std::vector<ZQ_CNN_BBox>::iterator it = tmp_bounding_boxes[bb].begin(); for (; it != tmp_bounding_boxes[bb].end(); it++) { if ((*it).exist) { bounding_boxes[i].push_back(*it); order.score = (*it).score; order.oriOrder = count; bounding_scores[i].push_back(order); count++; } } } //ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", 0); after_count = bounding_boxes[i].size(); for (int j = 0; j < after_count; j++) { ZQ_CNN_BBox& bbox = bounding_boxes[i][j]; bbox.row1 = round(bbox.row1 *cur_scale_y); bbox.col1 = round(bbox.col1 *cur_scale_x); bbox.row2 = round(bbox.row2 *cur_scale_y); bbox.col2 = round(bbox.col2 *cur_scale_x); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); } double t14 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count); } } std::vector<ZQ_CNN_OrderScore> firstOrderScore; int count = 0; for (int i = 0; i < scales.size(); i++) { std::vector<ZQ_CNN_BBox>::iterator it = bounding_boxes[i].begin(); for (; it != 
bounding_boxes[i].end(); it++) { if ((*it).exist) { firstBbox.push_back(*it); order.score = (*it).score; order.oriOrder = count; firstOrderScore.push_back(order); count++; } } } //the first stage's nms if (count < 1) return false; double t15 = omp_get_wtime(); ZQ_CNN_BBoxUtils::_nms(firstBbox, firstOrderScore, nms_thresh[0], "Union", 0, 1); ZQ_CNN_BBoxUtils::_refine_and_square_bbox(firstBbox, width, height, true); double t16 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms\n", 1000 * (t16 - t15)); if (show_debug_info) printf("first stage candidate count: %d\n", count); double t3 = omp_get_wtime(); if (show_debug_info) printf("stage 1: cost %.3f ms\n", 1000 * (t3 - t2)); return true; } bool _Rnet_stage(std::vector<ZQ_CNN_BBox>& firstBbox, std::vector<ZQ_CNN_BBox>& secondBbox) { double t3 = omp_get_wtime(); secondBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = firstBbox.begin(); std::vector<ZQ_CNN_OrderScore> secondScore; std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int r_count = 0; for (; it != firstBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); r_count++; secondBbox.push_back(*it); } } } int batch_size = BATCH_SIZE; int per_num = ceil((float)r_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)r_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NCHWC4> task_rnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > 
task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_secondBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(r_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_secondBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_secondBbox[i][j] = secondBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } rnet[0].Forward(task_rnet_images[pp]); const ZQ_CNN_Tensor4D_NCHWC4* score = rnet[0].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D_NCHWC4* location = rnet[0].GetBlobByName("conv5-2"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int task_count = 0; for (int i = 0; i < task_secondBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[1]) { for (int j = 0; j < 4; j++) task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_secondBbox[pp][i].exist = false; } } if (task_count < 1) { task_secondBbox[pp].clear(); continue; } for (int i = 
task_secondBbox[pp].size() - 1; i >= 0; i--) { if (!task_secondBbox[pp][i].exist) task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i); } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } rnet[thread_id].Forward(task_rnet_images[pp]); const ZQ_CNN_Tensor4D_NCHWC4* score = rnet[thread_id].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D_NCHWC4* location = rnet[thread_id].GetBlobByName("conv5-2"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int task_count = 0; for (int i = 0; i < task_secondBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[1]) { for (int j = 0; j < 4; j++) task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_secondBbox[pp][i].exist = false; } } if (task_count < 1) { task_secondBbox[pp].clear(); continue; } for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--) { if (!task_secondBbox[pp][i].exist) task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i); } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_secondBbox[i].size(); } secondBbox.resize(count); secondScore.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_secondBbox[i].size(); j++) { secondBbox[id] = task_secondBbox[i][j]; secondScore[id].score = secondBbox[id].score; secondScore[id].oriOrder = id; id++; } } 
//ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Union"); ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Min"); ZQ_CNN_BBoxUtils::_refine_and_square_bbox(secondBbox, width, height, true); count = secondBbox.size(); double t4 = omp_get_wtime(); if (show_debug_info) printf("run Rnet [%d] times, candidate after nms: %d \n", r_count, count); if (show_debug_info) printf("stage 2: cost %.3f ms\n", 1000 * (t4 - t3)); return true; } bool _Onet_stage(std::vector<ZQ_CNN_BBox>& secondBbox, std::vector<ZQ_CNN_BBox>& thirdBbox) { double t4 = omp_get_wtime(); thirdBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = secondBbox.begin(); std::vector<ZQ_CNN_OrderScore> thirdScore; std::vector<ZQ_CNN_BBox> early_accept_thirdBbox; std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int o_count = 0; for (; it != secondBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { if (!do_landmark && it->score > early_accept_thresh) { early_accept_thirdBbox.push_back(*it); } else { src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); o_count++; thirdBbox.push_back(*it); } } } } int batch_size = BATCH_SIZE; int per_num = ceil((float)o_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)o_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NCHWC4> task_onet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); 
std::vector<std::vector<ZQ_CNN_BBox> > task_thirdBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(o_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_thirdBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_thirdBbox[i][j] = thirdBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); onet[0].Forward(task_onet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D_NCHWC4* score = onet[0].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D_NCHWC4* location = onet[0].GetBlobByName("conv6-2"); const ZQ_CNN_Tensor4D_NCHWC4* keyPoint = onet[0].GetBlobByName("conv6-3"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); const float* keyPoint_ptr = 0; if (keyPoint != 0) { keyPoint_ptr = keyPoint->GetFirstPixelPtr(); } int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int keyPoint_sliceStep = 0; if (keyPoint != 0) keyPoint_sliceStep = keyPoint->GetSliceStep(); int task_count = 0; ZQ_CNN_OrderScore order; for (int i = 0; i < task_thirdBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[2]) { for (int j = 0; j < 4; j++) task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; if (keyPoint != 0) { for (int num = 0; num < 5; 
num++) { task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 + (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 + (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_thirdBbox[pp][i].exist = false; } } if (task_count < 1) { task_thirdBbox[pp].clear(); continue; } for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--) { if (!task_thirdBbox[pp][i].exist) task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i); } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); onet[thread_id].Forward(task_onet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D_NCHWC4* score = onet[thread_id].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D_NCHWC4* location = onet[thread_id].GetBlobByName("conv6-2"); const ZQ_CNN_Tensor4D_NCHWC4* keyPoint = onet[thread_id].GetBlobByName("conv6-3"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); const float* keyPoint_ptr = 0; if (keyPoint != 0) keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int keyPoint_sliceStep = 0; if (keyPoint != 0) keyPoint_sliceStep = keyPoint->GetSliceStep(); int task_count = 0; ZQ_CNN_OrderScore order; for (int i = 0; i < task_thirdBbox[pp].size(); i++) 
{ if (score_ptr[i*score_sliceStep + 1] > thresh[2]) { for (int j = 0; j < 4; j++) task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; if (keyPoint != 0) { for (int num = 0; num < 5; num++) { task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 + (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 + (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_thirdBbox[pp][i].exist = false; } } if (task_count < 1) { task_thirdBbox[pp].clear(); continue; } for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--) { if (!task_thirdBbox[pp][i].exist) task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i); } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_thirdBbox[i].size(); } thirdBbox.resize(count); thirdScore.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_thirdBbox[i].size(); j++) { thirdBbox[id] = task_thirdBbox[i][j]; thirdScore[id].score = task_thirdBbox[i][j].score; thirdScore[id].oriOrder = id; id++; } } ZQ_CNN_OrderScore order; for (int i = 0; i < early_accept_thirdBbox.size(); i++) { order.score = early_accept_thirdBbox[i].score; order.oriOrder = count++; thirdScore.push_back(order); thirdBbox.push_back(early_accept_thirdBbox[i]); } ZQ_CNN_BBoxUtils::_refine_and_square_bbox(thirdBbox, width, height, false); ZQ_CNN_BBoxUtils::_nms(thirdBbox, thirdScore, nms_thresh[2], "Min"); double t5 = omp_get_wtime(); if (show_debug_info) printf("run Onet [%d] times, candidate before nms: %d \n", o_count, count); if (show_debug_info) printf("stage 3: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } bool 
_Lnet_stage(std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox>& fourthBbox) { double t4 = omp_get_wtime(); fourthBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin(); std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int l_count = 0; for (; it != thirdBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { l_count++; fourthBbox.push_back(*it); } } } std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox; ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height); for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); } int batch_size = BATCH_SIZE; int per_num = ceil((float)l_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)l_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NCHWC4> task_lnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_fourthBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(l_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); 
task_src_rect_h[i].resize(cur_num); task_fourthBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_fourthBbox[i][j] = copy_fourthBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[0].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D_NCHWC4* keyPoint = lnet[0].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < 5; num++) { task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[thread_id].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D_NCHWC4* keyPoint = lnet[thread_id].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int 
keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < 5; num++) { task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_fourthBbox[i].size(); } fourthBbox.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_fourthBbox[i].size(); j++) { memcpy(fourthBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 10); id++; } } double t5 = omp_get_wtime(); if (show_debug_info) printf("run Lnet [%d] times \n", l_count); if (show_debug_info) printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } bool _Lnet106_stage(std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox106>& resultBbox) { double t4 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> fourthBbox; std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin(); std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int l_count = 0; for (; it != thirdBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { l_count++; fourthBbox.push_back(*it); } } } std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox; ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height); for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; 
src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); } int batch_size = BATCH_SIZE; int per_num = ceil((float)l_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)l_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NCHWC4> task_lnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox106> > task_fourthBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(l_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_fourthBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_fourthBbox[i][j].col1 = copy_fourthBbox[st_id + j].col1; task_fourthBbox[i][j].col2 = copy_fourthBbox[st_id + j].col2; task_fourthBbox[i][j].row1 = copy_fourthBbox[st_id + j].row1; task_fourthBbox[i][j].row2 = copy_fourthBbox[st_id + j].row2; task_fourthBbox[i][j].area = copy_fourthBbox[st_id + j].area; task_fourthBbox[i][j].score = copy_fourthBbox[st_id + j].score; task_fourthBbox[i][j].exist = copy_fourthBbox[st_id + j].exist; } } } resultBbox.resize(l_count); for (int i = 0; i < l_count; i++) { resultBbox[i].col1 = fourthBbox[i].col1; resultBbox[i].col2 = fourthBbox[i].col2; resultBbox[i].row1 = fourthBbox[i].row1; resultBbox[i].row2 = fourthBbox[i].row2; resultBbox[i].score = 
fourthBbox[i].score; resultBbox[i].exist = fourthBbox[i].exist; resultBbox[i].area = fourthBbox[i].area; } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x[pp].size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[0].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D_NCHWC4* keyPoint = lnet[0].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keypoint_num = keyPoint->GetC() / 2; int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < keypoint_num; num++) { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]; } } } } else { #pragma omp parallel for num_threads(thread_num) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[thread_id].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D_NCHWC4* keyPoint = lnet[thread_id].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keypoint_num = keyPoint->GetC() / 2; int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < keypoint_num; num++) { 
task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]; } } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_fourthBbox[i].size(); } resultBbox.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_fourthBbox[i].size(); j++) { memcpy(resultBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 212); id++; } } double t5 = omp_get_wtime(); if (show_debug_info) printf("run Lnet [%d] times \n", l_count); if (show_debug_info) printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } void _select(std::vector<ZQ_CNN_BBox>& bbox, int limit_num, int width, int height) { int in_num = bbox.size(); if (limit_num >= in_num) return; bbox.resize(limit_num); } }; } #endif
intersectreduce.h
/****************************************************************************** * ** Copyright (c) 2016, Intel Corporation ** * ** All rights reserved. ** * ** ** * ** Redistribution and use in source and binary forms, with or without ** * ** modification, are permitted provided that the following conditions ** * ** are met: ** * ** 1. Redistributions of source code must retain the above copyright ** * ** notice, this list of conditions and the following disclaimer. ** * ** 2. Redistributions in binary form must reproduce the above copyright ** * ** notice, this list of conditions and the following disclaimer in the ** * ** documentation and/or other materials provided with the distribution. ** * ** 3. Neither the name of the copyright holder nor the names of its ** * ** contributors may be used to endorse or promote products derived ** * ** from this software without specific prior written permission. ** * ** ** * ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** * ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** * ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** * ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** * ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** * ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** * ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** * ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** * ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** * ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** * ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * ******************************************************************************/ /* Michael Anderson (Intel Corp.) 
* * ******************************************************************************/ #ifndef SRC_SINGLENODE_INTERSECTREDUCE_H_ #define SRC_SINGLENODE_INTERSECTREDUCE_H_ #include <algorithm> #include "GMDP/utils/bitvector.h" template <typename Ta, typename Tb, typename Tc> void intersect_dense_segment(Ta* v1, int * bv1, int * nnz, int num_ints, Tb * v2, int * bv2, Tc * v3, int * bv3, void (*op_fp)(const Ta&, const Tb&, Tc*, void*), void* vsp) { #pragma omp parallel for for(int i = 0 ; i < num_ints ; i++) { bv3[i] = bv1[i] & bv2[i]; } int tmp_nnz = 0; #pragma omp parallel for reduction(+:tmp_nnz) for(int ii = 0 ; ii < num_ints ; ii++) { int cnt = _popcnt32(bv3[ii]); if(cnt == 0) continue; tmp_nnz += cnt; for(int i = ii*32 ; i < (ii+1)*32 ; i++) { if(get_bitvector(i, bv3)) { Ta tmp = v1[i]; op_fp(v1[i], v2[i], &(v3[i]), vsp); } } } *nnz = tmp_nnz; } template <typename Ta, typename Tb, typename Tc> void intersect_segment(const DenseSegment<Ta> * s1, const DenseSegment<Tb> * s2, DenseSegment<Tc> * s3, void (*op_fp)(const Ta&, const Tb&, Tc*, void*), void* vsp) { s3->alloc(); s3->initialize(); if(!s1->properties->uninitialized && !s2->properties->uninitialized) { intersect_dense_segment(s1->properties->value, s1->properties->bit_vector, &(s3->properties->nnz), s1->num_ints, s2->properties->value, s2->properties->bit_vector, s3->properties->value, s3->properties->bit_vector, op_fp, vsp); } } #endif // SRC_SINGLENODE_INTERSECTREDUCE_H_
mkldnn_common.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2019 by Contributors
 * \file mkldnn_common.h
 * \brief Common header file for MKLDNN backend subgraph
 * \author Ciyong Chen
 */

#ifndef MXNET_OPERATOR_SUBGRAPH_MKLDNN_MKLDNN_COMMON_H_
#define MXNET_OPERATOR_SUBGRAPH_MKLDNN_MKLDNN_COMMON_H_
#if MXNET_USE_MKLDNN == 1
#include <vector>
namespace mxnet {
namespace op {

/*!
 * \brief Compute int8 quantization scales for a convolution weight tensor.
 *
 * Scans the weight per output channel (axis 0) for min/max, then either:
 * - channel-wise: returns one scale per channel, each clamped so that
 *   bias * scale * data_scale cannot overflow int32 (see TODO below); or
 * - tensor-wise: returns {scale, total_min, total_max} — i.e. a 3-element
 *   vector where only element 0 is a scale and elements 1-2 carry the
 *   observed range for the caller.
 */
template <typename DType>
static std::vector<float> GetWeightScales(const NDArray &weight, const NDArray *bias,
                                          const float data_scale, bool weight_channelwise_scale) {
  auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  std::vector<float> weight_scales;
  const DType *weight_ptr = weight.data().dptr<DType>();
  const DType *bias_ptr = bias? bias->data().dptr<DType>() : nullptr;
  const auto wshape = weight.shape();
  // Channels are axis 0; each channel is a contiguous slab of `offset` values.
  size_t channel = wshape[0];
  size_t offset = wshape.ProdShape(1, wshape.ndim());
  std::vector<DType> weight_c_min(channel, MaxValue<DType>());
  std::vector<DType> weight_c_max(channel, MinValue<DType>());
  for (int c = 0; c < static_cast<int>(channel); ++c) {
    const DType *p1 = weight_ptr + c * offset;
    for (size_t k = 0; k < offset; ++k) {
      if (weight_c_min[c] > p1[k]) weight_c_min[c] = p1[k];
      if (weight_c_max[c] < p1[k]) weight_c_max[c] = p1[k];
    }
  }
  if (weight_channelwise_scale) {
    weight_scales.resize(channel);
#pragma omp parallel for num_threads(nthreads)
    for (int c = 0; c < static_cast<int>(channel); ++c) {
      float scale = GetQuantizeScale(mshadow::kInt8, weight_c_min[c], weight_c_max[c]);
      if (bias_ptr && bias_ptr[c]) {
        // avoid overflow on bias
        // TODO(zhennan): mkldnn has bug to handle INT_MAX in bias, so set the maximum value of bias
        // to INT_MAX / 2.
        // Sign of bias picks the matching int32 bound so scale_max stays positive.
        float scale_max = static_cast<float>(bias_ptr[c] > 0 ? MaxValue<int32_t>() : MinValue<int32_t>()) / 2 / bias_ptr[c] / data_scale;
        scale = Min(scale, scale_max);
      }
      weight_scales[c] = scale;
    }
  } else {
    // Tensor-wise: one scale derived from the global min/max, which are also
    // returned so callers can reuse the observed range.
    DType total_min = weight_c_min[0];
    DType total_max = weight_c_max[0];
    for (size_t c = 0; c < channel; ++c) {
      if (total_min > weight_c_min[c]) total_min = weight_c_min[c];
      if (total_max < weight_c_max[c]) total_max = weight_c_max[c];
    }
    weight_scales.resize(3);
    weight_scales[0] = GetQuantizeScale(mshadow::kInt8, total_min, total_max);
    weight_scales[1] = total_min;
    weight_scales[2] = total_max;
  }
  return weight_scales;
}

/*!
 * \brief Reorder (and quantize) convolution weight/bias into the MKLDNN layouts
 *        described by weight_md / bias_md, replacing *weight and *bias in place.
 *
 * Registers the reorder primitives on the shared MKLDNNStream; when `submit`
 * is true the stream is executed here, otherwise the caller must submit it
 * before the reordered arrays are used.
 *
 * NOTE(review): when weight_scales came from the tensor-wise branch of
 * GetWeightScales it has 3 elements (scale, min, max), so the mask computed
 * here would be per-channel over all 3 values — presumably callers pass a
 * 1- or per-channel-sized vector in that case; confirm at the call sites.
 */
static void ConvertWeightBias2MKLDNN(NDArray *weight, NDArray *bias, bool has_bias,
                                     const mkldnn::memory::desc &weight_md,
                                     const mkldnn::memory::desc *bias_md,
                                     const int num_group, float data_scale,
                                     const std::vector<float> &weight_scales,
                                     const bool submit = true) {
  MKLDNNStream *stream = MKLDNNStream::Get();
  const auto new_weight = NDArray(weight_md);
  const auto conv_weights_memory = new_weight.GetMKLDNNData();
  mkldnn::primitive_attr weight_attr;
  if (weight_scales.size()) {
    // mask 0 = one scale for the whole tensor, 1 = per-output-channel scales.
    const int weight_mask = (weight_scales.size()) == 1 ? 0 : 1;
    weight_attr.set_output_scales(weight_mask, weight_scales);
  }
  auto default_weights_memory = GetWeights(*weight, num_group);
  if (default_weights_memory == nullptr) default_weights_memory = weight->GetMKLDNNData();
  const auto weight_reorder_pd = mkldnn::reorder::primitive_desc(*default_weights_memory, *conv_weights_memory, weight_attr);
  MKLDNNStream::Get()->RegisterPrimArgs(
      mkldnn::reorder(weight_reorder_pd),
      {{MKLDNN_ARG_FROM, *default_weights_memory}, {MKLDNN_ARG_TO, *conv_weights_memory}});
  NDArray new_bias;
  if (has_bias && data_scale) {
    // Bias is quantized with the combined scale: weight_scale * data_scale.
    std::vector<float> bias_scales(weight_scales.size());
    for (size_t c = 0; c < weight_scales.size(); ++c) {
      bias_scales[c] = weight_scales[c] * data_scale;
    }
    new_bias = NDArray(*bias_md);
    const auto conv_bias_memory = new_bias.GetMKLDNNData();
    const int bias_mask = (bias_scales.size()) == 1 ? 0 : 1;
    mkldnn::primitive_attr bias_attr;
    bias_attr.set_output_scales(bias_mask, bias_scales);
    auto bias_weights_memory = bias->GetMKLDNNData();
    const auto bias_reorder_pd = mkldnn::reorder::primitive_desc(*bias_weights_memory, *conv_bias_memory, bias_attr);
    MKLDNNStream::Get()->RegisterPrimArgs(
        mkldnn::reorder(bias_reorder_pd),
        {{MKLDNN_ARG_FROM, *bias_weights_memory}, {MKLDNN_ARG_TO, *conv_bias_memory}});
  }
  // Run the registered reorders now unless the caller batches them.
  if (submit) stream->Submit();
  *weight = new_weight;
  if (has_bias && data_scale) *bias = new_bias;
}

}  // namespace op
}  // namespace mxnet

#endif  // if MXNET_USE_MKLDNN == 1
#endif  // MXNET_OPERATOR_SUBGRAPH_MKLDNN_MKLDNN_COMMON_H_
bfecc_convection.h
// KRATOS ___ ___ _ ___ __ ___ ___ ___ ___ // / __/ _ \| \| \ \ / /__| \_ _| __| __| // | (_| (_) | .` |\ V /___| |) | || _|| _| // \___\___/|_|\_| \_/ |___/___|_| |_| APPLICATION // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // #if !defined(KRATOS_BFECC_CONVECTION_INCLUDED ) #define KRATOS_BFECC_CONVECTION_INCLUDED #define PRESSURE_ON_EULERIAN_MESH #define USE_FEW_PARTICLES // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" #include "utilities/geometry_utilities.h" #include "geometries/tetrahedra_3d_4.h" #include "includes/variables.h" #include "spatial_containers/spatial_containers.h" #include "utilities/timer.h" #include "utilities/binbased_fast_point_locator.h" #include <boost/timer.hpp> #include "utilities/timer.h" #include "utilities/openmp_utils.h" namespace Kratos { template<std::size_t TDim> class BFECCConvection { public: KRATOS_CLASS_POINTER_DEFINITION(BFECCConvection<TDim>); BFECCConvection(typename BinBasedFastPointLocator<TDim>::Pointer pSearchStructure) : mpSearchStructure(pSearchStructure) { } ~BFECCConvection() { } //********************************************************************************************** //********************************************************************************************** void BFECCconvect(ModelPart& rModelPart, const Variable< double >& rVar, const Variable<array_1d<double,3> >& conv_var, const double substeps) { KRATOS_TRY const double dt = rModelPart.GetProcessInfo()[DELTA_TIME]; //do movement Vector N(TDim + 1); Vector N_valid(TDim + 1); const int max_results = 10000; typename BinBasedFastPointLocator<TDim>::ResultContainerType results(max_results); const int nparticles = rModelPart.Nodes().size(); PointerVector< Element > elem_backward( rModelPart.Nodes().size()); std::vector< Vector > Ns( 
rModelPart.Nodes().size()); std::vector< bool > found( rModelPart.Nodes().size()); //FIRST LOOP: estimate rVar(n+1) #pragma omp parallel for firstprivate(results,N,N_valid) for (int i = 0; i < nparticles; i++) { typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i; Element::Pointer pelement; Element::Pointer pelement_valid; array_1d<double,3> bckPos = iparticle->Coordinates(); const array_1d<double,3>& vel = iparticle->FastGetSolutionStepValue(conv_var); bool has_valid_elem_pointer = false; bool is_found = ConvectBySubstepping(dt,bckPos,vel, N,N_valid, pelement,pelement_valid, result_begin, max_results, -1.0, substeps, conv_var, has_valid_elem_pointer); found[i] = is_found; if(is_found) { //save position backwards elem_backward(i) = pelement; Ns[i] = N; Geometry< Node < 3 > >& geom = pelement->GetGeometry(); double phi1 = N[0] * ( geom[0].FastGetSolutionStepValue(rVar,1)); for (unsigned int k = 1; k < geom.size(); k++) { phi1 += N[k] * ( geom[k].FastGetSolutionStepValue(rVar,1) ); } iparticle->FastGetSolutionStepValue(rVar) = phi1; } else if(has_valid_elem_pointer) { //save position backwards elem_backward(i) = pelement_valid; Ns[i] = N_valid; Geometry< Node < 3 > >& geom = pelement_valid->GetGeometry(); double phi1 = N[0] * ( geom[0].FastGetSolutionStepValue(rVar,1)); for (unsigned int k = 1; k < geom.size(); k++) { phi1 += N_valid[k] * ( geom[k].FastGetSolutionStepValue(rVar,1) ); } iparticle->FastGetSolutionStepValue(rVar) = phi1; } } //now obtain the value AT TIME STEP N by taking it from N+1 #pragma omp parallel for firstprivate(results,N,N_valid) for (int i = 0; i < nparticles; i++) { typename BinBasedFastPointLocator<TDim>::ResultIteratorType result_begin = results.begin(); ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i; Element::Pointer pelement; Element::Pointer pelement_valid; array_1d<double,3> fwdPos = 
iparticle->Coordinates(); const array_1d<double,3>& vel = iparticle->FastGetSolutionStepValue(conv_var,1); bool has_valid_elem_pointer = false; bool is_found = ConvectBySubstepping(dt,fwdPos,vel, N, N_valid, pelement, pelement_valid, result_begin, max_results, 1.0, substeps, conv_var,has_valid_elem_pointer); if(is_found) { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); double phi_old = N[0] * ( geom[0].FastGetSolutionStepValue(rVar)); for (unsigned int k = 1; k < geom.size(); k++) { phi_old += N[k] * ( geom[k].FastGetSolutionStepValue(rVar) ); } //store correction iparticle->GetValue(rVar) = 1.5*iparticle->FastGetSolutionStepValue(rVar,1) - 0.5 * phi_old; // iparticle->FastGetSolutionStepValue(rVar) = iparticle->GetValue(rVar) - 0.5 * (phi2 - iparticle->FastGetSolutionStepValue(rVar,1)); } else { iparticle->GetValue(rVar) = iparticle->FastGetSolutionStepValue(rVar,1); } } #pragma omp parallel for for (int i = 0; i < nparticles; i++) { ModelPart::NodesContainerType::iterator iparticle = rModelPart.NodesBegin() + i; bool is_found = found[i]; if(is_found) { Vector N = Ns[i]; Geometry< Node < 3 > >& geom = elem_backward[i].GetGeometry(); double phi1 = N[0] * ( geom[0].GetValue(rVar)); for (unsigned int k = 1; k < geom.size(); k++) { phi1 += N[k] * ( geom[k].GetValue(rVar) ); } iparticle->FastGetSolutionStepValue(rVar) = phi1; } // else // std::cout << "it should find it" << std::endl; } KRATOS_CATCH("") } bool ConvectBySubstepping( const double dt, array_1d<double,3>& position, //IT WILL BE MODIFIED const array_1d<double,3>& initial_velocity, Vector& N, Vector& N_valid, Element::Pointer& pelement, Element::Pointer& pelement_valid, typename BinBasedFastPointLocator<TDim>::ResultIteratorType& result_begin, const unsigned int max_results, const double velocity_sign, const double subdivisions, const Variable<array_1d<double,3> >& conv_var, bool& has_valid_elem_pointer) { bool is_found = false; array_1d<double,3> veulerian; const double small_dt = dt/subdivisions; 
if(velocity_sign > 0.0) //going from the past to the future { noalias(position) += small_dt*initial_velocity; unsigned int substep=0; while(substep++ < subdivisions) { is_found = mpSearchStructure->FindPointOnMesh(position, N, pelement, result_begin, max_results); if (is_found == true) { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); const double new_step_factor = static_cast<double>(substep)/subdivisions; const double old_step_factor = (1.0 - new_step_factor); noalias(veulerian) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(conv_var) + old_step_factor*geom[0].FastGetSolutionStepValue(conv_var,1)); for (unsigned int k = 1; k < geom.size(); k++) noalias(veulerian) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(conv_var) + old_step_factor*geom[k].FastGetSolutionStepValue(conv_var,1) ); noalias(position) += small_dt*veulerian; N_valid = N; pelement_valid = pelement; has_valid_elem_pointer = true; } else break; } } else //going from the future to the past { noalias(position) -= small_dt*initial_velocity; unsigned int substep=0; while(substep++ < subdivisions) { is_found = mpSearchStructure->FindPointOnMesh(position, N, pelement, result_begin, max_results); if (is_found == true) { Geometry< Node < 3 > >& geom = pelement->GetGeometry(); //this factors get inverted from the other case const double old_step_factor = static_cast<double>(substep)/subdivisions; const double new_step_factor = (1.0 - old_step_factor); noalias(veulerian) = N[0] * ( new_step_factor*geom[0].FastGetSolutionStepValue(conv_var) + old_step_factor*geom[0].FastGetSolutionStepValue(conv_var,1)); for (unsigned int k = 1; k < geom.size(); k++) noalias(veulerian) += N[k] * ( new_step_factor*geom[k].FastGetSolutionStepValue(conv_var) + old_step_factor*geom[k].FastGetSolutionStepValue(conv_var,1) ); noalias(position) -= small_dt*veulerian; N_valid = N; pelement_valid = pelement; has_valid_elem_pointer = true; } else break; } } return is_found; } void 
ResetBoundaryConditions(ModelPart& rModelPart, const Variable< double >& rVar) { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = rModelPart.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, rModelPart.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; if (inode->IsFixed(rVar)) { inode->FastGetSolutionStepValue(rVar)=inode->GetSolutionStepValue(rVar,1); } } } KRATOS_CATCH("") } void CopyScalarVarToPreviousTimeStep(ModelPart& rModelPart, const Variable< double >& rVar) { KRATOS_TRY ModelPart::NodesContainerType::iterator inodebegin = rModelPart.NodesBegin(); vector<unsigned int> node_partition; #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif OpenMPUtils::CreatePartition(number_of_threads, rModelPart.Nodes().size(), node_partition); #pragma omp parallel for for(int kkk=0; kkk<number_of_threads; kkk++) { for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++) { ModelPart::NodesContainerType::iterator inode = inodebegin+ii; inode->GetSolutionStepValue(rVar,1) = inode->FastGetSolutionStepValue(rVar); } } KRATOS_CATCH("") } private: typename BinBasedFastPointLocator<TDim>::Pointer mpSearchStructure; }; } // namespace Kratos. #endif // KRATOS_BFECC_CONVECTION_INCLUDED defined
set_BCs.h
#pragma omp target teams distribute parallel for thread_limit(BLOCK_SIZE) for (int ind = 1; ind < NUM+1; ind++) { // left boundary u(0, ind) = ZERO; v(0, ind) = -v(1, ind); // right boundary u(NUM, ind) = ZERO; v(NUM + 1, ind) = -v(NUM, ind); // bottom boundary u(ind, 0) = -u(ind, 1); v(ind, 0) = ZERO; // top boundary u(ind, NUM + 1) = TWO - u(ind, NUM); v(ind, NUM) = ZERO; if (ind == NUM) { // left boundary u(0, 0) = ZERO; v(0, 0) = -v(1, 0); u(0, NUM + 1) = ZERO; v(0, NUM + 1) = -v(1, NUM + 1); // right boundary u(NUM, 0) = ZERO; v(NUM + 1, 0) = -v(NUM, 0); u(NUM, NUM + 1) = ZERO; v(NUM + 1, NUM + 1) = -v(NUM, NUM + 1); // bottom boundary u(0, 0) = -u(0, 1); v(0, 0) = ZERO; u(NUM + 1, 0) = -u(NUM + 1, 1); v(NUM + 1, 0) = ZERO; // top boundary u(0, NUM + 1) = TWO - u(0, NUM); v(0, NUM) = ZERO; u(NUM + 1, NUM + 1) = TWO - u(NUM + 1, NUM); v(ind, NUM + 1) = ZERO; } // end if }
rectangle_cmap.h
#pragma omp parallel for schedule(dynamic,1) reduction(+:counter) for (vidType v0 = 0; v0 < g.V(); v0++) { auto tid = omp_get_thread_num(); auto &cmap = cmaps.at(tid); for (auto v1 : g.N(v0)) { for (auto u : g.N(v1)) { if (u >= v0) break; cmap[u] = 1; } if (v1 >= v0) break; for (auto v2 : g.N(v0)) { if (v2 >= v1) break; for (auto v3 : g.N(v2)) { if (v3 >= v0) break; #ifdef PROFILE_LATENCY auto c1 = read_cycle(); #endif if (cmap[v3] == 1) counter ++; #ifdef PROFILE_LATENCY auto c2 = read_cycle(); if (nqueries[tid] < NUM_SAMPLES) { auto tick = c2 - c1; if (tick < 500) { nticks[tid] += tick; nqueries[tid] ++; } } #endif } } for (auto u : g.N(v1)) { if (u >= v0) break; cmap[u] = 0; } } }
spmv.c
////Example of sparse matrix-vector multiply, using CSR (compressed sparse row format).
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Add timing support
#include <sys/timeb.h>

#define REAL float

/* Wall-clock time in seconds (millisecond resolution). */
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

//#define DEFAULT_DIMSIZE 256

/* Debug helper: print an n x m row-major matrix A with a label. */
void print_array(char *title, char *name, REAL *A, int n, int m) {
    printf("%s:\n", title);
    int i, j;
    for (i = 0; i < n; i++) {
        for (j = 0; j < m; j++) {
            printf("%s[%d][%d]:%f ", name, i, j, A[i * m + j]);
        }
        printf("\n");
    }
    printf("\n");
}

/*
 * Builds the classic 2D 5-point Laplacian stencil matrix (n*n rows, at most 5
 * nonzeros per row) in CSR form, multiplies it by an all-ones vector, and
 * reports timing/GFlops plus a consistency check (row sums must be >= 0:
 * interior rows sum to 0, boundary rows to a positive value).
 */
int main(int argc, char *argv[]) {
    int *ia, *ja;
    REAL *a, *x, *y;
    int row, i, j, idx, n, nnzMax, nnz, nrows;

    n = 10240;
    //n = 24;
    if (argc > 1) n = atoi(argv[1]);
    nrows = n * n;
    nnzMax = nrows * 5;

    /* BUGFIX: CSR needs nrows+1 row pointers — ia[nrows] is written after the
     * assembly loop, so allocating only nrows ints was a heap overflow. */
    ia = (int*)malloc((nrows + 1) * sizeof(int));
    ja = (int*)malloc(nnzMax * sizeof(int));
    a = (REAL*)malloc(nnzMax * sizeof(REAL));
    /* Allocate the source and result vectors */
    x = (REAL*)malloc(nrows * sizeof(REAL));
    y = (REAL*)malloc(nrows * sizeof(REAL));
    if (!ia || !ja || !a || !x || !y) {
        fprintf(stderr, "allocation failed\n");
        return 1;
    }

    /* Assemble the 5-point stencil: -1 on the four neighbours, 4 on the
     * diagonal; neighbours outside the grid are simply omitted. */
    row = 0;
    nnz = 0;
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            ia[row] = nnz;
            if (i > 0) { ja[nnz] = row - n; a[nnz] = -1.0; nnz++; }
            if (j > 0) { ja[nnz] = row - 1; a[nnz] = -1.0; nnz++; }
            ja[nnz] = row; a[nnz] = 4.0; nnz++;
            if (j < n - 1) { ja[nnz] = row + 1; a[nnz] = -1.0; nnz++; }
            if (i < n - 1) { ja[nnz] = row + n; a[nnz] = -1.0; nnz++; }
            row++;
        }
    }
    ia[row] = nnz;   /* sentinel row pointer: total number of nonzeros */

    /* Create the source (x) vector */
    for (i = 0; i < nrows; i++) x[i] = 1.0;

    double elapsed = read_timer();
    /* BUGFIX: flops reaches ~2*nnz ≈ 1e9 at the default size and overflows a
     * 32-bit int for larger n; count in 64 bits. */
    long long flops = 0;
    for (row = 0; row < nrows; row++) {
        REAL sum = 0.0;
#pragma omp simd reduction(+:sum,flops)
        for (idx = ia[row]; idx < ia[row + 1]; idx++) {
            sum += a[idx] * x[ja[idx]];
            flops += 2;   /* one multiply + one add per nonzero */
        }
        y[row] = sum;
    }
    elapsed = read_timer() - elapsed;
    double gflops = flops / (1.0e9 * elapsed);

    printf("seq elasped time(s): %.4f\n", elapsed);
    printf("GFlops: %.4f\n", gflops);

    /* Consistency check: every row sum of the Laplacian times ones is >= 0. */
    int errors = 0;
    for (row = 0; row < nrows; row++) {
        if (y[row] < 0) {
            //fprintf(stderr,"y[%d]=%f, fails consistency test\n", row, y[row]);
            ++errors;
        }
    }
    printf("Errors: %d\n", errors);

    free(ia);
    free(ja);
    free(a);
    free(x);
    free(y);
    return 0;
}
basic_omp.c
#include <stdio.h>
#include <stdlib.h>   /* BUGFIX: malloc/free were used without this include */
#include <omp.h>

/*
 * Demonstrates that writes from an OpenMP parallel region are visible after
 * the region: each of the first four threads overwrites its own slot of a
 * shared 4-element array.
 */
int main() {
    int* arr = (int*) malloc(sizeof(int) * 4);
    if (arr == NULL) {
        return 1;
    }
    arr[0] = 42;
    arr[1] = 42;
    arr[2] = 42;
    arr[3] = 42;

    printf("BEFORE: [%d, %d, %d, %d]\n", arr[0], arr[1], arr[2], arr[3]);

    #pragma omp parallel shared(arr)
    {
        int rank = omp_get_thread_num();
        /* BUGFIX: the runtime may spawn more than 4 threads, which made the
         * unguarded arr[rank] write an out-of-bounds access. */
        if (rank < 4) {
            arr[rank] = rank;
        }
    }

    printf("AFTER: [%d, %d, %d, %d]\n", arr[0], arr[1], arr[2], arr[3]);

    free(arr);
    return 0;
}
hello-openmp.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Minimal OpenMP demo: fork a team of threads, have every thread print a
 * greeting with its thread id, and let the master thread query the team size.
 * All threads join back at the end of the parallel region.
 */
int main (int argc, char *argv[])
{
    #pragma omp parallel
    {
        /* Declaring the locals inside the region makes them thread-private
         * without needing a private() clause. */
        const int tid = omp_get_thread_num();

        printf("Hello World from OpenMP thread = %d\n", tid);

        /* Only the master thread queries the team size. */
        if (tid == 0) {
            int nthreads = omp_get_num_threads();
            //printf("Number of threads = %d\n", nthreads);
            (void) nthreads;
        }
    }
    /* Implicit barrier: the team disbands here. */
}
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 16; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-3,4)),ceild(8*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(4*t1+Ny+5,16)),floord(8*t2+Ny+4,16)),floord(8*t1-8*t2+Nz+Ny+3,16));t3++) { for (t4=max(max(max(0,ceild(t1-7,8)),ceild(8*t2-Nz-28,32)),ceild(16*t3-Ny-28,32));t4<=min(min(min(min(floord(Nt+Nx-4,32),floord(4*t1+Nx+5,32)),floord(8*t2+Nx+4,32)),floord(16*t3+Nx+12,32)),floord(8*t1-8*t2+Nz+Nx+3,32));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),16*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),16*t3+14),32*t4+30),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) { lbv=max(32*t4,t5+1); ubv=min(32*t4+31,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ 
gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
GB_binop__isle_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isle_int32 // A.*B function (eWiseMult): GB_AemultB__isle_int32 // A*D function (colscale): GB_AxD__isle_int32 // D*A function (rowscale): GB_DxB__isle_int32 // C+=B function (dense accum): GB_Cdense_accumB__isle_int32 // C+=b function (dense accum): GB_Cdense_accumb__isle_int32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isle_int32 // C=scalar+B GB_bind1st__isle_int32 // C=scalar+B' GB_bind1st_tran__isle_int32 // C=A+scalar GB_bind2nd__isle_int32 // C=A'+scalar GB_bind2nd_tran__isle_int32 // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] 
#define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x <= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_INT32 || GxB_NO_ISLE_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isle_int32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isle_int32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isle_int32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isle_int32 ( GrB_Matrix C, const GrB_Matrix A, 
bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isle_int32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *GB_RESTRICT Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__isle_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, 
*klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isle_int32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isle_int32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd 
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isle_int32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB_bind1st_tran__isle_int32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB_bind2nd_tran__isle_int32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % John Cristy % % July 1992 % % % % % % Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods takes a 24 bit image and reduces % the number of colors so it can be displayed on raster device with less % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes a standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. 
RGB space is a 3-dimensional
% vector space, and each pixel, Pi, is defined by an ordered triple of
% red, green, and blue coordinates, (Ri, Gi, Bi).
%
% Each primary color component (red, green, or blue) represents an
% intensity which varies linearly from 0 to a maximum value, Cmax, which
% corresponds to full saturation of that color.  Color allocation is
% defined over a domain consisting of the cube in RGB space with opposite
% vertices at (0,0,0) and (Cmax, Cmax, Cmax).  QUANTIZE requires Cmax =
% 255.
%
% The algorithm maps this domain onto a tree in which each node
% represents a cube within that domain.  In the following discussion
% these cubes are defined by the coordinate of two opposite vertices:
% The vertex nearest the origin in RGB space and the vertex farthest from
% the origin.
%
% The tree's root node represents the entire domain, (0,0,0) through
% (Cmax,Cmax,Cmax).  Each lower level in the tree is generated by
% subdividing one node's cube into eight smaller cubes of equal size.
% This corresponds to bisecting the parent cube with planes passing
% through the midpoints of each edge.
%
% The basic algorithm operates in three phases: Classification,
% Reduction, and Assignment.  Classification builds a color description
% tree for the image.  Reduction collapses the tree until the number of
% colors it represents is, at most, the number of colors desired in the
% output image.  Assignment defines the output image's color map and sets
% each pixel's color by reclassification in the reduced tree.  Our goal
% is to minimize the numerical discrepancies between the original colors
% and quantized colors (quantization error).
%
% Classification begins by initializing a color description tree of
% sufficient depth to represent each possible input color in a leaf.
% However, it is impractical to generate a fully-formed color description
% tree in the classification phase for realistic values of Cmax.
If % colors components in the input image are quantized to k-bit precision, % so that Cmax= 2k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1, k, 8k). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, storage_class scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of % pixels represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the nodes' center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. 
It uses a pruning threshold, Ep, to govern node
% selection as follows:
%
%   Ep = 0
%   while number of nodes with (n2 > 0) > required maximum number of colors
%     prune all nodes such that E <= Ep
%     Set Ep to minimum E in remaining nodes
%
% This has the effect of minimizing any quantization error when merging
% two nodes together.
%
% When a node to be pruned has offspring, the pruning procedure invokes
% itself recursively in order to prune the tree from the leaves upward.
% n2, Sr, Sg, and Sb in a node being pruned are always added to the
% corresponding data in that node's parent.  This retains the pruned
% node's color characteristics for later averaging.
%
% For each node, n2 pixels exist for which that node represents the
% smallest volume in RGB space containing those pixel's colors.  When n2
% > 0 the node will uniquely define a color in the output image.  At the
% beginning of reduction, n2 = 0 for all nodes except the leaves of
% the tree which represent colors present in the input image.
%
% The other pixel count, n1, indicates the total number of colors within
% the cubic volume which the node represents.  This includes n1 - n2
% pixels whose colors should be defined by nodes at a lower level in the
% tree.
%
% Assignment generates the output image from the pruned tree.  The output
% image consists of two parts: (1) A color map, which is an array of
% color descriptions (RGB triples) for each color present in the output
% image; (2) A pixel array, which represents each pixel as an index
% into the color map array.
%
% First, the assignment phase makes one pass over the pruned color
% description tree to establish the image's color map.  For each node
% with n2 > 0, it divides Sr, Sg, and Sb by n2.  This produces the mean
% color of all pixels that classify no lower than this node.  Each of
% these colors becomes an entry in the color map.
%
% Finally, the assignment phase reclassifies each pixel in the pruned
% tree to identify the deepest node containing the pixel's color.  The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
% This method is based on a similar algorithm written by Paul Raveling.
%
*/

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/string_.h"
#include "magick/thread-private.h"

/*
  Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift  2
#else
#define CacheShift  3
#endif
#define ErrorQueueLength  16
#define MaxNodes  266817
#define MaxTreeDepth  8
#define NodesInAList  1920

/*
  Typedef declarations.
*/

/*
  A pixel whose channels are kept in floating point, used to accumulate
  color sums and quantization errors without loss.
*/
typedef struct _RealPixelPacket
{
  MagickRealType
    red,
    green,
    blue,
    opacity;
} RealPixelPacket;

/*
  One node of the color description octree (16-way when alpha is
  associated): per-cube pixel counts, summed color, and quantization error.
*/
typedef struct _NodeInfo
{
  struct _NodeInfo
    *parent,
    *child[16];

  MagickSizeType
    number_unique;

  RealPixelPacket
    total_color;

  MagickRealType
    quantize_error;

  size_t
    color_number,
    id,
    level;
} NodeInfo;

/*
  A batch of NodeInfo structures, chained so the whole tree can be freed
  in one pass.
*/
typedef struct _Nodes
{
  NodeInfo
    *nodes;

  struct _Nodes
    *next;
} Nodes;

/*
  Working state for classification, reduction, and assignment: the tree
  root, color budget, pruning thresholds, dither error queue, and the
  current target color for the closest-color search.
*/
typedef struct _CubeInfo
{
  NodeInfo
    *root;

  size_t
    colors,
    maximum_colors;

  ssize_t
    transparent_index;

  MagickSizeType
    transparent_pixels;

  RealPixelPacket
    target;

  MagickRealType
    distance,
    pruning_threshold,
    next_threshold;

  size_t
    nodes,
    free_nodes,
    color_number;

  NodeInfo
    *next_node;

  Nodes
    *node_queue;

  ssize_t
    *cache;

  RealPixelPacket
    error[ErrorQueueLength];

  MagickRealType
    weights[ErrorQueueLength];

  QuantizeInfo
    *quantize_info;

  MagickBooleanType
    associate_alpha;

  ssize_t
    x,
    y;

  size_t
    depth;

  MagickOffsetType
    offset;

  MagickSizeType
    span;
} CubeInfo;

/*
  Method prototypes.
*/
static CubeInfo
  *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);

static NodeInfo
  *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);

static MagickBooleanType
  AssignImageColors(Image *,CubeInfo *),
  ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
  DitherImage(Image *,CubeInfo *),
  SetGrayscaleImage(Image *);

static size_t
  DefineImageColormap(Image *,CubeInfo *,NodeInfo *);

static void
  ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
  DestroyCubeInfo(CubeInfo *),
  PruneLevel(const Image *,CubeInfo *,const NodeInfo *),
  PruneToCubeDepth(const Image *,CubeInfo *,const NodeInfo *),
  ReduceImageColors(const Image *,CubeInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%   A c q u i r e Q u a n t i z e I n f o                                     %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
%  The format of the AcquireQuantizeInfo method is:
%
%      QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  QuantizeInfo
    *quantize_info;

  /*
    Allocate the structure and fill it with default settings.
  */
  quantize_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*quantize_info));
  if (quantize_info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(quantize_info);
  if (image_info == (ImageInfo *) NULL)
    return(quantize_info);
  /*
    Inherit the dither preference and verbosity from the image info.
  */
  quantize_info->dither=image_info->dither;
  {
    const char
      *option;

    option=GetImageOption(image_info,"dither");
    if (option != (const char *) NULL)
      quantize_info->dither_method=(DitherMethod) ParseCommandOption(
        MagickDitherOptions,MagickFalse,option);
  }
  quantize_info->measure_error=image_info->verbose;
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
+   A s s i g n I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AssignImageColors() generates the output image from the pruned tree.  The
%  output image consists of two parts: (1) A color map, which is an array
%  of color descriptions (RGB triples) for each color present in the
%  output image; (2) A pixel array, which represents each pixel as an
%  index into the color map array.
%
%  First, the assignment phase makes one pass over the pruned color
%  description tree to establish the image's color map.  For each node
%  with n2 > 0, it divides Sr, Sg, and Sb by n2.  This produces the mean
%  color of all pixels that classify no lower than this node.  Each of
%  these colors becomes an entry in the color map.
%
%  Finally, the assignment phase reclassifies each pixel in the pruned
%  tree to identify the deepest node containing the pixel's color.
The
% pixel's value in the pixel array becomes the index of this node's mean
% color in the color map.
%
%  The format of the AssignImageColors() method is:
%
%      MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/

/*
  AssociateAlphaPixel() loads a pixel into the real-valued working pixel,
  pre-multiplying the color channels by alpha when the cube associates the
  alpha channel and the pixel is not fully opaque.
*/
static inline void AssociateAlphaPixel(const CubeInfo *cube_info,
  const PixelPacket *pixel,RealPixelPacket *alpha_pixel)
{
  MagickRealType
    alpha;

  if ((cube_info->associate_alpha == MagickFalse) ||
      (pixel->opacity == OpaqueOpacity))
    {
      alpha_pixel->red=(MagickRealType) GetPixelRed(pixel);
      alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel);
      alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel);
      alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
      return;
    }
  alpha=(MagickRealType) (QuantumScale*(QuantumRange-GetPixelOpacity(pixel)));
  alpha_pixel->red=alpha*GetPixelRed(pixel);
  alpha_pixel->green=alpha*GetPixelGreen(pixel);
  alpha_pixel->blue=alpha*GetPixelBlue(pixel);
  alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel);
}

/*
  ClampToUnsignedQuantum() rounds a real channel value to the nearest
  Quantum, clamped to [0, QuantumRange].
*/
static inline Quantum ClampToUnsignedQuantum(const MagickRealType value)
{
  if (value <= 0.0)
    return((Quantum) 0);
  if (value >= QuantumRange)
    return((Quantum) QuantumRange);
  return((Quantum) (value+0.5));
}

/*
  ColorToNodeId() packs bit `index' of each (char-scaled) channel into a
  3-bit (or 4-bit with alpha) child index for descending the color tree.
*/
static inline size_t ColorToNodeId(const CubeInfo *cube_info,
  const RealPixelPacket *pixel,size_t index)
{
  size_t
    id;

  id=(size_t) (((ScaleQuantumToChar(ClampToUnsignedQuantum(
    GetPixelRed(pixel))) >> index) & 0x01) | ((ScaleQuantumToChar(
    ClampToUnsignedQuantum(GetPixelGreen(pixel))) >> index) & 0x01) << 1 |
    ((ScaleQuantumToChar(ClampToUnsignedQuantum(GetPixelBlue(pixel))) >>
    index) & 0x01) << 2);
  if (cube_info->associate_alpha != MagickFalse)
    id|=((ScaleQuantumToChar(ClampToUnsignedQuantum(GetPixelOpacity(pixel)))
      >> index) & 0x1) << 3;
  return(id);
}

/*
  IsSameColor() compares two pixels channel by channel; opacity is only
  compared when the image has a matte channel.
*/
static inline MagickBooleanType IsSameColor(const Image *image,
  const PixelPacket *p,const PixelPacket *q)
{
  if ((GetPixelRed(p) != GetPixelRed(q)) ||
      (GetPixelGreen(p) != GetPixelGreen(q)) ||
      (GetPixelBlue(p) != GetPixelBlue(q)))
    return(MagickFalse);
  if ((image->matte != MagickFalse) &&
      (GetPixelOpacity(p) != GetPixelOpacity(q)))
    return(MagickFalse);
  return(MagickTrue);
}

/*
  AssignImageColors(): build the output colormap from the pruned tree, then
  map every pixel to its closest colormap entry (dithered or not).
*/
static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
{
#define AssignImageTag  "Assign/Image"

  ssize_t
    y;

  /*
    Allocate image colormap.
  */
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace);
  else
    if ((image->colorspace != GRAYColorspace) &&
        (IsRGBColorspace(image->colorspace) == MagickFalse) &&
        (image->colorspace != CMYColorspace))
      (void) TransformImageColorspace((Image *) image,RGBColorspace);
  if (AcquireImageColormap(image,cube_info->colors) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  image->colors=0;
  cube_info->transparent_pixels=0;
  cube_info->transparent_index=(-1);
  (void) DefineImageColormap(image,cube_info,cube_info->root);
  /*
    Create a reduced color image.
  */
  if ((cube_info->quantize_info->dither != MagickFalse) &&
      (cube_info->quantize_info->dither_method != NoDitherMethod))
    (void) DitherImage(image,cube_info);
  else
    {
      CacheView
        *image_view;

      ExceptionInfo
        *exception;

      MagickBooleanType
        status;

      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CubeInfo
          cube;

        register IndexPacket
          *restrict indexes;

        register PixelPacket
          *restrict q;

        register ssize_t
          x;

        ssize_t
          count;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        /* each thread works on a private copy of the cube state */
        cube=(*cube_info);
        for (x=0; x < (ssize_t) image->columns; x+=count)
        {
          RealPixelPacket
            pixel;

          register const NodeInfo
            *node_info;

          register ssize_t
            i;

          size_t
            id,
            index;

          /*
            Identify the deepest node containing the pixel's color.  The
            run length `count' batches consecutive identical pixels.
          */
          for (count=1; (x+count) < (ssize_t) image->columns; count++)
            if (IsSameColor(image,q,q+count) == MagickFalse)
              break;
          AssociateAlphaPixel(&cube,q,&pixel);
          node_info=cube.root;
          for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--)
          {
            id=ColorToNodeId(&cube,&pixel,index);
            if (node_info->child[id] == (NodeInfo *) NULL)
              break;
            node_info=node_info->child[id];
          }
          /*
            Find closest color among siblings and their children.
          */
          cube.target=pixel;
          cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*
            (QuantumRange+1.0)+1.0);
          ClosestColor(image,&cube,node_info->parent);
          index=cube.color_number;
          for (i=0; i < (ssize_t) count; i++)
          {
            if (image->storage_class == PseudoClass)
              SetPixelIndex(indexes+x+i,index);
            if (cube.quantize_info->measure_error == MagickFalse)
              {
                SetPixelRgb(q,image->colormap+index);
                if (cube.associate_alpha != MagickFalse)
                  SetPixelOpacity(q,image->colormap[index].opacity);
              }
            q++;
          }
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_AssignImageColors)
#endif
            proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
    }
  if (cube_info->quantize_info->measure_error != MagickFalse)
    (void) GetImageQuantizeError(image);
  if ((cube_info->quantize_info->number_colors == 2) &&
      (cube_info->quantize_info->colorspace == GRAYColorspace))
    {
      Quantum
        intensity;

      register PixelPacket
        *restrict q;

      register ssize_t
        i;

      /*
        Monochrome image: snap each colormap entry to black or white.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        intensity=(Quantum) (PixelIntensity(q) < ((MagickRealType)
          QuantumRange/2.0) ? 0 : QuantumRange);
        SetPixelRed(q,intensity);
        SetPixelGreen(q,intensity);
        SetPixelBlue(q,intensity);
        q++;
      }
    }
  (void) SyncImage(image);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,RGBColorspace);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
+   C l a s s i f y I m a g e C o l o r s                                     %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClassifyImageColors() begins by initializing a color description tree
%  of sufficient depth to represent each possible input color in a leaf.
%  However, it is impractical to generate a fully-formed color
%  description tree in the classification phase for realistic values of
%  Cmax.  If colors components in the input image are quantized to k-bit
%  precision, so that Cmax = 2k-1, the tree would need k levels below the
%  root node to allow representing each possible input color in a leaf.
%  This becomes prohibitive because the tree's total number of nodes is
%  1 + sum(i=1,k,8k).
%
%  A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  Initializes data structures for nodes only as they are needed; (2)
%  Chooses a maximum depth for the tree as a function of the desired
%  number of colors in the output image (currently log2(colormap size)).
%
%  For each pixel in the input image, classification scans downward from
% the root of the color description tree.
At each level of the tree it
% identifies the single node which represents a cube in RGB space
% containing the pixel's color.  It updates the following data for each
% such node:
%
%    n1:  Number of pixels whose color is contained in the RGB cube
%    which this node represents;
%
%    n2:  Number of pixels whose color is not represented in a node at
%    lower depth in the tree;  initially,  n2 = 0 for all nodes except
%    leaves of the tree.
%
%    Sr, Sg, Sb:  Sums of the red, green, and blue component values for
%    all pixels not classified at a lower depth.  The combination of
%    these sums and n2 will ultimately characterize the mean color of a
%    set of pixels represented by this node.
%
%    E:  the distance squared in RGB space between each pixel contained
%    within a node and the nodes' center.  This represents the quantization
%    error for a node.
%
%  The format of the ClassifyImageColors() method is:
%
%      MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
%        const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o image: the image.
%
*/

/*
  SetAssociatedAlpha() decides whether classification should fold in the
  alpha channel: only when the image has a matte channel and neither the
  transparent colorspace nor a 2-color grayscale target is requested.
*/
static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info)
{
  MagickBooleanType
    associate_alpha;

  associate_alpha=image->matte;
  if (cube_info->quantize_info->colorspace == TransparentColorspace)
    associate_alpha=MagickFalse;
  if ((cube_info->quantize_info->number_colors == 2) &&
      (cube_info->quantize_info->colorspace == GRAYColorspace))
    associate_alpha=MagickFalse;
  cube_info->associate_alpha=associate_alpha;
}

static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
  const Image *image,ExceptionInfo *exception)
{
#define ClassifyImageTag  "Classify/Image"

  CacheView
    *image_view;

  MagickBooleanType
    proceed;

  MagickRealType
    bisect;

  NodeInfo
    *node_info;

  RealPixelPacket
    error,
    mid,
    midpoint,
    pixel;

  size_t
    count,
    id,
    index,
    level;

  ssize_t
    y;

  /*
    Classify the first cube_info->maximum_colors colors to a tree depth of 8.
  */
  SetAssociatedAlpha(image,cube_info);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,
      cube_info->quantize_info->colorspace);
  else
    if ((image->colorspace != GRAYColorspace) &&
        (image->colorspace != CMYColorspace) &&
        (IsRGBColorspace(image->colorspace) == MagickFalse))
      (void) TransformImageColorspace((Image *) image,RGBColorspace);
  midpoint.red=(MagickRealType) QuantumRange/2.0;
  midpoint.green=(MagickRealType) QuantumRange/2.0;
  midpoint.blue=(MagickRealType) QuantumRange/2.0;
  midpoint.opacity=(MagickRealType) QuantumRange/2.0;
  error.opacity=0.0;
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(image,cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.  `count' batches
        runs of identical pixels.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= MaxTreeDepth; level++)
      {
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              (void) ThrowMagickException(exception,GetMagickModule(),
                ResourceLimitError,"MemoryAllocationFailed","`%s'",
                image->filename);
            if (level == MaxTreeDepth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        node_info->quantize_error+=sqrt((double) (count*error.red*error.red+
          count*error.green*error.green+count*error.blue*error.blue+
          count*error.opacity*error.opacity));
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*pixel.red;
      node_info->total_color.green+=count*QuantumScale*pixel.green;
      node_info->total_color.blue+=count*QuantumScale*pixel.blue;
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*pixel.opacity;
      p+=count;
    }
    if (cube_info->colors > cube_info->maximum_colors)
      {
        PruneToCubeDepth(image,cube_info,cube_info->root);
        break;
      }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Classify any remaining rows to the (possibly reduced) cube depth; this
    second pass differs from the first only in its depth limit.
  */
  for (y++; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    if (cube_info->nodes > MaxNodes)
      {
        /*
          Prune one level if the color tree is too large.
        */
        PruneLevel(image,cube_info,cube_info->root);
        cube_info->depth--;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count)
    {
      /*
        Start at the root and descend the color cube tree.
      */
      for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++)
        if (IsSameColor(image,p,p+count) == MagickFalse)
          break;
      AssociateAlphaPixel(cube_info,p,&pixel);
      index=MaxTreeDepth-1;
      bisect=((MagickRealType) QuantumRange+1.0)/2.0;
      mid=midpoint;
      node_info=cube_info->root;
      for (level=1; level <= cube_info->depth; level++)
      {
        bisect*=0.5;
        id=ColorToNodeId(cube_info,&pixel,index);
        mid.red+=(id & 1) != 0 ? bisect : -bisect;
        mid.green+=(id & 2) != 0 ? bisect : -bisect;
        mid.blue+=(id & 4) != 0 ? bisect : -bisect;
        mid.opacity+=(id & 8) != 0 ? bisect : -bisect;
        if (node_info->child[id] == (NodeInfo *) NULL)
          {
            /*
              Set colors of new node to contain pixel.
            */
            node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info);
            if (node_info->child[id] == (NodeInfo *) NULL)
              (void) ThrowMagickException(exception,GetMagickModule(),
                ResourceLimitError,"MemoryAllocationFailed","%s",
                image->filename);
            if (level == cube_info->depth)
              cube_info->colors++;
          }
        /*
          Approximate the quantization error represented by this node.
        */
        node_info=node_info->child[id];
        error.red=QuantumScale*(pixel.red-mid.red);
        error.green=QuantumScale*(pixel.green-mid.green);
        error.blue=QuantumScale*(pixel.blue-mid.blue);
        if (cube_info->associate_alpha != MagickFalse)
          error.opacity=QuantumScale*(pixel.opacity-mid.opacity);
        node_info->quantize_error+=sqrt((double) (count*error.red*error.red+
          count*error.green*error.green+count*error.blue*error.blue+
          count*error.opacity*error.opacity));
        cube_info->root->quantize_error+=node_info->quantize_error;
        index--;
      }
      /*
        Sum RGB for this leaf for later derivation of the mean cube color.
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*pixel.red;
      node_info->total_color.green+=count*QuantumScale*pixel.green;
      node_info->total_color.blue+=count*QuantumScale*pixel.blue;
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*pixel.opacity;
      p+=count;
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,RGBColorspace);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%   C l o n e Q u a n t i z e I n f o                                         %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneQuantizeInfo() makes a duplicate of the given quantize info structure,
%  or if quantize info is NULL, a new one.
%
%  The format of the CloneQuantizeInfo method is:
%
%      QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
%      quantize info, or if image info is NULL a new one.
%
%    o quantize_info: a structure of type info.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info));
  if (clone_info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(clone_info);
  if (quantize_info == (QuantizeInfo *) NULL)
    return(clone_info);
  /* copy the caller-visible quantization settings field by field */
  clone_info->number_colors=quantize_info->number_colors;
  clone_info->tree_depth=quantize_info->tree_depth;
  clone_info->dither=quantize_info->dither;
  clone_info->dither_method=quantize_info->dither_method;
  clone_info->colorspace=quantize_info->colorspace;
  clone_info->measure_error=quantize_info->measure_error;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
+   C l o s e s t C o l o r                                                   %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClosestColor() traverses the color cube tree at a particular node and
%  determines which colormap entry best represents the input color.
%
%  The format of the ClosestColor method is:
%
%      void ClosestColor(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      MagickRealType
        pixel;

      register MagickRealType
        alpha,
        beta,
        distance;

      register PixelPacket
        *restrict p;

      register RealPixelPacket
        *restrict q;

      /*
        Determine if this color is "closest".  The squared distance is
        accumulated channel by channel so each comparison can short-circuit
        as soon as the running total exceeds the best distance so far.
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
          beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q));
        }
      pixel=alpha*GetPixelRed(p)-beta*GetPixelRed(q);
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*GetPixelGreen(p)-beta*GetPixelGreen(q);
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*GetPixelBlue(p)-beta*GetPixelBlue(q);
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  pixel=alpha-beta;
                  distance+=pixel*pixel;
                  if (distance <= cube_info->distance)
                    {
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%   C o m p r e s s I m a g e C o l o r m a p                                 %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CompressImageColormap() compresses an image colormap by removing any
%  duplicate or unused color entries.
%
%  The format of the CompressImageColormap method is:
%
%      MagickBooleanType CompressImageColormap(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Only palette images can be compressed here.
  */
  if (IsPaletteImage(image,&image->exception) == MagickFalse)
    return(MagickFalse);
  /*
    Re-quantize to the image's current color count at maximum tree depth;
    this folds duplicate and unused entries together.
  */
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%   + D e f i n e I m a g e C o l o r m a p                                   %
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineImageColormap() traverses the color cube tree and notes each colormap
%  entry.  A colormap entry is any node in the color cube tree where the
%  of unique colors is not zero.  DefineImageColormap() returns the number of
%  colors in the image colormap.
%
%  The format of the DefineImageColormap method is:
%
%      size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
%        NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to a
%      node in the color cube tree that is to be pruned.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children (8-way without alpha, 16-way with alpha).
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register MagickRealType
        alpha;

      register PixelPacket
        *restrict q;

      /*
        Colormap entry is defined by the mean color in this cube:
        total_color accumulators divided by the unique-pixel count
        (alpha below is the reciprocal of that count).
      */
      q=image->colormap+image->colors;
      alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique);
      alpha=1.0/(fabs(alpha) <= MagickEpsilon ? 1.0 : alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.red)));
          SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.green)));
          SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.blue)));
          SetPixelOpacity(q,OpaqueOpacity);
        }
      else
        {
          MagickRealType
            opacity;

          opacity=(MagickRealType) (alpha*QuantumRange*
            node_info->total_color.opacity);
          SetPixelOpacity(q,ClampToQuantum(opacity));
          if (q->opacity == OpaqueOpacity)
            {
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.blue)));
            }
          else
            {
              MagickRealType
                gamma;

              /*
                Un-premultiply the mean color by the mean alpha (gamma is
                the reciprocal of the normalized coverage).
              */
              gamma=(MagickRealType) (QuantumScale*(QuantumRange-
                (MagickRealType) q->opacity));
              gamma=1.0/(fabs(gamma) <= MagickEpsilon ? 1.0 : gamma);
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (
                alpha*gamma*QuantumRange*node_info->total_color.blue)));
              /*
                Track the most-populated non-opaque entry as the image's
                transparent index.
              */
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%   + D e s t r o y C u b e I n f o                                           %
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyCubeInfo() deallocates memory associated with an image.
%
%  The format of the DestroyCubeInfo method is:
%
%      DestroyCubeInfo(CubeInfo *cube_info)
%
%  A description of each parameter follows:
%
%    o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *nodes;

  /*
    Release color cube tree storage: walk the queue of node arenas and
    free each arena and its node array.  NOTE(review): assumes node_queue
    is non-NULL on entry (the do/while dereferences it first) -- holds when
    GetCubeInfo succeeded.
  */
  do
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  } while (cube_info->node_queue != (Nodes *) NULL);
  if (cube_info->cache != (ssize_t *) NULL)
    cube_info->cache=(ssize_t *) RelinquishMagickMemory(cube_info->cache);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%   D e s t r o y Q u a n t i z e I n f o                                     %
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo
%  structure.
%
%  The format of the DestroyQuantizeInfo method is:
%
%      QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickSignature);
  /*
    Invalidate the signature before freeing so stale pointers are caught
    by the asserts above on double-destroy.
  */
  quantize_info->signature=(~MagickSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%   + D i t h e r I m a g e                                                   %
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color reduced algorithm to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns
%  MagickTrue if the image is dithered otherwise MagickFalse.
% % The format of the DitherImage method is: % % MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % */ static RealPixelPacket **DestroyPixelThreadSet(RealPixelPacket **pixels) { register ssize_t i; assert(pixels != (RealPixelPacket **) NULL); for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++) if (pixels[i] != (RealPixelPacket *) NULL) pixels[i]=(RealPixelPacket *) RelinquishMagickMemory(pixels[i]); pixels=(RealPixelPacket **) RelinquishMagickMemory(pixels); return(pixels); } static RealPixelPacket **AcquirePixelThreadSet(const size_t count) { RealPixelPacket **pixels; register ssize_t i; size_t number_threads; number_threads=GetOpenMPMaximumThreads(); pixels=(RealPixelPacket **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (RealPixelPacket **) NULL) return((RealPixelPacket **) NULL); (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=(RealPixelPacket *) AcquireQuantumMemory(count, 2*sizeof(**pixels)); if (pixels[i] == (RealPixelPacket *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static inline ssize_t CacheOffset(CubeInfo *cube_info, const RealPixelPacket *pixel) { #define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift))) #define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift))) #define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift))) #define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift))) ssize_t offset; offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampToUnsignedQuantum(pixel->red))) | GreenShift(ScaleQuantumToChar(ClampToUnsignedQuantum(pixel->green))) | BlueShift(ScaleQuantumToChar(ClampToUnsignedQuantum(pixel->blue)))); if (cube_info->associate_alpha != MagickFalse) offset|=AlphaShift(ScaleQuantumToChar(ClampToUnsignedQuantum( 
pixel->opacity))); return(offset); } static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info) { #define DitherImageTag "Dither/Image" CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; RealPixelPacket **pixels; ssize_t y; /* Distribute quantization error using Floyd-Steinberg. */ pixels=AcquirePixelThreadSet(image->columns); if (pixels == (RealPixelPacket **) NULL) return(MagickFalse); exception=(&image->exception); status=MagickTrue; image_view=AcquireCacheView(image); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); CubeInfo cube; RealPixelPacket *current, *previous; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; size_t index; ssize_t v; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); cube=(*cube_info); current=pixels[id]+(y & 0x01)*image->columns; previous=pixels[id]+((y+1) & 0x01)*image->columns; v=(ssize_t) ((y & 0x01) ? -1 : 1); for (x=0; x < (ssize_t) image->columns; x++) { RealPixelPacket color, pixel; register ssize_t i; ssize_t u; u=(y & 0x01) ? 
(ssize_t) image->columns-1-x : x; AssociateAlphaPixel(&cube,q+u,&pixel); if (x > 0) { pixel.red+=7*current[u-v].red/16; pixel.green+=7*current[u-v].green/16; pixel.blue+=7*current[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=7*current[u-v].opacity/16; } if (y > 0) { if (x < (ssize_t) (image->columns-1)) { pixel.red+=previous[u+v].red/16; pixel.green+=previous[u+v].green/16; pixel.blue+=previous[u+v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=previous[u+v].opacity/16; } pixel.red+=5*previous[u].red/16; pixel.green+=5*previous[u].green/16; pixel.blue+=5*previous[u].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=5*previous[u].opacity/16; if (x > 0) { pixel.red+=3*previous[u-v].red/16; pixel.green+=3*previous[u-v].green/16; pixel.blue+=3*previous[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=3*previous[u-v].opacity/16; } } pixel.red=(MagickRealType) ClampToUnsignedQuantum(pixel.red); pixel.green=(MagickRealType) ClampToUnsignedQuantum(pixel.green); pixel.blue=(MagickRealType) ClampToUnsignedQuantum(pixel.blue); if (cube.associate_alpha != MagickFalse) pixel.opacity=(MagickRealType) ClampToUnsignedQuantum(pixel.opacity); i=CacheOffset(&cube,&pixel); if (cube.cache[i] < 0) { register NodeInfo *node_info; register size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+ 1.0)+1.0); ClosestColor(image,&cube,node_info->parent); cube.cache[i]=(ssize_t) cube.color_number; } /* Assign pixel to closest colormap entry. 
*/ index=(size_t) cube.cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(indexes+u,index); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRgb(q+u,image->colormap+index); if (cube.associate_alpha != MagickFalse) SetPixelOpacity(q+u,image->colormap[index].opacity); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; /* Store the error. */ AssociateAlphaPixel(&cube,image->colormap+index,&color); current[u].red=pixel.red-color.red; current[u].green=pixel.green-color.green; current[u].blue=pixel.blue-color.blue; if (cube.associate_alpha != MagickFalse) current[u].opacity=pixel.opacity-color.opacity; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FloydSteinbergDither) #endif proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } image_view=DestroyCacheView(image_view); pixels=DestroyPixelThreadSet(pixels); return(MagickTrue); } static MagickBooleanType RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int); static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info, const size_t level,const unsigned int direction) { if (level == 1) switch (direction) { case WestGravity: { (void) RiemersmaDither(image,image_view,cube_info,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); break; } case EastGravity: { (void) RiemersmaDither(image,image_view,cube_info,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); break; } case NorthGravity: { (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); (void) 
RiemersmaDither(image,image_view,cube_info,NorthGravity); break; } case SouthGravity: { (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); break; } default: break; } else switch (direction) { case WestGravity: { Riemersma(image,image_view,cube_info,level-1,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); Riemersma(image,image_view,cube_info,level-1,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); Riemersma(image,image_view,cube_info,level-1,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); Riemersma(image,image_view,cube_info,level-1,SouthGravity); break; } case EastGravity: { Riemersma(image,image_view,cube_info,level-1,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); Riemersma(image,image_view,cube_info,level-1,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); Riemersma(image,image_view,cube_info,level-1,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); Riemersma(image,image_view,cube_info,level-1,NorthGravity); break; } case NorthGravity: { Riemersma(image,image_view,cube_info,level-1,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); Riemersma(image,image_view,cube_info,level-1,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); Riemersma(image,image_view,cube_info,level-1,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); Riemersma(image,image_view,cube_info,level-1,EastGravity); break; } case SouthGravity: { Riemersma(image,image_view,cube_info,level-1,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); Riemersma(image,image_view,cube_info,level-1,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); 
Riemersma(image,image_view,cube_info,level-1,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); Riemersma(image,image_view,cube_info,level-1,WestGravity); break; } default: break; } } static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view, CubeInfo *cube_info,const unsigned int direction) { #define DitherImageTag "Dither/Image" MagickBooleanType proceed; RealPixelPacket color, pixel; register CubeInfo *p; size_t index; p=cube_info; if ((p->x >= 0) && (p->x < (ssize_t) image->columns) && (p->y >= 0) && (p->y < (ssize_t) image->rows)) { ExceptionInfo *exception; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t i; /* Distribute error. */ exception=(&image->exception); q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickFalse); indexes=GetCacheViewAuthenticIndexQueue(image_view); AssociateAlphaPixel(cube_info,q,&pixel); for (i=0; i < ErrorQueueLength; i++) { pixel.red+=p->weights[i]*p->error[i].red; pixel.green+=p->weights[i]*p->error[i].green; pixel.blue+=p->weights[i]*p->error[i].blue; if (cube_info->associate_alpha != MagickFalse) pixel.opacity+=p->weights[i]*p->error[i].opacity; } pixel.red=(MagickRealType) ClampToUnsignedQuantum(pixel.red); pixel.green=(MagickRealType) ClampToUnsignedQuantum(pixel.green); pixel.blue=(MagickRealType) ClampToUnsignedQuantum(pixel.blue); if (cube_info->associate_alpha != MagickFalse) pixel.opacity=(MagickRealType) ClampToUnsignedQuantum(pixel.opacity); i=CacheOffset(cube_info,&pixel); if (p->cache[i] < 0) { register NodeInfo *node_info; register size_t id; /* Identify the deepest node containing the pixel's color. 
*/ node_info=p->root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(cube_info,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } node_info=node_info->parent; /* Find closest color among siblings and their children. */ p->target=pixel; p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType) QuantumRange+1.0)+1.0); ClosestColor(image,p,node_info->parent); p->cache[i]=(ssize_t) p->color_number; } /* Assign pixel to closest colormap entry. */ index=(size_t) (1*p->cache[i]); if (image->storage_class == PseudoClass) *indexes=(IndexPacket) index; if (cube_info->quantize_info->measure_error == MagickFalse) { SetPixelRgb(q,image->colormap+index); if (cube_info->associate_alpha != MagickFalse) SetPixelOpacity(q,image->colormap[index].opacity); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) return(MagickFalse); /* Propagate the error as the last entry of the error queue. */ (void) CopyMagickMemory(p->error,p->error+1,(ErrorQueueLength-1)* sizeof(p->error[0])); AssociateAlphaPixel(cube_info,image->colormap+index,&color); p->error[ErrorQueueLength-1].red=pixel.red-color.red; p->error[ErrorQueueLength-1].green=pixel.green-color.green; p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue; if (cube_info->associate_alpha != MagickFalse) p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity; proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span); if (proceed == MagickFalse) return(MagickFalse); p->offset++; } switch (direction) { case WestGravity: p->x--; break; case EastGravity: p->x++; break; case NorthGravity: p->y--; break; case SouthGravity: p->y++; break; } return(MagickTrue); } static inline ssize_t MagickMax(const ssize_t x,const ssize_t y) { if (x > y) return(x); return(y); } static inline ssize_t MagickMin(const ssize_t x,const ssize_t y) { if (x < y) return(x); return(y); } static MagickBooleanType DitherImage(Image 
*image,CubeInfo *cube_info) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t depth; if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod) return(FloydSteinbergDither(image,cube_info)); /* Distribute quantization error along a Hilbert curve. */ (void) ResetMagickMemory(cube_info->error,0,ErrorQueueLength* sizeof(*cube_info->error)); cube_info->x=0; cube_info->y=0; i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows); for (depth=1; i != 0; depth++) i>>=1; if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t) image->rows)) depth++; cube_info->offset=0; cube_info->span=(MagickSizeType) image->columns*image->rows; image_view=AcquireCacheView(image); if (depth > 1) Riemersma(image,image_view,cube_info,depth-1,NorthGravity); status=RiemersmaDither(image,image_view,cube_info,ForgetGravity); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetCubeInfo() initialize the Cube data structure. % % The format of the GetCubeInfo method is: % % CubeInfo GetCubeInfo(const QuantizeInfo *quantize_info, % const size_t depth,const size_t maximum_colors) % % A description of each parameter follows. % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o depth: Normally, this integer value is zero or one. A zero or % one tells Quantize to choose a optimal tree depth of Log4(number_colors). % A tree of this depth generally allows the best representation of the % reference image with the least amount of memory and the fastest % computational speed. In some cases, such as an image with low color % dispersion (a few number of colors), a value other than % Log4(number_colors) is required. To expand the color tree completely, % use a value of 8. 
%
%    o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  MagickRealType
    sum,
    weight;

  register ssize_t
    i;

  size_t
    length;

  /*
    Initialize tree to describe color cube_info.  NOTE(review): the failure
    paths below return NULL without releasing cube_info or its root -- a
    leak on out-of-memory; confirm whether callers are expected to recover.
  */
  cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info));
  if (cube_info == (CubeInfo *) NULL)
    return((CubeInfo *) NULL);
  (void) ResetMagickMemory(cube_info,0,sizeof(*cube_info));
  /*
    Clamp the requested depth to [2, MaxTreeDepth].
  */
  cube_info->depth=depth;
  if (cube_info->depth > MaxTreeDepth)
    cube_info->depth=MaxTreeDepth;
  if (cube_info->depth < 2)
    cube_info->depth=2;
  cube_info->maximum_colors=maximum_colors;
  /*
    Initialize root node; the root is its own parent so ClosestColor() can
    walk upward without a NULL check.
  */
  cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL);
  if (cube_info->root == (NodeInfo *) NULL)
    return((CubeInfo *) NULL);
  cube_info->root->parent=cube_info->root;
  cube_info->quantize_info=CloneQuantizeInfo(quantize_info);
  if (cube_info->quantize_info->dither == MagickFalse)
    return(cube_info);
  /*
    Initialize dither resources: the closest-color cache has one slot per
    quantized RGBA combination at CacheShift resolution.
  */
  length=(size_t) (1UL << (4*(8-CacheShift)));
  cube_info->cache=(ssize_t *) AcquireQuantumMemory(length,
    sizeof(*cube_info->cache));
  if (cube_info->cache == (ssize_t *) NULL)
    return((CubeInfo *) NULL);
  /*
    Initialize color cache (-1 marks an empty slot).
  */
  for (i=0; i < (ssize_t) length; i++)
    cube_info->cache[i]=(-1);
  /*
    Distribute weights along a curve of exponential decay.
  */
  weight=1.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[ErrorQueueLength-i-1]=1.0/weight;
    weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0));
  }
  /*
    Normalize the weighting factors so they sum to exactly 1.0 (residual
    rounding error is folded into the first weight).
  */
  weight=0.0;
  for (i=0; i < ErrorQueueLength; i++)
    weight+=cube_info->weights[i];
  sum=0.0;
  for (i=0; i < ErrorQueueLength; i++)
  {
    cube_info->weights[i]/=weight;
    sum+=cube_info->weights[i];
  }
  cube_info->weights[0]+=1.0-sum;
  return(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%   + G e t N o d e I n f o                                                   %
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNodeInfo() allocates memory for a new node in the color cube tree and
%  presets all fields to zero.
%
%  The format of the GetNodeInfo method is:
%
%      NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
%        const size_t level,NodeInfo *parent)
%
%  A description of each parameter follows.
%
%    o node: The GetNodeInfo method returns a pointer to a queue of nodes.
%
%    o id: Specifies the child number of the node.
%
%    o level: Specifies the level in the storage_class the node resides.
%
*/
static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id,
  const size_t level,NodeInfo *parent)
{
  NodeInfo
    *node_info;

  if (cube_info->free_nodes == 0)
    {
      Nodes
        *nodes;

      /*
        Allocate a new queue of nodes: nodes are carved from arena blocks
        of NodesInAList entries, linked so DestroyCubeInfo() can free them.
      */
      nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes));
      if (nodes == (Nodes *) NULL)
        return((NodeInfo *) NULL);
      nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList,
        sizeof(*nodes->nodes));
      if (nodes->nodes == (NodeInfo *) NULL)
        return((NodeInfo *) NULL);
      nodes->next=cube_info->node_queue;
      cube_info->node_queue=nodes;
      cube_info->next_node=nodes->nodes;
      cube_info->free_nodes=NodesInAList;
    }
  cube_info->nodes++;
  cube_info->free_nodes--;
  node_info=cube_info->next_node++;
  (void) ResetMagickMemory(node_info,0,sizeof(*node_info));
  node_info->parent=parent;
  node_info->id=id;
  node_info->level=level;
  return(node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%   G e t I m a g e Q u a n t i z e E r r o r                                 %
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantizeError() measures the difference between the original
%  and quantized images.  This difference is the total quantization error.
%  The error is computed by summing over all pixels in an image the distance
%  squared in RGB space between each reference pixel value and its quantized
%  value.  These values are computed:
%
%    o mean_error_per_pixel:  This value is the mean error for any single
%      pixel in the image.
%
%    o normalized_mean_square_error:  This value is the normalized mean
%      quantization error for any single pixel in the image.  This distance
%      measure is normalized to a range between 0 and 1.  It is independent
%      of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error:  This value is the normalized
%      maximum quantization error for any single pixel in the image.  This
%      distance measure is normalized to a range between 0 and 1.  It is
%      independent of the range of red, green, and blue values in your image.
%
%  The format of the GetImageQuantizeError method is:
%
%      MagickBooleanType GetImageQuantizeError(Image *image)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    *indexes;

  MagickRealType
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  size_t
    index;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
  (void) ResetMagickMemory(&image->error,0,sizeof(image->error));
  /*
    Only PseudoClass images have a colormap to compare against; a
    DirectClass image has zero quantization error by definition here.
  */
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  area=3.0*image->columns*image->rows;  /* 3 channels per pixel */
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Compare each pixel against its colormap entry, alpha-weighting
        both sides when the image has a matte channel.
      */
      index=1UL*GetPixelIndex(indexes+x);
      if (image->matte != MagickFalse)
        {
          alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
          beta=(MagickRealType) (QuantumScale*(QuantumRange-
            image->colormap[index].opacity));
        }
      distance=fabs(alpha*GetPixelRed(p)-beta*image->colormap[index].red);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs(alpha*GetPixelGreen(p)-beta*image->colormap[index].green);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs(alpha*GetPixelBlue(p)-beta*image->colormap[index].blue);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%   G e t Q u a n t i z e I n f o                                             %
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetQuantizeInfo() initializes the QuantizeInfo structure.
%
%  The format of the GetQuantizeInfo method is:
%
%      GetQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
*/
MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  /*
    Defaults: 256 colors, Riemersma dithering enabled, colorspace chosen
    by the caller, no error measurement.
  */
  (void) ResetMagickMemory(quantize_info,0,sizeof(*quantize_info));
  quantize_info->number_colors=256;
  quantize_info->dither=MagickTrue;
  quantize_info->dither_method=RiemersmaDitherMethod;
  quantize_info->colorspace=UndefinedColorspace;
  quantize_info->measure_error=MagickFalse;
  quantize_info->signature=MagickSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%   P o s t e r i z e I m a g e C h a n n e l                                 %
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PosterizeImage() reduces the image to a limited number of colors for a
%  "poster" effect.
%
%  The format of the PosterizeImage method is:
%
%      MagickBooleanType PosterizeImage(Image *image,const size_t levels,
%        const MagickBooleanType dither)
%      MagickBooleanType PosterizeImageChannel(Image *image,
%        const ChannelType channel,const size_t levels,
%        const MagickBooleanType dither)
%
%  A description of each parameter follows:
%
%    o image: Specifies a pointer to an Image structure.
%
%    o levels: Number of color levels allowed in each channel.  Very low values
%      (2, 3, or 4) have the most visible effect.
%
%    o dither: Set this integer value to something other than zero to dither
%      the mapped image.
%
*/
static inline ssize_t MagickRound(MagickRealType x)
{
  /*
    Round the fraction to nearest integer (half away from zero).
  */
  if (x >= 0.0)
    return((ssize_t) (x+0.5));
  return((ssize_t) (x-0.5));
}

MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels,
  const MagickBooleanType dither)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: posterize all default channels.
  */
  status=PosterizeImageChannel(image,DefaultChannels,levels,dither);
  return(status);
}

MagickExport MagickBooleanType PosterizeImageChannel(Image *image,
  const ChannelType channel,const size_t levels,const MagickBooleanType dither)
{
#define PosterizeImageTag  "Posterize/Image"
#define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \
  QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1))

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  QuantizeInfo
    *quantize_info;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Posterize colormap.  Note: the OpenMP-parallelized for loop below is
    the body of this if statement.
  */
  if (image->storage_class == PseudoClass)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      if ((channel & RedChannel) != 0)
        image->colormap[i].red=PosterizePixel(image->colormap[i].red);
      if ((channel & GreenChannel) != 0)
        image->colormap[i].green=PosterizePixel(image->colormap[i].green);
      if ((channel & BlueChannel) != 0)
        image->colormap[i].blue=PosterizePixel(image->colormap[i].blue);
      if ((channel & OpacityChannel) != 0)
        image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity);
    }
  /*
    Posterize image: snap each selected channel of every pixel to the
    nearest of `levels` evenly spaced values.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PosterizePixel(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PosterizePixel(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PosterizePixel(GetPixelBlue(q)));
      if (((channel & OpacityChannel) != 0) &&
          (image->matte == MagickTrue))
        SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PosterizeImageChannel)
#endif
        proceed=SetImageProgress(image,PosterizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Re-quantize to collapse the posterized values into a palette of at
    most levels^3 colors (capped at MaxColormapSize+1).
  */
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither=dither;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%   + P r u n e C h i l d                                                     %
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneChild() deletes the given node and merges its statistics into its
%  parent.
%
%  The format of the PruneSubtree method is:
%
%      PruneChild(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children (8-way without alpha, 16-way with alpha).
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(image,cube_info,node_info->child[i]);
  /*
    Merge color statistics into parent and unlink this node (node storage
    itself is arena-allocated and freed later by DestroyCubeInfo).
  */
  parent=node_info->parent;
  parent->number_unique+=node_info->number_unique;
  parent->total_color.red+=node_info->total_color.red;
  parent->total_color.green+=node_info->total_color.green;
  parent->total_color.blue+=node_info->total_color.blue;
  parent->total_color.opacity+=node_info->total_color.opacity;
  parent->child[node_info->id]=(NodeInfo *) NULL;
  cube_info->nodes--;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%   + P r u n e L e v e l                                                     %
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneLevel() deletes all nodes at the bottom level of the color tree merging
%  their color statistics into their parent node.
%
%  The format of the PruneLevel method is:
%
%      PruneLevel(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneLevel(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children, then prune this node only if it sits at the
    cube's maximum depth.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneLevel(image,cube_info,node_info->child[i]);
  if (node_info->level == cube_info->depth)
    PruneChild(image,cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%   + P r u n e T o C u b e D e p t h                                         %
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneToCubeDepth() deletes any nodes at a depth greater than
%  cube_info->depth while merging their color statistics into their parent
%  node.
%
%  The format of the PruneToCubeDepth method is:
%
%      PruneToCubeDepth(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneToCubeDepth(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneToCubeDepth(image,cube_info,node_info->child[i]);
  /* Any node deeper than the requested cube depth is merged into its
     parent; PruneChild recurses, so the whole subtree collapses. */
  if (node_info->level > cube_info->depth)
    PruneChild(image,cube_info,node_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  Q u a n t i z e I m a g e                                                  %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImage() analyzes the colors within a reference image and chooses a
%  fixed number of colors to represent the image.  The goal of the algorithm
%  is to minimize the color difference between the input and output image while
%  minimizing the processing time.
%
%  The format of the QuantizeImage method is:
%
%      MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
%        Image *image)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
*/

/*
  DirectToColormapImage(): shortcut used when the image has no more pixels
  than the requested number of colors; each pixel simply becomes its own
  colormap entry, so no tree-based quantization is needed.  Returns
  MagickTrue on success.
*/
static MagickBooleanType DirectToColormapImage(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    number_colors;

  ssize_t
    y;

  status=MagickTrue;
  number_colors=(size_t) (image->columns*image->rows);
  if (AcquireImageColormap(image,number_colors) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  if (image->colors != number_colors)
    return(MagickFalse);
  /* i is the running colormap slot; one slot per pixel in scanline order. */
  i=0;
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      proceed;

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      image->colormap[i].red=GetPixelRed(q);
      image->colormap[i].green=GetPixelGreen(q);
      image->colormap[i].blue=GetPixelBlue(q);
      image->colormap[i].opacity=GetPixelOpacity(q);
      SetPixelIndex(indexes+x,i);
      i++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info,
  Image *image)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  size_t
    depth,
    maximum_colors;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* 0 means "no limit requested"; clamp to the build's colormap capacity. */
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  /* Fast paths: tiny images map one pixel per colormap slot; opaque gray
     images are collapsed by intensity.  Failures here are deliberately
     ignored and the general quantizer below still runs. */
  if ((image->columns*image->rows) <= maximum_colors)
    (void) DirectToColormapImage(image,&image->exception);
  if ((IsGrayImage(image,&image->exception) != MagickFalse) &&
      (image->matte == MagickFalse))
    (void) SetGrayscaleImage(image);
  if ((image->storage_class == PseudoClass) &&
      (image->colors <= maximum_colors))
    return(MagickTrue);
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* Dithering and matte handling tolerate a shallower (cheaper) tree,
         but never below depths 2 and 5 respectively. */
      if ((quantize_info->dither != MagickFalse) && (depth > 2))
        depth--;
      if ((image->matte != MagickFalse) && (depth > 5))
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in the image.
      */
      ReduceImageColors(image,cube_info);
      status=AssignImageColors(image,cube_info);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  Q u a n t i z e I m a g e s                                                %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QuantizeImages() analyzes the colors within a set of reference images and
%  chooses a fixed number of colors to represent the set.  The goal of the
%  algorithm is to minimize the color difference between the input and output
%  images while minimizing the processing time.
%
%  The format of the QuantizeImages method is:
%
%      MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
%        Image *images)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: Specifies a pointer to a list of Image structures.
%
*/
MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info,
  Image *images)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    proceed,
    status;

  MagickProgressMonitor
    progress_monitor;

  register ssize_t
    i;

  size_t
    depth,
    maximum_colors,
    number_images;

  assert(quantize_info != (const QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  if (GetNextImageInList(images) == (Image *) NULL)
    {
      /*
        Handle a single image with QuantizeImage.
      */
      status=QuantizeImage(quantize_info,images);
      return(status);
    }
  status=MagickFalse;
  maximum_colors=quantize_info->number_colors;
  if (maximum_colors == 0)
    maximum_colors=MaxColormapSize;
  if (maximum_colors > MaxColormapSize)
    maximum_colors=MaxColormapSize;
  depth=quantize_info->tree_depth;
  if (depth == 0)
    {
      size_t
        colors;

      /*
        Depth of color tree is: Log4(colormap size)+2.
      */
      colors=maximum_colors;
      for (depth=1; colors != 0; depth++)
        colors>>=2;
      /* NOTE(review): unlike QuantizeImage(), this decrement has no
         (depth > 2) guard — presumably intentional for sequences, but
         worth confirming against upstream. */
      if (quantize_info->dither != MagickFalse)
        depth--;
    }
  /*
    Initialize color cube.
  */
  cube_info=GetCubeInfo(quantize_info,depth,maximum_colors);
  if (cube_info == (CubeInfo *) NULL)
    {
      (void) ThrowMagickException(&images->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(MagickFalse);
    }
  number_images=GetImageListLength(images);
  image=images;
  /* Pass 1: classify the colors of every frame into one shared cube.  The
     per-frame progress monitor is suspended so only the sequence-level
     progress below is reported. */
  for (i=0; image != (Image *) NULL; i++)
  {
    progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,
      image->client_data);
    status=ClassifyImageColors(cube_info,image,&image->exception);
    if (status == MagickFalse)
      break;
    (void) SetImageProgressMonitor(image,progress_monitor,image->client_data);
    proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
      number_images);
    if (proceed == MagickFalse)
      break;
    image=GetNextImageInList(image);
  }
  if (status != MagickFalse)
    {
      /*
        Reduce the number of colors in an image sequence.
      */
      ReduceImageColors(images,cube_info);
      /* Pass 2: remap every frame to the shared, reduced colormap. */
      image=images;
      for (i=0; image != (Image *) NULL; i++)
      {
        progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor)
          NULL,image->client_data);
        status=AssignImageColors(image,cube_info);
        if (status == MagickFalse)
          break;
        (void) SetImageProgressMonitor(image,progress_monitor,
          image->client_data);
        proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i,
          number_images);
        if (proceed == MagickFalse)
          break;
        image=GetNextImageInList(image);
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e d u c e                                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Reduce() traverses the color cube tree and prunes any node whose
%  quantization error falls below a particular threshold.
%
%  The format of the Reduce method is:
%
%      Reduce(const Image *image,CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      Reduce(image,cube_info,node_info->child[i]);
  /* Prune this node if its error is within the current threshold; otherwise
     count it as a surviving color and remember the smallest error seen so
     the caller can tighten the threshold for the next pass. */
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    PruneChild(image,cube_info,node_info);
  else
    {
      /*
        Find minimum pruning threshold.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e d u c e I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReduceImageColors() repeatedly prunes the tree until the number of nodes
%  with n2 > 0 is less than or equal to the maximum number of colors allowed
%  in the output image.  On any given iteration over the tree, it selects
%  those nodes whose E value is minimal for pruning and merges their
%  color statistics upward.  It uses a pruning threshold, Ep, to govern
%  node selection as follows:
%
%    Ep = 0
%    while number of nodes with (n2 > 0) > required maximum number of colors
%      prune all nodes such that E <= Ep
%      Set Ep to minimum E in remaining nodes
%
%  This has the effect of minimizing any quantization error when merging
%  two nodes together.
%
%  When a node to be pruned has offspring, the pruning procedure invokes
%  itself recursively in order to prune the tree from the leaves upward.
%  n2,  Sr, Sg,  and  Sb in a node being pruned are always added to the
%  corresponding data in that node's parent.  This retains the pruned
%  node's color characteristics for later averaging.
%
%  For each node, n2 pixels exist for which that node represents the
%  smallest volume in RGB space containing those pixel's colors.  When n2
%  > 0 the node will uniquely define a color in the output image. At the
%  beginning of reduction, n2 = 0  for all nodes except a the leaves of
%  the tree which represent colors present in the input image.
%
%  The other pixel count, n1, indicates the total number of colors
%  within the cubic volume which the node represents.  This includes n1 -
%  n2  pixels whose colors should be defined by nodes at a lower level in
%  the tree.
%
%  The format of the ReduceImageColors method is:
%
%      ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/
static void ReduceImageColors(const Image *image,CubeInfo *cube_info)
{
#define ReduceImageTag  "Reduce/Image"

  MagickBooleanType
    proceed;

  MagickOffsetType
    offset;

  size_t
    span;

  cube_info->next_threshold=0.0;
  /* span is the initial color count; progress is reported as colors pruned
     relative to that starting point. */
  for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; )
  {
    /* Each pass prunes every node at or below the previous minimum error,
       then Reduce() recomputes colors and the next (tighter) threshold. */
    cube_info->pruning_threshold=cube_info->next_threshold;
    cube_info->next_threshold=cube_info->root->quantize_error-1;
    cube_info->colors=0;
    Reduce(image,cube_info,cube_info->root);
    offset=(MagickOffsetType) span-cube_info->colors;
    proceed=SetImageProgress(image,ReduceImageTag,offset,span-
      cube_info->maximum_colors+1);
    if (proceed == MagickFalse)
      break;
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m a p I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemapImage() replaces the colors of an image with the closest color from
%  a reference image.
%
%  The format of the RemapImage method is:
%
%      MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
%        Image *image,const Image *remap_image)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o image: the image.
%
%    o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info,
  Image *image,const Image *remap_image)
{
  CubeInfo
    *cube_info;

  MagickBooleanType
    status;

  /*
    Initialize color cube.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(remap_image != (Image *) NULL);
  assert(remap_image->signature == MagickSignature);
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* The cube is populated from the REFERENCE image; the target image is
     then mapped onto that fixed palette. */
  status=ClassifyImageColors(cube_info,remap_image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Classify image colors from the reference image.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      status=AssignImageColors(image,cube_info);
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e m a p I m a g e s                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RemapImages() replaces the colors of a sequence of images with the
%  closest color from a reference image.
%
%  The format of the RemapImage method is:
%
%      MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
%        Image *images,Image *remap_image)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to an QuantizeInfo structure.
%
%    o images: the image sequence.
%
%    o remap_image: the reference image.
%
*/
MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info,
  Image *images,const Image *remap_image)
{
  CubeInfo
    *cube_info;

  Image
    *image;

  MagickBooleanType
    status;

  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  image=images;
  if (remap_image == (Image *) NULL)
    {
      /*
        Create a global colormap for an image sequence.
      */
      status=QuantizeImages(quantize_info,images);
      return(status);
    }
  /*
    Classify image colors from the reference image.
  */
  cube_info=GetCubeInfo(quantize_info,MaxTreeDepth,
    quantize_info->number_colors);
  if (cube_info == (CubeInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=ClassifyImageColors(cube_info,remap_image,&image->exception);
  if (status != MagickFalse)
    {
      /*
        Classify image colors from the reference image.
      */
      cube_info->quantize_info->number_colors=cube_info->colors;
      /* Map every frame of the sequence onto the reference palette; stop at
         the first failure and report it. */
      image=images;
      for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
      {
        status=AssignImageColors(image,cube_info);
        if (status == MagickFalse)
          break;
      }
    }
  DestroyCubeInfo(cube_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t G r a y s c a l e I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetGrayscaleImage() converts an image to a PseudoClass grayscale image.
%
%  The format of the SetGrayscaleImage method is:
%
%      MagickBooleanType SetGrayscaleImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: The image.
% */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { PixelPacket *color_1, *color_2; ssize_t intensity; color_1=(PixelPacket *) x; color_2=(PixelPacket *) y; intensity=PixelIntensityToQuantum(color_1)-(ssize_t) PixelIntensityToQuantum(color_2); return((int) intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType SetGrayscaleImage(Image *image) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; PixelPacket *colormap; register ssize_t i; ssize_t *colormap_index, j, y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->type != GrayscaleType) (void) TransformImageColorspace(image,GRAYColorspace); colormap_index=(ssize_t *) AcquireQuantumMemory(MaxMap+1, sizeof(*colormap_index)); if (colormap_index == (ssize_t *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { ExceptionInfo *exception; for (i=0; i <= (ssize_t) MaxMap; i++) colormap_index[i]=(-1); if (AcquireImageColormap(image,MaxMap+1) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); image->colors=0; status=MagickTrue; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register const PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { register size_t intensity; intensity=ScaleQuantumToMap(GetPixelRed(q)); if (colormap_index[intensity] < 0) 
{ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=GetPixelRed(q); image->colormap[image->colors].green=GetPixelGreen(q); image->colormap[image->colors].blue=GetPixelBlue(q); image->colors++; } } SetPixelIndex(indexes+x,colormap_index[intensity]); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].opacity=(unsigned short) i; qsort((void *) image->colormap,image->colors,sizeof(PixelPacket), IntensityCompare); colormap=(PixelPacket *) AcquireQuantumMemory(image->colors, sizeof(*colormap)); if (colormap == (PixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse) { j++; colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) image->colormap[i].opacity]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register const PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) 
SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex( indexes+x))]); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (IsMonochromeImage(image,&image->exception) != MagickFalse) image->type=BilevelType; return(status); }
calculate_discontinuous_distance_to_skin_process.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Pooyan Dadvand // Ruben Zorrilla // #if !defined(KRATOS_CALCULATE_DISCONTINUOUS_DISTANCE_TO_SKIN_PROCESS_H_INCLUDED ) #define KRATOS_CALCULATE_DISCONTINUOUS_DISTANCE_TO_SKIN_PROCESS_H_INCLUDED // System includes #include <string> #include <iostream> // External includes // Project includes #include "geometries/plane_3d.h" #include "includes/checks.h" #include "processes/process.h" #include "processes/find_intersected_geometrical_objects_process.h" namespace Kratos { ///@addtogroup Kratos Core ///@{ ///@name Kratos Classes ///@{ /// This only calculates the distance. Calculating the inside outside should be done by a derived class of this. /** This process takes a volume model part (with tetrahedra mesh) and a skin model part (with triangle mesh) and and calcualtes the distance to the skin for all the elements and nodes of the volume model part. */ template<std::size_t TDim = 3> class KRATOS_API(KRATOS_CORE) CalculateDiscontinuousDistanceToSkinProcess : public Process { public: ///@name Type Definitions ///@{ /// Pointer definition of CalculateDiscontinuousDistanceToSkinProcess KRATOS_CLASS_POINTER_DEFINITION(CalculateDiscontinuousDistanceToSkinProcess); ///@} ///@name Life Cycle ///@{ /// Constructor to be used. CalculateDiscontinuousDistanceToSkinProcess( ModelPart& rVolumePart, ModelPart& rSkinPart); /// Destructor. ~CalculateDiscontinuousDistanceToSkinProcess() override; ///@} ///@name Deleted ///@{ /// Default constructor. CalculateDiscontinuousDistanceToSkinProcess() = delete; /// Copy constructor. CalculateDiscontinuousDistanceToSkinProcess(CalculateDiscontinuousDistanceToSkinProcess const& rOther) = delete; /// Assignment operator. 
CalculateDiscontinuousDistanceToSkinProcess& operator=(CalculateDiscontinuousDistanceToSkinProcess const& rOther) = delete; FindIntersectedGeometricalObjectsProcess mFindIntersectedObjectsProcess; ///@} ///@name Operations ///@{ /** * @brief Initializes discontinuous distance computation process * This method initializes the TO_SPLIT flag, the DISTANCE and * ELEMENTAL_DISTANCES variables as well as the EMBEDDED_VELOCITY */ virtual void Initialize(); /** * @brief Calls the FindIntersectedObjectsProcess to find the intersections * This method calls the FindIntersectedObjectsProcess FindIntersections method. */ virtual void FindIntersections(); /** * @brief Get the array containing the intersecting objects * This method returns an array containing pointers to the intersecting geometries * @return std::vector<PointerVector<GeometricalObject>>& */ virtual std::vector<PointerVector<GeometricalObject>>& GetIntersections(); /** * @brief Computes the elemental distance values * Given an intersecting objects vector, this method computes the elemental distance field * @param rIntersectedObjects array containing pointers to the intersecting geometries */ virtual void CalculateDistances(std::vector<PointerVector<GeometricalObject>>& rIntersectedObjects); /** * @brief Calls the FindIntersectedObjects Clear() method * This method calls the FindIntersectedObjects Clear() to empty the intersecting objects geometries array */ void Clear() override; /** * @brief Executes the CalculateDiscontinuousDistanceToSkinProcess * This method automatically does all the calls required to compute the discontinuous distance function. 
*/ void Execute() override; /** * @brief Calculate embedded variable from skin double specialization * This method calls the specialization method for two double variables * @param rVariable origin double variable in the skin mesh * @param rEmbeddedVariable elemental double variable in the volume mesh to be computed */ void CalculateEmbeddedVariableFromSkin( const Variable<double> &rVariable, const Variable<double> &rEmbeddedVariable); /** * @brief Calculate embedded variable from skin array specialization * This method calls the specialization method for two double variables * @param rVariable origin array variable in the skin mesh * @param rEmbeddedVariable elemental array variable in the volume mesh to be computed */ void CalculateEmbeddedVariableFromSkin( const Variable<array_1d<double,3>> &rVariable, const Variable<array_1d<double,3>> &rEmbeddedVariable); ///@} ///@name Access ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override; /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override; /// Print object's data. void PrintData(std::ostream& rOStream) const override; ///@} protected: ///@name Protected Operations ///@{ /** * @brief Set the Intersection Plane object * This method returns the plane that defines the element intersection. The 2D * case is considered to be a simplification of the 3D one, so a "fake" extra * point is created by extruding the first point in the z-direction. 
     * @param rIntPtsVector array containing the intersecting points coordinates
     * @return Plane3D the plane defined by the given intersecting points coordinates
     */
    Plane3D SetIntersectionPlane(const std::vector<array_1d<double,3>> &rIntPtsVector);

    /**
     * @brief Calculates the domain characteristic length
     * This method computes the domain characteristic length as the norm of
     * the diagonal vector that joins the maximum and minimum coordinates
     * @return double the calculated characteristic length
     */
    double CalculateCharacteristicLength();

    ///@}

private:
    ///@name Member Variables
    ///@{

    ModelPart& mrSkinPart;   // skin (embedded) model part; distances are measured to its geometry
    ModelPart& mrVolumePart; // background volume model part whose elements get ELEMENTAL_DISTANCES

    ///@}
    ///@name Private Operations
    ///@{

    /**
     * @brief Computes the discontinuous distance in one element
     * This method computes the discontinuous distance field for a given element
     * @param rElement1 reference to the element of interest
     * @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries
     */
    void CalculateElementalDistances(
        Element& rElement1,
        PointerVector<GeometricalObject>& rIntersectedObjects);

    /**
     * @brief Computes the edges intersections in one element
     * Provided a list of elemental intersecting geometries, this
     * method computes the edge intersections for a given element
     * @param rElement1 reference to the element of interest
     * @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries
     * @param rCutEdgesVector array that classifies the edges depending on their cut / uncut status
     * @param rIntersectionPointsArray array containing the edges intersection points
     * @return unsigned int number of cut edges
     */
    unsigned int ComputeEdgesIntersections(
        Element& rElement1,
        const PointerVector<GeometricalObject>& rIntersectedObjects,
        std::vector<unsigned int> &rCutEdgesVector,
        std::vector<array_1d <double,3> > &rIntersectionPointsArray);

    /**
     * @brief Computes the intersection of a single edge
     * This method computes the intersection of a given edge with the candidate
     * intersecting geometry. This operation is performed accordingly to the working
     * space dimension using the intersection utilities implemented in intersection_utilities.h
     * @param rIntObjGeometry candidate intersecting geometry
     * @param rEdgePoint1 edge origin point
     * @param rEdgePoint2 edge end point
     * @param rIntersectionPoint intersection point
     * @return int type of intersection id (see intersection_utilities.h)
     */
    int ComputeEdgeIntersection(
        const Element::GeometryType& rIntObjGeometry,
        const Element::NodeType& rEdgePoint1,
        const Element::NodeType& rEdgePoint2,
        Point& rIntersectionPoint);

    /**
     * @brief Computes the element intersection unit normal
     * This method computes the element intersection unit normal vector using the distance function gradient.
     * @param rGeometry reference to the geometry of the element of interest
     * @param rElementalDistances array containing the ELEMENTAL_DISTANCES values
     * @param rNormal obtained unit normal vector
     */
    void ComputeIntersectionNormal(
        Element::GeometryType& rGeometry,
        const Vector& rElementalDistances,
        array_1d<double,3> &rNormal);

    /**
     * @brief Computes the intersection plane approximation
     * For complex intersection patterns, this method takes a list containing
     * all the intersecting points and computes the plane that minimizes the
     * distance from all these points in a least squares sense. The approximated
     * plane is defined in terms of an origin point and its normal vector.
     * @param rElement1 reference to the element of interest
     * @param rPointsCoord list containing the coordinates of all the intersecting points
     * @param rPlaneBasePointCoords base point defining the approximated plane
     * @param rPlaneNormal normal vector defining the approximated plane
     */
    void ComputePlaneApproximation(
        const Element& rElement1,
        const std::vector< array_1d<double,3> >& rPointsCoord,
        array_1d<double,3>& rPlaneBasePointCoords,
        array_1d<double,3>& rPlaneNormal);

    /**
     * @brief Checks (and corrects if needed) the intersection normal orientation
     * This method checks the orientation of the previously computed intersection normal.
     * To do that, the normal vector to each one of the intersecting geometries is
     * computed and its direction is compared against the current one. If the negative
     * votes win, the current normal vector orientation is switched.
     * @param rGeometry element of interest geometry
     * @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries
     * @param rElementalDistances array containing the ELEMENTAL_DISTANCES values
     */
    void CorrectDistanceOrientation(
        Element::GeometryType& rGeometry,
        const PointerVector<GeometricalObject>& rIntersectedObjects,
        Vector& rElementalDistances);

    /**
     * @brief Computes the normal vector to an intersecting object geometry
     * This method computes the normal vector to an intersecting object geometry.
     * @param rGeometry reference to the geometry of the intersecting object
     * @param rIntObjNormal reference to the intersecting object normal vector
     */
    void inline ComputeIntersectionNormalFromGeometry(
        const Element::GeometryType &rGeometry,
        array_1d<double,3> &rIntObjNormal);

    /**
     * @brief Computes the value of any embedded variable
     * For a given array variable in the skin mesh, this method calculates the value
     * of such variable in the embedded mesh. This is done in each element of the volume
     * mesh by computing the average value of all the edges intersections.
     * This value is averaged again according to the number of intersected edges.
     * @tparam TVarType variable type
     * @param rVariable origin variable in the skin mesh
     * @param rEmbeddedVariable elemental variable in the volume mesh to be computed
     */
    template<class TVarType>
    void CalculateEmbeddedVariableFromSkinSpecialization(
        const Variable<TVarType> &rVariable,
        const Variable<TVarType> &rEmbeddedVariable)
    {
        const auto &r_int_obj_vect= this->GetIntersections();
        const int n_elems = mrVolumePart.NumberOfElements();

        // Fail fast if the skin mesh cannot provide the origin variable
        KRATOS_ERROR_IF((mrSkinPart.NodesBegin())->SolutionStepsDataHas(rVariable) == false)
            << "Skin model part solution step data missing variable: " << rVariable << std::endl;

        // Initialize embedded variable value (zero of the variable type) in all volume elements
        #pragma omp parallel for
        for (int i_elem = 0; i_elem < n_elems; ++i_elem) {
            auto it_elem = mrVolumePart.ElementsBegin() + i_elem;
            it_elem->SetValue(rEmbeddedVariable, rEmbeddedVariable.Zero());
        }

        // Compute the embedded variable value for each element
        // NOTE(review): dynamic scheduling because the work per element depends on
        // the (highly variable) number of intersection candidates.
        #pragma omp parallel for schedule(dynamic)
        for (int i_elem = 0; i_elem < n_elems; ++i_elem) {
            // Check if the current element has intersecting entities
            if (r_int_obj_vect[i_elem].size() != 0) {
                // Initialize the element values
                unsigned int n_int_edges = 0;
                auto it_elem = mrVolumePart.ElementsBegin() + i_elem;
                auto &r_geom = it_elem->GetGeometry();
                const auto edges = r_geom.GenerateEdges();

                // Loop the element of interest edges
                for (unsigned int i_edge = 0; i_edge < r_geom.EdgesNumber(); ++i_edge) {
                    // Initialize edge values
                    unsigned int n_int_obj = 0;
                    TVarType i_edge_val = rEmbeddedVariable.Zero();

                    // Check the edge intersection against all the candidates
                    for (auto &r_int_obj : r_int_obj_vect[i_elem]) {
                        Point intersection_point;
                        const int is_intersected = this->ComputeEdgeIntersection(
                            r_int_obj.GetGeometry(),
                            edges[i_edge][0],
                            edges[i_edge][1],
                            intersection_point);

                        // Compute the variable value in the intersection point
                        // (interpolated with the intersecting geometry shape functions)
                        if (is_intersected == 1) {
                            n_int_obj++;
                            array_1d<double,3> local_coords;
                            r_int_obj.GetGeometry().PointLocalCoordinates(local_coords, intersection_point);
                            Vector int_obj_N;
                            r_int_obj.GetGeometry().ShapeFunctionsValues(int_obj_N, local_coords);
                            for (unsigned int i_node = 0; i_node < r_int_obj.GetGeometry().PointsNumber(); ++i_node) {
                                i_edge_val += r_int_obj.GetGeometry()[i_node].FastGetSolutionStepValue(rVariable) * int_obj_N[i_node];
                            }
                        }
                    }

                    // Check if the edge is intersected
                    if (n_int_obj != 0) {
                        // Update the element intersected edges counter
                        n_int_edges++;
                        // Add the average edge value (there might exist cases in where
                        // more than one geometry intersects the edge of interest).
                        it_elem->GetValue(rEmbeddedVariable) += i_edge_val / n_int_obj;
                    }
                }

                // Average between all the intersected edges
                if (n_int_edges != 0) {
                    it_elem->GetValue(rEmbeddedVariable) /= n_int_edges;
                }
            }
        }
    };

    ///@}

}; // Class CalculateDiscontinuousDistanceToSkinProcess

///@}
///@name Input and output
///@{

/// input stream function
inline std::istream& operator >> (
    std::istream& rIStream,
    CalculateDiscontinuousDistanceToSkinProcess<>& rThis);

/// output stream function
inline std::ostream& operator << (
    std::ostream& rOStream,
    const CalculateDiscontinuousDistanceToSkinProcess<>& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);

    return rOStream;
}

///@}

///@} addtogroup block

} // namespace Kratos.

#endif // KRATOS_CALCULATE_DISCONTINUOUS_DISTANCE_TO_SKIN_PROCESS_H_INCLUDED  defined
abs.h
#pragma once #include <vector> #include <unordered_map> #include <algorithm> #include <cmath> #include <omp.h> #include "_cuda.h" using std::vector; using std::unordered_map; using std::max; using std::abs; // ABS // --- template <class T> void abs(T *a, int N) { for (int i=0; i<N; i++) a[i] = abs(a[i]); } template <class T> void abs(vector<T>& a) { abs(a.begin(), a.end()); } template <class K, class T> void abs(unordered_map<K, T>& a) { for (auto& p : a) p.second = abs(p.second); } // ABS-AT // ------ template <class T, class I> void absAt(T *a, I&& is) { for (int i : is) a[i] = abs(a[i]); } template <class T, class I> void absAt(vector<T>& a, I&& is) { absAt(a.data(), is); } template <class K, class T, class I> void absAt(unordered_map<K, T>& a, I&& ks) { for (auto&& k : ks) a[k] = abs(a[k]); } // ABS (OMP) // --------- template <class T> void absOmp(T *a, int N) { #pragma omp parallel for for (int i=0; i<N; i++) a[i] = abs(a[i]); } template <class T> void fillOmp(vector<T>& a) { absOmp(a.data(), a.size()); } // ABS (CUDA) // ---------- template <class T> __device__ void absKernelLoop(T *a, int N, int i, int DI) { for (; i<N; i+=DI) a[i] = abs(a[i]); } template <class T> __global__ void absKernel(T *a, int N) { DEFINE(t, b, B, G); absKernelLoop(a, N, B*b+t, G*B); } template <class T> void absCuda(T *a, int N) { int B = BLOCK_DIM; int G = min(ceilDiv(N, B), GRID_DIM); size_t N1 = N * sizeof(T); T *aD; TRY( cudaMalloc(&aD, N1) ); TRY( cudaMemcpy(aD, a, N1, cudaMemcpyHostToDevice) ); absKernel<<<G, B>>>(aD, N); TRY( cudaMemcpy(a, aD, N1, cudaMemcpyDeviceToHost) ); TRY( cudaFree(aD) ); } template <class T> void absCuda(vector<T>& a) { absCuda(a.data(), a.size()); }
GB_binop__second_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__second_uint64) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__second_uint64) // A.*B function (eWiseMult): GB (_AemultB_03__second_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__second_uint64) // A*D function (colscale): GB (_AxD__second_uint64) // D*A function (rowscale): GB (_DxB__second_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__second_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__second_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_uint64) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB (_bind2nd__second_uint64) // C=A'+scalar GB (_bind2nd_tran__second_uint64) // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = bij #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax 
[pA] #define GB_GETA(aij,Ax,pA) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = y ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 1 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SECOND || GxB_NO_UINT64 || GxB_NO_SECOND_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__second_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__second_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__second_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__second_uint64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__second_uint64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__second_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__second_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const 
int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__second_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__second_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__second_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = Bx [p] ; Cx [p] = bij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__second_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = y ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = Ax [pA] ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = y ; \ } GrB_Info GB (_bind2nd_tran__second_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
internal-parallel.h
/*
 * returns index of the last item satisfying
 *  [item] < P,
 *
 * returns -1 if no item is < P (i.e. [all] >= P)
 *
 * Items are compared through d->radix / d->compar; the array is assumed
 * sorted under that ordering (a standard bisection invariant).
 */
static ptrdiff_t
_bsearch_last_lt(void * P,
    void * base, size_t nmemb,
    struct crstruct * d)
{
    if (nmemb == 0) return -1;

    /* VLA scratch buffer for one radix key */
    unsigned char tmpradix[d->rsize];
    ptrdiff_t left = 0;
    ptrdiff_t right = nmemb - 1;

    /* first item already >= P: nothing qualifies */
    d->radix((char*) base, tmpradix, d->arg);
    if(d->compar(tmpradix, P, d->rsize) >= 0) {
        return - 1;
    }
    /* last item still < P: everything qualifies */
    d->radix((char*) base + right * d->size, tmpradix, d->arg);
    if(d->compar(tmpradix, P, d->rsize) < 0) {
        return nmemb - 1;
    }

    /* left <= i <= right*/
    /* [left] < P <= [right] */
    while(right > left + 1) {
        ptrdiff_t mid = ((right - left + 1) >> 1) + left;
        d->radix((char*) base + mid * d->size, tmpradix, d->arg);
        /* if [mid] < P , move left to mid */
        /* if [mid] >= P , move right to mid */
        int c1 = d->compar(tmpradix, P, d->rsize);
        if(c1 < 0) {
            left = mid;
        } else {
            right = mid;
        }
    }
    return left;
}

/*
 * returns index of the last item satisfying
 *  [item] <= P,
 *
 * returns -1 if no item is <= P (i.e. [all] > P)
 */
static ptrdiff_t
_bsearch_last_le(void * P,
    void * base, size_t nmemb,
    struct crstruct * d)
{
    if (nmemb == 0) return -1;

    unsigned char tmpradix[d->rsize];
    ptrdiff_t left = 0;
    ptrdiff_t right = nmemb - 1;

    /* first item already > P: nothing qualifies */
    d->radix((char*) base, tmpradix, d->arg);
    if(d->compar(tmpradix, P, d->rsize) > 0) {
        return -1;
    }
    /* last item still <= P: everything qualifies */
    d->radix((char*) base + right * d->size, tmpradix, d->arg);
    if(d->compar(tmpradix, P, d->rsize) <= 0) {
        return nmemb - 1;
    }

    /* left <= i <= right*/
    /* [left] <= P < [right] */
    while(right > left + 1) {
        ptrdiff_t mid = ((right - left + 1) >> 1) + left;
        d->radix((char*) base + mid * d->size, tmpradix, d->arg);
        /* if [mid] <= P , move left to mid */
        /* if [mid] > P , move right to mid*/
        int c1 = d->compar(tmpradix, P, d->rsize);
        if(c1 <= 0) {
            left = mid;
        } else {
            right = mid;
        }
    }
    return left;
}

/*
 * do a histogram of mybase, based on bins defined in P.
 * P is an array of radix of length Plength,
 * myCLT, myCLE are of length Plength + 2
 *
 * myCLT[i + 1] is the count of items less than P[i]
 * myCLE[i + 1] is the count of items less than or equal to P[i]
 *
 * myCLT[0] is always 0
 * myCLT[Plength + 1] is always mynmemb
 *
 */
static void
_histogram(unsigned char * P, int Plength, void * mybase, size_t mynmemb,
        ptrdiff_t * myCLT, ptrdiff_t * myCLE,
        struct crstruct * d)
{
    int it;

    if(myCLT) {
        myCLT[0] = 0;
        for(it = 0; it < Plength; it ++) {
            /* No need to start from the beginning of mybase, since mybase and P are both sorted */
            ptrdiff_t offset = myCLT[it];
            myCLT[it + 1] = _bsearch_last_lt(P + it * d->rsize,
                    ((char*) mybase) + offset * d->size,
                    mynmemb - offset, d)
                + 1 + offset;
        }
        /* sentinel: counts beyond the last bin cover the whole array */
        myCLT[it + 1] = mynmemb;
    }
    if(myCLE) {
        myCLE[0] = 0;
        for(it = 0; it < Plength; it ++) {
            /* No need to start from the beginning of mybase, since mybase and P are both sorted */
            ptrdiff_t offset = myCLE[it];
            myCLE[it + 1] = _bsearch_last_le(P + it * d->rsize,
                    ((char*) mybase) + offset * d->size,
                    mynmemb - offset, d)
                + 1 + offset;
        }
        myCLE[it + 1] = mynmemb;
    }
}

/*
 * Iteration state for the parallel bisection of Plength pivot values.
 * Pleft / Pright bracket each pivot; stable / narrow are per-pivot flags.
 */
struct piter {
    int * stable;             /* pivot i accepted, stop bisecting it */
    int * narrow;             /* bracket too small, test Pright next */
    int Plength;
    unsigned char * Pleft;    /* Plength keys of d->rsize bytes each */
    unsigned char * Pright;
    struct crstruct * d;
};

/* Initialize all brackets to [Pmin, Pmax].
 * NOTE(review): malloc results are not checked — presumably acceptable here;
 * confirm against the project's error-handling policy. */
static void
piter_init(struct piter * pi,
        unsigned char * Pmin, unsigned char * Pmax, int Plength,
        struct crstruct * d)
{
    pi->stable = malloc(Plength * sizeof(int));
    pi->narrow = malloc(Plength * sizeof(int));
    pi->d = d;
    pi->Pleft = malloc(d->rsize * Plength);
    pi->Pright = malloc(d->rsize * Plength);
    pi->Plength = Plength;

    int i;
    for(i = 0; i < pi->Plength; i ++) {
        pi->stable[i] = 0;
        pi->narrow[i] = 0;
        memcpy(&pi->Pleft[i * d->rsize], Pmin, d->rsize);
        memcpy(&pi->Pright[i * d->rsize], Pmax, d->rsize);
    }
}

/* Release the buffers allocated by piter_init. */
static void
piter_destroy(struct piter * pi)
{
    free(pi->stable);
    free(pi->narrow);
    free(pi->Pleft);
    free(pi->Pright);
}

/*
 * this will bisect the left / right in piter.
 * note that piter goes [left, right], thus we need
 * to maintain an internal status to make sure we go over
 * the additional 'right]'. (usual bisect range is
 * '[left, right)' )
 */
static void
piter_bisect(struct piter * pi, unsigned char * P)
{
    struct crstruct * d = pi->d;
    int i;
    for(i = 0; i < pi->Plength; i ++) {
        if(pi->stable[i]) continue;
        if(pi->narrow[i]) {
            /* The last iteration, test Pright directly */
            memcpy(&P[i * d->rsize],
                &pi->Pright[i * d->rsize],
                d->rsize);
            pi->stable[i] = 1;
        } else {
            /* ordinary iteration */
            d->bisect(&P[i * d->rsize],
                    &pi->Pleft[i * d->rsize],
                    &pi->Pright[i * d->rsize], d->rsize);
            /* in case the bisect can't move P beyond left,
             * the range is too small, so we set flag narrow,
             * and next iteration we will directly test Pright */
            if(d->compar(&P[i * d->rsize],
                &pi->Pleft[i * d->rsize], d->rsize) <= 0) {
                pi->narrow[i] = 1;
            }
        }
#if 0
        printf("bisect %d %u %u %u\n", i, *(int*) &P[i * d->rsize],
                *(int*) &pi->Pleft[i * d->rsize],
                *(int*) &pi->Pright[i * d->rsize]);
#endif
    }
}

/* Returns 1 when every pivot has been accepted (all stable flags set). */
static int
piter_all_done(struct piter * pi)
{
    int i;
    int done = 1;
#if 0
#pragma omp single
    for(i = 0; i < pi->Plength; i ++) {
        printf("P %d stable %d narrow %d\n",
                i, pi->stable[i], pi->narrow[i]);
    }
#endif
    for(i = 0; i < pi->Plength; i ++) {
        if(!pi->stable[i]) {
            done = 0;
            break;
        }
    }
    return done;
}

/*
 * bisection acceptance test.
 *
 * test if the counts satisfies CLT < C <= CLE.
 * move Pleft / Pright accordingly.
* */ static void piter_accept(struct piter * pi, unsigned char * P, ptrdiff_t * C, ptrdiff_t * CLT, ptrdiff_t * CLE) { struct crstruct * d = pi->d; int i; #if 0 for(i = 0; i < pi->Plength + 1; i ++) { printf("counts %d LT %ld C %ld LE %ld\n", i, CLT[i], C[i], CLE[i]); } #endif for(i = 0; i < pi->Plength; i ++) { if( CLT[i + 1] < C[i + 1] && C[i + 1] <= CLE[i + 1]) { pi->stable[i] = 1; continue; } else { if(CLT[i + 1] >= C[i + 1]) { /* P[i] is too big */ memcpy(&pi->Pright[i * d->rsize], &P[i * d->rsize], d->rsize); } else { /* P[i] is too small */ memcpy(&pi->Pleft[i * d->rsize], &P[i * d->rsize], d->rsize); } } } }
pjenccc0.c
// Copyright (c) 2018 Intel Corporation // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
/* // // Purpose: // Color conversions functions, forward transform // // Contents: // mfxiRGBToYCbCr_JPEG_8u_P3R // mfxiRGBToYCbCr_JPEG_8u_C3P3R // mfxiBGRToYCbCr_JPEG_8u_C3P3R // */ #include "precomp.h" #ifdef _OPENMP #include <omp.h> #endif #ifndef __OWNJ_H__ #include "ownj.h" #endif #ifndef __PJENCCCTBL_H__ #include "pjenccctbl.h" #endif #ifndef __PJENCCC_H__ #include "pjenccc.h" #endif #if (_IPPXSC < _IPPXSC_S2) LOCFUN(void, mfxownpj_RGBToYCbCr_JPEG_8u_P3R, ( const Ipp8u* r, const Ipp8u* g, const Ipp8u* b, Ipp8u* y, Ipp8u* cb, Ipp8u* cr, int width)) { int i; int w4; int rv; int gv; int bv; w4 = 0; for(i = w4; i < width; i++) { rv = r[i]; gv = g[i]; bv = b[i]; y[i] = (Ipp8u)((mfxcc_table[rv + R_Y_OFFS] + mfxcc_table[gv + G_Y_OFFS] + mfxcc_table[bv + B_Y_OFFS] + 3) >> 16); cb[i] = (Ipp8u)((mfxcc_table[rv + R_CB_OFF] + mfxcc_table[gv + G_CB_OFF] + mfxcc_table[bv + B_CB_OFF] + 3) >> 16); cr[i] = (Ipp8u)((mfxcc_table[rv + R_CR_OFF] + mfxcc_table[gv + G_CR_OFF] + mfxcc_table[bv + B_CR_OFF] + 3) >> 16); } return; } /* mfxownpj_RGBToYCbCr_JPEG_8u_P3R() */ #if !((IPPJ_ENCCC_OPT) || (_IPPLRB>=_IPPLRB_B1)) LOCFUN(void, mfxownpj_RGBToYCbCr_JPEG_8u_C3P3R, ( const Ipp8u* rgb, Ipp8u* y, Ipp8u* cb, Ipp8u* cr, int width)) { int i; int r, g, b; for(i = 0; i < width; i++) { r = rgb[0]; g = rgb[1]; b = rgb[2]; rgb += 3; y[i] = (Ipp8u)((mfxcc_table[r + R_Y_OFFS] + mfxcc_table[g + G_Y_OFFS] + mfxcc_table[b + B_Y_OFFS] + 3) >> 16); cb[i] = (Ipp8u)((mfxcc_table[r + R_CB_OFF] + mfxcc_table[g + G_CB_OFF] + mfxcc_table[b + B_CB_OFF] + 3) >> 16); cr[i] = (Ipp8u)((mfxcc_table[r + R_CR_OFF] + mfxcc_table[g + G_CR_OFF] + mfxcc_table[b + B_CR_OFF] + 3) >> 16); } return; } /* mfxownpj_RGBToYCbCr_JPEG_8u_C3P3R() */ LOCFUN(void, mfxownpj_BGRToYCbCr_JPEG_8u_C3P3R, ( const Ipp8u* bgr, Ipp8u* y, Ipp8u* cb, Ipp8u* cr, int width)) { int i; int r, g, b; for(i = 0; i < width; i++) { r = bgr[2]; g = bgr[1]; b = bgr[0]; bgr += 3; y[i] = (Ipp8u)((mfxcc_table[r + R_Y_OFFS] + mfxcc_table[g + G_Y_OFFS] + 
mfxcc_table[b + B_Y_OFFS] + 3) >> 16);    /* (tail of mfxownpj_BGRToYCbCr_JPEG_8u_C3P3R(), whose definition begins above this chunk) */
    cb[i] = (Ipp8u)((mfxcc_table[r + R_CB_OFF] +
                     mfxcc_table[g + G_CB_OFF] +
                     mfxcc_table[b + B_CB_OFF] + 3) >> 16);
    cr[i] = (Ipp8u)((mfxcc_table[r + R_CR_OFF] +
                     mfxcc_table[g + G_CR_OFF] +
                     mfxcc_table[b + B_CR_OFF] + 3) >> 16);
  }

  return;
} /* mfxownpj_BGRToYCbCr_JPEG_8u_C3P3R() */

#endif
#endif

/* ---------------------- library functions definitions -------------------- */

/* ///////////////////////////////////////////////////////////////////////////
//  Name:
//    mfxiRGBToYCbCr_JPEG_8u_P3R
//
//  Purpose:
//    RGB to YCbCr color convert
//
//  Parameter:
//    pSrc      pointer to pointers to input data (three separate planes R, G, B)
//    SrcStep   line offset in input data
//    pDst      pointer to pointers to output data (three separate planes Y, Cb, Cr)
//    DstStep   line offset in output data
//    roiSize   ROI size
//
//  Returns:
//    IppStatus
//
//  Notes:
//    the color conversion equations:
//    Y  =  0.29900*R + 0.58700*G + 0.11400*B
//    Cb = -0.16874*R - 0.33126*G + 0.50000*B + 128.0
//    Cr =  0.50000*R - 0.41869*G - 0.08131*B + 128.0
*/
IPPFUN(IppStatus, mfxiRGBToYCbCr_JPEG_8u_P3R, (
  const Ipp8u*   pSrc[3],
        int      SrcStep,
        Ipp8u*   pDst[3],
        int      DstStep,
        IppiSize roiSize))
{
  int i;

  /* argument validation (macro provided by the IPP framework headers) */
  IPP_BAD_ENC_CC_P3P3_RET()

/* Rows are independent, so the row loop is parallelized when the ROI is large
   enough to amortize the threading overhead (OMP_BOUNDARY). */
#ifdef _OPENMP
#pragma omp parallel for IPP_OMP_NUM_THREADS() \
  shared(pSrc,pDst,SrcStep,DstStep,roiSize) \
  private(i) default(none) \
  if((roiSize.height*roiSize.width) > (OMP_BOUNDARY))
#endif
  for(i = 0; i < roiSize.height; i++)
  {
    const Ipp8u* r;
    const Ipp8u* g;
    const Ipp8u* b;
    Ipp8u* y;
    Ipp8u* cb;
    Ipp8u* cr;

    /* per-row pointers into each source and destination plane */
    r = pSrc[0] + i * SrcStep;
    g = pSrc[1] + i * SrcStep;
    b = pSrc[2] + i * SrcStep;

    y  = pDst[0] + i * DstStep;
    cb = pDst[1] + i * DstStep;
    cr = pDst[2] + i * DstStep;

    /* row worker does the actual table-driven conversion */
    mfxownpj_RGBToYCbCr_JPEG_8u_P3R(r, g, b, y, cb, cr, roiSize.width);
  }

  return ippStsNoErr;
} /* mfxiRGBToYCbCr_JPEG_8u_P3R() */

/* ///////////////////////////////////////////////////////////////////////////
//  Name:
//    mfxiRGBToYCbCr_JPEG_8u_C3P3R
//
//  Purpose:
//    RGB to YCbCr color convert
//
//  Parameter:
//    pSrc      pointer to input data (interleaved RGB triplets)
//    SrcStep   line offset in input data
//    pDst      pointer to pointers to output data (three separate planes Y, Cb, Cr)
//    DstStep   line offset in output data
//    roiSize   ROI size
//
//  Returns:
//    IppStatus
//
//  Notes:
//    the color conversion equations:
//    Y  =  0.29900*R + 0.58700*G + 0.11400*B
//    Cb = -0.16874*R - 0.33126*G + 0.50000*B + 128.0
//    Cr =  0.50000*R - 0.41869*G - 0.08131*B + 128.0
*/
IPPFUN(IppStatus, mfxiRGBToYCbCr_JPEG_8u_C3P3R, (
  const Ipp8u*   pSrc,
        int      SrcStep,
        Ipp8u*   pDst[3],
        int      DstStep,
        IppiSize roiSize))
{
  int i;

  IPP_BAD_ENC_CC_C3P3_RET()

#ifdef _OPENMP
#pragma omp parallel for IPP_OMP_NUM_THREADS() \
  shared(pSrc,pDst,SrcStep,DstStep,roiSize) \
  private(i) default(none) \
  if((roiSize.height*roiSize.width) > (OMP_BOUNDARY))
#endif
  for(i = 0; i < roiSize.height; i++)
  {
    const Ipp8u* rgb;
    Ipp8u* y;
    Ipp8u* cb;
    Ipp8u* cr;

    /* single interleaved source row; three planar destination rows */
    rgb = pSrc + i * SrcStep;

    y  = pDst[0] + i * DstStep;
    cb = pDst[1] + i * DstStep;
    cr = pDst[2] + i * DstStep;

    mfxownpj_RGBToYCbCr_JPEG_8u_C3P3R(rgb, y, cb, cr, roiSize.width);
  }

  return ippStsNoErr;
} /* mfxiRGBToYCbCr_JPEG_8u_C3P3R() */

/* ///////////////////////////////////////////////////////////////////////////
//  Name:
//    mfxiBGRToYCbCr_JPEG_8u_C3P3R
//
//  Purpose:
//    BGR to YCbCr color convert
//
//  Parameter:
//    pSrc      pointer to input data (interleaved BGR triplets)
//    SrcStep   line offset in input data
//    pDst      pointer to pointers to output data (three separate planes Y, Cb, Cr)
//    DstStep   line offset in output data
//    roiSize   ROI size
//
//  Returns:
//    IppStatus
//
//  Notes:
//    the color conversion equations:
//    Y  =  0.29900*R + 0.58700*G + 0.11400*B
//    Cb = -0.16874*R - 0.33126*G + 0.50000*B + 128.0
//    Cr =  0.50000*R - 0.41869*G - 0.08131*B + 128.0
*/
IPPFUN(IppStatus, mfxiBGRToYCbCr_JPEG_8u_C3P3R, (
  const Ipp8u*   pSrc,
        int      SrcStep,
        Ipp8u*   pDst[3],
        int      DstStep,
        IppiSize roiSize))
{
  int i;

  IPP_BAD_ENC_CC_C3P3_RET()

#ifdef _OPENMP
#pragma omp parallel for IPP_OMP_NUM_THREADS() \
  shared(pSrc,pDst,SrcStep,DstStep,roiSize) \
  private(i) default(none) \
  if((roiSize.height*roiSize.width) > (OMP_BOUNDARY))
#endif
  for(i = 0; i < roiSize.height; i++)
  {
    const Ipp8u* bgr;
    Ipp8u* y;
    Ipp8u* cb;
    Ipp8u* cr;

    bgr = pSrc + i * SrcStep;

    y  = pDst[0] + i * DstStep;
    cb = pDst[1] + i * DstStep;
    cr = pDst[2] + i * DstStep;

    /* BGR variant of the row worker handles the swapped channel order */
    mfxownpj_BGRToYCbCr_JPEG_8u_C3P3R(bgr, y, cb, cr, roiSize.width);
  }

  return ippStsNoErr;
} /* mfxiBGRToYCbCr_JPEG_8u_C3P3R() */
conv_kernel_x86.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: quanwang@openailab.com
 */

#include <stdint.h>
#include <stdlib.h>
#include <math.h>

#include "conv_kernel_x86.h"
#include "wino_conv_kernel_x86.h"

#if __AVX__
#include <immintrin.h>
#endif

/* NOTE(review): max/min are only defined on non-MSVC builds here — presumably
 * MSVC builds get them from elsewhere; confirm before building with MSVC. */
#ifndef _MSC_VER
#include <sys/time.h>
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif

/* Size in bytes of the private interleave buffer needed for this filter tensor.
 * UINT8 filters are dequantized to fp32 for the simulated inference path, so
 * they need 4x the storage of the raw uint8 elements. */
static int get_private_mem_size(struct ir_tensor* filter)
{
    if (filter->data_type == TENGINE_DT_UINT8)    // simulator uint8 inference with fp32
        return filter->elem_num * filter->elem_size * 4;
    else
        return filter->elem_num * filter->elem_size;    // caution
}

/* fp32/int8 path: the weight layout is used as-is, so "interleaving" is a plain copy. */
static void interleave(struct ir_tensor* filter, struct conv_priv_info* priv_info)
{
    /* simply copy the data */
    memcpy(priv_info->interleave_buffer, filter->data, filter->elem_num * filter->elem_size);
}

/* uint8 path: dequantize the quantized weights into the interleave buffer as fp32,
 * using the filter's asymmetric quantization parameters (scale, zero_point). */
static void interleave_uint8(struct ir_tensor* filter, struct conv_priv_info* priv_info)
{
    /* dequant uint8 weight to fp32 for simulator */
    float* weight_fp32 = (float* )priv_info->interleave_buffer;
    uint8_t* weight_uint8 = (uint8_t*)filter->data;
    float scale = filter->scale;
    int zero_point = filter->zero_point;

    for (int i = 0; i < filter->elem_num; i++)
    {
        weight_fp32[i] = ((float)weight_uint8[i] - (float)zero_point) * scale;
    }
}

/* im2col for fp32 input: unfolds one (inc x inh x inw) image into a
 * (ksize_h*ksize_w*inc) x (outh*outw) column matrix in data_col, zero-filling
 * the positions whose receptive field falls outside the padded image.
 *
 *   inh/inw/inc      input height/width/channels
 *   outh/outw        output height/width
 *   ksize_h/ksize_w  kernel size,  sh/sw  strides,  ph/pw  pads,  dh/dw  dilations
 */
void im2col_fp32(float* data_img, float* data_col, int inh, int inw, int inc, int outh, int outw,
                 int ksize_h, int ksize_w, int sh, int sw, int ph, int pw, int dh, int dw)
{
    const int channels_col = ksize_h * ksize_w * inc;

    for (int c = 0; c < channels_col; ++c)
    {
        /* decompose the column-channel index into (kernel x, kernel y, input channel) */
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;

        /* horizontal image coordinate of this kernel column at output x == 0 */
        const int im_col = kw * dw - pw;
        /* first/one-past-last output x whose sample lies inside the image width */
        const int w_low = max(0, -im_col / sw + (-im_col % sw > 0));
        const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0));

        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            float* out = data_col + (c * outh + h) * outw;
            const float* end = out + w_high;

            if (im_row >= 0 && im_row < inh)
            {
                /* start one stride before the first valid sample: the loop below
                 * advances `in` BEFORE each read, so the first read lands on
                 * im_col + w_low * sw. */
                float* in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw;

                memset(out, 0, w_low * sizeof(float));    /* left padding columns */
                out += w_low;
                while (out < end)
                {
                    in += sw;
                    *(out++) = *in;
                }
                memset(out, 0, (outw - w_high) * sizeof(float));    /* right padding columns */
            }
            else
            {
                /* whole row is vertical padding */
                memset(out, 0, outw * sizeof(float));
            }
        }
    }
}

/* im2col for uint8 input: same unfolding as im2col_fp32, but each sample is
 * dequantized to fp32 on the fly with the input tensor's (scale, zero_point).
 * Geometry is read from `param` and from the tensors' NCHW dims. */
void im2col_uint8(uint8_t* data_img, float* data_col, struct ir_tensor* input_tensor,
                  struct ir_tensor* output_tensor, struct conv_param* param)
{
    int ksize_h = param->kernel_h;
    int ksize_w = param->kernel_w;
    int inc = param->input_channel / param->group;
    int sh = param->stride_h;
    int sw = param->stride_w;
    int ph = param->pad_h0;
    int pw = param->pad_w0;
    int dh = param->dilation_h;
    int dw = param->dilation_w;
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    float scale = input_tensor->scale;
    int zero_point = input_tensor->zero_point;

    const int channels_col = ksize_h * ksize_w * inc;

    for (int c = 0; c < channels_col; ++c)
    {
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;

        const int im_col = kw * dw - pw;
        const int w_low = max(0, -im_col / sw + (-im_col % sw > 0));
        const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0));

        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            float* out = data_col + (c * outh + h) * outw;
            const float* end = out + w_high;

            if (im_row >= 0 && im_row < inh)
            {
                /* pre-offset by one stride; `in` is advanced before each read */
                uint8_t * in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw;

                memset(out, 0, w_low * sizeof(float));
                out += w_low;
                while (out < end)
                {
                    in += sw;
                    /* dequantize: fp32 = (q - zero_point) * scale */
                    float in_fp32 = ((float)in[0] - (float)zero_point) * scale;
                    out[0] = in_fp32;
                    out++;
                }
                memset(out, 0, (outw - w_high) * sizeof(float));
            }
            else
            {
                memset(out, 0, outw * sizeof(float));
            }
        }
    }
}

/* im2col for int8 input: same unfolding as im2col_fp32, but the column buffer
 * stays int8 (no dequantization); zero padding is literal 0 bytes. */
void im2col_int8(int8_t* data_img, int8_t* data_col, struct ir_tensor* input_tensor,
                 struct ir_tensor* output_tensor, struct conv_param* param)
{
    int ksize_h = param->kernel_h;
    int ksize_w = param->kernel_w;
    int inc = param->input_channel / param->group;
    int sh = param->stride_h;
    int sw = param->stride_w;
    int ph = param->pad_h0;
    int pw = param->pad_w0;
    int dh = param->dilation_h;
    int dw = param->dilation_w;
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];

    const int channels_col = ksize_h * ksize_w * inc;

    for (int c = 0; c < channels_col; ++c)
    {
        const int kw = c % ksize_w;
        int c_ = c / ksize_w;
        const int kh = c_ % ksize_h;
        c_ = c_ / ksize_h;

        const int im_col = kw * dw - pw;
        const int w_low = max(0, -im_col / sw + (-im_col % sw > 0));
        const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0));

        for (int h = 0; h < outh; ++h)
        {
            const int im_row = kh * dh + h * sh - ph;
            int8_t * out = data_col + (c * outh + h) * outw;
            const int8_t * end = out + w_high;

            if (im_row >= 0 && im_row < inh)
            {
                /* pre-offset by one stride; `in` is advanced before each read */
                int8_t * in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw;

                memset(out, 0, w_low * sizeof(int8_t));
                out += w_low;
                while (out < end)
                {
                    in += sw;
                    out[0] = in[0];
                    out++;
                }
                memset(out, 0, (outw - w_high) * sizeof(int8_t));
            }
            else
            {
                memset(out, 0, outw * sizeof(int8_t));
            }
        }
    }
}

/* Dispatch im2col for batch image `n` / convolution group `group` based on the
 * input tensor's data type, writing into the preallocated im2col buffer. */
static void im2col_ir(struct ir_tensor* input, struct ir_tensor* output, struct conv_priv_info* priv_info,
                      struct conv_param* param, int n, int group)
{
    int input_chan = param->input_channel / param->group;
    int image_size = input->dims[1] * input->dims[2] * input->dims[3];
    int group_size = input_chan * input->dims[2] * input->dims[3];

    /* byte offset of this (batch, group) slice within the input blob */
    void* input_base = (void*)((uint8_t*)input->data + (n * image_size + group * group_size) * input->elem_size);
    void* im2col_buf = (void*)priv_info->im2col_buffer;

    if (input->data_type == TENGINE_DT_FP32)
    {
        im2col_fp32(input_base, im2col_buf, input->dims[2], input->dims[3], input_chan,
                    output->dims[2], output->dims[3], param->kernel_h, param->kernel_w,
                    param->stride_h, param->stride_w, param->pad_h0, param->pad_w0,
                    param->dilation_h, param->dilation_w);
    }
    else if (input->data_type == TENGINE_DT_UINT8)
    {
        im2col_uint8(input_base, im2col_buf, input, output, param);
    }
    else if (input->data_type == TENGINE_DT_INT8)
    {
        im2col_int8(input_base, im2col_buf, input, output, param);
    }
    else
    {
        printf("Input data type %d not to be supported.\n", input->data_type);
    }
}

/* Pack the fp32 B matrix (K x N) into 8-column panels for sgemm_fp.
 * Full panels are stored contiguously; leftover columns (N % 8) each get their
 * own panel slot at index (i / 8 + i % 8).  (continues on the next chunk line) */
void input_pack4_fp32(int
K, int N, float* pB, float* pB_t, int num_thread)
{
    int nn_size = N >> 3;
    int remian_size_start = nn_size << 3;

    /* full 8-column panels: for each k, 8 consecutive columns side by side */
    // [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....]
#pragma omp parallel for num_threads(num_thread)
    for (int ii = 0; ii < nn_size; ii++)
    {
        int i = ii * 8;

        const float* img = pB + i;
        float* tmp = pB_t + (i / 8) * 8 * K;

        for (int j = 0; j < K; j++)
        {
#if __AVX__
            _mm256_storeu_ps(tmp, _mm256_loadu_ps(img));
#else
            tmp[0] = img[0];
            tmp[1] = img[1];
            tmp[2] = img[2];
            tmp[3] = img[3];
            tmp[4] = img[4];
            tmp[5] = img[5];
            tmp[6] = img[6];
            tmp[7] = img[7];
#endif    // __SSE__
            tmp += 8;
            img += N;
        }
    }

    /* remainder columns: one column per panel slot, at index (i / 8 + i % 8) */
    // [ch00, ch01, ch02, ch03 ....]
#pragma omp parallel for num_threads(num_thread)
    for (int i = remian_size_start; i < N; i++)
    {
        const float* img = pB + i;
        float* tmp = pB_t + (i / 8 + i % 8) * 8 * K;

        for (int j = 0; j < K; j++)
        {
            tmp[0] = img[0];

            tmp += 1;
            img += N;
        }
    }
}

/*
 * sgemm_fp() - fp32 GEMM micro-kernel: C[M x N] = A[M x K] * B[K x N], with
 * A and B pre-packed into 8-wide panels (pA_t by input_pack4-style row panels,
 * pB_t by input_pack4_fp32 column panels; remainder panels live at index
 * (x / 8 + (x % 8) / 4 + x % 4) etc., matching the packing above).
 * Rows of C are produced 8 at a time, then 4 at a time, then singly; columns
 * 8 at a time with a scalar tail.
 *
 * NOTE(review): the _mm256_fmadd_ps/_mm_fmadd_ps intrinsics are FMA3
 * instructions but are guarded only by __AVX__ — presumably the build always
 * enables FMA together with AVX; confirm compile flags (-mfma).
 * Only the 8-row loop is OMP-parallelized; the 4-row and 1-row loops handle at
 * most 7 leftover rows, so they run on one thread.
 */
static void sgemm_fp(int M, int N, int K, float* pA_t, float* pB_t, float* pC, int num_thread)
{
    int nn_outch = 0;
    int remain_outch_start = 0;

    /* ---- 8 output rows per iteration ---- */
    nn_outch = M >> 3;
    remain_outch_start = nn_outch << 3;

#pragma omp parallel for num_threads(num_thread)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int i = pp * 8;

        float* output0 = pC + ( i )*N;
        float* output1 = pC + (i + 1) * N;
        float* output2 = pC + (i + 2) * N;
        float* output3 = pC + (i + 3) * N;
        float* output4 = pC + (i + 4) * N;
        float* output5 = pC + (i + 5) * N;
        float* output6 = pC + (i + 6) * N;
        float* output7 = pC + (i + 7) * N;

        int j = 0;
        /* 8x8 tiles of C */
        for (; j + 7 < N; j += 8)
        {
            float* va = pA_t + (i / 8) * 8 * K;
            float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
            __m256 _sum0 = _mm256_set1_ps(0.0);
            __m256 _sum1 = _mm256_set1_ps(0.0);
            __m256 _sum2 = _mm256_set1_ps(0.0);
            __m256 _sum3 = _mm256_set1_ps(0.0);
            __m256 _sum4 = _mm256_set1_ps(0.0);
            __m256 _sum5 = _mm256_set1_ps(0.0);
            __m256 _sum6 = _mm256_set1_ps(0.0);
            __m256 _sum7 = _mm256_set1_ps(0.0);

            int k = 0;
            /* unrolled by 4 over K */
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                __m256 _vb1 = _mm256_loadu_ps(vb + 8);
                __m256 _vb2 = _mm256_loadu_ps(vb + 16);
                __m256 _vb3 = _mm256_loadu_ps(vb + 24);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1);    // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2);    // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3);    // sum3 = (a00-a07) * k30

                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);

                _sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4);    // sum4 = (a00-a07) * k40
                _sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5);    // sum5 = (a00-a07) * k50
                _sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6);    // sum6 = (a00-a07) * k60
                _sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7);    // sum7 = (a00-a07) * k70

                va += 8;

                // k1
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0);    // sum0 += (a10-a17) * k01
                _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1);    // sum1 += (a10-a17) * k11
                _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2);    // sum2 += (a10-a17) * k21
                _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3);    // sum3 += (a10-a17) * k31

                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);

                _sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4);    // sum4 += (a10-a17) * k41
                _sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5);    // sum5 += (a10-a17) * k51
                _sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6);    // sum6 += (a10-a17) * k61
                _sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7);    // sum7 += (a10-a17) * k71

                va += 8;

                // k2
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0);    // sum0 += (a20-a27) * k02
                _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1);    // sum1 += (a20-a27) * k12
                _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2);    // sum2 += (a20-a27) * k22
                _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3);    // sum3 += (a20-a27) * k32

                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);

                _sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4);    // sum4 += (a20-a27) * k42
                _sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5);    // sum5 += (a20-a27) * k52
                _sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6);    // sum6 += (a20-a27) * k62
                _sum7 = _mm256_fmadd_ps(_vb2, _va3, _sum7);    // sum7 += (a20-a27) * k72

                va += 8;

                // k3
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0);    // sum0 += (a30-a37) * k03
                _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1);    // sum1 += (a30-a37) * k13
                _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2);    // sum2 += (a30-a37) * k23
                _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3);    // sum3 += (a30-a37) * k33

                _va0 = _mm256_broadcast_ss(va + 4);
                _va1 = _mm256_broadcast_ss(va + 5);
                _va2 = _mm256_broadcast_ss(va + 6);
                _va3 = _mm256_broadcast_ss(va + 7);

                _sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4);    // sum4 += (a30-a37) * k43
                _sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5);    // sum5 += (a30-a37) * k53
                _sum6 = _mm256_fmadd_ps(_vb3, _va2, _sum6);    // sum6 += (a30-a37) * k63
                _sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7);    // sum7 += (a30-a37) * k73

                va += 8;
                vb += 32;
            }

            /* K remainder */
            for (; k < K; k++)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _va4 = _mm256_broadcast_ss(va + 4);
                __m256 _va5 = _mm256_broadcast_ss(va + 5);
                __m256 _va6 = _mm256_broadcast_ss(va + 6);
                __m256 _va7 = _mm256_broadcast_ss(va + 7);
                __m256 _vb0 = _mm256_loadu_ps(vb);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1);    // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2);    // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3);    // sum3 = (a00-a07) * k30
                _sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4);    // sum4 = (a00-a07) * k40
                _sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5);    // sum5 = (a00-a07) * k50
                _sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6);    // sum6 = (a00-a07) * k60
                _sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7);    // sum7 = (a00-a07) * k70

                va += 8;
                vb += 8;
            }

            _mm256_storeu_ps(output0, _sum0);
            _mm256_storeu_ps(output1, _sum1);
            _mm256_storeu_ps(output2, _sum2);
            _mm256_storeu_ps(output3, _sum3);
            _mm256_storeu_ps(output4, _sum4);
            _mm256_storeu_ps(output5, _sum5);
            _mm256_storeu_ps(output6, _sum6);
            _mm256_storeu_ps(output7, _sum7);
#else
            float sum0[8] = {0};
            float sum1[8] = {0};
            float sum2[8] = {0};
            float sum3[8] = {0};
            float sum4[8] = {0};
            float sum5[8] = {0};
            float sum6[8] = {0};
            float sum7[8] = {0};

            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum0[n] += va[0] * vb[n];
                    sum1[n] += va[1] * vb[n];
                    sum2[n] += va[2] * vb[n];
                    sum3[n] += va[3] * vb[n];
                    sum4[n] += va[4] * vb[n];
                    sum5[n] += va[5] * vb[n];
                    sum6[n] += va[6] * vb[n];
                    sum7[n] += va[7] * vb[n];
                }
                va += 8;
                vb += 8;
            }

            for (int n = 0; n < 8; n++)
            {
                output0[n] = sum0[n];
                output1[n] = sum1[n];
                output2[n] = sum2[n];
                output3[n] = sum3[n];
                output4[n] = sum4[n];
                output5[n] = sum5[n];
                output6[n] = sum6[n];
                output7[n] = sum7[n];
            }
#endif    // __AVX__
            output0 += 8;
            output1 += 8;
            output2 += 8;
            output3 += 8;
            output4 += 8;
            output5 += 8;
            output6 += 8;
            output7 += 8;
        }

        /* scalar column tail: one column of C, 8 rows at once */
        for (; j < N; j++)
        {
            float* va = pA_t + (i / 8) * 8 * K;
            float* vb = pB_t + (j / 8 + j % 8) * 8 * K;
#if __AVX__
            __m256 _sum0_7 = _mm256_set1_ps(0.0);
            __m256 _sum0 = _mm256_set1_ps(0.0);
            __m256 _sum1 = _mm256_set1_ps(0.0);
            __m256 _sum2 = _mm256_set1_ps(0.0);
            __m256 _sum3 = _mm256_set1_ps(0.0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                __m256 _vb0 = _mm256_broadcast_ss(vb);
                __m256 _vb1 = _mm256_broadcast_ss(vb + 1);
                __m256 _vb2 = _mm256_broadcast_ss(vb + 2);
                __m256 _vb3 = _mm256_broadcast_ss(vb + 3);
                __m256 _va0 = _mm256_loadu_ps(va);
                __m256 _va1 = _mm256_loadu_ps(va + 8);
                __m256 _va2 = _mm256_loadu_ps(va + 16);
                __m256 _va3 = _mm256_loadu_ps(va + 24);

                _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0);    // sum0 += (k00-k70) * a00
                _sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1);    // sum1 += (k01-k71) * a10
                _sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2);    // sum2 += (k02-k72) * a20
                _sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3);    // sum3 += (k03-k73) * a30

                va += 32;
                vb += 4;
            }

            _sum0 = _mm256_add_ps(_sum0, _sum1);
            _sum2 = _mm256_add_ps(_sum2, _sum3);
            _sum0_7 = _mm256_add_ps(_sum0_7, _sum0);
            _sum0_7 = _mm256_add_ps(_sum0_7, _sum2);

            for (; k < K; k++)
            {
                __m256 _vb0 = _mm256_broadcast_ss(vb);
                __m256 _va = _mm256_loadu_ps(va);

                _sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7);    // sum0 += (k00-k70) * a00

                va += 8;
                vb += 1;
            }

            float output_sum0_7[8] = {0.f};
            _mm256_storeu_ps(output_sum0_7, _sum0_7);

            output0[0] = output_sum0_7[0];
            output1[0] = output_sum0_7[1];
            output2[0] = output_sum0_7[2];
            output3[0] = output_sum0_7[3];
            output4[0] = output_sum0_7[4];
            output5[0] = output_sum0_7[5];
            output6[0] = output_sum0_7[6];
            output7[0] = output_sum0_7[7];
#else
            float sum0 = 0;
            float sum1 = 0;
            float sum2 = 0;
            float sum3 = 0;
            float sum4 = 0;
            float sum5 = 0;
            float sum6 = 0;
            float sum7 = 0;

            for (int k = 0; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                sum1 += va[1] * vb[0];
                sum2 += va[2] * vb[0];
                sum3 += va[3] * vb[0];
                sum4 += va[4] * vb[0];
                sum5 += va[5] * vb[0];
                sum6 += va[6] * vb[0];
                sum7 += va[7] * vb[0];

                va += 8;
                vb += 1;
            }

            output0[0] = sum0;
            output1[0] = sum1;
            output2[0] = sum2;
            output3[0] = sum3;
            output4[0] = sum4;
            output5[0] = sum5;
            output6[0] = sum6;
            output7[0] = sum7;
#endif    // __AVX__
            output0++;
            output1++;
            output2++;
            output3++;
            output4++;
            output5++;
            output6++;
            output7++;
        }
    }

    /* ---- 4 output rows per iteration (at most one pass: M - start < 8) ---- */
    nn_outch = (M - remain_outch_start) >> 2;

    for (int pp = 0; pp < nn_outch; pp++)
    {
        int i = remain_outch_start + pp * 4;

        float* output0 = pC + ( i )*N;
        float* output1 = pC + (i + 1) * N;
        float* output2 = pC + (i + 2) * N;
        float* output3 = pC + (i + 3) * N;

        int j = 0;
        for (; j + 7 < N; j += 8)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
            float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
            __m256 _sum0 = _mm256_set1_ps(0.0);
            __m256 _sum1 = _mm256_set1_ps(0.0);
            __m256 _sum2 = _mm256_set1_ps(0.0);
            __m256 _sum3 = _mm256_set1_ps(0.0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                __m256 _vb1 = _mm256_loadu_ps(vb + 8);
                __m256 _vb2 = _mm256_loadu_ps(vb + 16);
                __m256 _vb3 = _mm256_loadu_ps(vb + 24);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1);    // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2);    // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3);    // sum3 = (a00-a07) * k30

                va += 4;

                // k1
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0);    // sum0 += (a10-a17) * k01
                _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1);    // sum1 += (a10-a17) * k11
                _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2);    // sum2 += (a10-a17) * k21
                _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3);    // sum3 += (a10-a17) * k31

                va += 4;

                // k2
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0);    // sum0 += (a20-a27) * k02
                _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1);    // sum1 += (a20-a27) * k12
                _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2);    // sum2 += (a20-a27) * k22
                _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3);    // sum3 += (a20-a27) * k32

                va += 4;

                // k3
                _va0 = _mm256_broadcast_ss(va);
                _va1 = _mm256_broadcast_ss(va + 1);
                _va2 = _mm256_broadcast_ss(va + 2);
                _va3 = _mm256_broadcast_ss(va + 3);

                _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0);    // sum0 += (a30-a37) * k03
                _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1);    // sum1 += (a30-a37) * k13
                _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2);    // sum2 += (a30-a37) * k23
                _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3);    // sum3 += (a30-a37) * k33

                va += 4;
                vb += 32;
            }

            for (; k < K; k++)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00
                _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1);    // sum1 = (a00-a07) * k10
                _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2);    // sum2 = (a00-a07) * k20
                _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3);    // sum3 = (a00-a07) * k30

                va += 4;
                vb += 8;
            }

            _mm256_storeu_ps(output0, _sum0);
            _mm256_storeu_ps(output1, _sum1);
            _mm256_storeu_ps(output2, _sum2);
            _mm256_storeu_ps(output3, _sum3);
#else
            float sum0[8] = {0};
            float sum1[8] = {0};
            float sum2[8] = {0};
            float sum3[8] = {0};

            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum0[n] += va[0] * vb[n];
                    sum1[n] += va[1] * vb[n];
                    sum2[n] += va[2] * vb[n];
                    sum3[n] += va[3] * vb[n];
                }
                va += 4;
                vb += 8;
            }

            for (int n = 0; n < 8; n++)
            {
                output0[n] = sum0[n];
                output1[n] = sum1[n];
                output2[n] = sum2[n];
                output3[n] = sum3[n];
            }
#endif    // __AVX__
            output0 += 8;
            output1 += 8;
            output2 += 8;
            output3 += 8;
        }

        for (; j < N; j++)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
            float* vb = pB_t + (j / 8 + j % 8) * 8 * K;
#if __AVX__
            __m128 _sum0_3 = _mm_set1_ps(0.0);
            __m128 _sum0 = _mm_set1_ps(0.0);
            __m128 _sum1 = _mm_set1_ps(0.0);
            __m128 _sum2 = _mm_set1_ps(0.0);
            __m128 _sum3 = _mm_set1_ps(0.0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                __m128 _vb0 = _mm_set1_ps(vb[0]);
                __m128 _vb1 = _mm_set1_ps(vb[1]);
                __m128 _vb2 = _mm_set1_ps(vb[2]);
                __m128 _vb3 = _mm_set1_ps(vb[3]);
                __m128 _va0 = _mm_loadu_ps(va);
                __m128 _va1 = _mm_loadu_ps(va + 4);
                __m128 _va2 = _mm_loadu_ps(va + 8);
                __m128 _va3 = _mm_loadu_ps(va + 12);

                _sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0);    // sum0 += (k00-k30) * a00
                _sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1);    // sum1 += (k01-k31) * a10
                _sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2);    // sum2 += (k02-k32) * a20
                _sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3);    // sum3 += (k03-k33) * a30

                va += 16;
                vb += 4;
            }

            _sum0 = _mm_add_ps(_sum0, _sum1);
            _sum2 = _mm_add_ps(_sum2, _sum3);
            _sum0_3 = _mm_add_ps(_sum0_3, _sum0);
            _sum0_3 = _mm_add_ps(_sum0_3, _sum2);

            for (; k < K; k++)
            {
                __m128 _vb0 = _mm_set1_ps(vb[0]);
                __m128 _va = _mm_loadu_ps(va);

                _sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3);    // sum0 += (k00-k30) * a00

                va += 4;
                vb += 1;
            }

            float output_sum0_3[4] = {0.f};
            _mm_storeu_ps(output_sum0_3, _sum0_3);

            output0[0] = output_sum0_3[0];
            output1[0] = output_sum0_3[1];
            output2[0] = output_sum0_3[2];
            output3[0] = output_sum0_3[3];
#else
            float sum0 = 0;
            float sum1 = 0;
            float sum2 = 0;
            float sum3 = 0;

            for (int k = 0; k < K; k++)
            {
                sum0 += va[0] * vb[0];
                sum1 += va[1] * vb[0];
                sum2 += va[2] * vb[0];
                sum3 += va[3] * vb[0];

                va += 4;
                vb += 1;
            }

            output0[0] = sum0;
            output1[0] = sum1;
            output2[0] = sum2;
            output3[0] = sum3;
#endif    // __AVX__
            output0++;
            output1++;
            output2++;
            output3++;
        }
    }

    remain_outch_start += nn_outch << 2;

    // output ch0
    /* ---- one output row per iteration (at most 3 leftover rows) ---- */
    for (int i = remain_outch_start; i < M; i++)
    {
        float* output = pC + i * N;

        int j = 0;
        for (; j + 7 < N; j += 8)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
            float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
            __m256 _sum0 = _mm256_set1_ps(0.0);

            int k = 0;
            for (; k + 3 < K; k = k + 4)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _va1 = _mm256_broadcast_ss(va + 1);
                __m256 _va2 = _mm256_broadcast_ss(va + 2);
                __m256 _va3 = _mm256_broadcast_ss(va + 3);
                __m256 _vb0 = _mm256_loadu_ps(vb);
                __m256 _vb1 = _mm256_loadu_ps(vb + 8);
                __m256 _vb2 = _mm256_loadu_ps(vb + 16);
                __m256 _vb3 = _mm256_loadu_ps(vb + 24);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00
                _sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0);    // sum0 += (a10-a17) * k01
                _sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0);    // sum0 += (a20-a27) * k02
                _sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0);    // sum0 += (a30-a37) * k03

                va += 4;
                vb += 32;
            }

            for (; k < K; k++)
            {
                // k0
                __m256 _va0 = _mm256_broadcast_ss(va);
                __m256 _vb0 = _mm256_loadu_ps(vb);

                _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0);    // sum0 = (a00-a07) * k00

                va += 1;
                vb += 8;
            }

            _mm256_storeu_ps(output, _sum0);
#else
            float sum[8] = {0};

            for (int k = 0; k < K; k++)
            {
                for (int n = 0; n < 8; n++)
                {
                    sum[n] += va[0] * vb[n];
                }
                va += 1;
                vb += 8;
            }

            for (int n = 0; n < 8; n++)
            {
                output[n] = sum[n];
            }
#endif    // __AVX__
            output += 8;
        }

        for (; j < N; j++)
        {
            float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
            float* vb = pB_t + (j / 8 + j % 8) * 8 * K;

            int k = 0;
#if __AVX__
            __m128 _sum0 = _mm_set1_ps(0.f);

            for (; k + 3 < K; k += 4)
            {
                __m128 _p0 = _mm_loadu_ps(vb);
                __m128 _k0 = _mm_loadu_ps(va);
                _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0));

                va += 4;
                vb += 4;
            }
/* MSVC's __m128 has no operator[]; use the named member for the horizontal sum */
#ifdef _WIN32
            float sum0 = _sum0.m128_f32[0] + _sum0.m128_f32[1] + _sum0.m128_f32[2] + _sum0.m128_f32[3];
#else
            float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3];
#endif
#else
            float sum0 = 0.f;
#endif    // __AVX__
            for (; k < K; k++)
            {
                sum0 += va[0] * vb[0];

                va += 1;
                vb += 1;
            }

            output[0] = sum0;
            output++;
        }
    }
}

/* Pack the int8 B matrix (K x N) into 8-column panels for sgemm_i8; same layout
 * as input_pack4_fp32.  (definition continues past this chunk) */
void input_pack4_int8(int K, int N, int8_t* pB, int8_t* pB_t, int num_thread)
{
    int nn_size = N >> 3;
    int remian_size_start = nn_size << 3;

    // [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....]
#pragma omp parallel for num_threads(num_thread) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const int8_t* img = pB + i; int8_t* tmp = pB_t + (i / 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp[1] = img[1]; tmp[2] = img[2]; tmp[3] = img[3]; tmp[4] = img[4]; tmp[5] = img[5]; tmp[6] = img[6]; tmp[7] = img[7]; tmp += 8; img += N; } } // [ch00, ch01, ch02, ch03 ....] #pragma omp parallel for num_threads(num_thread) for (int i = remian_size_start; i < N; i++) { const int8_t* img = pB + i; int8_t* tmp = pB_t + (i / 8 + i % 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp += 1; img += N; } } } static void sgemm_i8(int M, int N, int K, int8_t* pA_t, int8_t* pB_t, int32_t* pC, int num_thread) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = M >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(num_thread) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 8; int32_t* output0 = pC + ( i )*N; int32_t* output1 = pC + (i + 1) * N; int32_t* output2 = pC + (i + 2) * N; int32_t* output3 = pC + (i + 3) * N; int32_t* output4 = pC + (i + 4) * N; int32_t* output5 = pC + (i + 5) * N; int32_t* output6 = pC + (i + 6) * N; int32_t* output7 = pC + (i + 7) * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); __m256i _sum4 = _mm256_set1_epi32(0); __m256i _sum5 = _mm256_set1_epi32(0); __m256i _sum6 = _mm256_set1_epi32(0); __m256i _sum7 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { // k0 __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); __m256i _vb1 = 
_mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum7); va += 8; // k1 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum7); va += 8; // k2 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1); _sum2 = 
_mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum7); va += 8; // k3 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum7); va += 8; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _va4 = _mm256_set1_epi32(*(va + 4)); __m256i _va5 = _mm256_set1_epi32(*(va + 5)); __m256i _va6 = _mm256_set1_epi32(*(va + 6)); __m256i _va7 = _mm256_set1_epi32(*(va + 7)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, 
_va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va4), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va5), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va6), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va7), _sum7); va += 8; vb += 8; } _mm256_storeu_si256((__m256i* )output0, _sum0); _mm256_storeu_si256((__m256i* )output1, _sum1); _mm256_storeu_si256((__m256i* )output2, _sum2); _mm256_storeu_si256((__m256i* )output3, _sum3); _mm256_storeu_si256((__m256i* )output4, _sum4); _mm256_storeu_si256((__m256i* )output5, _sum5); _mm256_storeu_si256((__m256i* )output6, _sum6); _mm256_storeu_si256((__m256i* )output7, _sum7); #else int32_t sum0[8] = {0}; int32_t sum1[8] = {0}; int32_t sum2[8] = {0}; int32_t sum3[8] = {0}; int32_t sum4[8] = {0}; int32_t sum5[8] = {0}; int32_t sum6[8] = {0}; int32_t sum7[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; output4[n] = sum4[n]; output5[n] = sum5[n]; output6[n] = sum6[n]; output7[n] = sum7[n]; } #endif output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8) * 8 * K; int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0_7 = _mm256_set1_epi32(0); __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _vb1 = _mm256_set1_epi32(*(vb + 1)); __m256i 
_vb2 = _mm256_set1_epi32(*(vb + 2)); __m256i _vb3 = _mm256_set1_epi32(*(vb + 3)); __m256i _va0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); __m256i _va1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 8))); __m256i _va2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 16))); __m256i _va3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3); va += 32; vb += 4; } _sum0 = _mm256_add_epi32(_sum0, _sum1); _sum2 = _mm256_add_epi32(_sum2, _sum3); _sum0_7 = _mm256_add_epi32(_sum0_7, _sum0); _sum0_7 = _mm256_add_epi32(_sum0_7, _sum2); for (; k < K; k++) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); _sum0_7 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_7); va += 8; vb += 1; } int32_t output_sum0_7[8] = {0}; _mm256_storeu_si256((__m256i* )output_sum0_7, _sum0_7); output0[0] = output_sum0_7[0]; output1[0] = output_sum0_7[1]; output2[0] = output_sum0_7[2]; output3[0] = output_sum0_7[3]; output4[0] = output_sum0_7[4]; output5[0] = output_sum0_7[5]; output6[0] = output_sum0_7[6]; output7[0] = output_sum0_7[7]; #else int32_t sum0 = 0; int32_t sum1 = 0; int32_t sum2 = 0; int32_t sum3 = 0; int32_t sum4 = 0; int32_t sum5 = 0; int32_t sum6 = 0; int32_t sum7 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif output0++; output1++; output2++; output3++; output4++; output5++; output6++; 
output7++; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int i = remain_outch_start + pp * 4; int32_t* output0 = pC + ( i )*N; int32_t* output1 = pC + (i + 1) * N; int32_t* output2 = pC + (i + 2) * N; int32_t* output3 = pC + (i + 3) * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = K + 4) { // k0 __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); va += 4; // k1 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3); va += 4; // k2 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0); _sum1 = 
_mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3); va += 4; // k3 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3); va += 4; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); va += 4; vb += 8; } _mm256_storeu_si256((__m256i* )output0, _sum0); _mm256_storeu_si256((__m256i* )output1, _sum1); _mm256_storeu_si256((__m256i* )output2, _sum2); _mm256_storeu_si256((__m256i* )output3, _sum3); #else int32_t sum0[8] = {0}; int32_t sum1[8] = {0}; int32_t sum2[8] = {0}; int32_t sum3[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } #endif output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0_3 = 
_mm256_set1_epi32(0); __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k=0; for (; k + 3 < K; k = k + 4) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _vb1 = _mm256_set1_epi32(*(vb + 1)); __m256i _vb2 = _mm256_set1_epi32(*(vb + 2)); __m256i _vb3 = _mm256_set1_epi32(*(vb + 3)); __m256i _va0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); __m256i _va1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 4))); __m256i _va2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 8))); __m256i _va3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 12))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3); va+=16; vb+=4; } _sum0 = _mm256_add_epi32(_sum0, _sum1); _sum2 = _mm256_add_epi32(_sum2, _sum3); _sum0_3 = _mm256_add_epi32(_sum0_3, _sum0); _sum0_3 = _mm256_add_epi32(_sum0_3, _sum2); for (; k < K; k++) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); _sum0_3 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_3); va += 4; vb += 1; } //drop last 4 value int32_t output_sum0_3[4] = {0}; _mm256_storeu_si256((__m256i* )output_sum0_3, _sum0_3); output0[0] = output_sum0_3[0]; output1[0] = output_sum0_3[1]; output2[0] = output_sum0_3[2]; output3[0] = output_sum0_3[3]; #else int32_t sum0 = 0; int32_t sum1 = 0; int32_t sum2 = 0; int32_t sum3 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; // output ch0 for (int i = 
remain_outch_start; i < M; i++)
{
    /* single remaining output row: one int32 accumulator row of C */
    int32_t* output = pC + i * N;
    int j = 0;
    /* columns in blocks of 8 */
    for (; j + 7 < N; j += 8)
    {
        int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
        int8_t* vb = pB_t + (j / 8) * 8 * K;
#if 0 //__AVX__
        /* AVX2 path (currently disabled by the `#if 0`): 1 row x 8 cols */
        __m256i _sum0 = _mm256_set1_epi32(0);
        int k = 0;
        for (; k + 3 < K; k = k + 4)
        {
            __m256i _va0 = _mm256_set1_epi32(*va);
            __m256i _va1 = _mm256_set1_epi32(*(va + 1));
            __m256i _va2 = _mm256_set1_epi32(*(va + 2));
            __m256i _va3 = _mm256_set1_epi32(*(va + 3));
            __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb));
            __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8)));
            __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16)));
            __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24)));
            _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0);
            _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum0);
            _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum0);
            _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum0);
            va += 4;
            vb += 32;
        }
        for (; k < K; k++)
        {
            __m256i _va0 = _mm256_set1_epi32(*va);
            __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb));
            _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0);
            va += 1;
            vb += 8;
        }
        _mm256_storeu_si256((__m256i* )output, _sum0);
#else
        /* scalar reference path: accumulate 8 columns for this row */
        int32_t sum[8] = {0};
        for (int k = 0; k < K; k++)
        {
            for (int n = 0; n < 8; n++)
            {
                sum[n] += va[0] * vb[n];
            }
            va += 1;
            vb += 8;
        }
        for (int n = 0; n < 8; n++)
        {
            output[n] = sum[n];
        }
#endif
        output += 8;
    }
    /* leftover columns, one at a time */
    for (; j < N; j++)
    {
        int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
        int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K;
        int k = 0;
        /* NOTE(review): float literal 0.f silently converts to int 0 —
           harmless, but should read plain 0 */
        int32_t sum0 = 0.f;
        for (; k < K; k++)
        {
            sum0 += va[0] * vb[0];
            va += 1;
            vb += 1;
        }
        output[0] = sum0;
        output++;
    }
}
}

/*
 * sgemm_fp32: fp32 convolution-as-GEMM for one batch image `n` and one
 * convolution group `group`. Runs the packed GEMM (sgemm_fp) directly into
 * the output tensor, then adds the per-channel bias (if any) and applies
 * the activation selected by param->activation:
 *   == 0 -> ReLU, > 0 -> ReLU6, otherwise no clamping.
 */
static void sgemm_fp32(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias,
                       struct ir_tensor* output, struct conv_priv_info* priv_info,
                       struct conv_param* param, int n, int group, int num_thread)
{
    int
output->dims[2];
    int out_w = output->dims[3];
    int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];
    /* per-group slices of the packed kernel / im2col buffers */
    float* interleave_fp32 = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4;
    uint8_t * output_uint8 = ( uint8_t* )output->data + n * out_image_size + outchan_g * group * out_h * out_w;
    int* bias_int32 = NULL;
    float bias_scale = 0.f;
    if (bias)
    {
        bias_int32 = ( int* )bias->data + outchan_g * group;
        /* int32 bias is scaled by input_scale * weight_scale */
        bias_scale = input->scale * filter->scale;
    }
    float* filter_sgemm = interleave_fp32;
    float* input_sgemm_pack4 = im2col_pack4_fp32;
    /* uint8 inference is simulated in fp32: compute into a scratch buffer,
       then requantize. NOTE(review): sys_malloc result is not NULL-checked. */
    float* output_sgemm = (float*)sys_malloc(outchan_g * out_h * out_w * sizeof(float));
    sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread);
    /* process bias */
    if (bias)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                output_sgemm[output_off] += (float )bias_int32[i] * bias_scale;
            }
        }
    }
    /* process activation relu */
    if (param->activation == 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm[output_off] < 0)
                    output_sgemm[output_off] = 0;
            }
        }
    }
    /* process activation relu6 */
    if (param->activation > 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm[output_off] < 0)
                    output_sgemm[output_off] = 0;
                if (output_sgemm[output_off] > 6)
                    output_sgemm[output_off] = 6;
            }
        }
    }
    /* quant from fp32 to uint8: round, shift by zero_point, clamp to [0,255] */
    for (int i = 0; i < outchan_g; i++)
    {
        for (int j = 0; j < out_h * out_w; j++)
        {
            int output_off = i * (out_h * out_w) + j;
            int udata = ( int )(round(output_sgemm[output_off] / output->scale) + output->zero_point);
            if (udata > 255)
                udata = 255;
            else if (udata < 0)
                udata = 0;
            output_uint8[output_off] = udata;
        }
    }
    sys_free(output_sgemm);
}

/*
 * sgemm_int8: int8 convolution-as-GEMM for one batch image `n` and one
 * group `group`. Runs the int8 GEMM into an int32 scratch buffer, dequantizes
 * to fp32 using per-channel kernel scales (adding the int32 bias first, if
 * present), applies the activation (0 -> ReLU, > 0 -> ReLU6), then
 * requantizes to int8 with symmetric clamping to [-127, 127].
 */
static void
sgemm_int8(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias,
           struct ir_tensor* output, struct conv_priv_info* priv_info,
           struct conv_param* param, int n, int group, int num_thread)
{
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;
    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];
    int8_t* interleave_int8 = ( int8_t* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    int8_t* im2col_pack4_int8 = priv_info->im2col_buffer_pack4;
    int8_t * output_int8 = ( int8_t* )output->data + n * out_image_size + outchan_g * group * out_h * out_w;
    int32_t * bias_int32 = NULL;
    if (bias)
        bias_int32 = ( int* )bias->data + outchan_g * group;
    float input_scale = input->scale;
    float* kernel_scales = filter->scale_list; /* one scale per output channel */
    float output_scale = output->scale;
    int8_t* filter_sgemm = interleave_int8;
    int8_t* input_sgemm_pack4 = im2col_pack4_int8;
    /* NOTE(review): sys_malloc results are not NULL-checked */
    int32_t* output_sgemm_int32 = (int32_t*)sys_malloc(outchan_g * out_h * out_w * sizeof(int32_t));
    float* output_sgemm_fp32 = (float*)sys_malloc(outchan_g * out_h * out_w * sizeof(float));
    sgemm_i8(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm_int32, num_thread);
    /* process bias and dequant output from int32 to fp32 */
    #pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outchan_g; i++)
    {
        for (int j = 0; j < out_h * out_w; j++)
        {
            int output_off = i * (out_h * out_w) + j;
            if (bias)
                output_sgemm_fp32[output_off] = (float )(output_sgemm_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i];
            else
                output_sgemm_fp32[output_off] = (float )output_sgemm_int32[output_off] * input_scale * kernel_scales[i];
        }
    }
    /* process activation relu */
    if (param->activation == 0)
    {
        #pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm_fp32[output_off] < 0)
                    output_sgemm_fp32[output_off] = 0;
            }
        }
    }
    /* process activation relu6 */
    if (param->activation > 0)
    {
        #pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm_fp32[output_off] < 0)
                    output_sgemm_fp32[output_off] = 0;
                if (output_sgemm_fp32[output_off] > 6)
                    output_sgemm_fp32[output_off] = 6;
            }
        }
    }
    /* quant from fp32 to int8 */
    for (int i = 0; i < outchan_g; i++)
    {
        #pragma omp parallel for num_threads(num_thread)
        for (int j = 0; j < out_h * out_w; j++)
        {
            int output_off = i * (out_h * out_w) + j;
            int32_t data_i32 = ( int32_t )(round(output_sgemm_fp32[output_off] / output_scale));
            if (data_i32 > 127)
                data_i32 = 127;
            else if (data_i32 < -127)
                data_i32 = -127;
            output_int8[output_off] = (int8_t)data_i32;
        }
    }
    sys_free(output_sgemm_int32);
    sys_free(output_sgemm_fp32);
}

/* check the conv wheather need to be using winograd */
/* Returns 1 only for ungrouped 3x3 stride-1 dilation-1 convs with input and
   output channels >= 16, output channels a multiple of 16, and spatial size
   above 10x10; 0 otherwise. */
static int winograd_support(struct conv_param* param, int in_h, int in_w)
{
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int input_chan = param->input_channel;
    int output_chan = param->output_channel;
    int group = param->group;
    if (in_h <= 10 && in_w <= 10)
        return 0;
    if (group != 1 || kernel_h != 3 || kernel_w != 3 || stride_h != 1 || stride_w != 1 || dilation_h != 1
        || dilation_w != 1 || input_chan < 16 || output_chan < 16 || output_chan % 16)
        return 0;
    return 1;
}

/* Size in bytes of the shared im2col buffer for one group. */
int conv_hcl_get_shared_mem_size(struct ir_tensor* input, struct ir_tensor* output, struct conv_param* param)
{
    int group = param->group;
    int input_chan = param->input_channel / group;
    int kernel_size = input_chan * param->kernel_h * param->kernel_w;
    int output_xy = output->dims[2] * output->dims[3];
    int elem_size =
input->elem_size;
    // simulator uint8 inference with fp32
    if (input->data_type == TENGINE_DT_UINT8)
        elem_size = 4;
    return elem_size * output_xy * kernel_size;
}

/* Size in bytes of the packed (pack-8) im2col buffer for one group. */
int conv_hcl_get_shared_pack4_mem_size(struct ir_tensor* filter, struct ir_tensor* output, struct conv_param* param)
{
    int K = filter->elem_num / filter->dims[0];
    int N = output->dims[2] * output->dims[3];
    int elem_size = filter->elem_size;
    // simulator uint8 inference with fp32
    if (filter->data_type == TENGINE_DT_UINT8)
        elem_size = 4;
    return (8 * K * (N / 8 + N % 8)) * elem_size;
}

/* Size in bytes of the interleaved (packed) kernel buffer: rows of 8, then
   rows of 4, then single rows, matching the packing layout below. */
int conv_hcl_get_interleave_pack4_size(int M, int K, struct ir_tensor* filter)
{
    int elem_size = filter->elem_size;
    // simulator uint8 inference with fp32
    if (filter->data_type == TENGINE_DT_UINT8)
        elem_size = 4;
    int size = 8 * K * (M / 8 + (M % 8) / 4 + M % 4) * elem_size;
    return size;
}

/*
 * conv_hcl_interleave_pack4_fp32: repack the fp32 kernel matrix (M rows of K)
 * from interleave_buffer into interleave_buffer_pack4, interleaving rows in
 * groups of 8, then groups of 4, then singly, so the GEMM kernels can stream
 * 8 (or 4) output channels per K step.
 */
void conv_hcl_interleave_pack4_fp32(int M, int K, struct conv_priv_info* priv_info)
{
    float* pA = ( float* )priv_info->interleave_buffer;
    float* pA_t = ( float* )priv_info->interleave_buffer_pack4;
    int nn_outch = M >> 3;
    int remain_outch_start = nn_outch << 3;
    /* 8-row groups */
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 8;
        const float* k0 = pA + (p + 0) * K;
        const float* k1 = pA + (p + 1) * K;
        const float* k2 = pA + (p + 2) * K;
        const float* k3 = pA + (p + 3) * K;
        const float* k4 = pA + (p + 4) * K;
        const float* k5 = pA + (p + 5) * K;
        const float* k6 = pA + (p + 6) * K;
        const float* k7 = pA + (p + 7) * K;
        float* ktmp = pA_t + (p / 8) * 8 * K;
        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp[4] = k4[0];
            ktmp[5] = k5[0];
            ktmp[6] = k6[0];
            ktmp[7] = k7[0];
            ktmp += 8;
            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
            k4 += 1;
            k5 += 1;
            k6 += 1;
            k7 += 1;
        }
    }
    /* 4-row groups from the remainder */
    nn_outch = (M - remain_outch_start) >> 2;
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;
        const float* k0 = pA + (p + 0) * K;
        const float* k1 = pA + (p + 1) * K;
        const float* k2 = pA + (p + 2) * K;
        const float* k3 = pA + (p + 3) * K;
        float* ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K;
        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;
            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }
    /* remaining single rows */
    remain_outch_start += nn_outch << 2;
    for (int p = remain_outch_start; p < M; p++)
    {
        const float* k0 = pA + (p + 0) * K;
        float* ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K;
        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}

/*
 * conv_hcl_interleave_pack4_int8: int8 variant of the kernel repacking above —
 * identical 8/4/1 row-group layout, operating on int8_t data.
 */
void conv_hcl_interleave_pack4_int8(int M, int K, struct conv_priv_info* priv_info)
{
    int8_t* pA = ( int8_t * )priv_info->interleave_buffer;
    int8_t* pA_t = ( int8_t* )priv_info->interleave_buffer_pack4;
    int nn_outch = M >> 3;
    int remain_outch_start = nn_outch << 3;
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 8;
        const int8_t* k0 = pA + (p + 0) * K;
        const int8_t* k1 = pA + (p + 1) * K;
        const int8_t* k2 = pA + (p + 2) * K;
        const int8_t* k3 = pA + (p + 3) * K;
        const int8_t* k4 = pA + (p + 4) * K;
        const int8_t* k5 = pA + (p + 5) * K;
        const int8_t* k6 = pA + (p + 6) * K;
        const int8_t* k7 = pA + (p + 7) * K;
        int8_t* ktmp = pA_t + (p / 8) * 8 * K;
        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp[4] = k4[0];
            ktmp[5] = k5[0];
            ktmp[6] = k6[0];
            ktmp[7] = k7[0];
            ktmp += 8;
            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
            k4 += 1;
            k5 += 1;
            k6 += 1;
            k7 += 1;
        }
    }
    nn_outch = (M - remain_outch_start) >> 2;
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;
        const int8_t* k0 = pA + (p + 0) * K;
        const int8_t* k1 = pA + (p + 1) * K;
        const int8_t* k2 = pA + (p + 2) * K;
        const int8_t* k3 = pA + (p + 3) * K;
        int8_t* ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K;
        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;
            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }
    remain_outch_start += nn_outch << 2;
    for (int p = remain_outch_start; p < M; p++)
    {
        const int8_t* k0 = pA + (p + 0) * K;
        int8_t* ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4)
* 8 * K;
        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}

/*
 * conv_hcl_prerun: one-time setup before inference. Chooses the winograd
 * path for fp32 conv3x3s1 when supported; otherwise allocates (unless the
 * caller supplied external buffers) the im2col, packed-im2col and interleave
 * buffers, interleaves the kernel, and optionally repacks it into the pack4
 * layout. Returns 0 on success.
 */
int conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor,
                    struct conv_priv_info* priv_info, struct conv_param* param)
{
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    /* check winograd implement, only for conv3x3s1 */
    if (input_tensor->data_type == TENGINE_DT_FP32)
    {
        priv_info->winograd = winograd_support(param, in_h, in_w);
        if (priv_info->winograd)
        {
            return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param);
        }
    }
    /* NOTE(review): sys_malloc results below are not NULL-checked */
    if (!priv_info->external_im2col_mem)
    {
        int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->im2col_buffer = mem;
        priv_info->im2col_buffer_size = mem_size;
    }
    if (!priv_info->external_im2col_pack4_mem)
    {
        int mem_size = conv_hcl_get_shared_pack4_mem_size(filter_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->im2col_buffer_pack4 = mem;
        priv_info->im2col_buffer_pack4_size = mem_size;
    }
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }
    if (input_tensor->data_type == TENGINE_DT_UINT8)
        interleave_uint8(filter_tensor, priv_info);
    else
        interleave(filter_tensor, priv_info);
    if (priv_info->external_interleave_pack4_mem)
    {
        int M = filter_tensor->dims[0];
        int K = filter_tensor->elem_num / filter_tensor->dims[0];
        int mem_size = conv_hcl_get_interleave_pack4_size(M, K, filter_tensor);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer_pack4 = mem;
        priv_info->interleave_buffer_pack4_size = mem_size;
        /* uint8 is simulated in fp32, so it shares the fp32 repack */
        if (input_tensor->data_type == TENGINE_DT_FP32 || input_tensor->data_type == TENGINE_DT_UINT8)
            conv_hcl_interleave_pack4_fp32(M, K, priv_info);
        else
            conv_hcl_interleave_pack4_int8(M, K, priv_info);
        /* the unpacked interleave buffer is no longer needed once repacked */
        if
(!priv_info->external_interleave_mem && priv_info->interleave_buffer)
        {
            sys_free(priv_info->interleave_buffer);
            priv_info->interleave_buffer = NULL;
        }
    }
    else
    {
        /* no repack: alias the pack4 pointer to the plain interleave buffer */
        priv_info->interleave_buffer_pack4 = priv_info->interleave_buffer;
        priv_info->interleave_buffer_pack4_size = priv_info->interleave_buffer_size;
    }
    return 0;
}

/*
 * conv_hcl_postrun: release buffers owned by priv_info (external buffers are
 * left alone). Delegates to the winograd teardown when that path was chosen.
 */
int conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    if (priv_info->winograd)
    {
        return wino_conv_hcl_postrun(priv_info);
    }
    /* NOTE(review): this condition tests interleave_buffer but frees
       interleave_buffer_pack4, and the last if below can free the same
       pointer again if it was not reset — verify double-free safety against
       how prerun sets these fields. */
    if (priv_info->external_interleave_pack4_mem && !priv_info->external_interleave_mem
        && priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer_pack4);
        priv_info->interleave_buffer_pack4 = NULL;
    }
    if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL)
    {
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }
    if (!priv_info->external_im2col_pack4_mem && priv_info->im2col_buffer_pack4 != NULL)
    {
        sys_free(priv_info->im2col_buffer_pack4);
        priv_info->im2col_buffer_pack4 = NULL;
    }
    if (priv_info->external_interleave_pack4_mem && priv_info->interleave_buffer_pack4 != NULL)
    {
        sys_free(priv_info->interleave_buffer_pack4);
        priv_info->interleave_buffer_pack4 = NULL;
    }
    return 0;
}

/*
 * conv_hcl_run: execute the convolution. For each batch image and group:
 * im2col, optional pack to the GEMM layout, then dispatch on data type to
 * the fp32 / uint8 / int8 GEMM back-ends. Returns 0 on success, -1 for an
 * unsupported data type.
 */
int conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                 struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                 int num_thread, int cpu_affinity)
{
    int group = param->group;
    int type = input_tensor->data_type;
    if (priv_info->winograd)
    {
        return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread, cpu_affinity);
    }
    for (int i = 0; i < input_tensor->dims[0]; i++) // batch size
    {
        for (int j = 0; j < group; j++)
        {
            im2col_ir(input_tensor, output_tensor, priv_info, param, i, j);
            int K = filter_tensor->elem_num / filter_tensor->dims[0];
            int N = output_tensor->dims[2] * output_tensor->dims[3];
            void* im2col_buffer = priv_info->im2col_buffer;
            if
(priv_info->external_interleave_pack4_mem)
            {
                if (type == TENGINE_DT_FP32 || type == TENGINE_DT_UINT8)
                    input_pack4_fp32(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread);
                else
                    input_pack4_int8(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread);
            }
            else
            {
                priv_info->im2col_buffer_pack4 = im2col_buffer;
            }
            if (type == TENGINE_DT_FP32)
                sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread);
            else if (type == TENGINE_DT_UINT8)
                sgemm_uint8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread);
            else if (type == TENGINE_DT_INT8)
                sgemm_int8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread);
            else
            {
                printf("Input data type %d not to be supported.\n", input_tensor->data_type);
                return -1;
            }
        }
    }
    return 0;
}

/* Register a caller-owned im2col buffer; conv_hcl_postrun will not free it. */
int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->external_im2col_mem = 1;
    priv_info->im2col_buffer = mem;
    priv_info->im2col_buffer_size = mem_size;
    return 0;
}

/* Register a caller-owned packed im2col buffer (not freed by postrun). */
int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->external_im2col_pack4_mem = 1;
    priv_info->im2col_buffer_pack4 = mem;
    priv_info->im2col_buffer_pack4_size = mem_size;
    return 0;
}
GB_unaryop__identity_int8_int16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_int8_int16
// op(A') function: GB_tran__identity_int8_int16
// C type: int8_t
// A type: int16_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
    int16_t
#define GB_CTYPE \
    int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
    z = x ;
// casting
// NOTE: a plain C cast — int16 values outside int8 range wrap/truncate
#define GB_CASTING(z, aij) \
    int8_t z = (int8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Element-wise over anz entries, parallelized with OpenMP (static schedule,
// nthreads threads). Cx and Ax may alias since each entry is independent.
GrB_Info GB_unop__identity_int8_int16
(
    int8_t *Cx,             // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The loop body lives in GB_unaryop_transpose.c, specialized through the
// macros above (phase 2 of 2).
GrB_Info GB_tran__identity_int8_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
ellpic_bulirsch.c
#include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#include<omp.h>
#include<stdio.h>

/* Access element i of a 1-D double array through its byte stride. */
#define IND(a,i) *((double *)(a->data+i*a->strides[0]))

static PyObject *ellpic_bulirsch(PyObject *self, PyObject *args);

/*
 * ellpic_bulirsch(n, k) -> 1-D double array
 *
 * Element-wise complete elliptic integral of the third kind, computed with
 * the iterative algorithm of Bulirsch (1965). `n` and `k` must be 1-D
 * float64 arrays of equal length; the result has the same length.
 *
 * The per-element state (kc, p, c, d, e, m0) is iterated until the largest
 * relative change max|1 - kc/g| over all elements drops below 1e-8, then
 * the closed-form combination is written to the output.
 *
 * NOTE(review): the work arrays are stack VLAs — very long inputs could
 * overflow the stack; confirm expected sizes from callers.
 */
static PyObject *ellpic_bulirsch(PyObject *self, PyObject *args)
{
    PyArrayObject *k, *n, *output;
    int i, isdone;
    double foo, max;
    npy_intp dims[1];

    if (!PyArg_ParseTuple(args, "OO", &n, &k))
    {
        return NULL;
    }
    dims[0] = k->dimensions[0];
    output = (PyArrayObject *) PyArray_SimpleNew(1, dims, PyArray_DOUBLE);
    double kc[dims[0]], p[dims[0]], c[dims[0]], d[dims[0]], e[dims[0]], f[dims[0]], g[dims[0]], m0[dims[0]];

    /* Initialise the per-element iteration state. */
    #pragma omp parallel for
    for (i = 0; i < dims[0]; i++)
    {
        kc[i] = sqrt(1. - pow(IND(k,i), 2));
        e[i]  = kc[i];
        p[i]  = sqrt(IND(n,i) + 1.);
        d[i]  = 1. / p[i];
        c[i]  = 1.;
        m0[i] = 1.;
    }
    isdone = 0;
    while (isdone == 0)
    {
        /* One Bulirsch iteration for every element. */
        #pragma omp parallel for
        for (i = 0; i < dims[0]; i++)
        {
            f[i]  = c[i];
            c[i]  = d[i] / p[i] + c[i];
            g[i]  = e[i] / p[i];
            d[i]  = 2. * (f[i] * g[i] + d[i]);
            p[i]  = g[i] + p[i];
            g[i]  = m0[i];
            m0[i] = kc[i] + m0[i];
        }
        /* Convergence test: largest relative change over all elements.
         * BUG FIX: the original loop updated the shared `max` from multiple
         * threads without synchronisation — a data race that could miss the
         * true maximum and report premature convergence. An OpenMP max
         * reduction makes the fold race-free. */
        max = fabs(1. - kc[0] / g[0]);
        #pragma omp parallel for private(foo) reduction(max:max)
        for (i = 1; i < dims[0]; i++)
        {
            foo = fabs(1. - kc[i] / g[i]);
            if (foo > max)
                max = foo;
        }
        if (max > 1.e-8)
        {
            /* Not converged: refresh kc and e and iterate again. */
            #pragma omp parallel for
            for (i = 0; i < dims[0]; i++)
            {
                kc[i] = 2 * sqrt(e[i]);
                e[i]  = kc[i] * m0[i];
            }
        }
        else
        {
            /* Converged: emit the final combination for every element. */
            #pragma omp parallel for
            for (i = 0; i < dims[0]; i++)
                IND(output,i) = 0.5 * M_PI * (c[i] * m0[i] + d[i]) / (m0[i] * (m0[i] + p[i]));
            isdone = 1;
        }
    }
    return PyArray_Return(output);
}

static char module_docstring[]="\
Computes the complete elliptical integral of the third kind using\n\
the algorithm of Bulirsch (1965).\n\
\n\
Parameters\n\
----------\n\
n: 1D NPY ARRAY - contains values from trquad.py\n\
k: 1D NPY ARRAY - contains values from trquad.py\n\
\n\
Returns\n\
-------\n\
output: 1D NPY ARRAY - \n\
\n\
Revisions\n\
---------\n\
Original version by Jason Eastman\n\
2012-08-25 Kevin Stevenson, UChicago \n\
           kbs@uchicago.edu\n\
           Converted from Python\n\n\
2018-11-22 Jonathan Fraine, SSI\n\
           jfraine at spacescience.org\n\
           Updated c extensions to python3, with support for python2.7\n\n\
";

static PyMethodDef module_methods[] = {
    {"ellpic_bulirsch", ellpic_bulirsch, METH_VARARGS, module_docstring},
    {NULL}};

// static char module_docstring[] =
// "This module is used to calcuate the ellpic_bulirsch";

/* Module initialisation for both Python 3 and Python 2.7. */
PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_ellpic_bulirsch(void)
#else
initellpic_bulirsch(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
    PyObject *module;
    static struct PyModuleDef moduledef = {
        PyModuleDef_HEAD_INIT,
        "ellpic_bulirsch",  /* m_name */
        module_docstring,   /* m_doc */
        -1,                 /* m_size */
        module_methods,     /* m_methods */
        NULL,               /* m_reload */
        NULL,               /* m_traverse */
        NULL,               /* m_clear */
        NULL,               /* m_free */
    };
#endif
#if PY_MAJOR_VERSION >= 3
    module = PyModule_Create(&moduledef);
    if (!module)
        return NULL;
    /* Load `numpy` functionality. */
    import_array();
    return module;
#else
    PyObject *m = Py_InitModule3("ellpic_bulirsch", module_methods, module_docstring);
    if (m == NULL)
        return;
    /* Load `numpy` functionality. */
    import_array();
#endif
}
GrB_Matrix_export.c
//------------------------------------------------------------------------------
// GrB_Matrix_export: export a matrix in CSR, CSC, or COO format
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Exports the contents of a matrix in one of 3 formats: CSR, CSC, or COO
// (triplet format).  The FullC/FullR (dense) formats are currently disabled
// (see the commented-out GrB_DENSE_* cases below).  The exported matrix is
// not modified.  No typecast is performed; the output array Ax must be of
// the same type as the input matrix A.

// The required sizes of the Ap, Ai, and Ax arrays are given by
// GrB_Matrix_exportSize.

// The GraphBLAS C API does not have a GrB* method to query the type of a
// GrB_Matrix or the size of a type.  SuiteSparse:GraphBLAS provides
// GxB_Matrix_type_name to query the type of a matrix (returning a string),
// which can be converted into a GrB_Type with GxB_Type_from_name.  The size
// of a type can be queried with GxB_Type_size.  Using these methods, a user
// application can ensure that its Ax array has the correct size for any
// given GrB_Matrix it wishes to export, regardless of its type.
#include "GB_transpose.h"

// GB_FREE_ALL frees the temporary copy T (a no-op when T was never built).
#define GB_FREE_ALL \
{ \
    GB_phbix_free (T) ; \
}

//------------------------------------------------------------------------------
// GB_export_worker: export a matrix of any type
//------------------------------------------------------------------------------

static GrB_Info GB_export_worker   // export a matrix
(
    GrB_Index *Ap,          // pointers for CSR, CSC, row indices for COO
    GrB_Index *Ai,          // row indices for CSR, CSC, col indices for COO
    void *Ax,               // values (must match the type of A_input)
    GrB_Index *Ap_len,      // number of entries in Ap (not # of bytes)
    GrB_Index *Ai_len,      // number of entries in Ai (not # of bytes)
    GrB_Index *Ax_len,      // number of entries in Ax (not # of bytes)
    GrB_Format format,      // export format
    GrB_Matrix A_input,     // matrix to export
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    GrB_Matrix A = A_input ;
    struct GB_Matrix_opaque T_header ;
    GrB_Matrix T = GB_clear_static_header (&T_header) ;

    switch (format)
    {
        case GrB_CSR_FORMAT :
        case GrB_CSC_FORMAT :
        case GrB_COO_FORMAT :
            GB_RETURN_IF_NULL (Ap) ;
            GB_RETURN_IF_NULL (Ap_len) ;
            GB_RETURN_IF_NULL (Ai) ;
            GB_RETURN_IF_NULL (Ai_len) ;
            // deliberate fall-through: every format also requires Ax/Ax_len
        default:
            GB_RETURN_IF_NULL (Ax) ;
            GB_RETURN_IF_NULL (Ax_len) ;
    }

    // finish any pending work
    GB_MATRIX_WAIT (A) ;

    //--------------------------------------------------------------------------
    // determine current format of A and if a copy is needed
    //--------------------------------------------------------------------------

    int sparsity = GB_sparsity (A) ;
    bool is_csc = A->is_csc ;
    bool make_copy ;
    bool csc_requested ;

    switch (format)
    {

        case GrB_CSR_FORMAT :
            // a copy is needed unless A is already sparse, held by row
            make_copy = !(sparsity == GxB_SPARSE && !is_csc) ;
            csc_requested = false ;
            break ;

        case GrB_CSC_FORMAT :
            // a copy is needed unless A is already sparse, held by column
            make_copy = !(sparsity == GxB_SPARSE && is_csc) ;
            csc_requested = true ;
            break ;

        // case GrB_DENSE_ROW_FORMAT :
        //     if (!GB_is_dense (A))
        //     {
        //         // A must dense or full
        //         return (GrB_INVALID_VALUE) ;
        //     }
        //     make_copy = !(sparsity == GxB_FULL && !is_csc) ;
        //     csc_requested = false ;
        //     break ;

        // case GrB_DENSE_COL_FORMAT :
        //     if (!GB_is_dense (A))
        //     {
        //         // A must dense or full
        //         return (GrB_INVALID_VALUE) ;
        //     }
        //     make_copy = !(sparsity == GxB_FULL && is_csc) ;
        //     csc_requested = true ;
        //     break ;

        case GrB_COO_FORMAT :
            // never make a copy to export in tuple format
            make_copy = false ;
            csc_requested = is_csc ;
            break ;

        default :
            // unknown format
            return (GrB_INVALID_VALUE) ;
    }

    //--------------------------------------------------------------------------
    // create a copy if the matrix is not in the requested format
    //--------------------------------------------------------------------------

    if (make_copy)
    {
        if (is_csc != csc_requested)
        {
            // T = A', typecast-free transpose to flip the orientation
            GB_OK (GB_transpose_cast (T, A->type, csc_requested, A, false,
                Context)) ;
        }
        else
        {
            // T = A, a deep copy preserving iso-ness
            GB_OK (GB_dup_worker (&T, A->iso, A, true, A->type, Context)) ;
        }

        switch (format)
        {
            case GrB_CSR_FORMAT :
            case GrB_CSC_FORMAT :
                GB_OK (GB_convert_any_to_sparse (T, Context)) ;
                break ;
            // case GrB_DENSE_ROW_FORMAT :
            // case GrB_DENSE_COL_FORMAT :
            //     GB_convert_any_to_full (T) ;
            //     break ;
            default :
                break ;
        }
        // from here on, A refers to the temporary copy T
        A = T ;
    }

    //--------------------------------------------------------------------------
    // export the contents of the matrix
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    GrB_Index nvals = GB_nnz (A) ;
    // Ap has one entry per vector, plus one
    int64_t plen = A->vdim+1 ;

    switch (format)
    {
        case GrB_CSR_FORMAT :
        case GrB_CSC_FORMAT :
            // caller-provided buffers must be large enough
            if (plen > (*Ap_len) || nvals > (*Ai_len))
            {
                GB_FREE_ALL ;
                return (GrB_INSUFFICIENT_SPACE) ;
            }
            GB_memcpy (Ap, A->p, plen * sizeof (GrB_Index), nthreads_max) ;
            GB_memcpy (Ai, A->i, nvals * sizeof (GrB_Index), nthreads_max) ;
            (*Ap_len) = plen ;
            (*Ai_len) = nvals ;

        // case GrB_DENSE_ROW_FORMAT :
        // case GrB_DENSE_COL_FORMAT :

            if (nvals > (*Ax_len))
            {
                GB_FREE_ALL ;
                return (GrB_INSUFFICIENT_SPACE) ;
            }
            (*Ax_len) = nvals ;
            ASSERT (csc_requested == A->is_csc) ;
            if (A->iso)
            {
                // expand the iso A->x into the non-iso array Ax
                ASSERT (nvals > 0) ;
                GB_iso_expand (Ax, nvals, A->x, A->type->size, Context) ;
            }
            else
            {
                GB_memcpy (Ax, A->x, nvals * A->type->size, nthreads_max) ;
            }
            break ;

        default:
        case GrB_COO_FORMAT :
            // triplet export: Ap holds row indices, Ai column indices
            if (nvals > (*Ap_len) || nvals > (*Ai_len) || nvals > (*Ax_len))
            {
                GB_FREE_ALL ;
                return (GrB_INSUFFICIENT_SPACE) ;
            }
            GB_OK (GB_extractTuples (Ap, Ai, Ax, &nvals, A->type->code, A,
                Context)) ;
            (*Ap_len) = nvals ;
            (*Ai_len) = nvals ;
            (*Ax_len) = nvals ;
            break ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_ALL ;
    // make the exported arrays visible to all threads before returning
    #pragma omp flush
    return (GrB_SUCCESS) ;
}

//------------------------------------------------------------------------------
// GrB_Matrix_export_*: export a matrix of a given type
//------------------------------------------------------------------------------

// After the worker returns, nothing remains to free in the wrappers.
#undef GB_FREE_ALL
#define GB_FREE_ALL ;

// GB_EXPORT expands into one typed wrapper per built-in type; each checks
// the matrix type code and forwards to GB_export_worker.
#define GB_EXPORT(prefix,ctype,T,acode)                                        \
GrB_Info GB_EVAL3 (prefix, _Matrix_export_, T) /* export a matrix */           \
(                                                                              \
    GrB_Index *Ap,      /* pointers for CSR, CSC, row indices for COO */       \
    GrB_Index *Ai,      /* row indices for CSR, CSC, col indices for COO */    \
    ctype *Ax,          /* values (must match the type of A) */                \
    GrB_Index *Ap_len,  /* number of entries in Ap (not # of bytes) */         \
    GrB_Index *Ai_len,  /* number of entries in Ai (not # of bytes) */         \
    GrB_Index *Ax_len,  /* number of entries in Ax (not # of bytes) */         \
    GrB_Format format,  /* export format */                                    \
    GrB_Matrix A        /* matrix to export */                                 \
)                                                                              \
{                                                                              \
    GB_WHERE1 (GB_STR(prefix) "_Matrix_export_" GB_STR(T)                      \
        " (Ap, Ai, Ax, &Ap_len, &Ai_len, &Ax_len, format, A)") ;               \
    GB_BURBLE_START (GB_STR(prefix) "_Matrix_export_" GB_STR(T)) ;             \
    GB_RETURN_IF_NULL_OR_FAULTY (A) ;                                          \
    if (A->type->code != acode) return (GrB_DOMAIN_MISMATCH) ;                 \
    GrB_Info info = GB_export_worker (Ap, Ai, (void *) Ax,                     \
        Ap_len, Ai_len, Ax_len, format, A, Context) ;                          \
    GB_BURBLE_END ;                                                            \
    return (info) ;                                                            \
}

GB_EXPORT (GrB, bool      , BOOL   , GB_BOOL_code  )
GB_EXPORT (GrB, int8_t    , INT8   , GB_INT8_code  )
GB_EXPORT (GrB, int16_t   , INT16  , GB_INT16_code )
GB_EXPORT (GrB, int32_t   , INT32  , GB_INT32_code )
GB_EXPORT (GrB, int64_t   , INT64  , GB_INT64_code )
GB_EXPORT (GrB, uint8_t   , UINT8  , GB_UINT8_code )
GB_EXPORT (GrB, uint16_t  , UINT16 , GB_UINT16_code)
GB_EXPORT (GrB, uint32_t  , UINT32 , GB_UINT32_code)
GB_EXPORT (GrB, uint64_t  , UINT64 , GB_UINT64_code)
GB_EXPORT (GrB, float     , FP32   , GB_FP32_code  )
GB_EXPORT (GrB, double    , FP64   , GB_FP64_code  )
GB_EXPORT (GxB, GxB_FC32_t, FC32   , GB_FC32_code  )
GB_EXPORT (GxB, GxB_FC64_t, FC64   , GB_FC64_code  )
GB_EXPORT (GrB, void      , UDT    , GB_UDT_code   )
hist_util.h
/*! * Copyright 2017 by Contributors * \file hist_util.h * \brief Utility for fast histogram aggregation * \author Philip Cho, Tianqi Chen */ #ifndef XGBOOST_COMMON_HIST_UTIL_H_ #define XGBOOST_COMMON_HIST_UTIL_H_ #include <xgboost/data.h> #include <xgboost/generic_parameters.h> #include <limits> #include <vector> #include <algorithm> #include <memory> #include <utility> #include "row_set.h" #include "../tree/param.h" #include "./quantile.h" #include "./timer.h" #include "random.h" namespace xgboost { /*! * \brief A C-style array with in-stack allocation. As long as the array is smaller than * MaxStackSize, it will be allocated inside the stack. Otherwise, it will be * heap-allocated. */ template<typename T, size_t MaxStackSize> class MemStackAllocator { public: explicit MemStackAllocator(size_t required_size): required_size_(required_size) { } T* Get() { if (!ptr_) { if (MaxStackSize >= required_size_) { ptr_ = stack_mem_; } else { ptr_ = reinterpret_cast<T*>(malloc(required_size_ * sizeof(T))); do_free_ = true; } } return ptr_; } ~MemStackAllocator() { if (do_free_) free(ptr_); } private: T* ptr_ = nullptr; bool do_free_ = false; size_t required_size_; T stack_mem_[MaxStackSize]; }; namespace common { /* * \brief A thin wrapper around dynamically allocated C-style array. * Make sure to call resize() before use. */ template<typename T> struct SimpleArray { ~SimpleArray() { free(ptr_); ptr_ = nullptr; } void resize(size_t n) { T* ptr = static_cast<T*>(malloc(n*sizeof(T))); memcpy(ptr, ptr_, n_ * sizeof(T)); free(ptr_); ptr_ = ptr; n_ = n; } T& operator[](size_t idx) { return ptr_[idx]; } T& operator[](size_t idx) const { return ptr_[idx]; } size_t size() const { return n_; } T back() const { return ptr_[n_-1]; } T* data() { return ptr_; } const T* data() const { return ptr_; } T* begin() { return ptr_; } const T* begin() const { return ptr_; } T* end() { return ptr_ + n_; } const T* end() const { return ptr_ + n_; } private: T* ptr_ = nullptr; size_t n_ = 0; }; /*! 
* \brief A single row in global histogram index.
 *  Directly represent the global index in the histogram entry.
 */
using GHistIndexRow = Span<uint32_t const>;

// A CSC matrix representing histogram cuts, used in CPU quantile hist.
class HistogramCuts {
  // Using friends to avoid creating a virtual class, since HistogramCuts is used as value
  // object in many places.
  friend class SparseCuts;
  friend class DenseCuts;
  friend class CutsBuilder;

 protected:
  using BinIdx = uint32_t;
  common::Monitor monitor_;

  std::vector<bst_float> cut_values_;  // flattened cut points for all features
  std::vector<uint32_t> cut_ptrs_;     // per-feature offsets into cut_values_
  std::vector<float> min_vals_;  // storing minimum value in a sketch set.

 public:
  HistogramCuts();
  // Move-only: copying is deleted, moving delegates to move-assignment.
  HistogramCuts(HistogramCuts const& that) = delete;
  HistogramCuts(HistogramCuts&& that) noexcept(true) {
    *this = std::forward<HistogramCuts&&>(that);
  }
  HistogramCuts& operator=(HistogramCuts const& that) = delete;
  HistogramCuts& operator=(HistogramCuts&& that) noexcept(true) {
    monitor_ = std::move(that.monitor_);
    cut_ptrs_ = std::move(that.cut_ptrs_);
    cut_values_ = std::move(that.cut_values_);
    min_vals_ = std::move(that.min_vals_);
    return *this;
  }

  /* \brief Build histogram cuts. */
  void Build(DMatrix* dmat, uint32_t const max_num_bins);
  /* \brief How many bins a feature has. */
  uint32_t FeatureBins(uint32_t feature) const {
    return cut_ptrs_.at(feature+1) - cut_ptrs_[feature];
  }

  // Getters.  Cuts should be of no use after building histogram indices, but currently
  // it's deeply linked with quantile_hist, gpu sketcher and gpu_hist.  So we preserve
  // these for now.
  std::vector<uint32_t> const& Ptrs()      const { return cut_ptrs_;   }
  std::vector<float>    const& Values()    const { return cut_values_; }
  std::vector<float>    const& MinValues() const { return min_vals_;   }

  size_t TotalBins() const { return cut_ptrs_.back(); }

  // Find the bin (global index into cut_values_) for `value` in column
  // `column_id` via binary search; the last bin clamps the upper tail.
  BinIdx SearchBin(float value, uint32_t column_id) {
    auto beg = cut_ptrs_.at(column_id);
    auto end = cut_ptrs_.at(column_id + 1);
    auto it = std::upper_bound(cut_values_.cbegin() + beg, cut_values_.cbegin() + end, value);
    if (it == cut_values_.cend()) {
      it = cut_values_.cend() - 1;
    }
    BinIdx idx = it - cut_values_.cbegin();
    return idx;
  }

  BinIdx SearchBin(Entry const& e) {
    return SearchBin(e.fvalue, e.index);
  }
};

/* \brief An interface for building quantile cuts.
 *
 * `DenseCuts' always assumes there are `max_bins` for each feature, which makes it not
 * suitable for sparse dataset.  On the other hand `SparseCuts' uses `GetColumnBatches',
 * which doubles the memory usage, hence can not be applied to dense dataset.
 */
class CutsBuilder {
 public:
  using WXQSketch = common::WXQuantileSketch<bst_float, bst_float>;

 protected:
  HistogramCuts* p_cuts_;  // output container; not owned
  /* \brief return whether group for ranking is used. */
  static bool UseGroup(DMatrix* dmat);

 public:
  explicit CutsBuilder(HistogramCuts* p_cuts) : p_cuts_{p_cuts} {}
  virtual ~CutsBuilder() = default;

  // Map a row id to the index of the (ranking) group containing it; the
  // last entry of group_ptr is the total row count and is excluded from
  // the search range.
  static uint32_t SearchGroupIndFromRow(
      std::vector<bst_uint> const& group_ptr, size_t const base_rowid) {
    using KIt = std::vector<bst_uint>::const_iterator;
    KIt res = std::lower_bound(group_ptr.cbegin(), group_ptr.cend() - 1, base_rowid);
    // Cannot use CHECK_NE because it will try to print the iterator.
    bool const found = res != group_ptr.cend() - 1;
    if (!found) {
      LOG(FATAL) << "Row " << base_rowid << " does not lie in any group!";
    }
    uint32_t group_ind = std::distance(group_ptr.cbegin(), res);
    return group_ind;
  }

  // Append strictly-increasing cut points extracted from a quantile sketch
  // summary into p_cuts_->cut_values_.
  void AddCutPoint(WXQSketch::SummaryContainer const& summary) {
    if (summary.size > 1 && summary.size <= 16) {
      /* specialized code for categorical / ordinal data -- use midpoints */
      for (size_t i = 1; i < summary.size; ++i) {
        bst_float cpt = (summary.data[i].value + summary.data[i - 1].value) / 2.0f;
        if (i == 1 || cpt > p_cuts_->cut_values_.back()) {
          p_cuts_->cut_values_.push_back(cpt);
        }
      }
    } else {
      // general case: use the sketch quantile values directly (skipping the
      // first entry, which is the column minimum)
      for (size_t i = 2; i < summary.size; ++i) {
        bst_float cpt = summary.data[i - 1].value;
        if (i == 2 || cpt > p_cuts_->cut_values_.back()) {
          p_cuts_->cut_values_.push_back(cpt);
        }
      }
    }
  }

  /* \brief Build histogram indices. */
  virtual void Build(DMatrix* dmat, uint32_t const max_num_bins) = 0;
};

/*! \brief Cut configuration for sparse dataset. */
class SparseCuts : public CutsBuilder {
  /* \brief Distribute columns to each thread according to number of entries. */
  static std::vector<size_t> LoadBalance(SparsePage const& page, size_t const nthreads);
  Monitor monitor_;

 public:
  explicit SparseCuts(HistogramCuts* container) :
      CutsBuilder(container) {
    monitor_.Init(__FUNCTION__);
  }

  /* \brief Concatenate the built cuts in each thread. */
  void Concat(std::vector<std::unique_ptr<SparseCuts>> const& cuts, uint32_t n_cols);
  /* \brief Build histogram indices in single thread. */
  void SingleThreadBuild(SparsePage const& page, MetaInfo const& info,
                         uint32_t max_num_bins,
                         bool const use_group_ind,
                         uint32_t beg, uint32_t end, uint32_t thread_id);
  void Build(DMatrix* dmat, uint32_t const max_num_bins) override;
};

/*! \brief Cut configuration for dense dataset.
 */
class DenseCuts : public CutsBuilder {
 protected:
  Monitor monitor_;

 public:
  explicit DenseCuts(HistogramCuts* container) :
      CutsBuilder(container) {
    monitor_.Init(__FUNCTION__);
  }
  void Init(std::vector<WXQSketch>* sketchs, uint32_t max_num_bins);
  void Build(DMatrix* p_fmat, uint32_t max_num_bins) override;
};

// FIXME(trivialfis): Merge this into generic cut builder.
/*! \brief Builds the cut matrix on the GPU.
 *
 * \return The row stride across the entire dataset.
 */
size_t DeviceSketch
  (const tree::TrainParam& param, const GenericParameter &learner_param,
   int gpu_batch_nrows, DMatrix* dmat, HistogramCuts* hmat);

/*!
 * \brief preprocessed global index matrix, in CSR format
 *  Transform floating values to integer index in histogram
 *  This is a global histogram index.
 */
struct GHistIndexMatrix {
  /*! \brief row pointer to rows by element position */
  // std::vector<size_t> row_ptr;
  SimpleArray<size_t> row_ptr;
  /*! \brief The index data */
  SimpleArray<uint32_t> index;
  /*! \brief hit count of each index */
  std::vector<size_t> hit_count;
  /*! \brief The corresponding cuts */
  HistogramCuts cut;
  // Create a global histogram matrix, given cut
  void Init(DMatrix* p_fmat, int max_num_bins);
  // get i-th row
  inline GHistIndexRow operator[](size_t i) const {
    return {&index[0] + row_ptr[i],
            static_cast<GHistIndexRow::index_type>(
                row_ptr[i + 1] - row_ptr[i])};
  }
  // Accumulate the number of entries per feature into `counts`; caller must
  // provide an array of at least (cut.Ptrs().size() - 1) zero-initialized
  // slots.
  inline void GetFeatureCounts(size_t* counts) const {
    auto nfeature = cut.Ptrs().size() - 1;
    for (unsigned fid = 0; fid < nfeature; ++fid) {
      auto ibegin = cut.Ptrs()[fid];
      auto iend = cut.Ptrs()[fid + 1];
      for (auto i = ibegin; i < iend; ++i) {
        counts[fid] += hit_count[i];
      }
    }
  }

 private:
  std::vector<size_t> hit_count_tloc_;  // thread-local hit counters
};

// Non-owning view of one block of the blocked histogram index.
struct GHistIndexBlock {
  const size_t* row_ptr;
  const uint32_t* index;

  inline GHistIndexBlock(const size_t* row_ptr, const uint32_t* index)
    : row_ptr(row_ptr), index(index) {}

  // get i-th row
  inline GHistIndexRow operator[](size_t i) const {
    return {&index[0] + row_ptr[i], detail::ptrdiff_t(row_ptr[i + 1] - row_ptr[i])};
  }
};

class ColumnMatrix;

// Rearranges a GHistIndexMatrix into feature blocks for cache-friendly
// histogram building.
class GHistIndexBlockMatrix {
 public:
  void Init(const GHistIndexMatrix& gmat,
            const ColumnMatrix& colmat,
            const tree::TrainParam& param);

  inline GHistIndexBlock operator[](size_t i) const {
    return {blocks_[i].row_ptr_begin, blocks_[i].index_begin};
  }

  inline size_t GetNumBlock() const {
    return blocks_.size();
  }

 private:
  std::vector<size_t> row_ptr_;
  std::vector<uint32_t> index_;
  const HistogramCuts* cut_;
  struct Block {
    const size_t* row_ptr_begin;
    const size_t* row_ptr_end;
    const uint32_t* index_begin;
    const uint32_t* index_end;
  };
  std::vector<Block> blocks_;
};

/*!
 * \brief used instead of GradStats to have float instead of double to reduce histograms
 * this improves performance by 10-30% and memory consumption for histograms by 2x
 * accuracy in both cases is the same
 */
struct GradStatHist {
  typedef float GradType;
  /*! \brief sum gradient statistics */
  GradType sum_grad;
  /*! \brief sum hessian statistics */
  GradType sum_hess;

  GradStatHist() : sum_grad{0}, sum_hess{0} {
    // downstream code memcpy's / reinterprets these as two packed floats
    static_assert(sizeof(GradStatHist) == 8,
                  "Size of GradStatHist is not 8 bytes.");
  }

  inline void Add(const GradStatHist& b) {
    sum_grad += b.sum_grad;
    sum_hess += b.sum_hess;
  }

  inline void Add(const tree::GradStats& b) {
    sum_grad += b.sum_grad;
    sum_hess += b.sum_hess;
  }

  inline void Add(const GradientPair& p) {
    this->Add(p.GetGrad(), p.GetHess());
  }

  inline void Add(const GradType& grad, const GradType& hess) {
    sum_grad += grad;
    sum_hess += hess;
  }

  inline tree::GradStats ToGradStat() const {
    return tree::GradStats(sum_grad, sum_hess);
  }

  // this = a - b (used by the parent-minus-sibling histogram trick)
  inline void SetSubstract(const GradStatHist& a, const GradStatHist& b) {
    sum_grad = a.sum_grad - b.sum_grad;
    sum_hess = a.sum_hess - b.sum_hess;
  }

  inline void SetSubstract(const tree::GradStats& a, const GradStatHist& b) {
    sum_grad = a.sum_grad - b.sum_grad;
    sum_hess = a.sum_hess - b.sum_hess;
  }

  inline GradType GetGrad() const { return sum_grad; }
  inline GradType GetHess() const { return sum_hess; }

  inline static void Reduce(GradStatHist& a, const GradStatHist& b) { // NOLINT(*)
    a.Add(b);
  }
};

using GHistRow = Span<GradStatHist>;

/*!
 * \brief histogram of gradient statistics for multiple nodes
 */
class HistCollection {
 public:
  // access histogram for i-th node (allocated on demand)
  inline GHistRow operator[](bst_uint nid) {
    AddHistRow(nid);
    return { const_cast<GradStatHist*>(dmlc::BeginPtr(data_arr_[nid])), nbins_};
  }

  // have we computed a histogram for i-th node?
  inline bool RowExists(bst_uint nid) const {
    return nid < data_arr_.size();
  }

  // initialize histogram collection; existing rows are discarded only when
  // the bin count changes
  inline void Init(uint32_t nbins) {
    if (nbins_ != nbins) {
      data_arr_.clear();
      nbins_ = nbins;
    }
  }

  // create an empty histogram for i-th node
  inline void AddHistRow(bst_uint nid) {
    if (data_arr_.size() <= nid) {
      size_t prev = data_arr_.size();
      data_arr_.resize(nid + 1);
      for (size_t i = prev; i < data_arr_.size(); ++i) {
        data_arr_[i].resize(nbins_);
      }
    }
  }

 private:
  /*! \brief number of all bins over all features */
  uint32_t nbins_ = 0;
  std::vector<std::vector<GradStatHist>> data_arr_;
};

/*!
 * \brief builder for histograms of gradient statistics
 */
class GHistBuilder {
 public:
  // initialize builder
  inline void Init(size_t nthread, uint32_t nbins) {
    nthread_ = nthread;
    nbins_ = nbins;
  }

  // Build the histogram for one node from a blocked index matrix, with
  // 8-way manual unrolling over rows.
  // NOTE(review): blocks are processed in parallel while all threads write
  // into the shared `hist`; this is only race-free if distinct blocks map
  // to disjoint bin ranges -- confirm against GHistIndexBlockMatrix::Init.
  void BuildBlockHist(const std::vector<GradientPair>& gpair,
                      const RowSetCollection::Elem row_indices,
                      const GHistIndexBlockMatrix& gmatb,
                      GHistRow hist) {
    constexpr int kUnroll = 8;  // loop unrolling factor
    const int32_t nblock = gmatb.GetNumBlock();
    const size_t nrows = row_indices.end - row_indices.begin;
    const size_t rest = nrows % kUnroll;  // tail rows handled unrolled-free

#pragma omp parallel for
    for (int32_t bid = 0; bid < nblock; ++bid) {
      auto gmat = gmatb[bid];

      for (size_t i = 0; i < nrows - rest; i += kUnroll) {
        size_t rid[kUnroll];
        size_t ibegin[kUnroll];
        size_t iend[kUnroll];
        GradientPair stat[kUnroll];
        for (int k = 0; k < kUnroll; ++k) {
          rid[k] = row_indices.begin[i + k];
        }
        for (int k = 0; k < kUnroll; ++k) {
          ibegin[k] = gmat.row_ptr[rid[k]];
          iend[k] = gmat.row_ptr[rid[k] + 1];
        }
        for (int k = 0; k < kUnroll; ++k) {
          stat[k] = gpair[rid[k]];
        }
        for (int k = 0; k < kUnroll; ++k) {
          for (size_t j = ibegin[k]; j < iend[k]; ++j) {
            const uint32_t bin = gmat.index[j];
            hist[bin].Add(stat[k]);
          }
        }
      }
      // remainder rows that do not fill a full unroll group
      for (size_t i = nrows - rest; i < nrows; ++i) {
        const size_t rid = row_indices.begin[i];
        const size_t ibegin = gmat.row_ptr[rid];
        const size_t iend = gmat.row_ptr[rid + 1];
        const GradientPair stat = gpair[rid];
        for (size_t j = ibegin; j < iend; ++j) {
          const uint32_t bin = gmat.index[j];
          hist[bin].Add(stat);
        }
      }
    }
  }

  uint32_t GetNumBins() {
    return nbins_;
  }

 private:
  /*! \brief number of threads for parallel computation */
  size_t nthread_;
  /*! \brief number of all bins over all features */
  uint32_t nbins_;
};

// Scalar histogram kernels for dense / sparse row blocks (defined in the
// corresponding .cc file).
void BuildHistLocalDense(size_t istart, size_t iend, size_t nrows, const size_t* rid,
    const uint32_t* index, const GradientPair::ValueT* pgh, const size_t* row_ptr,
    GradStatHist::GradType* data_local_hist, GradStatHist* grad_stat);

void BuildHistLocalSparse(size_t istart, size_t iend, size_t nrows, const size_t* rid,
    const uint32_t* index, const GradientPair::ValueT* pgh, const size_t* row_ptr,
    GradStatHist::GradType* data_local_hist, GradStatHist* grad_stat);

// sibling = parent - self, the subtraction trick for histogram reuse
void SubtractionTrick(GHistRow self, GHistRow sibling, GHistRow parent);

}  // namespace common
}  // namespace xgboost
#endif  // XGBOOST_COMMON_HIST_UTIL_H_
dropout-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file dropout-inl.h * \brief * \author Bing Xu, Da Zheng, Hang Zhang */ #ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_ #define MXNET_OPERATOR_NN_DROPOUT_INL_H_ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <map> #include <vector> #include <string> #include <utility> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../random/sampler.h" #include "../tensor/elemwise_binary_broadcast_op.h" #if (MSHADOW_USE_MKL == 1) && defined(_OPENMP) && !defined(__CUDACC__) #define MXNET_USE_MKL_DROPOUT 1 #endif #if MXNET_USE_MKL_DROPOUT #include <omp.h> #include <mkl_vml_functions.h> #include <mkl_vsl.h> #endif // MXNET_USE_MKL_DROPOUT #define MXNET_USE_CUDNN_DROPOUT MXNET_USE_CUDNN == 1 && CUDNN_MAJOR >= 7 namespace dropout { enum DropoutOpInputs { kData }; enum DropoutOpOutputs { kOut, kMask }; enum DropoutOpForwardResource { kRandom }; enum DropoutOpMode { kTraining, kAlways }; } // namespace dropout namespace mxnet { namespace op { const int MAX_DIM = 5; struct DropoutParam : public dmlc::Parameter<DropoutParam> { float p; int mode; mxnet::TShape axes; dmlc::optional<bool> cudnn_off; 
DMLC_DECLARE_PARAMETER(DropoutParam) { DMLC_DECLARE_FIELD(p).set_default(0.5).set_range(0, 1).describe( "Fraction of the input that gets dropped out during training time."); DMLC_DECLARE_FIELD(mode) .add_enum("training", dropout::kTraining) .add_enum("always", dropout::kAlways) .set_default(dropout::kTraining) .describe( "Whether to only turn on dropout during training or to also turn on for inference."); DMLC_DECLARE_FIELD(axes) .set_default(mxnet::TShape(0, 0)) .describe("Axes for variational dropout kernel."); DMLC_DECLARE_FIELD(cudnn_off) .set_default(dmlc::optional<bool>(false)) .describe( "Whether to turn off cudnn in dropout operator. " "This option is ignored if axes is specified."); } std::string Mode2String(int mode) { switch (mode) { case dropout::kTraining: return "training"; case dropout::kAlways: return "always"; default: LOG(FATAL) << "Unknown mode enum " << mode; } LOG(FATAL) << "should not reach here "; return ""; } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream p_s, mode_s, axes_s, cudnn_off_s; p_s << p; mode_s << mode; axes_s << axes; cudnn_off_s << cudnn_off; (*dict)["p"] = p_s.str(); (*dict)["mode"] = Mode2String(mode); (*dict)["axes"] = axes_s.str(); (*dict)["cudnn_off"] = cudnn_off_s.str(); } }; // struct DropoutParam template <typename xpu, typename DType> class DropoutOp { #if MXNET_USE_MKL_DROPOUT static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen, int n, double p, int* r) { typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1); const int seed = 17 + abs(genImpl.rand() % 4096); CHECK_GE(seed, 0); const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); #pragma omp parallel num_threads(nthr) { const int ithr = omp_get_thread_num(); const int avg_amount = (n + nthr - 1) / nthr; const int my_offset = ithr * avg_amount; const int my_amount = std::min(my_offset + avg_amount, n) - my_offset; if (my_amount > 0) { VSLStreamStatePtr stream; vslNewStream(&stream, 
VSL_BRNG_MCG31, seed); vslSkipAheadStream(stream, my_offset); viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p); vslDeleteStream(&stream); } } } static inline bool MKLAvailable() { // BernoulliGenerate expects an array int, so for types smaller than int, the mask buffer // will be too small, so we can;t use MKL in those cases return sizeof(DType) >= sizeof(int); } // MKL forward pass inline void MKLForward(const OpContext& ctx, const std::vector<TBlob>& in_data, const std::vector<TBlob>& out_data) { Stream<xpu>* s = ctx.get_stream<xpu>(); RandGenerator<xpu, DType>* pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s); DType* outptr = out.dptr_; DType* dataptr = data.dptr_; auto maskptr = reinterpret_cast<int*>(mask.dptr_); int count = mask.shape_[0] * mask.shape_[1]; if (sizeof(DType) > sizeof(int)) { // allocating new buffer to avoiding memory overlapping between `mask.dptr_` and `maskptr` Tensor<xpu, 1, int> temp = ctx.requested[1].get_space_typed<xpu, 1, int>(Shape1(count), s); maskptr = temp.dptr_; } BernoulliGenerate(*pgen, count, this->pkeep_, maskptr); const float pk_1 = 1.0f / this->pkeep_; #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = 0; i < count; ++i) { const DType maskVal = static_cast<DType>(maskptr[i]) * pk_1; outptr[i] = dataptr[i] * maskVal; mask.dptr_[i] = maskVal; } } // MKL backward pass inline void MKLBackward(const OpContext& ctx, const std::vector<TBlob>& in_grad, const std::vector<TBlob>& out_data, const std::vector<TBlob>& out_grad) { Stream<xpu>* s = ctx.get_stream<xpu>(); Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> mask = 
out_data[dropout::kMask].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s); DType* ingradptr = gdata.dptr_; const DType* outgradptr = grad.dptr_; const DType* maskptr = mask.dptr_; const int count = mask.shape_[0] * mask.shape_[1]; #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = 0; i < count; ++i) { ingradptr[i] = outgradptr[i] * maskptr[i]; } } #endif // #if MXNET_USE_MKL_DROPOUT public: /*! * \brief Dropout kernel, compute dropout tensor */ struct DropoutKernel { /*! * \brief Dropout kernel function * \param id Thread number (0-based representing count) * \param gen Random number generator * \param N Total number of items in the output * \param step Step between items, related to parallelism * \param dropout_out Output dropout values * \param mask_out Output mask (is multiplied to create dropout output, may be 0) * \param input_data Input data to perform the dropout on * \param pkeep Dropout rate (keep when the generated random number is less than this value) */ MSHADOW_XINLINE static void Map(index_t id, RandGenerator<xpu, DType> gen, const index_t N, const index_t step, DType* dropout_out, DType* mask_out, const DType* input_data, const real_t pkeep) { RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, { const real_t rand_num = static_cast<real_t>(genImpl.uniform()); mask_out[i] = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep); dropout_out[i] = input_data[i] * mask_out[i]; }); } }; struct BernoulliKernel { /*! 
\brief Bernoulli kernel for generating mask */ MSHADOW_XINLINE static void Map(index_t id, RandGenerator<xpu, DType> gen, const index_t N, const index_t step, DType* mask_out, const real_t pkeep) { RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, { const real_t rand_num = static_cast<real_t>(genImpl.uniform()); mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep); }); } }; explicit DropoutOp(const DropoutParam& param, Context ctx) { this->pkeep_ = 1.0f - param.p; this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode); this->axes_ = param.axes; this->dropout_passthrough_ = true; #if MXNET_USE_CUDNN_DROPOUT this->cudnn_off_ = param.cudnn_off && param.cudnn_off.value(); this->ctx_ = ctx; if (ctx.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) { dtype_ = mshadow::DataType<DType>::kCudnnFlag; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_)); CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_)); CUDNN_CALL(cudnnCreateTensorDescriptor(&dx_desc_)); CUDNN_CALL(cudnnCreateTensorDescriptor(&dy_desc_)); CUDNN_CALL(cudnnCreateDropoutDescriptor(&dropout_desc_)); } #endif // MXNET_USE_CUDNN_DROPOUT } ~DropoutOp() { #if MXNET_USE_CUDNN_DROPOUT if (this->ctx_.dev_type == kGPU && this->pkeep_ > 0 && !this->cudnn_off_) { CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(dx_desc_)); CUDNN_CALL(cudnnDestroyTensorDescriptor(dy_desc_)); CUDNN_CALL(cudnnDestroyDropoutDescriptor(dropout_desc_)); } #endif // MXNET_USE_CUDNN_DROPOUT } #if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__) inline bool CuDNNAvailable() { return this->pkeep_ > 0 && !this->cudnn_off_; } inline void CuDNNForward(const OpContext& ctx, const TBlob& in, const TBlob& mask, const TBlob& out) { Stream<xpu>* s = ctx.get_stream<xpu>(); // set dropout state. 
    // (continuation of CuDNNForward, whose signature is above this chunk)
    // Lazily obtain a cached cuDNN dropout descriptor; cuDNN expects the DROP
    // probability, hence 1 - pkeep.
    ctx.requested[0].get_cudnn_dropout_desc(&dropout_desc_, s, 1.0f - this->pkeep_);

    // Describe input/output as a flat 4-D tensor: (1, 1, 1, N) with unit inner
    // stride, so the whole blob is treated as one contiguous vector.
    int dim[4], stride[4];
    dim[0] = 1;
    dim[1] = 1;
    dim[2] = 1;
    dim[3] = out.Size();
    stride[0] = out.Size();
    stride[1] = out.Size();
    stride[2] = out.Size();
    stride[3] = 1;
    CUDNN_CALL(cudnnSetTensorNdDescriptor(x_desc_, dtype_, 4, dim, stride));
    CUDNN_CALL(cudnnSetTensorNdDescriptor(y_desc_, dtype_, 4, dim, stride));
    // perform dropout with cudnn
    CUDNN_CALL(cudnnDropoutGetReserveSpaceSize(x_desc_, &dropout_reserve_byte_));
    // cudnn uses bits to record the positions that are dropped, so reserve bytes is always
    // 1/8 of input size.  The mask blob doubles as cuDNN's reserve space, so it
    // must be at least that large.
    CHECK_GE(mask.Size() * sizeof(DType), dropout_reserve_byte_)
        << "The size of the mask space is smaller than the required cudnn reserved space.";
    CUDNN_CALL(cudnnDropoutForward(s->dnn_handle_,
                                   dropout_desc_,
                                   x_desc_,
                                   in.dptr<DType>(),
                                   y_desc_,
                                   out.dptr<DType>(),
                                   mask.dptr<DType>(),
                                   dropout_reserve_byte_));
  }

  // Backward pass through cuDNN dropout: scales/zeroes out_grad into in_grad
  // using the reserve space (mask) recorded by CuDNNForward.  Assumes
  // CuDNNForward ran first in this op instance, since it reuses
  // dropout_desc_ and dropout_reserve_byte_ set up there.
  inline void CuDNNBackward(const OpContext& ctx,
                            const TBlob& out_grad,
                            const TBlob& mask,
                            const TBlob& in_grad) {
    Stream<xpu>* s = ctx.get_stream<xpu>();
    // describe input/output tensor — same flat (1,1,1,N) layout as the forward pass
    int dim[4], stride[4];
    dim[0] = 1;
    dim[1] = 1;
    dim[2] = 1;
    dim[3] = in_grad.Size();
    stride[0] = in_grad.Size();
    stride[1] = in_grad.Size();
    stride[2] = in_grad.Size();
    stride[3] = 1;
    CUDNN_CALL(cudnnSetTensorNdDescriptor(dy_desc_, dtype_, 4, dim, stride));
    CUDNN_CALL(cudnnSetTensorNdDescriptor(dx_desc_, dtype_, 4, dim, stride));
    // perform dropout with cudnn
    CUDNN_CALL(cudnnDropoutBackward(s->dnn_handle_,
                                    dropout_desc_,
                                    dy_desc_,
                                    out_grad.dptr<DType>(),
                                    dx_desc_,
                                    in_grad.dptr<DType>(),
                                    mask.dptr<DType>(),
                                    dropout_reserve_byte_));
  }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)

  // Forward pass.  Dispatch order when dropout is active (pkeep < 1 and
  // training, or mode == kAlways) and no axes are specified: MKL, then cuDNN,
  // then the generic RNG kernel.  With axes set, a Bernoulli mask is sampled
  // once and broadcast-multiplied onto the input.  Otherwise the op degrades
  // to identity/pass-through.
  void Forward(const OpContext& ctx,
               const std::vector<TBlob>& in_data,
               const std::vector<OpReqType>& req,
               const std::vector<TBlob>& out_data) {
    this->dropout_passthrough_ = true;
    if (req[dropout::kOut] != kNullOp) {
      CHECK_EQ(in_data.size(), 1U);
      if (ctx.is_train) {
        // training mode produces both the output and the mask
        CHECK_EQ(out_data.size(), 2U);
      }
      Stream<xpu>* s    = ctx.get_stream<xpu>();
      const TBlob& in   = in_data[dropout::kData];
      const TBlob& out  = out_data[dropout::kOut];
      const TBlob& mask = out_data[dropout::kMask];
      if (this->pkeep_ < 1 && (ctx.is_train || this->mode_ == dropout::kAlways)) {
        this->dropout_passthrough_ = false;
        if (this->axes_.ndim() == 0) {
          // element-wise dropout (no shared-mask axes)
#if MXNET_USE_MKL_DROPOUT
          if (MKLAvailable()) {
            MKLForward(ctx, in_data, out_data);
            return;
          }
#endif  // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
          if (CuDNNAvailable()) {
            CuDNNForward(ctx, in, mask, out);
            return;
          }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
          RandGenerator<xpu, DType>* pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
          CHECK_NOTNULL(pgen);
          // DropoutKernel writes out directly, so accumulation is unsupported
          CHECK(req[dropout::kOut] != kAddTo);
          LaunchRNG<DropoutKernel, xpu>(s,
                                        pgen,
                                        out.Size(),
                                        out.dptr<DType>(),
                                        mask.dptr<DType>(),
                                        in.dptr<DType>(),
                                        this->pkeep_);
          return;
        } else {
          // axes specified: sample the (smaller) mask once, then broadcast it
          RandGenerator<xpu, DType>* pgen = ctx.requested[0].get_parallel_random<xpu, DType>();
          CHECK_NOTNULL(pgen);
          // initialize the mask
          LaunchRNG<BernoulliKernel, xpu>(s, pgen, mask.Size(), mask.dptr<DType>(), this->pkeep_);
          // broadcast mul: out = in * mask
          mxnet::TShape new_lshape, new_rshape, new_oshape;
          int ndim = BinaryBroadcastShapeCompact(
              in.shape_, mask.shape_, out.shape_, &new_lshape, &new_rshape, &new_oshape);
          if (!ndim) {
            // shapes already identical — plain element-wise multiply
            MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
              mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
                  s, out.Size(), out.dptr<DType>(), in.dptr<DType>(), mask.dptr<DType>());
            });
          } else {
            BROADCAST_NDIM_SWITCH(ndim, NDim, {
              mshadow::Shape<NDim> oshape  = new_oshape.get<NDim>();
              mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
              mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
              mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, mshadow_op::mul>,
                               xpu>::template LaunchEx(s,
                                                       new_oshape.Size(),
                                                       req[dropout::kOut],
                                                       lstride,
                                                       rstride,
                                                       oshape,
                                                       in.dptr<DType>(),
                                                       mask.dptr<DType>(),
                                                       out.dptr<DType>());
            });
          }
        }
      } else {
        // dropout inactive: identity copy (or nothing if in-place)
        if (req[dropout::kOut] == kWriteInplace)
          return;
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
              s, out.Size(), out.dptr<DType>(), in.dptr<DType>());
        });
      }
    }
  }

  // Backward pass.  Mirrors Forward's dispatch: if the forward pass was a
  // pass-through (flag set there), the gradient is copied unchanged;
  // otherwise gradients are multiplied by the saved mask, broadcasting when
  // axes were specified.
  void Backward(const OpContext& ctx,
                const std::vector<TBlob>& out_grad,
                const std::vector<TBlob>& out_data,
                const std::vector<OpReqType>& req,
                const std::vector<TBlob>& in_grad) {
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu>* s = ctx.get_stream<xpu>();
    if (!this->dropout_passthrough_) {
      // reset the flag for the next forward call
      this->dropout_passthrough_ = true;
      const TBlob& gdata = in_grad[dropout::kData];
      const TBlob& grad  = out_grad[dropout::kOut];
      const TBlob& mask  = out_data[dropout::kMask];
      if (this->axes_.ndim() == 0) {
#if MXNET_USE_MKL_DROPOUT
        if (MKLAvailable()) {
          MKLBackward(ctx, in_grad, out_data, out_grad);
          return;
        }
#endif  // MXNET_USE_MKL_DROPOUT
#if MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
        if (CuDNNAvailable()) {
          CuDNNBackward(ctx, grad, mask, gdata);
          return;
        }
#endif  // MXNET_USE_CUDNN_DROPOUT && defined(__CUDACC__)
        // standard case for dropout: gdata = grad * mask
        CHECK_EQ(grad.Size(), mask.Size());
        MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
          mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
              s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
        });
        return;
      } else {
        // broardcast mul
        mxnet::TShape new_lshape, new_rshape, new_oshape;
        int ndim = BinaryBroadcastShapeCompact(
            grad.shape_, mask.shape_, gdata.shape_, &new_lshape, &new_rshape, &new_oshape);
        if (!ndim) {
          MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch(
                s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>());
          });
        } else {
          BROADCAST_NDIM_SWITCH(ndim, NDim, {
            mshadow::Shape<NDim> oshape  = new_oshape.get<NDim>();
            mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>());
            mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>());
            // NOTE(review): this passes req[0] where the branches above use
            // req[dropout::kData]; equivalent today only because kData == 0 —
            // consider normalizing to req[dropout::kData] for consistency.
            mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, mshadow_op::mul>,
                             xpu>::template LaunchEx(s,
                                                     new_oshape.Size(),
                                                     req[0],
                                                     lstride,
                                                     rstride,
                                                     oshape,
                                                     grad.dptr<DType>(),
                                                     mask.dptr<DType>(),
                                                     gdata.dptr<DType>());
          });
        }
      }
    } else {
      // forward was identity, so backward is identity too
      const TBlob& gdata = in_grad[dropout::kData];
      const TBlob& grad  = out_grad[dropout::kOut];
      MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch(
            s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>());
      });
    }
  }

 private:
  /*! \brief Dropout rate (keep when the generated random number is less than this value) */
  real_t pkeep_;
  /*! \brief Dropout mode */
  dropout::DropoutOpMode mode_;
  /*! \brief Axes on which dropout mask is shared in the form of broadcast multiply */
  mxnet::TShape axes_;
  /*! \brief Flag to record whether forward is executed in pass-through mode */
  bool dropout_passthrough_;
#if MXNET_USE_CUDNN_DROPOUT
  bool cudnn_off_;                           // user-requested cuDNN opt-out
  Context ctx_;                              // device context of this op instance
  cudnnDataType_t dtype_;                    // cuDNN dtype matching DType
  cudnnDropoutDescriptor_t dropout_desc_;    // cached dropout descriptor
  size_t dropout_reserve_byte_;              // reserve-space size from cuDNN
  cudnnTensorDescriptor_t x_desc_, y_desc_, dx_desc_, dy_desc_;
#endif  // MXNET_USE_CUDNN_DROPOUT
};  // class DropoutOp

/*! \brief Stateful forward entry point: unpacks the typed DropoutOp from the
 *  operator state and delegates to DropoutOp::Forward. */
template <typename xpu>
void DropoutCompute(const OpStatePtr& state,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
    op.Forward(ctx, inputs, req, outputs);
  });
}

/*! \brief Stateful backward entry point.  inputs = {out_grad, mask}; they are
 *  repacked into the positional vectors DropoutOp::Backward expects. */
template <typename xpu>
void DropoutGradCompute(const OpStatePtr& state,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1);
  CHECK_EQ(req.size(), 1);
  std::vector<TBlob> out_grads(2);
  std::vector<TBlob> out_data(2);
  out_grads[dropout::kOut] = inputs[0];
  out_data[dropout::kMask] = inputs[1];
  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    DropoutOp<xpu, DType>& op = state.get_state<DropoutOp<xpu, DType>>();
    op.Backward(ctx, out_grads, out_data, req, outputs);
  });
}

}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_NN_DROPOUT_INL_H_
// ===== MeshToImageEngine.h =====
/****************************************************************************** * SOFA, Simulation Open-Framework Architecture, development version * * (c) 2006-2017 INRIA, USTL, UJF, CNRS, MGH * * * * This program is free software; you can redistribute it and/or modify it * * under the terms of the GNU Lesser General Public License as published by * * the Free Software Foundation; either version 2.1 of the License, or (at * * your option) any later version. * * * * This program is distributed in the hope that it will be useful, but WITHOUT * * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License * * for more details. * * * * You should have received a copy of the GNU Lesser General Public License * * along with this program. If not, see <http://www.gnu.org/licenses/>. * ******************************************************************************* * Authors: The SOFA Team and external contributors (see Authors.txt) * * * * Contact information: contact@sofa-framework.org * ******************************************************************************/ #ifndef SOFA_IMAGE_MeshToImageEngine_H #define SOFA_IMAGE_MeshToImageEngine_H #include <image/config.h> #include "ImageTypes.h" #include <sofa/helper/rmath.h> #include <sofa/helper/IndexOpenMP.h> #include <sofa/core/DataEngine.h> #include <sofa/core/objectmodel/BaseObject.h> #include <sofa/core/topology/BaseMeshTopology.h> #include <sofa/core/visual/VisualParams.h> #include <sofa/helper/SVector.h> #include <sofa/defaulttype/Vec.h> #include <sofa/defaulttype/Mat.h> #include <sofa/defaulttype/Quat.h> #include <newmat/newmat.h> #include <newmat/newmatap.h> #include <sofa/helper/vectorData.h> #ifdef _OPENMP #include <omp.h> #endif namespace sofa { namespace component { namespace engine { /** * This class rasterizes meshes into a boolean image (1: inside mesh, 0: outside) or a scalar image (val: inside mesh, 0: outside) * 
 \todo adjust type of value, closingValue, backgroundValue, roiValue according to ImageTypes
 */
template <class _ImageTypes>
class MeshToImageEngine : public core::DataEngine
{
public:
    typedef core::DataEngine Inherited;
    SOFA_CLASS(SOFA_TEMPLATE(MeshToImageEngine,_ImageTypes),Inherited);

    typedef SReal Real;

    Data< helper::vector<Real> > voxelSize; // should be a Vec<3,Real>, but it is easier to be backward-compatible that way
    typedef helper::WriteOnlyAccessor<Data< helper::vector<Real> > > waVecReal;
    Data< defaulttype::Vec<3,unsigned> > nbVoxels;     // desired voxel counts; has priority over voxelSize when non-zero
    Data< bool > rotateImage;                          // if true, use an oriented (PCA) bounding box instead of an AABB
    Data< unsigned int > padSize;                      // border added around the mesh, in voxels
    Data< unsigned int > subdiv;                       // extra subdivision per rasterization step, to avoid holes

    typedef _ImageTypes ImageTypes;
    typedef typename ImageTypes::T T;
    typedef typename ImageTypes::imCoord imCoord;
    typedef helper::ReadAccessor<Data< ImageTypes > > raImage;
    typedef helper::WriteOnlyAccessor<Data< ImageTypes > > waImage;
    Data< ImageTypes > image;                          // output rasterized image

    typedef defaulttype::ImageLPTransform<Real> TransformType;
    typedef typename TransformType::Coord Coord;
    typedef helper::ReadAccessor<Data< TransformType > > raTransform;
    typedef helper::WriteOnlyAccessor<Data< TransformType > > waTransform;
    Data< TransformType > transform;                   // output image-to-world transform

    typedef helper::vector<defaulttype::Vec<3,Real> > SeqPositions;
    typedef helper::ReadAccessor<Data< SeqPositions > > raPositions;
    typedef helper::WriteOnlyAccessor<Data< SeqPositions > > waPositions;
    helper::vectorData< SeqPositions > vf_positions;   // per-mesh vertex positions

    typedef typename core::topology::BaseMeshTopology::Edge Edge;
    typedef typename core::topology::BaseMeshTopology::SeqEdges SeqEdges;
    typedef helper::ReadAccessor<Data< SeqEdges > > raEdges;
    typedef helper::WriteOnlyAccessor<Data< SeqEdges > > waEdges;
    helper::vectorData< SeqEdges > vf_edges;           // per-mesh edge lists

    typedef typename core::topology::BaseMeshTopology::Triangle Triangle;
    typedef typename core::topology::BaseMeshTopology::SeqTriangles SeqTriangles;
    typedef helper::ReadAccessor<Data< SeqTriangles > > raTriangles;
    typedef helper::WriteOnlyAccessor<Data< SeqTriangles > > waTriangles;
    helper::vectorData< SeqTriangles > vf_triangles;   // per-mesh triangle lists

    typedef double ValueType;
    typedef helper::vector<ValueType> SeqValues;
    typedef helper::ReadAccessor<Data< SeqValues > > raValues;
    helper::vectorData< SeqValues > vf_values;         // per-mesh surface pixel values (per-vertex when >1)

    helper::vectorData< bool > vf_FillInside;          // per-mesh flag: flood-fill interior?
    helper::vectorData< ValueType > vf_InsideValues;   // per-mesh interior pixel value

    typedef helper::SVector<typename core::topology::BaseMeshTopology::PointID> SeqIndex; ///< one roi defined as an index list
    typedef helper::vector<SeqIndex> VecSeqIndex; ///< vector of rois
    helper::vectorData<VecSeqIndex> vf_roiIndices; ///< vector of rois for each mesh
    helper::vectorData<SeqValues> vf_roiValue; ///< values for each roi
    typedef helper::ReadAccessor<Data< VecSeqIndex > > raIndex;

    Data< ValueType > backgroundValue;                 // pixel value outside all meshes

    Data<unsigned int> f_nbMeshes;                     // number of input meshes; drives the size of every vf_* vector
    Data<bool> gridSnap;                               // snap voxel centers to voxelSize multiples for mergeable images
    Data<bool> worldGridAligned;                       // rasterize on a world-aligned grid defined by nbVoxels*voxelSize

    virtual std::string getTemplateName() const { return templateName(this); }
    static std::string templateName(const MeshToImageEngine<ImageTypes>* = NULL) { return ImageTypes::Name(); }

    MeshToImageEngine()    :   Inherited()
      , voxelSize(initData(&voxelSize,helper::vector<Real>(3,(Real)1.0),"voxelSize","voxel Size (redondant with and not priority over nbVoxels)"))
      , nbVoxels(initData(&nbVoxels,defaulttype::Vec<3,unsigned>(0,0,0),"nbVoxels","number of voxel (redondant with and priority over voxelSize)"))
      , rotateImage(initData(&rotateImage,false,"rotateImage","orient the image bounding box according to the mesh (OBB)"))
      , padSize(initData(&padSize,(unsigned int)(0),"padSize","size of border in number of voxels"))
      , subdiv(initData(&subdiv,(unsigned int)(4),"subdiv","number of subdivisions for face rasterization (if needed, increase to avoid holes)"))
      , image(initData(&image,ImageTypes(),"image",""))
      , transform(initData(&transform,TransformType(),"transform",""))
      , vf_positions(this, "position", "input positions for mesh ", helper::DataEngineInput)
      , vf_edges(this,"edges", "input edges for mesh ", helper::DataEngineInput)
      , vf_triangles(this,"triangles", "input triangles for mesh ", helper::DataEngineInput)
      , vf_values(this,"value", "pixel value on mesh surface ", helper::DataEngineInput, SeqValues((size_t)1,(ValueType)1.0))
      , vf_FillInside(this,"fillInside", "fill the mesh using insideValue?", helper::DataEngineInput, true)
      , vf_InsideValues(this,"insideValue", "pixel value inside the mesh", helper::DataEngineInput, (ValueType)1.0)
      , vf_roiIndices(this,"roiIndices", "List of Regions Of Interest, vertex indices", helper::DataEngineInput)
      , vf_roiValue(this,"roiValue", "pixel value for ROIs, list of values", helper::DataEngineInput)
      , backgroundValue(initData(&backgroundValue,0.,"backgroundValue","pixel value at background"))
      , f_nbMeshes( initData (&f_nbMeshes, (unsigned)1, "nbMeshes", "number of meshes to voxelize (Note that the last one write on the previous ones)") )
      , gridSnap(initData(&gridSnap,true,"gridSnap","align voxel centers on voxelSize multiples for perfect image merging (nbVoxels and rotateImage should be off)"))
      , worldGridAligned(initData(&worldGridAligned, false, "worldGridAligned", "perform rasterization on a world aligned grid using nbVoxels and voxelSize"))
    {
        // size every per-mesh data vector to the declared mesh count
        vf_positions.resize(f_nbMeshes.getValue());
        vf_edges.resize(f_nbMeshes.getValue());
        vf_triangles.resize(f_nbMeshes.getValue());
        vf_values.resize(f_nbMeshes.getValue());
        vf_FillInside.resize(f_nbMeshes.getValue());
        vf_InsideValues.resize(f_nbMeshes.getValue());
        vf_roiIndices.resize(f_nbMeshes.getValue());
        vf_roiValue.resize(f_nbMeshes.getValue());

        // un-numbered aliases for the first mesh (single-mesh scenes)
        this->addAlias(vf_positions[0], "position");
        this->addAlias(vf_edges[0], "edges");
        this->addAlias(vf_triangles[0], "triangles");
        this->addAlias(vf_values[0], "value");
        this->addAlias(vf_FillInside[0], "fillInside");
        this->addAlias(vf_InsideValues[0], "insideValue");
        this->addAlias(vf_roiIndices[0], "roiIndices");
        this->addAlias(vf_roiValue[0], "roiValue");
    }

    virtual ~MeshToImageEngine()
    {
    }

    // Registers inputs/outputs and applies the legacy fallback: an unset
    // insideValue is copied from the first surface value.
    virtual void init()
    {
        // backward compatibility (if InsideValue is not set: use first value)
        for( size_t meshId=0; meshId<vf_InsideValues.size() ; ++meshId )
            if(!this->vf_InsideValues[meshId]->isSet() && this->vf_values[meshId]->isSet())
                if(meshId>=this->vf_FillInside.size() || this->vf_FillInside[meshId]->getValue())
                {
                    this->vf_InsideValues[meshId]->setValue(this->vf_values[meshId]->getValue()[0]);
                    serr<<"InsideValue["<<meshId<<"] is not set -> used Value["<<meshId<<"]="<<this->vf_values[meshId]->getValue()[0]<<" instead"<<sendl;
                }

        addInput(&f_nbMeshes);

        vf_positions.resize(f_nbMeshes.getValue());
        vf_edges.resize(f_nbMeshes.getValue());
        vf_triangles.resize(f_nbMeshes.getValue());
        vf_values.resize(f_nbMeshes.getValue());
        vf_FillInside.resize(f_nbMeshes.getValue());
        vf_InsideValues.resize(f_nbMeshes.getValue());
        vf_roiIndices.resize(f_nbMeshes.getValue());
        vf_roiValue.resize(f_nbMeshes.getValue());

        addOutput(&image);
        addOutput(&transform);
    }

    // Resets every pixel of the output image to zero.
    void clearImage()
    {
        waImage iml(this->image);
        cimg_library::CImg<T>& im= iml->getCImg();
        im.fill((T)0);
    }

    // Re-sizes the per-mesh vectors (the mesh count may have changed) and
    // recomputes the image.
    virtual void reinit()
    {
        vf_positions.resize(f_nbMeshes.getValue());
        vf_edges.resize(f_nbMeshes.getValue());
        vf_triangles.resize(f_nbMeshes.getValue());
        vf_values.resize(f_nbMeshes.getValue());
        vf_FillInside.resize(f_nbMeshes.getValue());
        vf_InsideValues.resize(f_nbMeshes.getValue());
        vf_roiIndices.resize(f_nbMeshes.getValue());
        vf_roiValue.resize(f_nbMeshes.getValue());

        update();
    }

    /// Parse the given description to assign values to this object's fields and potentially other parameters
    void parse ( sofa::core::objectmodel::BaseObjectDescription* arg )
    {
        // let each vectorData deduce the mesh count from the numbered
        // attributes (position1, position2, ...) before the generic parse
        vf_positions.parseSizeData(arg, f_nbMeshes);
        vf_edges.parseSizeData(arg, f_nbMeshes);
        vf_triangles.parseSizeData(arg, f_nbMeshes);
        vf_values.parseSizeData(arg, f_nbMeshes);
        vf_FillInside.parseSizeData(arg, f_nbMeshes);
        vf_InsideValues.parseSizeData(arg, f_nbMeshes);
        vf_roiIndices.parseSizeData(arg, f_nbMeshes);
        vf_roiValue.parseSizeData(arg, f_nbMeshes);
        Inherit1::parse(arg);
    }

    /// Assign the field values stored in the given map of name -> value pairs
    void parseFields ( const std::map<std::string,std::string*>& str )
    {
        vf_positions.parseFieldsSizeData(str, f_nbMeshes);
        vf_edges.parseFieldsSizeData(str, f_nbMeshes);
        vf_triangles.parseFieldsSizeData(str, f_nbMeshes);
        vf_values.parseFieldsSizeData(str, f_nbMeshes);
        vf_FillInside.parseFieldsSizeData(str, f_nbMeshes);
        vf_InsideValues.parseFieldsSizeData(str, f_nbMeshes);
        vf_roiIndices.parseFieldsSizeData(str, f_nbMeshes);
        vf_roiValue.parseFieldsSizeData(str, f_nbMeshes);
        Inherit1::parseFields(str);
    }

protected:

    // Main engine entry: computes the image bounding box / transform
    // (world-aligned, AABB, or PCA-based OBB), allocates the image, fills it
    // with backgroundValue, then rasterizes every mesh into it.
    virtual void update()
    {
        updateAllInputsIfDirty();
        cleanDirty();

        // to be backward-compatible, if less than 3 values, fill with the last one
        waVecReal vs( voxelSize );
        unsigned vs_lastid=vs.size()-1;
        for( unsigned i=vs.size() ; i<3 ; ++i ) vs.push_back( vs[vs_lastid] );
        vs.resize(3);

        waImage iml(this->image);
        waTransform tr(this->transform);

        // update transform

        // per-axis [min,max] bounding box, initialized empty
        Real BB[3][2] = { {std::numeric_limits<Real>::max(), -std::numeric_limits<Real>::max()} , {std::numeric_limits<Real>::max(), -std::numeric_limits<Real>::max()} , {std::numeric_limits<Real>::max(), -std::numeric_limits<Real>::max()} };

        if(worldGridAligned.getValue() == true) // no transformation, simply assign an image of numVoxel*voxelSize
        {
            // min and max centered around origin of transform
            for(int i=0; i< 3; i++)
            {
                BB[i][1] = nbVoxels.getValue()[i]*voxelSize.getValue()[i]*0.5f;
                BB[i][0] = -BB[i][1];
            }
        }
        else if(!this->rotateImage.getValue()) // use Axis Aligned Bounding Box
        {
            for(size_t j=0; j<3; j++) tr->getRotation()[j]=(Real)0 ;
            for( unsigned meshId=0; meshId<f_nbMeshes.getValue() ; ++meshId )
            {
                raPositions pos(*this->vf_positions[meshId]);       unsigned int nbp = pos.size();

                for(size_t i=0; i<nbp; i++) for(size_t j=0; j<3; j++) { if(BB[j][0]>pos[i][j]) BB[j][0]=pos[i][j];  if(BB[j][1]<pos[i][j]) BB[j][1]=pos[i][j]; }
            }

            // enlarge a bit the bb to prevent from numerical precision issues in rasterization
            for(size_t j=0; j<3; j++)
            {
                Real EPSILON = (BB[j][1]-BB[j][0])*1E-10;
                BB[j][1] += EPSILON;
                BB[j][0] -= EPSILON;
            }

            // nbVoxels wins over voxelSize when fully specified
            if( nbVoxels.getValue()[0]!=0 && nbVoxels.getValue()[1]!=0 && nbVoxels.getValue()[2]!=0 ) for(size_t j=0; j<3; j++) tr->getScale()[j] = (BB[j][1] - BB[j][0]) / nbVoxels.getValue()[j];
            else for(size_t j=0; j<3; j++) tr->getScale()[j] = this->voxelSize.getValue()[j];

            if(this->gridSnap.getValue())
                if( nbVoxels.getValue()[0]==0 || nbVoxels.getValue()[1]==0 || nbVoxels.getValue()[2]==0 )
                {
                    // snap bb to multiples of the voxel size so separately
                    // rasterized images share the same grid
                    for(size_t j=0; j<3; j++) BB[j][0] = tr->getScale()[j]*floor(BB[j][0]/tr->getScale()[j]);
                    for(size_t j=0; j<3; j++) BB[j][1] = tr->getScale()[j]*ceil(BB[j][1]/tr->getScale()[j]);
                }

            for(size_t j=0; j<3; j++) tr->getTranslation()[j]=BB[j][0]+tr->getScale()[j]*0.5-tr->getScale()[j]*this->padSize.getValue();
        }
        else  // use Oriented Bounding Box
        {
            unsigned nbpTotal = 0; // total points over all meshes

            // get mean and covariance
            Coord mean; mean.fill(0);
            for( unsigned meshId=0; meshId<f_nbMeshes.getValue() ; ++meshId )
            {
                raPositions pos(*this->vf_positions[meshId]);       unsigned int nbp = pos.size();
                for(size_t i=0; i<nbp; i++) mean+=pos[i];
                nbpTotal += nbp;
            }
            mean/=(Real)nbpTotal;

            // upper triangle of the covariance matrix (symmetric)
            defaulttype::Mat<3,3,Real> M; M.fill(0);
            for( unsigned meshId=0; meshId<f_nbMeshes.getValue() ; ++meshId )
            {
                raPositions pos(*this->vf_positions[meshId]);       unsigned int nbp = pos.size();
                for(size_t i=0; i<nbp; i++)  for(size_t j=0; j<3; j++)  for(size_t k=j; k<3; k++)  M[j][k] += (pos[i][j] - mean[j]) * (pos[i][k] - mean[k]);
            }
            M/=(Real)nbpTotal;

            // get eigen vectors of the covariance matrix (NEWMAT is 1-indexed)
            NEWMAT::SymmetricMatrix e(3); e = 0.0;
            for(size_t j=0; j<3; j++) { for(size_t k=j; k<3; k++) e(j+1,k+1) = M[j][k]; for(size_t k=0; k<j; k++) e(k+1,j+1) = e(j+1,k+1); }
            NEWMAT::DiagonalMatrix D(3); D = 0.0;
            NEWMAT::Matrix V(3,3); V = 0.0;
            NEWMAT::Jacobi(e, D, V);
            for(size_t j=0; j<3; j++) for(size_t k=0; k<3; k++) M[j][k]=V(j+1,k+1);
            if(determinant(M)<0) M*=(Real)-1.0; // enforce a right-handed basis
            defaulttype::Mat<3,3,Real> MT=M.transposed();

            // get orientation from eigen vectors
            helper::Quater< Real > q; q.fromMatrix(M);
            tr->getRotation()=q.toEulerVector()* (Real)180.0 / (Real)M_PI;

            // get bb in the rotated frame
            Coord P;
            for( unsigned meshId=0; meshId<f_nbMeshes.getValue() ; ++meshId )
            {
                raPositions pos(*this->vf_positions[meshId]);       unsigned int nbp = pos.size();
                for(size_t i=0; i<nbp; i++)
                {
                    P=MT*(pos[i]);
                    for(size_t j=0; j<3; j++) { if(BB[j][0]>P[j]) BB[j][0]=P[j];  if(BB[j][1]<P[j]) BB[j][1]=P[j]; }
                }
            }

            // enlarge a bit the bb to prevent from numerical precision issues in rasterization
            for(size_t j=0; j<3; j++)
            {
                Real EPSILON = (BB[j][1]-BB[j][0])*1E-10;
                BB[j][1] += EPSILON;
                BB[j][0] -= EPSILON;
            }

            if( nbVoxels.getValue()[0]!=0 && nbVoxels.getValue()[1]!=0 && nbVoxels.getValue()[2]!=0 ) for(size_t j=0; j<3; j++) tr->getScale()[j] = (BB[j][1] - BB[j][0]) / nbVoxels.getValue()[j];
            else for(size_t j=0; j<3; j++) tr->getScale()[j] = this->voxelSize.getValue()[j];

            P=Coord(BB[0][0],BB[1][0],BB[2][0]) + tr->getScale()*0.5 - tr->getScale()*this->padSize.getValue();
            tr->getTranslation()=M*(P);
        }

        tr->getOffsetT()=(Real)0.0;
        tr->getScaleT()=(Real)1.0;
        tr->isPerspective()=0;
        tr->update(); // update of internal data

        // update image extents
        unsigned int dim[3];
        for(size_t j=0; j<3; j++) dim[j]=ceil((BB[j][1]-BB[j][0])/tr->getScale()[j]+(Real)2.0*this->padSize.getValue());
        if(this->worldGridAligned.getValue()==true) // use world aligned grid, nb voxels = BB/voxelsize
        {
            for(size_t j=0; j<3; j++)
            {
                dim[j]=ceil((BB[j][1]-BB[j][0])/this->voxelSize.getValue()[j]);
                tr->getScale()[j]= this->voxelSize.getValue()[j];
            }
        }

        if(iml->getCImgList().size() == 0) iml->getCImgList().assign(1,dim[0],dim[1],dim[2],1);
        else iml->getCImgList()(0).assign(dim[0],dim[1],dim[2],1); // Just realloc the memory of the image to suit new size

        // Keep it as a pointer since the code will be called recursively
        cimg_library::CImg<T>& im = iml->getCImg();
        im.fill( (T)backgroundValue.getValue() );

        // later meshes overwrite earlier ones (documented in nbMeshes help)
        for( size_t meshId=0 ; meshId<f_nbMeshes.getValue() ; ++meshId ) rasterizeAndFill ( meshId, im, tr );

        if(this->f_printLog.getValue()) sout<<this->getName()<<": Voxelization done"<<sendl;
    }

    // regular rasterization like first implementation, with inside filled by the unique value.
    // Draws edges and triangles into 'im' (ROI-colored primitives are redrawn
    // last so their values win), then optionally flood-fills the interior.
    void rasterizeAndFill( const unsigned int &meshId, cimg_library::CImg<T>& im, const waTransform& tr )
    {
        raPositions pos(*this->vf_positions[meshId]);       unsigned int nbp = pos.size();
        raTriangles tri(*this->vf_triangles[meshId]);       unsigned int nbtri = tri.size();
        raEdges edg(*this->vf_edges[meshId]);               unsigned int nbedg = edg.size();

        if(!nbp || (!nbtri && !nbedg) ) { serr<<"no topology defined for mesh "<<meshId<<sendl; return; }

        unsigned int nbval = this->vf_values[meshId]->getValue().size();

        raIndex roiIndices(*this->vf_roiIndices[meshId]);
        if(roiIndices.size() && !this->vf_roiValue[meshId]->getValue().size()) serr<<"at least one roiValue for mesh "<<meshId<<" needs to be specified"<<sendl;
        if(this->f_printLog.getValue())
            for(size_t r=0;r<roiIndices.size();++r)
                sout<<this->getName()<<": mesh "<<meshId<<"\t ROI "<<r<<"\t number of vertices= " << roiIndices[r].size() << "\t value= "<<getROIValue(meshId,r)<<sendl;

        /// colors definition
        const T FillColor = (T)getValue(meshId,0);
        const T InsideColor = (T)this->vf_InsideValues[meshId]->getValue();
        //        T OutsideColor = (T)this->backgroundValue.getValue();

        /// draw surface: 'mask' records rasterized voxels so the flood fill
        /// cannot leak through the surface
        cimg_library::CImg<bool> mask;
        mask.assign( im.width(), im.height(), im.depth(), 1 );
        mask.fill(false);

        // draw edges
        if(this->f_printLog.getValue() && nbedg) sout<<this->getName()<<": Voxelizing edges (mesh "<<meshId<<")..."<<sendl;

        unsigned int subdivValue = this->subdiv.getValue();

        std::map<unsigned int,T> edgToValue; // we record special roi values and rasterize them after to prevent from overwriting
        // NOTE(review): the loop below inserts into edgToValue and writes im/mask
        // from multiple OpenMP threads without synchronization — looks like a
        // data race; confirm or guard with a critical section.
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for(sofa::helper::IndexOpenMP<unsigned int>::type i=0; i<nbedg; i++)
        {
            Coord pts[2];
            for(size_t j=0; j<2; j++) pts[j] = (tr->toImage(Coord(pos[edg[i][j]])));

            T currentColor = FillColor;
            for(size_t r=0;r<roiIndices.size();++r)
            {
                // an edge belongs to a ROI only if both its vertices do
                bool isRoi = true;
                for(size_t j=0; j<2; j++)
                    if(std::find(roiIndices[r].begin(), roiIndices[r].end(), edg[i][j])==roiIndices[r].end()) { isRoi=false; break; }
                if (isRoi) { currentColor = (T)getROIValue(meshId,r); edgToValue[i]=currentColor; }
            }

            if(currentColor == FillColor)
            {
                if (nbval>1) draw_line(im,mask,pts[0],pts[1],getValue(meshId,edg[i][0]),getValue(meshId,edg[i][1]),subdivValue); // edge rasterization with interpolated values (if not in roi)
                else draw_line(im,mask,pts[0],pts[1],currentColor,subdivValue);
            }
        }
        // roi rasterization (serial, after the parallel pass, so ROI colors win)
        for(typename std::map<unsigned int,T>::iterator it=edgToValue.begin(); it!=edgToValue.end(); ++it)
        {
            Coord pts[2];
            for(size_t j=0; j<2; j++) pts[j] = (tr->toImage(Coord(pos[edg[it->first][j]])));
            const T& currentColor = it->second;
            draw_line(im,mask,pts[0],pts[1],currentColor,subdivValue);
        }

        // draw filled faces
        if(this->f_printLog.getValue() && nbtri) sout<<this->getName()<<": Voxelizing triangles (mesh "<<meshId<<")..."<<sendl;

        std::map<unsigned int,T> triToValue; // we record special roi values and rasterize them after to prevent from overwriting
        // NOTE(review): same unsynchronized shared-map/image writes as the edge
        // loop above — verify thread safety.
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for(sofa::helper::IndexOpenMP<unsigned int>::type i=0; i<nbtri; i++)
        {
            Coord pts[3];
            for(size_t j=0; j<3; j++) pts[j] = (tr->toImage(Coord(pos[tri[i][j]])));

            T currentColor = FillColor;
            for(size_t r=0;r<roiIndices.size();++r)
            {
                // a triangle belongs to a ROI only if all three vertices do
                bool isRoi = true;
                for(size_t j=0; j<3; j++)
                    if(std::find(roiIndices[r].begin(), roiIndices[r].end(), tri[i][j])==roiIndices[r].end()) { isRoi=false; break; }
                if (isRoi) { currentColor = (T)getROIValue(meshId,r); triToValue[i]=currentColor; }
            }

            if(currentColor == FillColor)
            {
                if (nbval>1) // triangle rasterization with interpolated values (if not in roi)
                    draw_triangle(im,mask,pts[0],pts[1],pts[2],getValue(meshId,tri[i][0]),getValue(meshId,tri[i][1]),getValue(meshId,tri[i][2]),subdivValue);
                else draw_triangle(im,mask,pts[0],pts[1],pts[2],currentColor,subdivValue);
            }
        }
        // roi rasterization
        for(typename std::map<unsigned int,T>::iterator it=triToValue.begin(); it!=triToValue.end(); ++it)
        {
            Coord pts[3];
            for(size_t j=0; j<3; j++) pts[j] = (tr->toImage(Coord(pos[tri[it->first][j]])));
            const T& currentColor = it->second;
            draw_triangle(im,mask,pts[0],pts[1],pts[2],currentColor,subdivValue);
        }

        /// fill inside
        if(this->vf_FillInside[meshId]->getValue())
        {
            if(!isClosed(tri.ref())) sout<<"mesh["<<meshId<<"] might be open, let's try to fill it anyway"<<sendl;

            // flood fill from the exterior point (0,0,0) with the color outsideColor
            if(this->f_printLog.getValue()) sout<<this->getName()<<": Filling object (mesh "<<meshId<<")..."<<sendl;
            static const bool colorTrue=true;
            mask.draw_fill(0,0,0,&colorTrue);
            // every voxel the fill could NOT reach is interior
            cimg_foroff(mask,off) if(!mask[off]) im[off]=InsideColor;
        }
    }

    /// retrieve input value of vertex 'index' of mesh 'meshId'
    /// (defaults to 1.0 when no values are given; falls back to value[0] when
    /// the index is out of range)
    ValueType getValue( const unsigned int &meshId, const unsigned int &index ) const
    {
        if(!this->vf_values[meshId]->getValue().size()) return (ValueType)1.0;
        return ( index<this->vf_values[meshId]->getValue().size() )? this->vf_values[meshId]->getValue()[index] : this->vf_values[meshId]->getValue()[0];
    }

    /// retrieve value of roi 'index' of mesh 'meshId'
    /// (same default/fallback policy as getValue)
    ValueType getROIValue( const unsigned int &meshId, const unsigned int &index ) const
    {
        if(!this->vf_roiValue[meshId]->getValue().size()) return (ValueType)1.0;
        return ( index<this->vf_roiValue[meshId]->getValue().size() )? this->vf_roiValue[meshId]->getValue()[index] : this->vf_roiValue[meshId]->getValue()[0];
    }

    /// check if mesh is closed (ie. all edges are present twice in triangle list)
    /// Each directed edge cancels its opposite; a watertight, consistently
    /// oriented mesh leaves the set empty.
    bool isClosed( const SeqTriangles& tri ) const
    {
        typedef std::pair<unsigned int,unsigned int> edge;
        typedef std::set< edge > edgeset;
        typedef typename edgeset::iterator edgesetit;

        edgeset edges;
        for(size_t i=0; i<tri.size(); i++)
            for(size_t j=0; j<3; j++)
            {
                unsigned int p1=tri[i][(j==0)?2:j-1],p2=tri[i][j];
                edgesetit it=edges.find(edge(p2,p1));
                if(it==edges.end()) edges.insert(edge(p1,p2));
                else edges.erase(it);
            }
        if(edges.empty()) return true;
        else return false;
    }

    virtual void draw(const core::visual::VisualParams* /*vparams*/)
    {
    }

    // true iff (x,y,z) is a valid voxel of 'img' (coordinates are unsigned, so
    // only the upper bounds need checking)
    template<class PixelT>
    bool isInsideImage(cimg_library::CImg<PixelT>& img, unsigned int x, unsigned int y, unsigned z)
    {
        //		if(x<0) return false;
        //		if(y<0) return false;
        //		if(z<0) return false;
        if(x>=(unsigned int)img.width() ) return false;
        if(y>=(unsigned int)img.height()) return false;
        if(z>=(unsigned int)img.depth() ) return false;
        return true;
    }

    // Constant-color line rasterization.
    template<class PixelT>
    void draw_line(cimg_library::CImg<PixelT>& im,cimg_library::CImg<bool>& mask,const Coord& p0,const Coord& p1,const PixelT& color,const unsigned int subdiv)
    // floating point bresenham
    {
        Coord P0(p0),P1(p1);

        Coord delta = P1 - P0;
        unsigned int dmax = cimg_library::cimg::max(cimg_library::cimg::abs(delta[0]),cimg_library::cimg::abs(delta[1]),cimg_library::cimg::abs(delta[2]));
        dmax*=subdiv; // divide step to avoid possible holes
        // NOTE(review): when both endpoints land in the same voxel dmax==0 and
        // delta/(Real)dmax divides by zero; delta is then the zero vector, so
        // dP would be NaN — confirm this path is benign for the target platforms.
        Coord dP = delta/(Real)dmax;
        Coord P (P0);
        for (unsigned int t = 0; t<=dmax; ++t)
        {
            unsigned int x=(unsigned int)sofa::helper::round(P[0]), y=(unsigned int)sofa::helper::round(P[1]), z=(unsigned int)sofa::helper::round(P[2]);
            if(isInsideImage<PixelT>(im,x,y,z))
            {
                im(x,y,z)=color;
                mask(x,y,z)=true;
            }
            P+=dP;
        }
    }

    // Line rasterization with a value linearly interpolated from color0 to color1.
    template<class PixelT>
    void draw_line(cimg_library::CImg<PixelT>& im,cimg_library::CImg<bool>& mask,const Coord& p0,const Coord& p1,const Real& color0,const Real& color1,const unsigned int subdiv)
    // floating point bresenham
    {
        Coord P0(p0),P1(p1);

        Coord delta = P1 - P0;
        unsigned int dmax = cimg_library::cimg::max(cimg_library::cimg::abs(delta[0]),cimg_library::cimg::abs(delta[1]),cimg_library::cimg::abs(delta[2]));
        dmax*=subdiv; // divide step to avoid possible holes
        Coord dP = delta/(Real)dmax;
        Coord P (P0);
        for (unsigned int t = 0; t<=dmax; ++t)
        {
            Real u = (dmax == 0) ? Real(0.5) : (Real)t / (Real)dmax; // interpolation parameter in [0,1]
            PixelT color = (PixelT)(color0 * (1.0 - u) + color1 * u);

            unsigned int x=(unsigned int)sofa::helper::round(P[0]), y=(unsigned int)sofa::helper::round(P[1]), z=(unsigned int)sofa::helper::round(P[2]);
            if(isInsideImage<PixelT>(im,x,y,z))
            {
                im(x,y,z)=color;
                mask(x,y,z)=true;
            }
            P+=dP;
        }
    }

    // structure for internal use: a triangle view ordered by the length of its
    // first edge, used to pick the two shortest-first-edge permutations below
    class _Triangle
    {
    public:
        const Coord *m_p0, *m_p1, *m_p2;
        Real firstEdgeLength;

        _Triangle(Coord const& _p0, Coord const& _p1, Coord const& _p2)
            : m_p0(&_p0), m_p1(&_p1), m_p2(&_p2)
        {
            firstEdgeLength = (p0()-p1()).norm();
        }

        Coord const& p0() const {return *m_p0;}
        Coord const& p1() const {return *m_p1;}
        Coord const& p2() const {return *m_p2;}

        inline bool operator< (const _Triangle& rhs) const { return this->firstEdgeLength < rhs.firstEdgeLength; }
    };

    // Constant-color triangle rasterization.
    template<class PixelT>
    void draw_triangle(cimg_library::CImg<PixelT>& im,cimg_library::CImg<bool>& mask,const Coord& p0,const Coord& p1,const Coord& p2,const PixelT& color,const unsigned int subdiv)
    {
        // fill along two directions to be sure that there is no hole,
        // let's choose the two smaller edges
        std::vector<_Triangle> triangles;
        triangles.push_back(_Triangle(p0,p1,p2));
        triangles.push_back(_Triangle(p1,p2,p0));
        triangles.push_back(_Triangle(p2,p0,p1));
        std::sort(triangles.begin(), triangles.end());

        _draw_triangle(im, mask, triangles[0].p0(), triangles[0].p1(), triangles[0].p2(), color, subdiv);
        _draw_triangle(im, mask, triangles[1].p0(), triangles[1].p1(), triangles[1].p2(), color, subdiv);
    }

    template<class PixelT>
    void _draw_triangle(cimg_library::CImg<PixelT>& im,cimg_library::CImg<bool>& mask,const Coord& p0,const Coord& p1,const Coord& p2,const PixelT& color,const unsigned int subdiv)
    // double bresenham
    {
        Coord P0(p0),P1(p1);

        Coord delta = P1 - P0;
        unsigned int dmax = cimg_library::cimg::max(cimg_library::cimg::abs(delta[0]),cimg_library::cimg::abs(delta[1]),cimg_library::cimg::abs(delta[2]));
        dmax*=subdiv; // divide step to avoid possible holes
        Coord dP = delta/(Real)dmax;
        Coord P (P0);
        // sweep the edge p0->p1 and draw a line from each sample to p2
        for (unsigned int t = 0; t<=dmax; ++t)
        {
            this->draw_line(im,mask,P,p2,color,subdiv);
            P+=dP;
        }
    }

    // Triangle rasterization with per-vertex values interpolated across the face.
    template<class PixelT>
    void draw_triangle(cimg_library::CImg<PixelT>& im,cimg_library::CImg<bool>& mask,const Coord& p0,const Coord& p1,const Coord& p2,const Real& color0,const Real& color1,const Real& color2,const unsigned int subdiv)
    {
        // fill along two directions to be sure that there is no hole,
        // let's choose the two smaller edges
        std::vector<_Triangle> triangles;
        triangles.push_back(_Triangle(p0,p1,p2));
        triangles.push_back(_Triangle(p1,p2,p0));
        triangles.push_back(_Triangle(p2,p0,p1));
        std::sort(triangles.begin(), triangles.end());

        // map each vertex back to its value after the permutation
        // NOTE(review): std::map<Coord,Real> requires a strict weak ordering on
        // Coord (operator<) — verify the Vec type provides one.
        std::map<Coord,Real> ptoC;
        ptoC[p0]=color0;
        ptoC[p1]=color1;
        ptoC[p2]=color2;

        _draw_triangle(im, mask, triangles[0].p0(), triangles[0].p1(), triangles[0].p2(), ptoC[triangles[0].p0()], ptoC[triangles[0].p1()], ptoC[triangles[0].p2()], subdiv);
        _draw_triangle(im, mask, triangles[1].p0(), triangles[1].p1(), triangles[1].p2(), ptoC[triangles[1].p0()], ptoC[triangles[1].p1()], ptoC[triangles[1].p2()], subdiv);
    }

    template<class PixelT>
    void _draw_triangle(cimg_library::CImg<PixelT>& im,cimg_library::CImg<bool>& mask,const Coord& p0,const Coord& p1,const Coord& p2,const Real& color0,const Real& color1,const Real& color2,const unsigned int subdiv)
    // double bresenham
    {
        Coord P0(p0),P1(p1);

        Coord delta = P1 - P0;
        unsigned int dmax = cimg_library::cimg::max(cimg_library::cimg::abs(delta[0]),cimg_library::cimg::abs(delta[1]),cimg_library::cimg::abs(delta[2]));
        dmax*=subdiv; // divide step to avoid possible holes
        Coord dP = delta/(Real)dmax;
        Coord P (P0);
        // sweep the edge p0->p1, interpolating the value, and draw interpolated
        // lines from each sample to p2
        for (unsigned int t = 0; t<=dmax; ++t)
        {
            Real u = (dmax == 0) ? Real(0.5) : (Real)t / (Real)dmax;
            PixelT color = (PixelT)(color0 * (1.0 - u) + color1 * u);
            this->draw_line(im,mask,P,p2,color,color2,subdiv);
            P+=dP;
        }
    }

};

#if defined(SOFA_EXTERN_TEMPLATE) && !defined(SOFA_IMAGE_MeshToImageEngine_CPP)
extern template class SOFA_IMAGE_API MeshToImageEngine<sofa::defaulttype::ImageB>;
extern template class SOFA_IMAGE_API MeshToImageEngine<sofa::defaulttype::ImageUC>;
extern template class SOFA_IMAGE_API MeshToImageEngine<sofa::defaulttype::ImageUS>;
extern template class SOFA_IMAGE_API MeshToImageEngine<sofa::defaulttype::ImageD>;
#endif

} // namespace engine

} // namespace component

} // namespace sofa

#endif // SOFA_IMAGE_MeshToImageEngine_H
mainOpenMP.c
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ /* * File: main.c * Author: Nikolaos Mamais(2371),Nikolaos Bafatakis(2383),Panagiotis Maroylidis(2431) * * Created on March 19, 2016, 1:30 PM */ #include <stdio.h> #include <stdlib.h> #include <omp.h> #include <time.h> #include <math.h> #define upper 30 #define lower 12 /* * */ struct timespec start, end; int main(int argc, char** argv) { FILE *bin_file; int num ; int counter = 0; int k; int j; char temp[100]; clock_gettime(CLOCK_MONOTONIC, &start); if(argc!=6){ printf("Wrong Arguments\n"); return (EXIT_FAILURE); } k=0; num=atoi(argv[1]); #pragma omp parallel private(bin_file,temp) if ((bin_file = fopen(argv[3], "r+")) != NULL) { int num1; #pragma omp for private(j,num1) reduction(+:counter) for (j=k; j < num; j++){ fread(temp, 2, 1, bin_file); num1 = atoi(temp); if (num1 < 12 || num1 > 30) { fseek(bin_file,(j+1)*31, SEEK_SET); continue; } fseek(bin_file, 8, SEEK_CUR); fread(temp, 2, 1, bin_file); num1 = atoi(temp); if (num1 < 12 || num1 > 30) { fseek(bin_file,(j+1)*31, SEEK_SET); continue; } fseek(bin_file, 8, SEEK_CUR); fread(temp, 2, 1, bin_file); num1 = atoi(temp); if (num1 < 12 || num1 > 30) { fseek(bin_file,(j+1)*31, SEEK_SET); continue; } fseek(bin_file,(j+1)*31, SEEK_SET); counter = counter + 1; } } else { printf("Error opening the File"); } #pragma omp barrier fclose(bin_file); clock_gettime(CLOCK_MONOTONIC, &end); printf("\n%d\n", counter); const int DAS_NANO_SECONDS_IN_SEC = 1000000000; long timeElapsed_s = end.tv_sec - start.tv_sec; long timeElapsed_n = end.tv_nsec - start.tv_nsec; //If we have a negative number in timeElapsed_n , borrow a carry from seconds if (timeElapsed_n < 0) { timeElapsed_n = DAS_NANO_SECONDS_IN_SEC + timeElapsed_n; timeElapsed_s--; } printf("Time: %ld.%09ld secs \n", timeElapsed_s, timeElapsed_n); return (EXIT_SUCCESS); }
test_utils.h
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string>
#include <sstream>
#include <iostream>
#include <iomanip>
#include <algorithm>
#include <limits>
#include <utility>
#include <cstdint>
#include <cstdlib>
#include <map>

extern "C" {
#include "mmio.h"
}

#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
#include <library_types.h>

#include <thrust/host_vector.h>
#include <thrust/adjacent_difference.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>

#include <rmm_utils.h>

#include "cugraph.h"

// Report (to stderr) any failing CUDA runtime call; does not abort.
#ifndef CUDA_RT_CALL
#define CUDA_RT_CALL( call )                                                                       \
{                                                                                                  \
    cudaError_t cudaStatus = call;                                                                 \
    if ( cudaSuccess != cudaStatus ) {                                                             \
        fprintf(stderr, "ERROR: CUDA RT call \"%s\" in line %d of file %s failed with %s (%d).\n", \
                #call, __LINE__, __FILE__, cudaGetErrorString(cudaStatus), cudaStatus);            \
    }                                                                                              \
}
#endif

// Deleter for gdf_column: frees the device buffer (if any) and the struct.
std::function<void(gdf_column*)> gdf_col_deleter = [](gdf_column* col){
  if (col) {
    col->size = 0;
    if(col->data){
      cudaStream_t stream{nullptr};
      ALLOC_FREE_TRY(col->data, stream);
    }
    delete col;
  }
};
using gdf_column_ptr = typename std::unique_ptr<gdf_column, decltype(gdf_col_deleter)>;

// Deleter for gdf_graph: plain delete (the graph owns its own columns).
std::function<void(gdf_graph*)> gdf_graph_deleter = [](gdf_graph* G){delete G;};
using gdf_graph_ptr = typename std::unique_ptr<gdf_graph,decltype(gdf_graph_deleter)>;

// Return the final path component of s ("" when s ends with the separator).
std::string getFileName(const std::string& s) {
  char sep = '/';
#ifdef _WIN32
  sep = '\\';
#endif
  size_t i = s.rfind(sep, s.length());
  if (i != std::string::npos) {
    return(s.substr(i+1, s.length() - i));
  }
  return("");
}

// Print, position by position, where v1 and v2 differ.
template <typename T>
void verbose_diff(std::vector<T> & v1, std::vector<T> & v2) {
  for (unsigned int i = 0; i < v1.size(); ++i)
  {
    if (v1[i] != v2[i])
    {
      std::cout << "[" << i <<"] : " << v1[i] << " vs. "<< v2[i]<<std::endl;
    }
  }
}

// Return 0 when equal; otherwise print the differences and return 1.
template <typename T>
int eq(std::vector<T> & v1, std::vector<T> & v2) {
  if (v1 == v2)
    return 0;
  else {
    verbose_diff(v1,v2);
    return 1;
  }
}

// Print n elements of a device array starting at offset.
template <typename T>
void printv(size_t n, T* vec, int offset) {
  thrust::device_ptr<T> dev_ptr(vec);
  std::cout.precision(15);
  std::cout << "sample size = "<< n << ", offset = "<< offset << std::endl;
  thrust::copy(dev_ptr+offset,dev_ptr+offset+n, std::ostream_iterator<T>(std::cout, " "));//Assume no RMM dependency; TODO: check / test (potential BUG !!!!!)
  std::cout << std::endl;
}

// Fill v with deterministic pseudo-random digits 0..9 (fixed seed 42).
template <typename T>
void random_vals(std::vector<T> & v) {
  srand(42);
  for (auto i = size_t{0}; i < v.size(); i++)
    v[i]=static_cast<T>(std::rand()%10);
}

// Reference (host, serial) CSR -> CSC conversion; base is the index base
// (0 or 1) of the input arrays.
template <typename T_ELEM>
void ref_csr2csc (int m, int n, int nnz, const T_ELEM *csrVals, const int *csrRowptr, const int *csrColInd, T_ELEM *cscVals, int *cscRowind, int *cscColptr, int base=0){
    int i,j, row, col, index;
    int * counters;
    T_ELEM val;

    /* early return */
    if ((m <= 0) || (n <= 0) || (nnz <= 0)){
        return;
    }

    /* build compressed column pointers */
    memset(cscColptr, 0, (n+1)*sizeof(cscColptr[0]));
    cscColptr[0]=base;
    for (i=0; i<nnz; i++){
        cscColptr[1+csrColInd[i]-base]++;
    }
    for(i=0; i<n; i++){
        cscColptr[i+1]+=cscColptr[i];
    }

    /* expand row indices and copy them and values into csc arrays according to permutation */
    counters = (int *)malloc(n*sizeof(counters[0]));
    memset(counters, 0, n*sizeof(counters[0]));
    for (i=0; i<m; i++){
        for (j=csrRowptr[i]; j<csrRowptr[i+1]; j++){
            row = i+base;
            col = csrColInd[j-base];
            index=cscColptr[col-base]-base+counters[col-base];
            counters[col-base]++;
            cscRowind[index]=row;
            if(csrVals!=NULL || cscVals!=NULL){
                val = csrVals[j-base];
                cscVals[index] = val;
            }
        }
    }
    free(counters);
}

// For each row: weight[j] = 1/row_degree for its entries, and is_leaf[row]
// flags empty rows (1.0 when the row has no entries, else 0.0).
template <typename T>
int transition_matrix_cpu(int n, int e, int *csrRowPtrA, int *csrColIndA, T *weight, T* is_leaf)
//omp_set_num_threads(4);
//#pragma omp parallel
{
    int j,row, row_size;
    //#pragma omp for
    for (row=0; row<n; row++)
    {
        row_size = csrRowPtrA[row+1] - csrRowPtrA[row];
        if (row_size == 0) is_leaf[row]=1.0;
        else
        {
            is_leaf[row]=0.0;
            for (j=csrRowPtrA[row]; j<csrRowPtrA[row+1]; j++)
                weight[j] = 1.0/row_size;
        }
    }
    return 0;
}

// Print a CSR matrix densely, row by row, with 2-decimal formatting.
// NOTE(review): csrColInd is uint16_t here while other helpers use int --
// verify against callers.
template <typename T>
void printCsrMatI(int m, int n, int nnz,std::vector<int> & csrRowPtr, std::vector<uint16_t> & csrColInd, std::vector<T> & csrVal) {
    std::vector<T> v(n);
    std::stringstream ss;
    ss.str(std::string());
    ss << std::fixed;
    ss << std::setprecision(2);
    for (int i = 0; i < m; i++)
    {
        std::fill(v.begin(),v.end(),0);
        for (int j = csrRowPtr[i]; j < csrRowPtr[i+1]; j++)
            v[csrColInd[j]] = csrVal[j];
        std::copy(v.begin(), v.end(), std::ostream_iterator<int>(ss, " "));
        ss << "\n";
    }
    ss << "\n";
    std::cout<<ss.str();
}

/// Read matrix properties from Matrix Market file
/** Matrix Market file is assumed to be a sparse matrix in coordinate
 *  format.
 *
 *  @param f File stream for Matrix Market file.
 *  @param tg Boolean indicating whether to convert matrix to general
 *  format (from symmetric, Hermitian, or skew symmetric format).
 *  @param t (Output) MM_typecode with matrix properties.
 *  @param m (Output) Number of matrix rows.
 *  @param n (Output) Number of matrix columns.
 *  @param nnz (Output) Number of non-zero matrix entries.
 *  @return Zero if properties were read successfully. Otherwise
 *  non-zero.
*/ template <typename IndexType_> int mm_properties(FILE * f, int tg, MM_typecode * t, IndexType_ * m, IndexType_ * n, IndexType_ * nnz) { // Read matrix properties from file int mint, nint, nnzint; if(fseek(f,0,SEEK_SET)) { fprintf(stderr, "Error: could not set position in file\n"); return -1; } if(mm_read_banner(f,t)) { fprintf(stderr, "Error: could not read Matrix Market file banner\n"); return -1; } if(!mm_is_matrix(*t) || !mm_is_coordinate(*t)) { fprintf(stderr, "Error: file does not contain matrix in coordinate format\n"); return -1; } if(mm_read_mtx_crd_size(f,&mint,&nint,&nnzint)) { fprintf(stderr, "Error: could not read matrix dimensions\n"); return -1; } if(!mm_is_pattern(*t) && !mm_is_real(*t) && !mm_is_integer(*t) && !mm_is_complex(*t)) { fprintf(stderr, "Error: matrix entries are not valid type\n"); return -1; } *m = mint; *n = nint; *nnz = nnzint; // Find total number of non-zero entries if(tg && !mm_is_general(*t)) { // Non-diagonal entries should be counted twice IndexType_ nnzOld = *nnz; *nnz *= 2; // Diagonal entries should not be double-counted int i; int st; for(i=0; i<nnzOld; ++i) { // Read matrix entry IndexType_ row, col; double rval, ival; if (mm_is_pattern(*t)) st = fscanf(f, "%d %d\n", &row, &col); else if (mm_is_real(*t) || mm_is_integer(*t)) st = fscanf(f, "%d %d %lg\n", &row, &col, &rval); else // Complex matrix st = fscanf(f, "%d %d %lg %lg\n", &row, &col, &rval, &ival); if(ferror(f) || (st == EOF)) { fprintf(stderr, "Error: error %d reading Matrix Market file (entry %d)\n", st, i+1); return -1; } // Check if entry is diagonal if(row == col) --(*nnz); } } return 0; } /// Read Matrix Market file and convert to COO format matrix /** Matrix Market file is assumed to be a sparse matrix in coordinate * format. * * @param f File stream for Matrix Market file. * @param tg Boolean indicating whether to convert matrix to general * format (from symmetric, Hermitian, or skew symmetric format). * @param nnz Number of non-zero matrix entries. 
* @param cooRowInd (Output) Row indices for COO matrix. Should have
 *  at least nnz entries.
 *  @param cooColInd (Output) Column indices for COO matrix. Should
 *  have at least nnz entries.
 *  @param cooRVal (Output) Real component of COO matrix
 *  entries. Should have at least nnz entries. Ignored if null
 *  pointer.
 *  @param cooIVal (Output) Imaginary component of COO matrix
 *  entries. Should have at least nnz entries. Ignored if null
 *  pointer.
 *  @return Zero if matrix was read successfully. Otherwise non-zero.
 */
template <typename IndexType_, typename ValueType_>
int mm_to_coo(FILE *f, int tg, IndexType_ nnz, IndexType_ * cooRowInd, IndexType_ * cooColInd, ValueType_ * cooRVal , ValueType_ * cooIVal) {

  // Read matrix properties from file
  MM_typecode t;
  int m, n, nnzOld;
  if(fseek(f,0,SEEK_SET)) {
    fprintf(stderr, "Error: could not set position in file\n");
    return -1;
  }
  if(mm_read_banner(f,&t)) {
    fprintf(stderr, "Error: could not read Matrix Market file banner\n");
    return -1;
  }
  if(!mm_is_matrix(t) || !mm_is_coordinate(t)) {
    fprintf(stderr, "Error: file does not contain matrix in coordinate format\n");
    return -1;
  }
  if(mm_read_mtx_crd_size(f,&m,&n,&nnzOld)) {
    fprintf(stderr, "Error: could not read matrix dimensions\n");
    return -1;
  }
  if(!mm_is_pattern(t) && !mm_is_real(t) && !mm_is_integer(t) && !mm_is_complex(t)) {
    fprintf(stderr, "Error: matrix entries are not valid type\n");
    return -1;
  }

  // Add each matrix entry in file to COO format matrix
  IndexType_ i;      // Entry index in Matrix Market file
  IndexType_ j = 0;  // Entry index in COO format matrix
  for(i=0;i<nnzOld;++i) {

    // Read entry from file
    int row, col;
    double rval, ival;
    int st;
    if (mm_is_pattern(t)) {
      // pattern matrices carry no values; use 1.0 by convention
      st = fscanf(f, "%d %d\n", &row, &col);
      rval = 1.0;
      ival = 0.0;
    }
    else if (mm_is_real(t) || mm_is_integer(t)) {
      st = fscanf(f, "%d %d %lg\n", &row, &col, &rval);
      ival = 0.0;
    }
    else // Complex matrix
      st = fscanf(f, "%d %d %lg %lg\n", &row, &col, &rval, &ival);
    if(ferror(f) || (st == EOF)) {
      // NOTE(review): "%d" with i+1 assumes IndexType_ fits int -- verify for
      // 64-bit index types.
      fprintf(stderr, "Error: error %d reading Matrix Market file (entry %d)\n", st, i+1);
      return -1;
    }

    // Switch to 0-based indexing
    --row;
    --col;

    // Record entry
    cooRowInd[j] = row;
    cooColInd[j] = col;
    if(cooRVal != NULL)
      cooRVal[j] = rval;
    if(cooIVal != NULL)
      cooIVal[j] = ival;
    ++j;

    // Add symmetric complement of non-diagonal entries
    if(tg && !mm_is_general(t) && (row!=col)) {

      // Modify entry value if matrix is skew symmetric or Hermitian
      if(mm_is_skew(t)) {
        rval = -rval;
        ival = -ival;
      }
      else if(mm_is_hermitian(t)) {
        ival = -ival;
      }

      // Record entry (transposed position)
      cooRowInd[j] = col;
      cooColInd[j] = row;
      if(cooRVal != NULL)
        cooRVal[j] = rval;
      if(cooIVal != NULL)
        cooIVal[j] = ival;
      ++j;

    }
  }
  return 0;
}

/// Compare two tuples based on the element indexed by i
// Comparison functor for thrust zip-iterators: i selects which tuple slot
// (0 = row, 1 = column) drives the ordering; any other i falls back to slot 0.
class lesser_tuple {
  const int i;
public:
  lesser_tuple(int _i) : i(_i) {}
  template<typename Tuple1, typename Tuple2>
  __host__ __device__
  bool operator()(const Tuple1 t1, const Tuple2 t2) {
    switch(i) {
    case 0:  return (thrust::get<0>(t1) < thrust::get<0>(t2));
    case 1:  return (thrust::get<1>(t1) < thrust::get<1>(t2));
    default: return (thrust::get<0>(t1) < thrust::get<0>(t2));
    }
  }
};

/// Sort entries in COO format matrix
/** Sort is stable.
 *
 *  @param nnz Number of non-zero matrix entries.
 *  @param sort_by_row Boolean indicating whether matrix entries
 *  will be sorted by row index or by column index.
 *  @param cooRowInd Row indices for COO matrix.
 *  @param cooColInd Column indices for COO matrix.
 *  @param cooRVal Real component for COO matrix entries. Ignored if
 *  null pointer.
 *  @param cooIVal Imaginary component COO matrix entries. Ignored if
 *  null pointer.
*/ template <typename IndexType_, typename ValueType_> void coo_sort(IndexType_ nnz, int sort_by_row, IndexType_ * cooRowInd, IndexType_ * cooColInd, ValueType_ * cooRVal, ValueType_ * cooIVal) { // Determine whether to sort by row or by column int i; if(sort_by_row == 0) i = 1; else i = 0; // Apply stable sort using namespace thrust; if((cooRVal==NULL) && (cooIVal==NULL)) stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz)), lesser_tuple(i)); else if((cooRVal==NULL) && (cooIVal!=NULL)) stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd,cooIVal)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz,cooIVal+nnz)), lesser_tuple(i)); else if((cooRVal!=NULL) && (cooIVal==NULL)) stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd,cooRVal)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz,cooRVal+nnz)), lesser_tuple(i)); else stable_sort(make_zip_iterator(make_tuple(cooRowInd,cooColInd,cooRVal,cooIVal)), make_zip_iterator(make_tuple(cooRowInd+nnz,cooColInd+nnz, cooRVal+nnz,cooIVal+nnz)), lesser_tuple(i)); } template <typename IndexT> void coo2csr(std::vector<IndexT>& cooRowInd, //in: I[] (overwrite) const std::vector<IndexT>& cooColInd, //in: J[] std::vector<IndexT>& csrRowPtr, //out std::vector<IndexT>& csrColInd) //out { std::vector<std::pair<IndexT,IndexT> > items; for (auto i = size_t{0}; i < cooRowInd.size(); ++i) items.push_back(std::make_pair( cooRowInd[i], cooColInd[i])); //sort pairs std::sort(items.begin(), items.end(),[](const std::pair<IndexT,IndexT> &left, const std::pair<IndexT,IndexT> &right) {return left.first < right.first; }); for (auto i = size_t{0}; i < cooRowInd.size(); ++i) { cooRowInd[i]=items[i].first; // save the sorted rows to compress them later csrColInd[i]=items[i].second; // save the col idx, not sure if they are sorted for each row } // Count number of elements per row for(auto i=size_t{0}; i<cooRowInd.size(); ++i) 
++(csrRowPtr[cooRowInd[i]+1]); // Compute cumulative sum to obtain row offsets/pointers for(auto i=size_t{0}; i<csrRowPtr.size()-1; ++i) csrRowPtr[i+1] += csrRowPtr[i]; } /// Compress sorted list of indices /** For use in converting COO format matrix to CSR or CSC format. * * @param n Maximum index. * @param nnz Number of non-zero matrix entries. * @param sortedIndices Sorted list of indices (COO format). * @param compressedIndices (Output) Compressed list of indices (CSR * or CSC format). Should have at least n+1 entries. */ template <typename IndexType_> void coo_compress(IndexType_ m, IndexType_ n, IndexType_ nnz, const IndexType_ * __restrict__ sortedIndices, IndexType_ * __restrict__ compressedIndices) { IndexType_ i; // Initialize everything to zero memset(compressedIndices, 0, (m+1)*sizeof(IndexType_)); // Count number of elements per row for(i=0; i<nnz; ++i) ++(compressedIndices[sortedIndices[i]+1]); // Compute cumulative sum to obtain row offsets/pointers for(i=0; i<m; ++i) compressedIndices[i+1] += compressedIndices[i]; } /// Convert COO format matrix to CSR format /** On output, matrix entries in COO format matrix will be sorted * (primarily by row index, secondarily by column index). * * @param m Number of matrix rows. * @param n Number of matrix columns. * @param nnz Number of non-zero matrix entries. * @param cooRowInd Row indices for COO matrix. * @param cooColInd Column indices for COO matrix. * @param cooRVal Real component of COO matrix entries. Ignored if * null pointer. * @param cooIVal Imaginary component of COO matrix entries. Ignored * if null pointer. * @param csrRowPtr Row pointers for CSR matrix. Should have at least * n+1 entries. * @param csrColInd Column indices for CSR matrix (identical to * output of cooColInd). Should have at least nnz entries. Ignored if * null pointer. * @param csrRVal Real component of CSR matrix entries (identical to * output of cooRVal). Should have at least nnz entries. Ignored if * null pointer. 
* @param csrIVal Imaginary component of CSR matrix entries
 *  (identical to output of cooIVal). Should have at least nnz
 *  entries. Ignored if null pointer.
 *  @return Zero if matrix was converted successfully. Otherwise
 *  non-zero.
 */
template <typename IndexType_, typename ValueType_>
int coo_to_csr(IndexType_ m, IndexType_ n, IndexType_ nnz,
               IndexType_ * __restrict__ cooRowInd,
               IndexType_ * __restrict__ cooColInd,
               ValueType_ * __restrict__ cooRVal,
               ValueType_ * __restrict__ cooIVal,
               IndexType_ * __restrict__ csrRowPtr,
               IndexType_ * __restrict__ csrColInd,
               ValueType_ * __restrict__ csrRVal,
               ValueType_ * __restrict__ csrIVal) {

  // Convert COO to CSR matrix
  // Sort by column first, then stably by row: after both passes entries are
  // ordered by row with ties broken by column.
  coo_sort(nnz, 0, cooRowInd, cooColInd, cooRVal, cooIVal);
  coo_sort(nnz, 1, cooRowInd, cooColInd, cooRVal, cooIVal);
  //coo_sort2<int,float>(m, nnz, cooRowInd, cooColInd);
  coo_compress(m, n, nnz, cooRowInd, csrRowPtr);

  // Copy arrays
  if(csrColInd!=NULL)
    memcpy(csrColInd, cooColInd, nnz*sizeof(IndexType_));
  if((cooRVal!=NULL) && (csrRVal!=NULL))
    memcpy(csrRVal, cooRVal, nnz*sizeof(ValueType_));
  if((cooIVal!=NULL) && (csrIVal!=NULL))
    memcpy(csrIVal, cooIVal, nnz*sizeof(ValueType_));

  return 0;
}

// Read n binary doubles from fpin into a float vector, mapping the
// +/-DBL_MAX sentinels to +/-FLT_MAX. Returns 0 on success, 1 on short read.
int read_binary_vector ( FILE* fpin, int n, std::vector<float>& val ) {
  size_t is_read1;

  double* t_storage = new double[n];
  is_read1 = fread(t_storage, sizeof(double), n, fpin);
  for (int i = 0; i < n; i++)
  {
    if (t_storage[i] == DBL_MAX)
      val[i] = FLT_MAX;
    else if (t_storage[i] == -DBL_MAX)
      val[i] = -FLT_MAX;
    else
      val[i] = static_cast<float>(t_storage[i]);
  }
  delete[] t_storage;

  if (is_read1 != (size_t)n) {
    printf("%s", "I/O fail\n");
    return 1;
  }
  return 0;
}

// Read n binary doubles from fpin directly into val.
// Returns 0 on success, 1 on short read.
int read_binary_vector ( FILE* fpin, int n, std::vector<double>& val ) {
  size_t is_read1;

  is_read1 = fread(&val[0], sizeof(double), n, fpin);

  if (is_read1 != (size_t)n) {
    printf("%s", "I/O fail\n");
    return 1;
  }
  return 0;
}

// Creates a gdf_column from a std::vector
template <typename col_type>
gdf_column_ptr create_gdf_column(std::vector<col_type> const & host_vector)
{
  // Create a new instance of a gdf_column with a custom deleter that will free
  // the associated device memory when it eventually goes out of scope
  gdf_column_ptr the_column{new gdf_column, gdf_col_deleter};

  // Allocate device storage for gdf_column and copy contents from host_vector
  const size_t input_size_bytes = host_vector.size() * sizeof(col_type);
  cudaStream_t stream{nullptr};
  ALLOC_TRY((void**)&(the_column->data), input_size_bytes, stream);
  cudaMemcpy(the_column->data, host_vector.data(), input_size_bytes, cudaMemcpyHostToDevice);

  // Deduce the type and set the gdf_dtype accordingly
  // (unsigned types deliberately map to the signed dtype of the same width)
  gdf_dtype gdf_col_type;
  if(std::is_same<col_type,int8_t>::value) gdf_col_type = GDF_INT8;
  else if(std::is_same<col_type,uint8_t>::value) gdf_col_type = GDF_INT8;
  else if(std::is_same<col_type,int16_t>::value) gdf_col_type = GDF_INT16;
  else if(std::is_same<col_type,uint16_t>::value) gdf_col_type = GDF_INT16;
  else if(std::is_same<col_type,int32_t>::value) gdf_col_type = GDF_INT32;
  else if(std::is_same<col_type,uint32_t>::value) gdf_col_type = GDF_INT32;
  else if(std::is_same<col_type,int64_t>::value) gdf_col_type = GDF_INT64;
  else if(std::is_same<col_type,uint64_t>::value) gdf_col_type = GDF_INT64;
  else if(std::is_same<col_type,float>::value) gdf_col_type = GDF_FLOAT32;
  else if(std::is_same<col_type,double>::value) gdf_col_type = GDF_FLOAT64;

  // Fill the gdf_column members
  the_column->valid = nullptr;      // no null bitmap: all entries valid
  the_column->null_count = 0;
  the_column->size = host_vector.size();
  the_column->dtype = gdf_col_type;
  gdf_dtype_extra_info extra_info;
  extra_info.time_unit = TIME_UNIT_NONE;
  the_column->dtype_info = extra_info;

  return the_column;
}

// Creates a gdf_column from a std::vector
// Out-parameter variant: fills a caller-owned gdf_column instead of
// returning a managed pointer (caller is responsible for freeing ->data).
template <typename col_type>
void create_gdf_column(std::vector<col_type> const & host_vector, gdf_column * the_column)
{
  // Allocate device storage for gdf_column and copy contents from host_vector
  const size_t input_size_bytes = host_vector.size() * sizeof(col_type);
  cudaStream_t stream{nullptr};
  ALLOC_TRY((void**)&(the_column->data), input_size_bytes, stream);
  cudaMemcpy(the_column->data, host_vector.data(), input_size_bytes, cudaMemcpyHostToDevice);

  // Deduce the type and set the gdf_dtype accordingly
  gdf_dtype gdf_col_type;
  if(std::is_same<col_type,int8_t>::value) gdf_col_type = GDF_INT8;
  else if(std::is_same<col_type,uint8_t>::value) gdf_col_type = GDF_INT8;
  else if(std::is_same<col_type,int16_t>::value) gdf_col_type = GDF_INT16;
  else if(std::is_same<col_type,uint16_t>::value) gdf_col_type = GDF_INT16;
  else if(std::is_same<col_type,int32_t>::value) gdf_col_type = GDF_INT32;
  else if(std::is_same<col_type,uint32_t>::value) gdf_col_type = GDF_INT32;
  else if(std::is_same<col_type,int64_t>::value) gdf_col_type = GDF_INT64;
  else if(std::is_same<col_type,uint64_t>::value) gdf_col_type = GDF_INT64;
  else if(std::is_same<col_type,float>::value) gdf_col_type = GDF_FLOAT32;
  else if(std::is_same<col_type,double>::value) gdf_col_type = GDF_FLOAT64;

  // Fill the gdf_column members
  the_column->valid = nullptr;
  the_column->null_count = 0;
  the_column->size = host_vector.size();
  the_column->dtype = gdf_col_type;
  gdf_dtype_extra_info extra_info;
  extra_info.time_unit = TIME_UNIT_NONE;
  the_column->dtype_info = extra_info;
}

// Free a gdf_column's device buffer and the struct itself.
void gdf_col_delete(gdf_column* col) {
  if (col)
  {
    col->size = 0;
    cudaStream_t stream{nullptr};
    if(col->data)
      ALLOC_FREE_TRY(col->data, stream);
#if 1
// If delete col is executed, the memory pointed by col is no longer valid and
// can be used in another memory allocation, so executing col->data = nullptr
// after delete col is dangerous, also, col = nullptr has no effect here (the
// address is passed by value, for col = nullptr should work, the input
// parameter should be gdf_column*& col (or alternatively, gdf_column** col and
// *col = nullptr also work)
    col->data = nullptr;
    delete col;
#else
    delete col;
    col->data = nullptr;
    col = nullptr;
#endif
  }
}

////////////////////////////////////////////////////////////////////////////////
// TODO: move this code to rapids-core
////////////////////////////////////////////////////////////////////////////////

// Define RAPIDS_DATASET_ROOT_DIR using a preprocessor variable to
// allow for a build to override the default. This is useful for
// having different builds for specific default dataset locations.
#ifndef RAPIDS_DATASET_ROOT_DIR
#define RAPIDS_DATASET_ROOT_DIR "/datasets"
#endif

// Lazily resolves the dataset root; the environment variable always wins
// over the compile-time default.
static const std::string& get_rapids_dataset_root_dir() {
  static std::string rdrd("");
  // Env var always overrides the value of RAPIDS_DATASET_ROOT_DIR
  if (rdrd == "") {
    const char* envVar = std::getenv("RAPIDS_DATASET_ROOT_DIR");
    rdrd = (envVar != NULL) ? envVar : RAPIDS_DATASET_ROOT_DIR;
  }
  return rdrd;
}
colorspace.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE % % C O O L O O R R SS P P A A C E % % C O O L O O RRRR SSS PPPP AAAAA C EEE % % C O O L O O R R SS P A A C E % % CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE % % % % % % MagickCore Image Colorspace Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/property.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/enhance.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/utility.h" /* Typedef declarations. */ typedef struct _TransformPacket { MagickRealType x, y, z; } TransformPacket; /* Forward declarations. */ static MagickBooleanType TransformsRGBImage(Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C o l o r s p a c e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageColorspaceType() returns the potential type of image: % sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc. % % To ensure the image type matches its potential, use SetImageColorspaceType(): % % (void) SetImageColorspaceType(image,GetImageColorspaceType(image), % exception); % % The format of the GetImageColorspaceType method is: % % ColorspaceType GetImageColorspaceType(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ColorspaceType GetImageColorspaceType(const Image *image,
  ExceptionInfo *exception)
{
  ColorspaceType
    colorspace;

  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Start from the declared colorspace, then demote to GRAY when the pixel
     data is actually bilevel or grayscale (with or without alpha). */
  colorspace=image->colorspace;
  type=IdentifyImageType(image,exception);
  if ((type == BilevelType) || (type == GrayscaleType) ||
      (type == GrayscaleAlphaType))
    colorspace=GRAYColorspace;
  return(colorspace);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   s R G B T r a n s f o r m I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  sRGBTransformImage() converts the reference image from sRGB to an alternate
%  colorspace.  The transformation matrices are not the standard ones: the
%  weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  The format of the sRGBTransformImage method is:
%
%      MagickBooleanType sRGBTransformImage(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace to transform the image to.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Convert quantum-range RGB to normalized [0,1] CMY (no black generation). */
static inline void ConvertRGBToCMY(const double red,const double green,
  const double blue,double *cyan,double *magenta,double *yellow)
{
  *cyan=QuantumScale*(QuantumRange-red);
  *magenta=QuantumScale*(QuantumRange-green);
  *yellow=QuantumScale*(QuantumRange-blue);
}

/* Linear map from CIE XYZ to an LMS cone-response space (coefficients as
   fixed by this implementation). */
static inline void ConvertXYZToLMS(const double x,const double y,
  const double z,double *L,double *M,double *S)
{
  *L=0.7328*x+0.4296*y-0.1624*z;
  *M=(-0.7036*x+1.6975*y+0.0061*z);
  *S=0.0030*x+0.0136*y+0.9834*z;
}

/* RGB -> LMS, by way of XYZ. */
static void ConvertRGBToLMS(const double red,const double green,
  const double blue,double *L,double *M,double *S)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLMS(X,Y,Z,L,M,S);
}

/* RGB -> CIE L*a*b*, by way of XYZ. */
static void ConvertRGBToLab(const double red,const double green,
  const double blue,double *L,double *a,double *b)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLab(X,Y,Z,L,a,b);
}

/* RGB -> CIE L*u*v*, by way of XYZ. */
static void ConvertRGBToLuv(const double red,const double green,
  const double blue,double *L,double *u,double *v)
{
  double
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  ConvertXYZToLuv(X,Y,Z,L,u,v);
}

/* RGB -> xyY chromaticity + luminance; PerceptibleReciprocal() guards the
   X+Y+Z == 0 (black) case. */
static void ConvertRGBToxyY(const double red,const double green,
  const double blue,double *low_x,double *low_y,double *cap_Y)
{
  double
    gamma,
    X,
    Y,
    Z;

  ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
  gamma=PerceptibleReciprocal(X+Y+Z);
  *low_x=gamma*X;
  *low_y=gamma*Y;
  *cap_Y=Y;
}

/* RGB -> YDbDr; the chroma channels are offset by 0.5 to stay non-negative. */
static void ConvertRGBToYDbDr(const double red,const double green,
  const double blue,double *Y,double *Db,double *Dr)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5;
  *Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5;
}

/* RGB -> YIQ; I and Q are offset by 0.5 to stay non-negative. */
static void ConvertRGBToYIQ(const double red,const double green,
  const double blue,double *Y,double *I,double *Q)
{
  *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
  *I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5;
  *Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5;
}

static void
ConvertRGBToYPbPr(const double red,const double green, const double blue,double *Y,double *Pb,double *Pr) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5; *Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5; } static void ConvertRGBToYCbCr(const double red,const double green, const double blue,double *Y,double *Cb,double *Cr) { ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr); } static void ConvertRGBToYUV(const double red,const double green, const double blue,double *Y,double *U,double *V) { *Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue); *U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5; *V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5; } static MagickBooleanType sRGBTransformImage(Image *image, const ColorspaceType colorspace,ExceptionInfo *exception) { #define sRGBTransformImageTag "RGBTransform/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo primary_info; register ssize_t i; ssize_t y; TransformPacket *x_map, *y_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(colorspace != sRGBColorspace); assert(colorspace != TransparentColorspace); assert(colorspace != UndefinedColorspace); status=MagickTrue; progress=0; switch (colorspace) { case CMYKColorspace: { PixelInfo zero; /* Convert RGB to CMYK colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertRGBToCMYK(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->type=image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType gray; gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+ 0.072186*GetPixelBlue(image,q); SetPixelGray(image,ClampToQuantum(DecodePixelGamma(gray)),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case GRAYColorspace: { /* Transform image from sRGB to GRAY. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType gray; gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+ 0.072186*GetPixelBlue(image,q); SetPixelGray(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); image->type=GrayscaleType; return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from sRGB to target colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); switch (colorspace) { case CMYColorspace: { ConvertRGBToCMY(red,green,blue,&X,&Y,&Z); break; } case HCLColorspace: { ConvertRGBToHCL(red,green,blue,&X,&Y,&Z); break; } case HCLpColorspace: { ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z); break; } case HSBColorspace: { ConvertRGBToHSB(red,green,blue,&X,&Y,&Z); break; } case HSIColorspace: { ConvertRGBToHSI(red,green,blue,&X,&Y,&Z); break; } case HSLColorspace: { ConvertRGBToHSL(red,green,blue,&X,&Y,&Z); break; } case HSVColorspace: { ConvertRGBToHSV(red,green,blue,&X,&Y,&Z); break; } case HWBColorspace: { ConvertRGBToHWB(red,green,blue,&X,&Y,&Z); break; } case LabColorspace: { ConvertRGBToLab(red,green,blue,&X,&Y,&Z); break; } case LCHColorspace: case LCHabColorspace: { ConvertRGBToLCHab(red,green,blue,&X,&Y,&Z); break; } case LCHuvColorspace: { ConvertRGBToLCHuv(red,green,blue,&X,&Y,&Z); break; } case LMSColorspace: { ConvertRGBToLMS(red,green,blue,&X,&Y,&Z); break; } case LuvColorspace: { ConvertRGBToLuv(red,green,blue,&X,&Y,&Z); break; } case xyYColorspace: { ConvertRGBToxyY(red,green,blue,&X,&Y,&Z); break; } case XYZColorspace: 
{ ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z); break; } case YCbCrColorspace: { ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z); break; } case YDbDrColorspace: { ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z); break; } case YIQColorspace: { ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z); break; } case YPbPrColorspace: { ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z); break; } case YUVColorspace: { ConvertRGBToYUV(red,green,blue,&X,&Y,&Z); break; } default: { X=QuantumScale*red; Y=QuantumScale*green; Z=QuantumScale*blue; break; } } SetPixelRed(image,ClampToQuantum(QuantumRange*X),q); SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q); SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { #define DisplayGamma (1.0/1.7) #define FilmGamma 0.6 #define ReferenceBlack 95.0 #define ReferenceWhite 685.0 const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform RGB to Log colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+ log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002/ film_gamma))/1024.0)); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) DecodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=(double) DecodePixelGamma((MagickRealType) GetPixelGreen(image,q)); 
blue=(double) DecodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q); SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))], q); SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform image from sRGB to linear RGB. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red; red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if 
(SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(&primary_info,0,sizeof(primary_info)); switch (colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B I and Q, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.33333*(double) i); x_map[i].y=(MagickRealType) (0.50000*(double) i); x_map[i].z=(MagickRealType) (-0.25000*(double) i); y_map[i].x=(MagickRealType) (0.33334*(double) i); y_map[i].y=(MagickRealType) (0.00000*(double) i); y_map[i].z=(MagickRealType) (0.50000*(double) i); z_map[i].x=(MagickRealType) (0.33333*(double) i); z_map[i].y=(MagickRealType) (-0.50000*(double) i); z_map[i].z=(MagickRealType) (-0.25000*(double) i); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.601): Y = 0.2988390*R+0.5868110*G+0.1143500*B Cb= -0.1687367*R-0.3312640*G+0.5000000*B Cr= 0.5000000*R-0.4186880*G-0.0813120*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. */ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.298839*(double) i); x_map[i].y=(MagickRealType) (-0.1687367*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].x=(MagickRealType) (0.586811*(double) i); y_map[i].y=(MagickRealType) (-0.331264*(double) i); y_map[i].z=(MagickRealType) (-0.418688*(double) i); z_map[i].x=(MagickRealType) (0.114350*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); z_map[i].z=(MagickRealType) (-0.081312*(double) i); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables (ITU-R BT.709): Y = 0.212656*R+0.715158*G+0.072186*B Cb= -0.114572*R-0.385428*G+0.500000*B Cr= 0.500000*R-0.454153*G-0.045847*B Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0 through QuantumRange. 
*/ primary_info.y=(double) (MaxMap+1.0)/2.0; primary_info.z=(double) (MaxMap+1.0)/2.0; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (0.212656*(double) i); x_map[i].y=(MagickRealType) (-0.114572*(double) i); x_map[i].z=(MagickRealType) (0.500000*(double) i); y_map[i].x=(MagickRealType) (0.715158*(double) i); y_map[i].y=(MagickRealType) (-0.385428*(double) i); y_map[i].z=(MagickRealType) (-0.454153*(double) i); z_map[i].x=(MagickRealType) (0.072186*(double) i); z_map[i].y=(MagickRealType) (0.500000*(double) i); z_map[i].z=(MagickRealType) (-0.045847*(double) i); } break; } case YCCColorspace: { /* Initialize YCC tables: Y = 0.298839*R+0.586811*G+0.114350*B C1= -0.298839*R-0.586811*G+0.88600*B C2= 0.70100*R-0.586811*G-0.114350*B YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156)); primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137)); for (i=0; i <= (ssize_t) (0.018*MaxMap); i++) { x_map[i].x=0.005382*i; x_map[i].y=(-0.003296)*i; x_map[i].z=0.009410*i; y_map[i].x=0.010566*i; y_map[i].y=(-0.006471)*i; y_map[i].z=(-0.007880)*i; z_map[i].x=0.002052*i; z_map[i].y=0.009768*i; z_map[i].z=(-0.001530)*i; } for ( ; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.298839*(1.099*i-0.099); x_map[i].y=(-0.298839)*(1.099*i-0.099); x_map[i].z=0.70100*(1.099*i-0.099); y_map[i].x=0.586811*(1.099*i-0.099); y_map[i].y=(-0.586811)*(1.099*i-0.099); y_map[i].z=(-0.586811)*(1.099*i-0.099); z_map[i].x=0.114350*(1.099*i-0.099); z_map[i].y=0.88600*(1.099*i-0.099); z_map[i].z=(-0.114350)*(1.099*i-0.099); } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); x_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].x=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; z_map[i].y=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert from sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register Quantum *magick_restrict q; register ssize_t x; register unsigned int blue, green, red; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { red=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelRed(image,q))); green=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelGreen(image,q))); blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType) GetPixelBlue(image,q))); pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+ primary_info.x; pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+ primary_info.y; pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+ primary_info.z; SetPixelRed(image,ScaleMapToQuantum(pixel.red),q); SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q); SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != 
(MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,sRGBTransformImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { register unsigned int blue, green, red; /* Convert PseudoClass image. */ for (i=0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z; image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red); image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green); image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue); } (void) SyncImage(image,exception); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,colorspace,exception) == MagickFalse) return(MagickFalse); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColorspace() sets the colorspace member of the Image structure. % % The format of the SetImageColorspace method is: % % MagickBooleanType SetImageColorspace(Image *image, % const ColorspaceType colorspace,ExceptiionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o colorspace: the colorspace. 
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  ImageType
    type;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->colorspace == colorspace)
    return(MagickTrue);
  /*
    Tag the new colorspace and reset colorimetry to defaults; the branches
    below then restore the values appropriate for the colorspace family.
  */
  image->colorspace=colorspace;
  image->rendering_intent=UndefinedIntent;
  image->gamma=1.000/2.200;
  (void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
  /* Save the type: SyncImagePixelCache() below may clobber it. */
  type=image->type;
  if (IsGrayColorspace(colorspace) != MagickFalse)
    {
      if (colorspace == LinearGRAYColorspace)
        image->gamma=1.000;
      type=GrayscaleType;
    }
  else
    if ((IsRGBColorspace(colorspace) != MagickFalse) ||
        (colorspace == XYZColorspace) || (colorspace == xyYColorspace))
      image->gamma=1.000;
    else
      {
        /* Non-linear, non-gray colorspaces get sRGB primaries/white point. */
        image->rendering_intent=PerceptualIntent;
        image->chromaticity.red_primary.x=0.6400;
        image->chromaticity.red_primary.y=0.3300;
        image->chromaticity.red_primary.z=0.0300;
        image->chromaticity.green_primary.x=0.3000;
        image->chromaticity.green_primary.y=0.6000;
        image->chromaticity.green_primary.z=0.1000;
        image->chromaticity.blue_primary.x=0.1500;
        image->chromaticity.blue_primary.y=0.0600;
        image->chromaticity.blue_primary.z=0.7900;
        image->chromaticity.white_point.x=0.3127;
        image->chromaticity.white_point.y=0.3290;
        image->chromaticity.white_point.z=0.3583;
      }
  status=SyncImagePixelCache(image,exception);
  image->type=type;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e G r a y                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageGray() returns MagickTrue if all the pixels in the image have the
%  same red, green, and
blue intensities and changes the type of the image to
%  bi-level or grayscale.
%
%  The format of the SetImageGray method is:
%
%      MagickBooleanType SetImageGray(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
  ExceptionInfo *exception)
{
  const char
    *auto_grayscale;

  ImageType
    gray_type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Nothing to do when the image is already in a gray colorspace.
  */
  if (IsImageGray(image))
    return(MagickTrue);
  /*
    Only sRGB-compatible images are eligible for automatic demotion, and the
    "colorspace:auto-grayscale" property can veto it explicitly.
  */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  auto_grayscale=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(auto_grayscale) != MagickFalse)
    return(MagickFalse);
  /*
    Inspect the pixels; bail out unless every pixel is gray.
  */
  gray_type=IdentifyImageGray(image,exception);
  if (gray_type == UndefinedType)
    return(MagickFalse);
  /*
    Demote to GRAY and record the identified bi-level/grayscale type.
  */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=gray_type;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e M o n o c h r o m e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageMonochrome() returns MagickTrue if all the pixels in the image have
%  the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange and changes the type of the image to bi-level.
%
%  The format of the SetImageMonochrome method is:
%
%      MagickBooleanType SetImageMonochrome(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
  ExceptionInfo *exception)
{
  const char
    *auto_grayscale;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Already bi-level: nothing to do.
  */
  if (image->type == BilevelType)
    return(MagickTrue);
  /*
    Only sRGB-compatible images are eligible for automatic demotion, and the
    "colorspace:auto-grayscale" property can veto it explicitly.
  */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  auto_grayscale=GetImageProperty(image,"colorspace:auto-grayscale",exception);
  if (IsStringFalse(auto_grayscale) != MagickFalse)
    return(MagickFalse);
  /*
    Inspect the pixels; bail out unless every pixel is pure black or white.
  */
  if (IdentifyImageMonochrome(image,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Demote to GRAY colorspace with a bi-level type.
  */
  image->colorspace=GRAYColorspace;
  if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
    return(MagickFalse);
  image->type=BilevelType;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s f o r m I m a g e C o l o r s p a c e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformImageColorspace() transforms an image colorspace, changing the
%  image data to reflect the new colorspace.
%
%  The format of the TransformImageColorspace method is:
%
%      MagickBooleanType TransformImageColorspace(Image *image,
%        const ColorspaceType colorspace,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o colorspace: the colorspace.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == colorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /* The pixel data will no longer match any embedded color profile. */
  (void) DeleteImageProfile(image,"icc");
  (void) DeleteImageProfile(image,"icm");
  if (colorspace == UndefinedColorspace)
    return(SetImageColorspace(image,colorspace,exception));
  /*
    Convert the reference image from an alternate colorspace to sRGB.
  */
  if (IssRGBColorspace(colorspace) != MagickFalse)
    return(TransformsRGBImage(image,exception));
  status=MagickTrue;
  /* Route through sRGB when the source is not already sRGB-compatible. */
  if (IssRGBColorspace(image->colorspace) == MagickFalse)
    status=TransformsRGBImage(image,exception);
  if (status == MagickFalse)
    return(status);
  /*
    Convert the reference image from sRGB to an alternate colorspace.
  */
  if (sRGBTransformImage(image,colorspace,exception) == MagickFalse)
    status=MagickFalse;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a n s f o r m s R G B I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformsRGBImage() converts the reference image from an alternate
%  colorspace to sRGB.  The transformation matrices are not the standard ones:
%  the weights are rescaled to normalize the range of the transformed values
%  to be [0..QuantumRange].
%
%  The format of the TransformsRGBImage method is:
%
%      MagickBooleanType TransformsRGBImage(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline void ConvertCMYToRGB(const double cyan,const double magenta, const double yellow,double *red,double *green,double *blue) { *red=QuantumRange*(1.0-cyan); *green=QuantumRange*(1.0-magenta); *blue=QuantumRange*(1.0-yellow); } static inline void ConvertLMSToXYZ(const double L,const double M,const double S, double *X,double *Y,double *Z) { *X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S; *Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S; *Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S; } static inline void ConvertLMSToRGB(const double L,const double M, const double S,double *red,double *green,double *blue) { double X, Y, Z; ConvertLMSToXYZ(L,M,S,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertLuvToRGB(const double L,const double u, const double v,double *red,double *green,double *blue) { double X, Y, Z; ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline ssize_t RoundToYCC(const double value) { if (value <= 0.0) return(0); if (value >= 1388.0) return(1388); return((ssize_t) (value+0.5)); } static inline void ConvertLabToRGB(const double L,const double a, const double b,double *red,double *green,double *blue) { double X, Y, Z; ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),&X,&Y,&Z); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static inline void ConvertxyYToRGB(const double low_x,const double low_y, const double cap_Y,double *red,double *green,double *blue) { double gamma, X, Y, Z; gamma=PerceptibleReciprocal(low_y); X=gamma*cap_Y*low_x; Y=cap_Y; Z=gamma*cap_Y*(1.0-low_x-low_y); ConvertXYZToRGB(X,Y,Z,red,green,blue); } static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr, double *red,double *green,double *blue) { *red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*(Pb-0.5)+ 1.4019995886561440468*(Pr-0.5)); 
*green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*(Pb-0.5)- 0.71413649331646789076*(Pr-0.5)); *blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*(Pb-0.5)+ 2.1453384174593273e-06*(Pr-0.5)); } static void ConvertYCbCrToRGB(const double Y,const double Cb, const double Cr,double *red,double *green,double *blue) { ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue); } static void ConvertYIQToRGB(const double Y,const double I,const double Q, double *red,double *green,double *blue) { *red=QuantumRange*(Y+0.9562957197589482261*(I-0.5)+0.6210244164652610754* (Q-0.5)); *green=QuantumRange*(Y-0.2721220993185104464*(I-0.5)-0.6473805968256950427* (Q-0.5)); *blue=QuantumRange*(Y-1.1069890167364901945*(I-0.5)+1.7046149983646481374* (Q-0.5)); } static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr, double *red,double *green,double *blue) { *red=QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)- 0.52591263066186533*(Dr-0.5)); *green=QuantumRange*(Y-0.12913289889050927*(Db-0.5)+ 0.26789932820759876*(Dr-0.5)); *blue=QuantumRange*(Y+0.66467905997895482*(Db-0.5)- 7.9202543533108e-05*(Dr-0.5)); } static void ConvertYUVToRGB(const double Y,const double U,const double V, double *red,double *green,double *blue) { *red=QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+1.1398279671717170825* (V-0.5)); *green=QuantumRange*(Y-0.3946101641414141437*(U-0.5)-0.5805003156565656797* (V-0.5)); *blue=QuantumRange*(Y+2.0319996843434342537*(U-0.5)-4.813762626262513e-04* (V-0.5)); } static MagickBooleanType TransformsRGBImage(Image *image, ExceptionInfo *exception) { #define TransformsRGBImageTag "Transform/Image" static const float YCCMap[1389] = { 0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f, 0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f, 0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f, 0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f, 0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 
0.020893f, 0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f, 0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f, 0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f, 0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f, 0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f, 0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f, 0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f, 0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f, 0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f, 0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f, 0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f, 0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f, 0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f, 0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f, 0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f, 0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f, 0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f, 0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f, 0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f, 0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f, 0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f, 0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f, 0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f, 0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f, 0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f, 0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f, 0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f, 0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f, 0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f, 0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f, 
0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f, 0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f, 0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f, 0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f, 0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f, 0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f, 0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f, 0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f, 0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f, 0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f, 0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f, 0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f, 0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f, 0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f, 0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f, 0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f, 0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f, 0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f, 0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f, 0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f, 0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f, 0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f, 0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f, 0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f, 0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f, 0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f, 0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f, 0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f, 0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f, 0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f, 0.280980f, 
0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f, 0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f, 0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f, 0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f, 0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f, 0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f, 0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f, 0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f, 0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f, 0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f, 0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f, 0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f, 0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f, 0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f, 0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f, 0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f, 0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f, 0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f, 0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f, 0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f, 0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f, 0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f, 0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f, 0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f, 0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f, 0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f, 0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f, 0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f, 0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f, 0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f, 0.410663f, 0.411383f, 
0.412104f, 0.412824f, 0.413545f, 0.414265f, 0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f, 0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f, 0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f, 0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f, 0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f, 0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f, 0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f, 0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f, 0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f, 0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f, 0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f, 0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f, 0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f, 0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f, 0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f, 0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f, 0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f, 0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f, 0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f, 0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f, 0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f, 0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f, 0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f, 0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f, 0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f, 0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f, 0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f, 0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f, 0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f, 0.540346f, 0.541066f, 0.541787f, 
0.542507f, 0.543228f, 0.543948f, 0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f, 0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f, 0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f, 0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f, 0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f, 0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f, 0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f, 0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f, 0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f, 0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f, 0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f, 0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f, 0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f, 0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f, 0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f, 0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f, 0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f, 0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f, 0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f, 0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f, 0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f, 0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f, 0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f, 0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f, 0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f, 0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f, 0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f, 0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f, 0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f, 0.670029f, 0.670749f, 0.671470f, 0.672190f, 
0.672911f, 0.673631f, 0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f, 0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f, 0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f, 0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f, 0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f, 0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f, 0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f, 0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f, 0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f, 0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f, 0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f, 0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f, 0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f, 0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f, 0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f, 0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f, 0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f, 0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f, 0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f, 0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f, 0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f, 0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f, 0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f, 0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f, 0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f, 0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f, 0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f, 0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f, 0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f, 0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 
0.803314f, 0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f, 0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f, 0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f, 0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f, 0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f, 0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f, 0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f, 0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f, 0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f, 0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f, 0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f, 0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f, 0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f, 0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f, 0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f, 0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f, 0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f, 0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f, 0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f, 0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f, 0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f, 0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f, 0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f, 0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f, 0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f, 0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f, 0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f, 0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f, 0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f, 0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f, 
0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f, 0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f, 0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f, 0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f, 0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f, 0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f, 0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f, 0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f, 0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f, 0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f, 0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f, 0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f, 0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f, 0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f, 0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f, 0.998559f, 0.999280f, 1.000000f }; CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; TransformPacket *y_map, *x_map, *z_map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; progress=0; switch (image->colorspace) { case CMYKColorspace: { PixelInfo zero; /* Transform image from CMYK to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } GetPixelInfo(image,&zero); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); ConvertCMYKToRGB(&pixel); SetPixelViaPixelInfo(image,&pixel,q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LinearGRAYColorspace: { /* Transform linear GRAY to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+ 0.072186*GetPixelBlue(image,q); gray=EncodePixelGamma(gray); SetPixelRed(image,ClampToQuantum(gray),q); SetPixelGreen(image,ClampToQuantum(gray),q); SetPixelBlue(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case GRAYColorspace: { /* Transform linear GRAY to sRGB colorspace. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { MagickRealType gray; gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+ 0.072186*GetPixelBlue(image,q); SetPixelRed(image,ClampToQuantum(gray),q); SetPixelGreen(image,ClampToQuantum(gray),q); SetPixelBlue(image,ClampToQuantum(gray),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case CMYColorspace: case HCLColorspace: case HCLpColorspace: case HSBColorspace: case HSIColorspace: case HSLColorspace: case HSVColorspace: case HWBColorspace: case LabColorspace: case LCHColorspace: case LCHabColorspace: case LCHuvColorspace: case LMSColorspace: case LuvColorspace: case xyYColorspace: case XYZColorspace: case YCbCrColorspace: case YDbDrColorspace: case YIQColorspace: case YPbPrColorspace: case YUVColorspace: { /* Transform image from source colorspace to sRGB. 
*/ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red, X, Y, Z; X=QuantumScale*GetPixelRed(image,q); Y=QuantumScale*GetPixelGreen(image,q); Z=QuantumScale*GetPixelBlue(image,q); switch (image->colorspace) { case CMYColorspace: { ConvertCMYToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLColorspace: { ConvertHCLToRGB(X,Y,Z,&red,&green,&blue); break; } case HCLpColorspace: { ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue); break; } case HSBColorspace: { ConvertHSBToRGB(X,Y,Z,&red,&green,&blue); break; } case HSIColorspace: { ConvertHSIToRGB(X,Y,Z,&red,&green,&blue); break; } case HSLColorspace: { ConvertHSLToRGB(X,Y,Z,&red,&green,&blue); break; } case HSVColorspace: { ConvertHSVToRGB(X,Y,Z,&red,&green,&blue); break; } case HWBColorspace: { ConvertHWBToRGB(X,Y,Z,&red,&green,&blue); break; } case LabColorspace: { ConvertLabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ConvertLCHabToRGB(X,Y,Z,&red,&green,&blue); break; } case LCHuvColorspace: { ConvertLCHuvToRGB(X,Y,Z,&red,&green,&blue); break; } case LMSColorspace: { ConvertLMSToRGB(X,Y,Z,&red,&green,&blue); break; } case LuvColorspace: { ConvertLuvToRGB(X,Y,Z,&red,&green,&blue); break; } case xyYColorspace: { ConvertxyYToRGB(X,Y,Z,&red,&green,&blue); break; } case 
XYZColorspace: { ConvertXYZToRGB(X,Y,Z,&red,&green,&blue); break; } case YCbCrColorspace: { ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue); break; } case YDbDrColorspace: { ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue); break; } case YIQColorspace: { ConvertYIQToRGB(X,Y,Z,&red,&green,&blue); break; } case YPbPrColorspace: { ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue); break; } case YUVColorspace: { ConvertYUVToRGB(X,Y,Z,&red,&green,&blue); break; } default: { red=QuantumRange*X; green=QuantumRange*Y; blue=QuantumRange*Z; break; } } SetPixelRed(image,ClampToQuantum(red),q); SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case LogColorspace: { const char *value; double black, density, film_gamma, gamma, reference_black, reference_white; Quantum *logmap; /* Transform Log to sRGB colorspace. 
*/ density=DisplayGamma; gamma=DisplayGamma; value=GetImageProperty(image,"gamma",exception); if (value != (const char *) NULL) gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL)); film_gamma=FilmGamma; value=GetImageProperty(image,"film-gamma",exception); if (value != (const char *) NULL) film_gamma=StringToDouble(value,(char **) NULL); reference_black=ReferenceBlack; value=GetImageProperty(image,"reference-black",exception); if (value != (const char *) NULL) reference_black=StringToDouble(value,(char **) NULL); reference_white=ReferenceWhite; value=GetImageProperty(image,"reference-white",exception); if (value != (const char *) NULL) reference_white=StringToDouble(value,(char **) NULL); logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*logmap)); if (logmap == (Quantum *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002/ film_gamma); for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++) logmap[i]=(Quantum) 0; for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++) logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)* (pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002/ film_gamma)-black)); for ( ; i <= (ssize_t) MaxMap; i++) logmap[i]=QuantumRange; if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == 
(Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))]; green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))]; blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))]; SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType) red)),q); SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType) green)),q); SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType) blue)),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); logmap=(Quantum *) RelinquishMagickMemory(logmap); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } case RGBColorspace: case scRGBColorspace: { /* Transform linear RGB to sRGB colorspace. */ if (image->storage_class == PseudoClass) { if (SyncImage(image,exception) == MagickFalse) return(MagickFalse); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); } image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=(ssize_t) image->columns; x != 0; x--) { double blue, green, red; red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q)); green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q)); blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q)); SetPixelRed(image,ClampToQuantum(red),q); 
SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(status); } default: break; } /* Allocate the tables. */ x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*x_map)); y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*y_map)); z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL, sizeof(*z_map)); if ((x_map == (TransformPacket *) NULL) || (y_map == (TransformPacket *) NULL) || (z_map == (TransformPacket *) NULL)) { if (z_map != (TransformPacket *) NULL) z_map=(TransformPacket *) RelinquishMagickMemory(z_map); if (y_map != (TransformPacket *) NULL) y_map=(TransformPacket *) RelinquishMagickMemory(y_map); if (x_map != (TransformPacket *) NULL) x_map=(TransformPacket *) RelinquishMagickMemory(x_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } switch (image->colorspace) { case OHTAColorspace: { /* Initialize OHTA tables: I1 = 0.33333*R+0.33334*G+0.33333*B I2 = 0.50000*R+0.00000*G-0.50000*B I3 =-0.25000*R+0.50000*G-0.25000*B R = I1+1.00000*I2-0.66668*I3 G = I1+0.00000*I2+1.33333*I3 B = I1-1.00000*I2-0.66668*I3 I and Q, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*(double) i); y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*(double) i); y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap)); z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap)); } break; } case Rec601YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.402000*Cr G = Y-0.344136*Cb-0.714136*Cr B = Y+1.772000*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=0.99999999999914679361*(double) i; y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap); z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap); x_map[i].y=0.99999975910502514331*(double) i; y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap); z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap); x_map[i].z=1.00000124040004623180*(double) i; y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap); z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap); } break; } case Rec709YCbCrColorspace: { /* Initialize YCbCr tables: R = Y +1.574800*Cr G = Y-0.187324*Cb-0.468124*Cr B = Y+1.855600*Cb Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0 through QuantumRange. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*i); y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap)); x_map[i].y=(MagickRealType) (1.0*i); y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap)); z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap)); x_map[i].z=(MagickRealType) (1.0*i); y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap)); z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap)); } break; } case YCCColorspace: { /* Initialize YCC tables: R = Y +1.340762*C2 G = Y-0.317038*C1-0.682243*C2 B = Y+1.632639*C1 YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.3584000*(double) i); y_map[i].x=(MagickRealType) 0.0000000; z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].y=(MagickRealType) (1.3584000*(double) i); y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(137)))); x_map[i].z=(MagickRealType) (1.3584000*(double) i); y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double) ScaleQuantumToMap(ScaleCharToQuantum(156)))); z_map[i].z=(MagickRealType) 0.0000000; } break; } default: { /* Linear conversion tables. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i <= (ssize_t) MaxMap; i++) { x_map[i].x=(MagickRealType) (1.0*(double) i); y_map[i].x=(MagickRealType) 0.0; z_map[i].x=(MagickRealType) 0.0; x_map[i].y=(MagickRealType) 0.0; y_map[i].y=(MagickRealType) (1.0*(double) i); z_map[i].y=(MagickRealType) 0.0; x_map[i].z=(MagickRealType) 0.0; y_map[i].z=(MagickRealType) 0.0; z_map[i].z=(MagickRealType) (1.0*(double) i); } break; } } /* Convert to sRGB. */ switch (image->storage_class) { case DirectClass: default: { /* Convert DirectClass image. */ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t blue, green, red; red=ScaleQuantumToMap(GetPixelRed(image,q)); green=ScaleQuantumToMap(GetPixelGreen(image,q)); blue=ScaleQuantumToMap(GetPixelBlue(image,q)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) 
ScaleMapToQuantum(pixel.blue); } SetPixelRed(image,ClampToQuantum(pixel.red),q); SetPixelGreen(image,ClampToQuantum(pixel.green),q); SetPixelBlue(image,ClampToQuantum(pixel.blue),q); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,TransformsRGBImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); break; } case PseudoClass: { /* Convert PseudoClass image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { PixelInfo pixel; register size_t blue, green, red; red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red)); green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green)); blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue)); pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x; pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y; pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z; if (image->colorspace == YCCColorspace) { pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/ (double) MaxMap)]; pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/ (double) MaxMap)]; pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/ (double) MaxMap)]; } else { pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red); pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green); pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue); } image->colormap[i].red=(double) ClampToQuantum(pixel.red); image->colormap[i].green=(double) ClampToQuantum(pixel.green); image->colormap[i].blue=(double) ClampToQuantum(pixel.blue); } (void) 
SyncImage(image,exception); break; } } /* Relinquish resources. */ z_map=(TransformPacket *) RelinquishMagickMemory(z_map); y_map=(TransformPacket *) RelinquishMagickMemory(y_map); x_map=(TransformPacket *) RelinquishMagickMemory(x_map); if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse) return(MagickFalse); return(MagickTrue); }
CouplingProblem.h
#pragma once

#include "PhaseFieldProblem.h"
#include "NavierStokesProblem.h"
#include "base_problems/CouplingBaseProblem2_cxx11.h"

namespace AMDiS { namespace extensions {

/// Couples a (multi-component) phase-field problem with a Navier-Stokes
/// problem: phase/chemical-potential fields are interpolated onto the global
/// NS mesh to build a capillary force term, and the NS velocity is
/// interpolated back onto each local phase-field mesh as an advection field.
class Coupling : public CouplingBaseProblem<ProblemStat, base_problems::PhaseFieldProblem, base_problems::NavierStokesProblem>
{
  using Super = CouplingBaseProblem<ProblemStat, base_problems::PhaseFieldProblem, base_problems::NavierStokesProblem>;

public:
  /// Stores references to both sub-problems and reads the force scaling
  /// factor from the init-file parameter "main->v0" (default 1.0).
  Coupling(base_problems::PhaseFieldProblem& vecProb, base_problems::NavierStokesProblem& nsProb)
    : Super("main", vecProb, nsProb)
    , vecProb_(vecProb)
    , nsProb_(nsProb)
  {
    Parameters::get("main->v0", v0_);
  }

  /// Allocates all coupling DOF vectors.
  /// NOTE(review): the per-problem vectors are emplace_back'd without a prior
  /// clear(), so calling initData() twice would grow them — presumably it is
  /// only called once per run; confirm against the framework's lifecycle.
  virtual void initData() override
  {
    Super::initData();

    // Global (NS-mesh) phase field and force field.
    phi_.reset(new DOFVector<double>(nsProb_.getFeSpace(0), "phi"));
    force_.reset(new DOFVector<WorldVector<double>>(nsProb_.getFeSpace(0), "mu_grad_phi"));

    // One global phi/mu copy per sub-problem (on the NS fe-space) and one
    // local velocity-component pair per sub-problem (on that problem's
    // fe-space). Note: the debug names ("phi_i", ...) are literal strings,
    // not indexed per i.
    for (int i = 0; i < vecProb_.getNumProblems(); ++i) {
      phis_.emplace_back(new DOFVector<double>(nsProb_.getFeSpace(0), "phi_i"));
      mus_.emplace_back(new DOFVector<double>(nsProb_.getFeSpace(0), "mu_i"));
      velocitiesX_.emplace_back(new DOFVector<double>(vecProb_.getFeSpace(i,0), "velX_i"));
      velocitiesY_.emplace_back(new DOFVector<double>(vecProb_.getFeSpace(i,0), "velY_i"));
    }

    // Non-owning (x, y) views of the velocity components, used as advection
    // field in fillCouplingOperatorsVec().
    velocities_.resize(vecProb_.getNumProblems());
    for (int i = 0; i < vecProb_.getNumProblems(); ++i) {
      velocities_[i][0] = velocitiesX_[i].get();
      velocities_[i][1] = velocitiesY_[i].get();
    }

    nsProb_.setPhi(phi_.get());
    // "phase->space->output" is an init-file parameter prefix, not a path.
    fileWriter_.reset(new FileWriter("phase->space->output", getMesh(), phi_.get()));
  }

  /// Solves the phase-field initial problem first, coarsens/re-fits the
  /// global mesh, builds the initial global phi and force fields, and only
  /// then solves the NS initial problem (which already sees phi via setPhi).
  virtual void solveInitialProblem(AdaptInfo* adaptInfo) override
  {
    vecProb_.solveInitialProblem(adaptInfo);

    // -20 levels: effectively coarsen the global mesh as far as possible
    // before fitting it to the union of the local meshes.
    getCoarseningManager()->globalCoarsen(getMesh(), -20);
    adapt_global_mesh();

    local_to_global(0, phis_);   // component 0: phase fields
    make_phi(phis_, *phi_);

    local_to_global(1, mus_);    // component 1: chemical potentials
    make_force(phis_, mus_, *force_);

    nsProb_.solveInitialProblem(adaptInfo);
  }

  virtual void transferInitialSolution(AdaptInfo *adaptInfo) override
  {
    Super::transferInitialSolution(adaptInfo);
    fileWriter_->writeFiles(adaptInfo, false);
  }

  /// Interpolates the current NS velocity components onto every local
  /// phase-field fe-space before the phase-field timestep.
  virtual void initTimestep(AdaptInfo* adaptInfo) override
  {
    Super::initTimestep(adaptInfo);

    // NOTE(review): assumes DOFVector::interpol is safe to call concurrently
    // on distinct targets reading the same source vector — TODO confirm.
    #pragma omp parallel for
    for (int i = 0; i < vecProb_.getNumProblems(); ++i) {
      velocitiesX_[i]->interpol(nsProb_.getProblem()->getSolution(0));
      velocitiesY_[i]->interpol(nsProb_.getProblem()->getSolution(1));
    }
  }

  /// Closes the phase-field timestep, rebuilds the global phi/force fields on
  /// the re-fitted global mesh, adapts the mesh along the interface, then
  /// closes the NS timestep and writes output.
  virtual void closeTimestep(AdaptInfo* adaptInfo) override
  {
    vecProb_.closeTimestep(adaptInfo);

    adapt_global_mesh();

    local_to_global(0, phis_);
    make_phi(phis_, *phi_);

    local_to_global(1, mus_);
    make_force(phis_, mus_, *force_);

    // refine/coarsen global mesh along the interface indicated by phi
    // (indicator threshold 0.95 on the "global" strategy).
    extensions::RefinementExpression(getMesh()).refine(function_(vecProb_.indicator("global", 0.95), valueOf(*phi_) ));

    nsProb_.closeTimestep(adaptInfo);
    fileWriter_->writeFiles(adaptInfo, false);
  }

  /// Adds the two-way coupling operators: advection into every phase-field
  /// problem, and the capillary force v0 * (mu grad(phi)) into each NS
  /// velocity component equation.
  virtual void fillCouplingOperators() override
  {
    for (int i = 0; i < vecProb_.getNumProblems(); ++i)
      fillCouplingOperatorsVec(vecProb_.getProblem(i), i);

    for (int i = 0; i < dow_; ++i) {
      // force term in NS-equation (one zero-order term per world dimension);
      // ownership of the Operator passes to the problem (framework pattern).
      Operator* opInterface = new Operator(nsProb_.getFeSpace(i));
      addZOT(opInterface, v0_*componentOf(*force_,i));
      nsProb_.getProblem()->addVectorOperator(opInterface, i);
    }
  }

  /// Adds the advection term -v . grad(psi) (first-order, GRD_PSI) for
  /// sub-problem i, using that problem's interpolated velocity field.
  void fillCouplingOperatorsVec(ProblemStat* prob, int i)
  {
    Operator* opAdvect = new Operator(prob->getFeSpace(0));
    addFOT(opAdvect, -valueOf(velocities_[i]), GRD_PSI);
    prob->addMatrixOperator(opAdvect, 0,0);
  }

protected:
  /// Global (Navier-Stokes) mesh.
  Mesh* getMesh() { return nsProb_.getMesh(0); }
  /// Local mesh of phase-field sub-problem i.
  Mesh* getMesh(int i) { return vecProb_.getMesh(i); }

  RefinementManager* getRefinementManager() { return nsProb_.getProblem()->getRefinementManager(); }
  CoarseningManager* getCoarseningManager() { return nsProb_.getProblem()->getCoarseningManager(); }

  /// Re-fits the global mesh to the merged refinement structure of all local
  /// meshes, so every local mesh is a sub-structure of the global one.
  void adapt_global_mesh()
  {
    MeshStructure meshStructure{};
    meshStructure.init(getMesh());

    for (int i = 0; i < vecProb_.getNumProblems(); ++i) {
      MeshStructure m{};
      m.init(getMesh(i));
      meshStructure.merge(&m);
    }

    meshStructure.fitMeshToStructure(getMesh(), getRefinementManager(), false, -1, true);
  }

  /// Interpolates solution component `comp` of every local sub-problem onto
  /// the corresponding global (NS-mesh) vector in `globalSolution`.
  void local_to_global(int comp, std::vector<std::unique_ptr<DOFVector<double>>>& globalSolution)
  {
    assert( globalSolution.size() == vecProb_.getNumProblems() );

    // copy local vector to global vector in parallel
    // NOTE(review): same interpol thread-safety assumption as initTimestep().
    #pragma omp parallel for
    for (int i = 0; i < vecProb_.getNumProblems(); ++i)
      globalSolution[i]->interpol(vecProb_.getProblem(i)->getSolution(comp));
  }

  /// Merges the per-problem phase fields into one global field by taking the
  /// pointwise maximum, starting from the bulk value -1.
  void make_phi(std::vector<std::unique_ptr<DOFVector<double>>> const& globalSolution, DOFVector<double>& phi) const
  {
    assert( globalSolution.size() == vecProb_.getNumProblems() );

    Max<double> f{};
    // merge individual global vectors
    phi.set(-1.0);
    for (int i = 0; i < vecProb_.getNumProblems(); ++i)
      transformDOF(globalSolution[i].get(), &phi, &phi, &f);
  }

  /// Accumulates the capillary force  sum_i mu_i * grad(phi_i)  into `force`.
  /// Requires at least one sub-problem (unchecked access to phi[0]/mu[0]).
  void make_force(std::vector<std::unique_ptr<DOFVector<double>>> const& phi, std::vector<std::unique_ptr<DOFVector<double>>> const& mu, DOFVector<WorldVector<double>>& force) const
  {
    assert( phi.size() == mu.size() && phi.size() == vecProb_.getNumProblems() );

    force << valueOf(*mu[0]) * gradientOf(*phi[0]);
    for (int i = 1; i < vecProb_.getNumProblems(); ++i)
      force << valueOf(force) + valueOf(*mu[i]) * gradientOf(*phi[i]);
  }

private:
  base_problems::PhaseFieldProblem& vecProb_;   // local phase-field problems
  base_problems::NavierStokesProblem& nsProb_;  // global flow problem

  std::unique_ptr<DOFVector<double>> phi_;                 // merged global phase field
  std::unique_ptr<DOFVector<WorldVector<double>>> force_;  // sum_i mu_i grad(phi_i)

  std::vector<std::unique_ptr<DOFVector<double>>> phis_;   // per-problem phi on NS mesh
  std::vector<std::unique_ptr<DOFVector<double>>> mus_;    // per-problem mu on NS mesh

  // NS velocity interpolated onto each local fe-space (owning storage) ...
  std::vector<std::unique_ptr<DOFVector<double>>> velocitiesX_, velocitiesY_;
  // ... and non-owning (x, y) views used by the advection operators.
  std::vector<WorldVector<DOFVector<double>*>> velocities_;

  std::unique_ptr<FileWriter> fileWriter_;

  double v0_ = 1.0;                    // force scaling, init-file "main->v0"
  int dow_ = Global::getGeo(WORLD);    // dimension of world
};

} } // end namespaces
fig4.32-private-clause.c
/* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. Copyright 2009 Sun Microsystems, Inc. All rights reserved. The contents of this file are subject to the terms of the BSD License("BSD")(the "License"). You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt The BSD License Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistribution of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistribution in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Sun Microsystems, Inc. or the names of contributors may be used to endorse or promote products derived from this software without specific prior written permission. This software is provided "AS IS," without a warranty of any kind. ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. You acknowledge that this software is not designed, licensed or intended for use in the design, construction, operation or maintenance of any nuclear facility. 
*/ #include <stdio.h> #include <stdlib.h> #ifdef _OPENMP #include <omp.h> #define TRUE 1 #define FALSE 0 #else #define omp_get_thread_num() 0 #define omp_get_num_threads() 1 #endif int main() { int i, n = 5; int a; #ifdef _OPENMP (void) omp_set_dynamic(FALSE); if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");} (void) omp_set_num_threads(3); #endif #pragma omp parallel for private(i,a) for (i=0; i<n; i++) { a = i+1; printf("Thread %d has a value of a = %d for i = %d\n", omp_get_thread_num(),a,i); } /*-- End of parallel for --*/ return(0); }
tinyexr.h
#ifndef TINYEXR_H_ #define TINYEXR_H_ /* Copyright (c) 2014 - 2020, Syoyo Fujita and many contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////// // End of OpenEXR license ------------------------------------------------- // // // Do this: // #define TINYEXR_IMPLEMENTATION // before you include this file in *one* C or C++ file to create the // implementation. // // // i.e. it should look like this: // #include ... // #include ... // #include ... 
// #define TINYEXR_IMPLEMENTATION
// #include "tinyexr.h"
//
//

#include <stddef.h>  // for size_t
#include <stdint.h>  // guess stdint.h is available(C99)

#ifdef __cplusplus
extern "C" {
#endif

#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \
    defined(__i386) || defined(__i486__) || defined(__i486) || \
    defined(i386) || defined(__ia64__) || defined(__x86_64__)
#define TINYEXR_X86_OR_X64_CPU 1
#else
#define TINYEXR_X86_OR_X64_CPU 0
#endif

#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || TINYEXR_X86_OR_X64_CPU
#define TINYEXR_LITTLE_ENDIAN 1
#else
#define TINYEXR_LITTLE_ENDIAN 0
#endif

// Use miniz or not to decode ZIP format pixel. Linking with zlib
// required if this flag is 0.
#ifndef TINYEXR_USE_MINIZ
#define TINYEXR_USE_MINIZ (1)
#endif

// Disable PIZ compression when applying cpplint.
#ifndef TINYEXR_USE_PIZ
#define TINYEXR_USE_PIZ (1)
#endif

#ifndef TINYEXR_USE_ZFP
#define TINYEXR_USE_ZFP (0)  // TinyEXR extension.
// http://computation.llnl.gov/projects/floating-point-compression
#endif

#ifndef TINYEXR_USE_THREAD
#define TINYEXR_USE_THREAD (0)  // No threaded loading.
// http://computation.llnl.gov/projects/floating-point-compression #endif #ifndef TINYEXR_USE_OPENMP #ifdef _OPENMP #define TINYEXR_USE_OPENMP (1) #else #define TINYEXR_USE_OPENMP (0) #endif #endif #define TINYEXR_SUCCESS (0) #define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1) #define TINYEXR_ERROR_INVALID_EXR_VERSION (-2) #define TINYEXR_ERROR_INVALID_ARGUMENT (-3) #define TINYEXR_ERROR_INVALID_DATA (-4) #define TINYEXR_ERROR_INVALID_FILE (-5) #define TINYEXR_ERROR_INVALID_PARAMETER (-6) #define TINYEXR_ERROR_CANT_OPEN_FILE (-7) #define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8) #define TINYEXR_ERROR_INVALID_HEADER (-9) #define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10) #define TINYEXR_ERROR_CANT_WRITE_FILE (-11) #define TINYEXR_ERROR_SERIALZATION_FAILED (-12) #define TINYEXR_ERROR_LAYER_NOT_FOUND (-13) // @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf } // pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2 #define TINYEXR_PIXELTYPE_UINT (0) #define TINYEXR_PIXELTYPE_HALF (1) #define TINYEXR_PIXELTYPE_FLOAT (2) #define TINYEXR_MAX_HEADER_ATTRIBUTES (1024) #define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128) #define TINYEXR_COMPRESSIONTYPE_NONE (0) #define TINYEXR_COMPRESSIONTYPE_RLE (1) #define TINYEXR_COMPRESSIONTYPE_ZIPS (2) #define TINYEXR_COMPRESSIONTYPE_ZIP (3) #define TINYEXR_COMPRESSIONTYPE_PIZ (4) #define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension #define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0) #define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1) #define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2) #define TINYEXR_TILE_ONE_LEVEL (0) #define TINYEXR_TILE_MIPMAP_LEVELS (1) #define TINYEXR_TILE_RIPMAP_LEVELS (2) #define TINYEXR_TILE_ROUND_DOWN (0) #define TINYEXR_TILE_ROUND_UP (1) typedef struct _EXRVersion { int version; // this must be 2 // tile format image; // not zero for only a single-part "normal" tiled file (according to spec.) 
int tiled; int long_name; // long name attribute // deep image(EXR 2.0); // for a multi-part file, indicates that at least one part is of type deep* (according to spec.) int non_image; int multipart; // multi-part(EXR 2.0) } EXRVersion; typedef struct _EXRAttribute { char name[256]; // name and type are up to 255 chars long. char type[256]; unsigned char *value; // uint8_t* int size; int pad0; } EXRAttribute; typedef struct _EXRChannelInfo { char name[256]; // less than 255 bytes long int pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } EXRChannelInfo; typedef struct _EXRTile { int offset_x; int offset_y; int level_x; int level_y; int width; // actual width in a tile. int height; // actual height int a tile. unsigned char **images; // image[channels][pixels] } EXRTile; typedef struct _EXRBox2i { int min_x; int min_y; int max_x; int max_y; } EXRBox2i; typedef struct _EXRHeader { float pixel_aspect_ratio; int line_order; EXRBox2i data_window; EXRBox2i display_window; float screen_window_center[2]; float screen_window_width; int chunk_count; // Properties for tiled format(`tiledesc`). int tiled; int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; int long_name; // for a single-part file, agree with the version field bit 11 // for a multi-part file, it is consistent with the type of part int non_image; int multipart; unsigned int header_len; // Custom attributes(exludes required attributes(e.g. `channels`, // `compression`, etc) int num_custom_attributes; EXRAttribute *custom_attributes; // array of EXRAttribute. size = // `num_custom_attributes`. EXRChannelInfo *channels; // [num_channels] int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for // each channel. This is overwritten with `requested_pixel_types` when // loading. 
int num_channels; int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*) int *requested_pixel_types; // Filled initially by // ParseEXRHeaderFrom(Meomory|File), then users // can edit it(only valid for HALF pixel type // channel) // name attribute required for multipart files; // must be unique and non empty (according to spec.); // use EXRSetNameAttr for setting value; // max 255 character allowed - excluding terminating zero char name[256]; } EXRHeader; typedef struct _EXRMultiPartHeader { int num_headers; EXRHeader *headers; } EXRMultiPartHeader; typedef struct _EXRImage { EXRTile *tiles; // Tiled pixel data. The application must reconstruct image // from tiles manually. NULL if scanline format. struct _EXRImage* next_level; // NULL if scanline format or image is the last level. int level_x; // x level index int level_y; // y level index unsigned char **images; // image[channels][pixels]. NULL if tiled format. int width; int height; int num_channels; // Properties for tile format. int num_tiles; } EXRImage; typedef struct _EXRMultiPartImage { int num_images; EXRImage *images; } EXRMultiPartImage; typedef struct _DeepImage { const char **channel_names; float ***image; // image[channels][scanlines][samples] int **offset_table; // offset_table[scanline][offsets] int num_channels; int width; int height; int pad0; } DeepImage; // @deprecated { For backward compatibility. Not recommended to use. } // Loads single-frame OpenEXR image. Assume EXR image contains A(single channel // alpha) or RGB(A) channels. // Application must free image data as returned by `out_rgba` // Result image format is: float x RGBA x width x hight // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err); // Loads single-frame OpenEXR image by specifying layer name. Assume EXR image // contains A(single channel alpha) or RGB(A) channels. 
Application must free // image data as returned by `out_rgba` Result image format is: float x RGBA x // width x hight Returns negative value and may set error string in `err` when // there's an error When the specified layer name is not found in the EXR file, // the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`. extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height, const char *filename, const char *layer_name, const char **err); // // Get layer infos from EXR file. // // @param[out] layer_names List of layer names. Application must free memory // after using this. // @param[out] num_layers The number of layers // @param[out] err Error string(will be filled when the function returns error // code). Free it using FreeEXRErrorMessage after using this value. // // @return TINYEXR_SUCCEES upon success. // extern int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err); // @deprecated { to be removed. } // Simple wrapper API for ParseEXRHeaderFromFile. // checking given file is a EXR file(by just look up header) // @return TINYEXR_SUCCEES for EXR image, TINYEXR_ERROR_INVALID_HEADER for // others extern int IsEXR(const char *filename); // @deprecated { to be removed. } // Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels. // components must be 1(Grayscale), 3(RGB) or 4(RGBA). // Input image format is: `float x width x height`, or `float x RGB(A) x width x // hight` // Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero // value. // Save image as fp32(FLOAT) format when `save_as_fp16` is 0. // Use ZIP compression by default. 
// Returns negative value and may set error string in `err` when there's an // error extern int SaveEXR(const float *data, const int width, const int height, const int components, const int save_as_fp16, const char *filename, const char **err); // Returns the number of resolution levels of the image (including the base) extern int EXRNumLevels(const EXRImage* exr_image); // Initialize EXRHeader struct extern void InitEXRHeader(EXRHeader *exr_header); // Set name attribute of EXRHeader struct (it makes a copy) extern void EXRSetNameAttr(EXRHeader *exr_header, const char* name); // Initialize EXRImage struct extern void InitEXRImage(EXRImage *exr_image); // Frees internal data of EXRHeader struct extern int FreeEXRHeader(EXRHeader *exr_header); // Frees internal data of EXRImage struct extern int FreeEXRImage(EXRImage *exr_image); // Frees error message extern void FreeEXRErrorMessage(const char *msg); // Parse EXR version header of a file. extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename); // Parse EXR version header from memory-mapped EXR data. extern int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size); // Parse single-part OpenEXR header from a file and initialize `EXRHeader`. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version, const char *filename, const char **err); // Parse single-part OpenEXR header from a memory and initialize `EXRHeader`. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromMemory(EXRHeader *header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*` // array. 
// When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const char *filename, const char **err); // Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*` // array // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Loads single-part OpenEXR image from a file. // Application must setup `ParseEXRHeaderFromFile` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header, const char *filename, const char **err); // Loads single-part OpenEXR image from a memory. // Application must setup `EXRHeader` with // `ParseEXRHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header, const unsigned char *memory, const size_t size, const char **err); // Loads multi-part OpenEXR image from a file. // Application must setup `ParseEXRMultipartHeaderFromFile` before calling this // function. 
// Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromFile(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const char *filename, const char **err); // Loads multi-part OpenEXR image from a memory. // Application must setup `EXRHeader*` array with // `ParseEXRMultipartHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromMemory(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err); // Saves multi-channel, single-frame OpenEXR image to a file. // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int SaveEXRImageToFile(const EXRImage *image, const EXRHeader *exr_header, const char *filename, const char **err); // Saves multi-channel, single-frame OpenEXR image to a memory. // Image is compressed using EXRImage.compression value. // Return the number of bytes if success. // Return zero and will set error string in `err` when there's an // error. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern size_t SaveEXRImageToMemory(const EXRImage *image, const EXRHeader *exr_header, unsigned char **memory, const char **err); // Saves multi-channel, multi-frame OpenEXR image to a memory. // Image is compressed using EXRImage.compression value. // File global attributes (eg. display_window) must be set in the first header. 
// Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int SaveEXRMultipartImageToFile(const EXRImage *images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err); // Saves multi-channel, multi-frame OpenEXR image to a memory. // Image is compressed using EXRImage.compression value. // File global attributes (eg. display_window) must be set in the first header. // Return the number of bytes if success. // Return zero and will set error string in `err` when there's an // error. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern size_t SaveEXRMultipartImageToMemory(const EXRImage *images, const EXRHeader **exr_headers, unsigned int num_parts, unsigned char **memory, const char **err); // Loads single-frame OpenEXR deep image. // Application must free memory of variables in DeepImage(image, offset_table) // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadDeepEXR(DeepImage *out_image, const char *filename, const char **err); // NOT YET IMPLEMENTED: // Saves single-frame OpenEXR deep image. // Returns negative value and may set error string in `err` when there's an // error // extern int SaveDeepEXR(const DeepImage *in_image, const char *filename, // const char **err); // NOT YET IMPLEMENTED: // Loads multi-part OpenEXR deep image. // Application must free memory of variables in DeepImage(image, offset_table) // extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const // char *filename, // const char **err); // For emscripten. // Loads single-frame OpenEXR image from memory. Assume EXR image contains // RGB(A) channels. 
// Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err); #ifdef __cplusplus } #endif #endif // TINYEXR_H_ #ifdef TINYEXR_IMPLEMENTATION #ifndef TINYEXR_IMPLEMENTATION_DEFINED #define TINYEXR_IMPLEMENTATION_DEFINED #ifdef _WIN32 #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif #ifndef NOMINMAX #define NOMINMAX #endif #include <windows.h> // for UTF-8 #endif #include <algorithm> #include <cassert> #include <cstdio> #include <cstdlib> #include <cstring> #include <sstream> // #include <iostream> // debug #include <limits> #include <string> #include <vector> #include <set> // https://stackoverflow.com/questions/5047971/how-do-i-check-for-c11-support #if __cplusplus > 199711L || (defined(_MSC_VER) && _MSC_VER >= 1900) #define TINYEXR_HAS_CXX11 (1) // C++11 #include <cstdint> #if TINYEXR_USE_THREAD #include <atomic> #include <thread> #endif #endif // __cplusplus > 199711L #if TINYEXR_USE_OPENMP #include <omp.h> #endif #if TINYEXR_USE_MINIZ #include <miniz.h> #else // Issue #46. Please include your own zlib-compatible API header before // including `tinyexr.h` //#include "zlib.h" #endif #if TINYEXR_USE_ZFP #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Weverything" #endif #include "zfp.h" #ifdef __clang__ #pragma clang diagnostic pop #endif #endif namespace tinyexr { #if __cplusplus > 199711L // C++11 typedef uint64_t tinyexr_uint64; typedef int64_t tinyexr_int64; #else // Although `long long` is not a standard type pre C++11, assume it is defined // as a compiler's extension. 
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#endif
// Fallback 64-bit integer typedefs for pre-C++11 compilers (`long long` is a
// widely supported compiler extension there).
typedef unsigned long long tinyexr_uint64;
typedef long long tinyexr_int64;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif

// static bool IsBigEndian(void) {
//  union {
//   unsigned int i;
//   char c[4];
//  } bint = {0x01020304};
//
//  return bint.c[0] == 1;
//}

// Stores a heap-allocated copy of `msg` into `*err`; no-op when `err` is
// NULL. The caller must release the string (FreeEXRErrorMessage()).
static void SetErrorMessage(const std::string &msg, const char **err) {
  if (err) {
#ifdef _WIN32
    (*err) = _strdup(msg.c_str());
#else
    (*err) = strdup(msg.c_str());
#endif
  }
}

// Size in bytes of the EXR version header (magic number + version field).
static const int kEXRVersionSize = 8;

// Byte-wise copy of a 2-byte value (safe for unaligned source/destination).
static void cpy2(unsigned short *dst_val, const unsigned short *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);

  dst[0] = src[0];
  dst[1] = src[1];
}

// Byte-swaps a 2-byte value in place on big-endian targets; no-op on
// little-endian targets (EXR data is little-endian on disk).
static void swap2(unsigned short *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  unsigned short tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);

  dst[0] = src[1];
  dst[1] = src[0];
#endif
}

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-function"
#endif

#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-function"
#endif

// Byte-wise copy of a 4-byte int (safe for unaligned access).
static void cpy4(int *dst_val, const int *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);

  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
}

// Byte-wise copy of a 4-byte unsigned int (safe for unaligned access).
static void cpy4(unsigned int *dst_val, const unsigned int *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);

  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
}

// Byte-wise copy of a 4-byte float (safe for unaligned access; avoids
// type-punned loads).
static void cpy4(float *dst_val, const float *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);

  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif

#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif

// Byte-swaps a 4-byte value in place on big-endian targets; no-op on
// little-endian targets.
static void swap4(unsigned int *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  unsigned int tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);

  dst[0] = src[3];
  dst[1] = src[2];
  dst[2] = src[1];
  dst[3] = src[0];
#endif
}

static void swap4(int *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  int tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);

  dst[0] = src[3];
  dst[1] = src[2];
  dst[2] = src[1];
  dst[3] = src[0];
#endif
}

static void swap4(float *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  float tmp = *val;
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);

  dst[0] = src[3];
  dst[1] = src[2];
  dst[2] = src[1];
  dst[3] = src[0];
#endif
}

#if 0
static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) {
  unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val);
  const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val);

  dst[0] = src[0];
  dst[1] = src[1];
  dst[2] = src[2];
  dst[3] = src[3];
  dst[4] = src[4];
  dst[5] = src[5];
  dst[6] = src[6];
  dst[7] = src[7];
}
#endif

// Byte-swaps an 8-byte value in place on big-endian targets; no-op on
// little-endian targets.
static void swap8(tinyexr::tinyexr_uint64 *val) {
#ifdef TINYEXR_LITTLE_ENDIAN
  (void)val;
#else
  tinyexr::tinyexr_uint64 tmp = (*val);
  unsigned char *dst = reinterpret_cast<unsigned char *>(val);
  unsigned char *src = reinterpret_cast<unsigned char *>(&tmp);

  dst[0] = src[7];
  dst[1] = src[6];
  dst[2] = src[5];
  dst[3] = src[4];
  dst[4] = src[3];
  dst[5] = src[2];
  dst[6] = src[1];
  dst[7] = src[0];
#endif
}

// half <-> float conversion helpers below are based on
// https://gist.github.com/rygorous/2156668
union
FP32 {
  unsigned int u;
  float f;
  struct {
#if TINYEXR_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif

// Bit-level view of an IEEE 754 half-precision (16-bit) float.
union FP16 {
  unsigned short u;
  struct {
#if TINYEXR_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// Converts a 16-bit half to a 32-bit float, preserving Inf/NaN and
// renormalizing denormals via the `magic` constant trick.
static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00 << 13;  // exponent mask after shift
  FP32 o;

  o.u = (h.u & 0x7fffU) << 13U;  // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;  // exponent adjust

  // handle exponent special cases
  if (exp_ == shifted_exp)  // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)  // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize
  }

  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}

// Converts a 32-bit float to a 16-bit half with round-to-nearest,
// handling overflow (-> Inf), underflow (-> denormal/zero) and NaN.
static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};

  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else  // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;  // Round, might overflow to inf, this is OK
    }
  }

  o.s.Sign = f.s.Sign;
  return o;
}

// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y  0
// #define IMF_DECREASING_Y  1
// #define IMF_RAMDOM_Y    2
//
// #define IMF_NO_COMPRESSION  0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION  2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION  7

#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif

// Reads a NUL-terminated string of at most `len` bytes starting at `ptr`
// into `*s`. Returns the pointer one past the terminating '\0', or NULL
// (with `*s` cleared) when no terminator is found within `len` bytes.
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NULL(\0).
  const char *p = ptr;
  const char *q = ptr;
  while ((size_t(q - ptr) < len) && (*q) != 0) {
    q++;
  }

  if (size_t(q - ptr) >= len) {
    (*s) = std::string();
    return NULL;
  }

  (*s) = std::string(p, q);

  return q + 1;  // skip '\0'
}

// Parses one EXR header attribute ("name\0 type\0 uint32-size data") from
// `marker`/`size`. On success fills name/type/data, sets `*marker_size` to
// the number of bytes consumed and returns true; returns false on any
// truncated or malformed input.
static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data, size_t *marker_size,
                          const char *marker, size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);

  marker += name_len + 1;
  size -= name_len + 1;

  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    return false;
  }
  *type = std::string(marker, type_len);

  marker += type_len + 1;
  size -= type_len + 1;

  if (size < sizeof(uint32_t)) {
    return false;
  }

  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

  if (data_len == 0) {
    if ((*type).compare("string") == 0) {
      // Accept empty string attribute.

      marker += sizeof(uint32_t);
      size -= sizeof(uint32_t);

      *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);

      // Represent the empty string as a single '\0' byte.
      data->resize(1);
      (*data)[0] = '\0';

      return true;
    } else {
      return false;
    }
  }

  marker += sizeof(uint32_t);
  size -= sizeof(uint32_t);

  if (size < data_len) {
    return false;
  }

  data->resize(static_cast<size_t>(data_len));
  memcpy(&data->at(0), marker, static_cast<size_t>(data_len));

  *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  return true;
}

// Appends a serialized attribute ("name\0 type\0 little-endian-size data")
// to `out`. Inverse of ReadAttribute.
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  out->insert(out->end(), name, name + strlen(name) + 1);
  out->insert(out->end(), type, type + strlen(type) + 1);

  int outLen = len;
  tinyexr::swap4(&outLen);
  out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
              reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
  out->insert(out->end(), data, data + len);
}

// Per-channel metadata parsed from / written to the EXR "channels"
// attribute.
typedef struct {
  std::string name;  // less than 255 bytes long
  int pixel_type;            // pixel type as stored in the file
  int requested_pixel_type;  // pixel type to convert to on load/save
  int x_sampling;
  int y_sampling;
  unsigned char p_linear;
  unsigned char pad[3];
} ChannelInfo;

// Integer box (min/max corners) used for data_window/display_window.
typedef struct {
  int min_x;
  int min_y;
  int max_x;
  int max_y;
} Box2iInfo;

// Aggregated, decoded contents of one EXR part header.
struct HeaderInfo {
  std::vector<tinyexr::ChannelInfo> channels;
  std::vector<EXRAttribute> attributes;

  Box2iInfo data_window;
  int line_order;
  Box2iInfo display_window;
  float screen_window_center[2];
  float screen_window_width;
  float pixel_aspect_ratio;

  int chunk_count;

  // Tiled format
  int tiled;  // Non-zero if the part is tiled.
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  unsigned int header_len;

  int compression_type;

  // required for multi-part or non-image files
  std::string name;
  // required for multi-part or non-image files
  std::string type;

  // Resets every field to its zero/empty state so the struct can be reused.
  void clear() {
    channels.clear();
    attributes.clear();

    data_window.min_x = 0;
    data_window.min_y = 0;
    data_window.max_x = 0;
    data_window.max_y = 0;
    line_order = 0;
    display_window.min_x = 0;
    display_window.min_y = 0;
    display_window.max_x = 0;
    display_window.max_y = 0;
    screen_window_center[0] = 0.0f;
    screen_window_center[1] = 0.0f;
    screen_window_width = 0.0f;
    pixel_aspect_ratio = 0.0f;

    chunk_count = 0;

    // Tiled format
    tiled = 0;
    tile_size_x = 0;
    tile_size_y = 0;
    tile_level_mode = 0;
    tile_rounding_mode = 0;

    header_len = 0;
    compression_type = 0;

    name.clear();
    type.clear();
  }
};

// Parses the EXR "chlist" payload in `data` (a sequence of
// "name\0 pixel_type pLinear reserved[3] xSampling ySampling" records,
// terminated by a single 0 byte) into `channels`. Returns false on a
// truncated or overrunning record.
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
                            const std::vector<unsigned char> &data) {
  const char *p = reinterpret_cast<const char *>(&data.at(0));

  for (;;) {
    if ((*p) == 0) {
      break;
    }
    ChannelInfo info;

    tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
                             (p - reinterpret_cast<const char *>(data.data()));
    if (data_len < 0) {
      return false;
    }

    p = ReadString(&info.name, p, size_t(data_len));
    if ((p == NULL) && (info.name.empty())) {
      // Buffer overrun. Issue #51.
      return false;
    }

    // Each channel record carries 16 more bytes after the name; make sure
    // they are inside the buffer before reading them.
    const unsigned char *data_end = reinterpret_cast<const unsigned char *>(p) + 16;
    if (data_end >= (data.data() + data.size())) {
      return false;
    }

    memcpy(&info.pixel_type, p, sizeof(int));
    p += 4;
    info.p_linear = static_cast<unsigned char>(p[0]);  // uchar
    p += 1 + 3;  // reserved: uchar[3]
    memcpy(&info.x_sampling, p, sizeof(int));  // int
    p += 4;
    memcpy(&info.y_sampling, p, sizeof(int));  // int
    p += 4;

    // File data is little-endian; swap on big-endian hosts.
    tinyexr::swap4(&info.pixel_type);
    tinyexr::swap4(&info.x_sampling);
    tinyexr::swap4(&info.y_sampling);

    channels.push_back(info);
  }

  return true;
}

// Serializes `channels` into the EXR "chlist" wire format (inverse of
// ReadChannelInfo); writes the `requested_pixel_type` as the stored type.
static void WriteChannelInfo(std::vector<unsigned char> &data,
                             const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;

  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += strlen(channels[c].name.c_str()) + 1;  // +1 for \0
    sz += 16;  // 4 * int
  }
  data.resize(sz + 1);  // +1 for the terminating 0 byte (zero-initialized)

  unsigned char *p = &data.at(0);

  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
    p += strlen(channels[c].name.c_str());
    (*p) = '\0';
    p++;

    int pixel_type = channels[c].requested_pixel_type;
    int x_sampling = channels[c].x_sampling;
    int y_sampling = channels[c].y_sampling;
    tinyexr::swap4(&pixel_type);
    tinyexr::swap4(&x_sampling);
    tinyexr::swap4(&y_sampling);

    memcpy(p, &pixel_type, sizeof(int));
    p += sizeof(int);

    (*p) = channels[c].p_linear;
    p += 4;  // 1 byte p_linear + 3 reserved bytes (left zeroed by resize)

    memcpy(p, &x_sampling, sizeof(int));
    p += sizeof(int);

    memcpy(p, &y_sampling, sizeof(int));
    p += sizeof(int);
  }

  (*p) = '\0';
}

// ZIP-compresses `src` into `dst` (which must be at least compressBound
// bytes). Applies OpenEXR's byte reordering + delta predictor before
// deflating; falls back to a raw copy when compression does not shrink the
// data (Issue 40). `compressedSize` receives the output byte count.
static void CompressZip(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  //
  // Reorder the pixel data: even-indexed bytes to the first half of the
  // buffer, odd-indexed bytes to the second half.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor: replace each byte with its biased delta from the previous
  // byte, which makes the stream more compressible.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //

  mz_ulong outSize = mz_compressBound(src_size);
  int ret = mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == MZ_OK);
  (void)ret;

  compressedSize = outSize;
#else
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
                     src_size);
  assert(ret == Z_OK);

  compressedSize = outSize;
#endif

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

// Inflates a ZIP-compressed EXR chunk into `dst` and undoes the delta
// predictor and byte reordering applied by CompressZip. Returns false on
// inflate failure. When `src_size` equals the expected uncompressed size
// the chunk is stored raw and just copied (Issue 40).
static bool DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  if ((*uncompressed_size) == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  std::vector<unsigned char> tmpBuf(*uncompressed_size);

#if TINYEXR_USE_MINIZ
  int ret = mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (MZ_OK != ret) {
    return false;
  }
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  // Predictor (inverse of the delta encoding done by CompressZip).
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data (re-interleave the two halves).
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

// RLE code from OpenEXR --------------------------------------

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204)  // nonstandard extension used : non-constant
                                 // aggregate initializer (also supported by GNU
                                 // C and C99, so no big deal)
#pragma warning(disable : 4244)  // 'initializing': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4267)  // 'argument': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4996)  // 'strdup': The POSIX name for this item is
                                 // deprecated. Instead, use the ISO C and C++
                                 // conformant name: _strdup.
#endif

const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;

//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
//

static int rleCompress(int inLength, const char in[], signed char out[]) {
  const char *inEnd = in + inLength;
  const char *runStart = in;
  const char *runEnd = in + 1;
  signed char *outWrite = out;

  while (runStart < inEnd) {
    // Extend the current run while the byte repeats (capped at
    // MAX_RUN_LENGTH).
    while (runEnd < inEnd && *runStart == *runEnd &&
           runEnd - runStart - 1 < MAX_RUN_LENGTH) {
      ++runEnd;
    }

    if (runEnd - runStart >= MIN_RUN_LENGTH) {
      //
      // Compressible run: emit (count - 1, byte).
      //

      *outWrite++ = static_cast<char>(runEnd - runStart) - 1;
      *outWrite++ = *(reinterpret_cast<const signed char *>(runStart));
      runStart = runEnd;
    } else {
      //
      // Uncompressable run: emit a negative count followed by the literal
      // bytes.
      //

      while (runEnd < inEnd &&
             ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) ||
              (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) &&
             runEnd - runStart < MAX_RUN_LENGTH) {
        ++runEnd;
      }

      *outWrite++ = static_cast<char>(runStart - runEnd);

      while (runStart < runEnd) {
        *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++));
      }
    }

    ++runEnd;
  }

  return static_cast<int>(outWrite - out);
}

//
// Uncompress an array of bytes compressed with rleCompress().
// Returns the length of the uncompressed data, or 0 if the
// length of the uncompressed data would be more than maxLength.
//

static int rleUncompress(int inLength, int maxLength, const signed char in[],
                         char out[]) {
  char *outStart = out;

  while (inLength > 0) {
    if (*in < 0) {
      // Negative count: the next -(*in) bytes are literals.
      int count = -(static_cast<int>(*in++));
      inLength -= count + 1;

      // Fixes #116: Add bounds check to in buffer.
      if ((0 > (maxLength -= count)) || (inLength < 0)) return 0;

      memcpy(out, in, count);
      out += count;
      in += count;
    } else {
      // Non-negative count: repeat the following byte (count + 1) times.
      int count = *in++;
      inLength -= 2;

      if (0 > (maxLength -= count + 1)) return 0;

      memset(out, *reinterpret_cast<const char *>(in), count + 1);
      out += count + 1;

      in++;
    }
  }

  return static_cast<int>(out - outStart);
}

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// End of RLE code from OpenEXR -----------------------------------

// RLE-compresses `src` into `dst` after applying the same byte
// reordering + delta predictor preprocessing as CompressZip; falls back to
// a raw copy when RLE does not shrink the data (Issue 40).
static void CompressRle(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // outSize will be (srcSiz * 3) / 2 at max.
  int outSize = rleCompress(static_cast<int>(src_size),
                            reinterpret_cast<const char *>(&tmpBuf.at(0)),
                            reinterpret_cast<signed char *>(dst));
  assert(outSize > 0);

  compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize);

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

// Decodes an RLE-compressed EXR chunk into `dst` and undoes the delta
// predictor and byte reordering. Returns false when the decoded size does
// not match `uncompressed_size` or the input is obviously too small.
static bool DecompressRle(unsigned char *dst,
                          const unsigned long uncompressed_size,
                          const unsigned char *src, unsigned long src_size) {
  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }

  // Workaround for issue #112.
  // TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`.
  if (src_size <= 2) {
    return false;
  }

  std::vector<unsigned char> tmpBuf(uncompressed_size);

  int ret = rleUncompress(static_cast<int>(src_size),
                          static_cast<int>(uncompressed_size),
                          reinterpret_cast<const signed char *>(src),
                          reinterpret_cast<char *>(&tmpBuf.at(0)));
  if (ret != static_cast<int>(uncompressed_size)) {
    return false;
  }

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfRleCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + uncompressed_size;

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + uncompressed_size;

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

#if TINYEXR_USE_PIZ

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wc++11-long-long"
#pragma clang diagnostic ignored "-Wold-style-cast"
#pragma clang diagnostic ignored "-Wpadded"
#pragma clang diagnostic ignored "-Wsign-conversion"
#pragma clang diagnostic ignored "-Wc++11-extensions"
#pragma clang diagnostic ignored "-Wconversion"
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"

#if __has_warning("-Wcast-qual")
#pragma clang diagnostic ignored "-Wcast-qual"
#endif

#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif

#endif

//
// PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp
//
// -----------------------------------------------------------------
// Copyright (c) 2004, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC)
// (3 clause BSD license)
//

// Per-channel bookkeeping used by the PIZ codec: a [start, end) range of
// 16-bit samples plus the channel's dimensions and y-sampling rate.
struct PIZChannelData {
  unsigned short *start;
  unsigned short *end;
  int nx;
  int ny;
  int ys;
  int size;
};

//-----------------------------------------------------------------------------
//
//  16-bit Haar Wavelet encoding and decoding
//
//  The source code in this file is derived from the encoding
//  and decoding routines written by Christian Rouet for his
//  PIZ image file format.
//
//-----------------------------------------------------------------------------

//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//

// Forward 14-bit wavelet step: `l` receives the truncated average of the
// pair and `h` their difference (stored as an unsigned 16-bit pattern).
inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  const short sa = static_cast<short>(a);
  const short sb = static_cast<short>(b);

  const short avg = static_cast<short>((sa + sb) >> 1);
  const short diff = static_cast<short>(sa - sb);

  l = static_cast<unsigned short>(avg);
  h = static_cast<unsigned short>(diff);
}

// Inverse of wenc14: reconstructs the original pair (a, b) from the
// average/difference pair (l, h), compensating for the truncation in the
// forward transform.
inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  const int avg = static_cast<short>(l);
  const int diff = static_cast<short>(h);

  const int ai = avg + (diff & 1) + (diff >> 1);

  a = static_cast<unsigned short>(static_cast<short>(ai));
  b = static_cast<unsigned short>(static_cast<short>(ai - diff));
}

//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//

const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;

// Forward 16-bit wavelet step using modulo arithmetic (works for the full
// 16-bit range, unlike wenc14).
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  int ao = (a + A_OFFSET) & MOD_MASK;
  int m = ((ao + b) >> 1);
  int d = ao - b;

  if (d < 0) m = (m + M_OFFSET) & MOD_MASK;

  d &= MOD_MASK;

  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(d);
}

// Inverse of wenc16.
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  int m = l;
  int d = h;
  int bb = (m - (d >> 1)) & MOD_MASK;
  int aa = (d + bb - A_OFFSET) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>(aa);
}

//
// 2D Wavelet encoding: transforms the nx-by-ny array in place, level by
// level. Uses the 14-bit basis when all values fit in 14 bits (mx < 2^14),
// otherwise the modulo 16-bit basis.
//

static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 <<  level
  int p2 = 2;  // == 1 << (level+1)

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet encoding
        //

        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }

      //
      // Encode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Encode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p = p2;
    p2 <<= 1;
  }
}

//
// 2D Wavelet decoding: exact inverse of wav2Encode, walking the levels
// from coarsest to finest.
//

static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;

  //
  // Search max level
  //

  while (p <= n) p <<= 1;

  p >>= 1;
  p2 = p;
  p >>= 1;

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet decoding
        //

        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }

      //
      // Decode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Decode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p2 = p;
    p >>= 1;
  }
}

//-----------------------------------------------------------------------------
//
//  16-bit Huffman compression and decompression.
//
//  The source code in this file is derived from the 8-bit
//  Huffman compression and decompression routines written
//  by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------

// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16; // literal (value) bit length const int HUF_DECBITS = 14; // decoding bit size (>= 8) const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size const int HUF_DECMASK = HUF_DECSIZE - 1; struct HufDec { // short code long code //------------------------------- unsigned int len : 8; // code length 0 unsigned int lit : 24; // lit p size unsigned int *p; // 0 lits }; inline long long hufLength(long long code) { return code & 63; } inline long long hufCode(long long code) { return code >> 6; } inline void outputBits(int nBits, long long bits, long long &c, int &lc, char *&out) { c <<= nBits; lc += nBits; c |= bits; while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8))); } inline long long getBits(int nBits, long long &c, int &lc, const char *&in) { while (lc < nBits) { c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++)); lc += 8; } lc -= nBits; return (c >> lc) & ((1 << nBits) - 1); } // // ENCODING TABLE BUILDING & (UN)PACKING // // // Build a "canonical" Huffman code table: // - for each (uncompressed) symbol, hcode contains the length // of the corresponding code (in the compressed data) // - canonical codes are computed and stored in hcode // - the rules for constructing canonical codes are as follows: // * shorter codes (if filled with zeroes to the right) // have a numerically higher value than longer codes // * for codes with the same length, numerical values // increase with numerical symbol values // - because the canonical code table can be constructed from // symbol lengths alone, the code table can be transmitted // without sending the actual code values // - see http://www.compressconsult.com/huffman/ // static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) { long long n[59]; // // For each i from 0 through 58, count the // number of different codes of length i, and // store the count in n[i]. 
// for (int i = 0; i <= 58; ++i) n[i] = 0; for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1; // // For each i from 58 through 1, compute the // numerically lowest code with length i, and // store that code in n[i]. // long long c = 0; for (int i = 58; i > 0; --i) { long long nc = ((c + n[i]) >> 1); n[i] = c; c = nc; } // // hcode[i] contains the length, l, of the // code for symbol i. Assign the next available // code of length l to the symbol and store both // l and the code in hcode[i]. // for (int i = 0; i < HUF_ENCSIZE; ++i) { int l = static_cast<int>(hcode[i]); if (l > 0) hcode[i] = l | (n[l]++ << 6); } } // // Compute Huffman codes (based on frq input) and store them in frq: // - code structure is : [63:lsb - 6:msb] | [5-0: bit length]; // - max code length is 58 bits; // - codes outside the range [im-iM] have a null length (unused values); // - original frequencies are destroyed; // - encoding tables are used by hufEncode() and hufBuildDecTable(); // struct FHeapCompare { bool operator()(long long *a, long long *b) { return *a > *b; } }; static void hufBuildEncTable( long long *frq, // io: input frequencies [HUF_ENCSIZE], output table int *im, // o: min frq index int *iM) // o: max frq index { // // This function assumes that when it is called, array frq // indicates the frequency of all possible symbols in the data // that are to be Huffman-encoded. (frq[i] contains the number // of occurrences of symbol i in the data.) // // The loop below does three things: // // 1) Finds the minimum and maximum indices that point // to non-zero entries in frq: // // frq[im] != 0, and frq[i] == 0 for all i < im // frq[iM] != 0, and frq[i] == 0 for all i > iM // // 2) Fills array fHeap with pointers to all non-zero // entries in frq. // // 3) Initializes array hlink such that hlink[i] == i // for all array entries. 
// std::vector<int> hlink(HUF_ENCSIZE); std::vector<long long *> fHeap(HUF_ENCSIZE); *im = 0; while (!frq[*im]) (*im)++; int nf = 0; for (int i = *im; i < HUF_ENCSIZE; i++) { hlink[i] = i; if (frq[i]) { fHeap[nf] = &frq[i]; nf++; *iM = i; } } // // Add a pseudo-symbol, with a frequency count of 1, to frq; // adjust the fHeap and hlink array accordingly. Function // hufEncode() uses the pseudo-symbol for run-length encoding. // (*iM)++; frq[*iM] = 1; fHeap[nf] = &frq[*iM]; nf++; // // Build an array, scode, such that scode[i] contains the number // of bits assigned to symbol i. Conceptually this is done by // constructing a tree whose leaves are the symbols with non-zero // frequency: // // Make a heap that contains all symbols with a non-zero frequency, // with the least frequent symbol on top. // // Repeat until only one symbol is left on the heap: // // Take the two least frequent symbols off the top of the heap. // Create a new node that has first two nodes as children, and // whose frequency is the sum of the frequencies of the first // two nodes. Put the new node back into the heap. // // The last node left on the heap is the root of the tree. For each // leaf node, the distance between the root and the leaf is the length // of the code for the corresponding symbol. // // The loop below doesn't actually build the tree; instead we compute // the distances of the leaves from the root on the fly. When a new // node is added to the heap, then that node's descendants are linked // into a single linear list that starts at the new node, and the code // lengths of the descendants (that is, their distance from the root // of the tree) are incremented by one. 
// std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); std::vector<long long> scode(HUF_ENCSIZE); memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE); while (nf > 1) { // // Find the indices, mm and m, of the two smallest non-zero frq // values in fHeap, add the smallest frq to the second-smallest // frq, and remove the smallest frq value from fHeap. // int mm = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); --nf; int m = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); frq[m] += frq[mm]; std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); // // The entries in scode are linked into lists with the // entries in hlink serving as "next" pointers and with // the end of a list marked by hlink[j] == j. // // Traverse the lists that start at scode[m] and scode[mm]. // For each element visited, increment the length of the // corresponding code by one bit. (If we visit scode[j] // during the traversal, then the code for symbol j becomes // one bit longer.) // // Merge the lists that start at scode[m] and scode[mm] // into a single list that starts at scode[m]. // // // Add a bit to all codes in the first list. // for (int j = m;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) { // // Merge the two lists. // hlink[j] = mm; break; } } // // Add a bit to all codes in the second list // for (int j = mm;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) break; } } // // Build a canonical Huffman code table, replacing the code // lengths in scode with (code, code length) pairs. Copy the // code table from scode into frq. 
// hufCanonicalCodeTable(scode.data()); memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE); } // // Pack an encoding table: // - only code lengths, not actual codes, are stored // - runs of zeroes are compressed as follows: // // unpacked packed // -------------------------------- // 1 zero 0 (6 bits) // 2 zeroes 59 // 3 zeroes 60 // 4 zeroes 61 // 5 zeroes 62 // n zeroes (6 or more) 63 n-6 (6 + 8 bits) // const int SHORT_ZEROCODE_RUN = 59; const int LONG_ZEROCODE_RUN = 63; const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN; const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN; static void hufPackEncTable( const long long *hcode, // i : encoding table [HUF_ENCSIZE] int im, // i : min hcode index int iM, // i : max hcode index char **pcode) // o: ptr to packed table (updated) { char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { int l = hufLength(hcode[im]); if (l == 0) { int zerun = 1; while ((im < iM) && (zerun < LONGEST_LONG_RUN)) { if (hufLength(hcode[im + 1]) > 0) break; im++; zerun++; } if (zerun >= 2) { if (zerun >= SHORTEST_LONG_RUN) { outputBits(6, LONG_ZEROCODE_RUN, c, lc, p); outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p); } else { outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p); } continue; } } outputBits(6, l, c, lc, p); } if (lc > 0) *p++ = (unsigned char)(c << (8 - lc)); *pcode = p; } // // Unpack an encoding table packed by hufPackEncTable(): // static bool hufUnpackEncTable( const char **pcode, // io: ptr to packed table (updated) int ni, // i : input size (in bytes) int im, // i : min hcode index int iM, // i : max hcode index long long *hcode) // o: encoding table [HUF_ENCSIZE] { memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE); const char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { if (p - *pcode >= ni) { return false; } long long l = hcode[im] = getBits(6, c, lc, p); // code length if (l == (long long)LONG_ZEROCODE_RUN) { if (p - *pcode > ni) { return false; } 
int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } else if (l >= (long long)SHORT_ZEROCODE_RUN) { int zerun = l - SHORT_ZEROCODE_RUN + 2; if (im + zerun > iM + 1) { return false; } while (zerun--) hcode[im++] = 0; im--; } } *pcode = const_cast<char *>(p); hufCanonicalCodeTable(hcode); return true; } // // DECODING TABLE BUILDING // // // Clear a newly allocated decoding table so that it contains only zeroes. // static void hufClearDecTable(HufDec *hdecod) // io: (allocated by caller) // decoding table [HUF_DECSIZE] { for (int i = 0; i < HUF_DECSIZE; i++) { hdecod[i].len = 0; hdecod[i].lit = 0; hdecod[i].p = NULL; } // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE); } // // Build a decoding hash table based on the encoding table hcode: // - short codes (<= HUF_DECBITS) are resolved with a single table access; // - long code entry allocations are not optimized, because long codes are // unfrequent; // - decoding tables are used by hufDecode(); // static bool hufBuildDecTable(const long long *hcode, // i : encoding table int im, // i : min index in hcode int iM, // i : max index in hcode HufDec *hdecod) // o: (allocated by caller) // decoding table [HUF_DECSIZE] { // // Init hashtable & loop on all codes. // Assumes that hufClearDecTable(hdecod) has already been called. // for (; im <= iM; im++) { long long c = hufCode(hcode[im]); int l = hufLength(hcode[im]); if (c >> l) { // // Error: c is supposed to be an l-bit code, // but c contains a value that is greater // than the largest l-bit number. // // invalidTableEntry(); return false; } if (l > HUF_DECBITS) { // // Long code: add a secondary entry // HufDec *pl = hdecod + (c >> (l - HUF_DECBITS)); if (pl->len) { // // Error: a short code has already // been stored in table entry *pl. 
// // invalidTableEntry(); return false; } pl->lit++; if (pl->p) { unsigned int *p = pl->p; pl->p = new unsigned int[pl->lit]; for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i]; delete[] p; } else { pl->p = new unsigned int[1]; } pl->p[pl->lit - 1] = im; } else if (l) { // // Short code: init all primary entries // HufDec *pl = hdecod + (c << (HUF_DECBITS - l)); for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) { if (pl->len || pl->p) { // // Error: a short code or a long code has // already been stored in table entry *pl. // // invalidTableEntry(); return false; } pl->len = l; pl->lit = im; } } } return true; } // // Free the long code entries of a decoding table built by hufBuildDecTable() // static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table { for (int i = 0; i < HUF_DECSIZE; i++) { if (hdecod[i].p) { delete[] hdecod[i].p; hdecod[i].p = 0; } } } // // ENCODING // inline void outputCode(long long code, long long &c, int &lc, char *&out) { outputBits(hufLength(code), hufCode(code), c, lc, out); } inline void sendCode(long long sCode, int runCount, long long runCode, long long &c, int &lc, char *&out) { // // Output a run of runCount instances of the symbol sCount. // Output the symbols explicitly, or if that is shorter, output // the sCode symbol once followed by a runCode symbol and runCount // expressed as an 8-bit number. 
// if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) { outputCode(sCode, c, lc, out); outputCode(runCode, c, lc, out); outputBits(8, runCount, c, lc, out); } else { while (runCount-- >= 0) outputCode(sCode, c, lc, out); } } // // Encode (compress) ni values based on the Huffman encoding table hcode: // static int hufEncode // return: output size (in bits) (const long long *hcode, // i : encoding table const unsigned short *in, // i : uncompressed input buffer const int ni, // i : input buffer size (in bytes) int rlc, // i : rl code char *out) // o: compressed output buffer { char *outStart = out; long long c = 0; // bits not yet written to out int lc = 0; // number of valid bits in c (LSB) int s = in[0]; int cs = 0; // // Loop on input values // for (int i = 1; i < ni; i++) { // // Count same values or send code // if (s == in[i] && cs < 255) { cs++; } else { sendCode(hcode[s], cs, hcode[rlc], c, lc, out); cs = 0; } s = in[i]; } // // Send remaining code // sendCode(hcode[s], cs, hcode[rlc], c, lc, out); if (lc) *out = (c << (8 - lc)) & 0xff; return (out - outStart) * 8 + lc; } // // DECODING // // // In order to force the compiler to inline them, // getChar() and getCode() are implemented as macros // instead of "inline" functions. 
// #define getChar(c, lc, in) \ { \ c = (c << 8) | *(unsigned char *)(in++); \ lc += 8; \ } #if 0 #define getCode(po, rlc, c, lc, in, out, ob, oe) \ { \ if (po == rlc) { \ if (lc < 8) getChar(c, lc, in); \ \ lc -= 8; \ \ unsigned char cs = (c >> lc); \ \ if (out + cs > oe) return false; \ \ /* TinyEXR issue 78 */ \ unsigned short s = out[-1]; \ \ while (cs-- > 0) *out++ = s; \ } else if (out < oe) { \ *out++ = po; \ } else { \ return false; \ } \ } #else static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in, const char *in_end, unsigned short *&out, const unsigned short *ob, const unsigned short *oe) { (void)ob; if (po == rlc) { if (lc < 8) { /* TinyEXR issue 78 */ if ((in + 1) >= in_end) { return false; } getChar(c, lc, in); } lc -= 8; unsigned char cs = (c >> lc); if (out + cs > oe) return false; // Bounds check for safety // Issue 100. if ((out - 1) < ob) return false; unsigned short s = out[-1]; while (cs-- > 0) *out++ = s; } else if (out < oe) { *out++ = po; } else { return false; } return true; } #endif // // Decode (uncompress) ni bits based on encoding & decoding tables: // static bool hufDecode(const long long *hcode, // i : encoding table const HufDec *hdecod, // i : decoding table const char *in, // i : compressed input buffer int ni, // i : input size (in bits) int rlc, // i : run-length code int no, // i : expected output size (in bytes) unsigned short *out) // o: uncompressed output buffer { long long c = 0; int lc = 0; unsigned short *outb = out; // begin unsigned short *oe = out + no; // end const char *ie = in + (ni + 7) / 8; // input byte size // // Loop on input bytes // while (in < ie) { getChar(c, lc, in); // // Access decoding table // while (lc >= HUF_DECBITS) { const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK]; if (pl.len) { // // Get short code // lc -= pl.len; // std::cout << "lit = " << pl.lit << std::endl; // std::cout << "rlc = " << rlc << std::endl; // std::cout << "c = " << c << std::endl; // 
std::cout << "lc = " << lc << std::endl; // std::cout << "in = " << in << std::endl; // std::cout << "out = " << out << std::endl; // std::cout << "oe = " << oe << std::endl; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { if (!pl.p) { return false; } // invalidCode(); // wrong code // // Search long code // int j; for (j = 0; j < pl.lit; j++) { int l = hufLength(hcode[pl.p[j]]); while (lc < l && in < ie) // get more bits getChar(c, lc, in); if (lc >= l) { if (hufCode(hcode[pl.p[j]]) == ((c >> (lc - l)) & (((long long)(1) << l) - 1))) { // // Found : get long code // lc -= l; if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) { return false; } break; } } } if (j == pl.lit) { return false; // invalidCode(); // Not found } } } } // // Get remaining (short) codes // int i = (8 - ni) & 7; c >>= i; lc -= i; while (lc > 0) { const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK]; if (pl.len) { lc -= pl.len; if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) { return false; } } else { return false; // invalidCode(); // wrong (long) code } } if (out - outb != no) { return false; } // notEnoughData (); return true; } static void countFrequencies(std::vector<long long> &freq, const unsigned short data[/*n*/], int n) { for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0; for (int i = 0; i < n; ++i) ++freq[data[i]]; } static void writeUInt(char buf[4], unsigned int i) { unsigned char *b = (unsigned char *)buf; b[0] = i; b[1] = i >> 8; b[2] = i >> 16; b[3] = i >> 24; } static unsigned int readUInt(const char buf[4]) { const unsigned char *b = (const unsigned char *)buf; return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) | ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000); } // // EXTERNAL INTERFACE // static int hufCompress(const unsigned short raw[], int nRaw, char compressed[]) { if (nRaw == 0) return 0; std::vector<long long> freq(HUF_ENCSIZE); countFrequencies(freq, raw, nRaw); int im = 0; int iM = 0; 
hufBuildEncTable(freq.data(), &im, &iM); char *tableStart = compressed + 20; char *tableEnd = tableStart; hufPackEncTable(freq.data(), im, iM, &tableEnd); int tableLength = tableEnd - tableStart; char *dataStart = tableEnd; int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart); int data_length = (nBits + 7) / 8; writeUInt(compressed, im); writeUInt(compressed + 4, iM); writeUInt(compressed + 8, tableLength); writeUInt(compressed + 12, nBits); writeUInt(compressed + 16, 0); // room for future extensions return dataStart + data_length - compressed; } static bool hufUncompress(const char compressed[], int nCompressed, std::vector<unsigned short> *raw) { if (nCompressed == 0) { if (raw->size() != 0) return false; return false; } int im = readUInt(compressed); int iM = readUInt(compressed + 4); // int tableLength = readUInt (compressed + 8); int nBits = readUInt(compressed + 12); if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false; const char *ptr = compressed + 20; // // Fast decoder needs at least 2x64-bits of compressed data, and // needs to be run-able on this platform. Otherwise, fall back // to the original decoder // // if (FastHufDecoder::enabled() && nBits > 128) //{ // FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM); // fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw); //} // else { std::vector<long long> freq(HUF_ENCSIZE); std::vector<HufDec> hdec(HUF_DECSIZE); hufClearDecTable(&hdec.at(0)); hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM, &freq.at(0)); { if (nBits > 8 * (nCompressed - (ptr - compressed))) { return false; } hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0)); hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(), raw->data()); } // catch (...) 
//{ // hufFreeDecTable (hdec); // throw; //} hufFreeDecTable(&hdec.at(0)); } return true; } // // Functions to compress the range of values in the pixel data // const int USHORT_RANGE = (1 << 16); const int BITMAP_SIZE = (USHORT_RANGE >> 3); static void bitmapFromData(const unsigned short data[/*nData*/], int nData, unsigned char bitmap[BITMAP_SIZE], unsigned short &minNonZero, unsigned short &maxNonZero) { for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0; for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7)); bitmap[0] &= ~1; // zero is not explicitly stored in // the bitmap; we assume that the // data always contain zeroes minNonZero = BITMAP_SIZE - 1; maxNonZero = 0; for (int i = 0; i < BITMAP_SIZE; ++i) { if (bitmap[i]) { if (minNonZero > i) minNonZero = i; if (maxNonZero < i) maxNonZero = i; } } } static unsigned short forwardLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[i] = k++; else lut[i] = 0; } return k - 1; // maximum value stored in lut[], } // i.e. number of ones in bitmap minus 1 static unsigned short reverseLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; } int n = k - 1; while (k < USHORT_RANGE) lut[k++] = 0; return n; // maximum k where lut[k] is non-zero, } // i.e. 
number of ones in bitmap minus 1 static void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ #ifdef _MSC_VER #pragma warning(pop) #endif static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int data_width, int num_lines) { std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !TINYEXR_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif // Assume `inSize` is multiple of 2 or 4. std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t c = 0; c < channelData.size(); c++) { PIZChannelData &cd = channelData[c]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = data_width; cd.ny = num_lines; // cd.ys = c.channel().ySampling; size_t pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < num_lines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap.data(), minNonZero, maxNonZero); std::vector<unsigned short> lut(USHORT_RANGE); unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data()); applyLut(lut.data(), 
&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); (*outSize) = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if ((*outSize) >= inSize) { (*outSize) = static_cast<unsigned int>(inSize); memcpy(outPtr, inPtr, inSize); } return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSize, size_t inLen, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { if (inLen == tmpBufSize) { // Data is not compressed(Issue 40). memcpy(outPtr, inPtr, inLen); return true; } std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !TINYEXR_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. 
} assert(0); return false; #endif memset(bitmap.data(), 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr)); // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } std::vector<unsigned short> lut(USHORT_RANGE); memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data()); // // Huffman decoding // int length; // length = *(reinterpret_cast<const int *>(ptr)); tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); if (size_t((ptr - inPtr) + length) > inLen) { return false; } std::vector<unsigned short> tmpBuffer(tmpBufSize); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { 
wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; unsigned int precision; unsigned int __pad0; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* unsigned int __pad1; ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; tolerance = 0.0; } }; static bool FindZFPCompressionParam(ZFPCompressionParam *param, const EXRAttribute *attributes, int num_attributes, std::string *err) { bool foundType = false; for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionType") == 0)) { if (attributes[i].size == 1) { param->type = static_cast<int>(attributes[i].value[0]); foundType = true; break; } else { if (err) { (*err) += "zfpCompressionType attribute must be uchar(1 byte) type.\n"; } return false; } } } if (!foundType) { if (err) { (*err) += "`zfpCompressionType` attribute not found.\n"; } return false; } if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) && (attributes[i].size == 8)) { param->rate = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } if (err) { (*err) += "`zfpCompressionRate` attribute not found.\n"; } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, 
"zfpCompressionPrecision") == 0) && (attributes[i].size == 4)) { param->rate = *(reinterpret_cast<int *>(attributes[i].value)); return true; } } if (err) { (*err) += "`zfpCompressionPrecision` attribute not found.\n"; } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) && (attributes[i].size == 8)) { param->tolerance = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } if (err) { (*err) += "`zfpCompressionTolerance` attribute not found.\n"; } } else { if (err) { (*err) += "Unknown value specified for `zfpCompressionType`.\n"; } } return false; } // Assume pixel format is FLOAT for all channels. static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines, size_t num_channels, const unsigned char *src, unsigned long src_size, const ZFPCompressionParam &param) { size_t uncompressed_size = size_t(dst_width) * size_t(dst_num_lines) * num_channels; if (uncompressed_size == src_size) { // Data is not compressed(Issue 40). 
memcpy(dst, src, src_size); } zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((dst_width % 4) == 0); assert((dst_num_lines % 4) == 0); if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) { return false; } field = zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)), zfp_type_float, static_cast<unsigned int>(dst_width), static_cast<unsigned int>(dst_num_lines) * static_cast<unsigned int>(num_channels)); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimension */ 2, /* write random access */ 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); std::vector<unsigned char> buf(buf_size); memcpy(&buf.at(0), src, src_size); bitstream *stream = stream_open(&buf.at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_stream_rewind(zfp); size_t image_size = size_t(dst_width) * size_t(dst_num_lines); for (size_t c = 0; c < size_t(num_channels); c++) { // decompress 4x4 pixel block. for (size_t y = 0; y < size_t(dst_num_lines); y += 4) { for (size_t x = 0; x < size_t(dst_width); x += 4) { float fblock[16]; zfp_decode_block_float_2(zfp, fblock); for (size_t j = 0; j < 4; j++) { for (size_t i = 0; i < 4; i++) { dst[c * image_size + ((y + j) * size_t(dst_width) + (x + i))] = fblock[j * 4 + i]; } } } } } zfp_field_free(field); zfp_stream_close(zfp); stream_close(stream); return true; } // Assume pixel format is FLOAT for all channels. 
static bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize, const float *inPtr, int width, int num_lines, int num_channels, const ZFPCompressionParam &param) { zfp_stream *zfp = NULL; zfp_field *field = NULL; assert((width % 4) == 0); assert((num_lines % 4) == 0); if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) { return false; } // create input array. field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)), zfp_type_float, static_cast<unsigned int>(width), static_cast<unsigned int>(num_lines * num_channels)); zfp = zfp_stream_open(NULL); if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { zfp_stream_set_precision(zfp, param.precision); } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance); } else { assert(0); } size_t buf_size = zfp_stream_maximum_size(zfp, field); outBuf->resize(buf_size); bitstream *stream = stream_open(&outBuf->at(0), buf_size); zfp_stream_set_bit_stream(zfp, stream); zfp_field_free(field); size_t image_size = size_t(width) * size_t(num_lines); for (size_t c = 0; c < size_t(num_channels); c++) { // compress 4x4 pixel block. for (size_t y = 0; y < size_t(num_lines); y += 4) { for (size_t x = 0; x < size_t(width); x += 4) { float fblock[16]; for (size_t j = 0; j < 4; j++) { for (size_t i = 0; i < 4; i++) { fblock[j * 4 + i] = inPtr[c * image_size + ((y + j) * size_t(width) + (x + i))]; } } zfp_encode_block_float_2(zfp, fblock); } } } zfp_stream_flush(zfp); (*outSize) = static_cast<unsigned int>(zfp_stream_compressed_size(zfp)); zfp_stream_close(zfp); return true; } #endif // // ----------------------------------------------------------------- // // heuristics #define TINYEXR_DIMENSION_THRESHOLD (1024 * 8192) // TODO(syoyo): Refactor function arguments. 
static bool DecodePixelData(/* out */ unsigned char **out_images, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int width, int height, int x_stride, int y, int line_no, int num_lines, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { // PIZ #if TINYEXR_USE_PIZ if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) { // Invalid input #90 return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>( static_cast<size_t>(width * num_lines) * pixel_data_size)); size_t tmpBufLen = outBuf.size(); bool ret = tinyexr::DecompressPiz( reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen, data_len, static_cast<int>(num_channels), channels, width, num_lines); if (!ret) { return false; } // For PIZ_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; // hf.u = line_ptr[u]; // use `cpy` to avoid unaligned memory access when compiler's // optimization is on. 
tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; size_t offset = 0; if (line_order == 0) { offset = (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { offset = static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } image += offset; *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>(&outBuf.at( v * pixel_data_size * 
static_cast<size_t>(x_stride) + channel_offset_list[c] * static_cast<size_t>(x_stride))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else assert(0 && "PIZ is enabled in this build"); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS || compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... 
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; size_t offset = 0; if (line_order == 0) { offset = (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { offset = (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } image += offset; *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { 
image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); if (dstLen == 0) { return false; } if (!tinyexr::DecompressRle( reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... 
for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int 
**>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; std::string e; if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes, int(num_attributes), &e)) { // This code path should not be reachable. assert(0); return false; } // Allocate original data size. 
std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { for (size_t v = 0; v < 
static_cast<size_t>(num_lines); v++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // address may not be aliged. 
use byte-wise copy for safety.#76 // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); tinyexr::FP32 f32 = half_to_float(hf); outLine[u] = f32.f; } } else { assert(0); return false; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { const float *line_ptr = reinterpret_cast<const float *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { if (reinterpret_cast<const unsigned char *>(line_ptr + u) >= (data_ptr + data_len)) { // Corrupsed data? 
return false; } unsigned int val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } } } } return true; } static bool DecodeTiledPixelData( unsigned char **out_images, int *width, int *height, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int data_width, int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x, int tile_size_y, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { // Here, data_width and data_height are the dimensions of the current (sub)level. if (tile_size_x * tile_offset_x > data_width || tile_size_y * tile_offset_y > data_height) { return false; } // Compute actual image size in a tile. if ((tile_offset_x + 1) * tile_size_x >= data_width) { (*width) = data_width - (tile_offset_x * tile_size_x); } else { (*width) = tile_size_x; } if ((tile_offset_y + 1) * tile_size_y >= data_height) { (*height) = data_height - (tile_offset_y * tile_size_y); } else { (*height) = tile_size_y; } // Image size = tile size. 
return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len, compression_type, line_order, (*width), tile_size_y, /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0, (*height), pixel_data_size, num_attributes, attributes, num_channels, channels, channel_offset_list); } static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list, int *pixel_data_size, size_t *channel_offset, int num_channels, const EXRChannelInfo *channels) { channel_offset_list->resize(static_cast<size_t>(num_channels)); (*pixel_data_size) = 0; (*channel_offset) = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { (*channel_offset_list)[c] = (*channel_offset); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { (*pixel_data_size) += sizeof(unsigned short); (*channel_offset) += sizeof(unsigned short); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { (*pixel_data_size) += sizeof(float); (*channel_offset) += sizeof(float); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { (*pixel_data_size) += sizeof(unsigned int); (*channel_offset) += sizeof(unsigned int); } else { // ??? return false; } } return true; } static unsigned char **AllocateImage(int num_channels, const EXRChannelInfo *channels, const int *requested_pixel_types, int data_width, int data_height) { unsigned char **images = reinterpret_cast<unsigned char **>(static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(num_channels)))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { size_t data_len = static_cast<size_t>(data_width) * static_cast<size_t>(data_height); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { // pixel_data_size += sizeof(unsigned short); // channel_offset += sizeof(unsigned short); // Alloc internal image for half type. 
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned short *>( malloc(sizeof(unsigned short) * data_len))); } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); // channel_offset += sizeof(float); images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { // pixel_data_size += sizeof(unsigned int); // channel_offset += sizeof(unsigned int); images[c] = reinterpret_cast<unsigned char *>( static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len))); } else { assert(0); } } return images; } #ifdef _WIN32 static inline std::wstring UTF8ToWchar(const std::string &str) { int wstr_size = MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), NULL, 0); std::wstring wstr(wstr_size, 0); MultiByteToWideChar(CP_UTF8, 0, str.data(), (int)str.size(), &wstr[0], (int)wstr.size()); return wstr; } #endif static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, const EXRVersion *version, std::string *err, const unsigned char *buf, size_t size) { const char *marker = reinterpret_cast<const char *>(&buf[0]); if (empty_header) { (*empty_header) = false; } if (version->multipart) { if (size > 0 && marker[0] == '\0') { // End of header list. 
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; bool has_name = false; bool has_type = false; info->name.clear(); info->type.clear(); info->data_window.min_x = 0; info->data_window.min_y = 0; info->data_window.max_x = 0; info->data_window.max_y = 0; info->line_order = 0; // @fixme info->display_window.min_x = 0; info->display_window.min_y = 0; info->display_window.max_x = 0; info->display_window.max_y = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tiled = 0; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) { if (0 == size) { if (err) { (*err) += "Insufficient data size for attributes.\n"; } return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { if (err) { (*err) += "Failed to read attribute.\n"; } return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; // For a multipart file, the version field 9th bit is 0. 
if ((version->tiled || version->multipart || version->non_image) && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); if (x_size > static_cast<unsigned int>(std::numeric_limits<int>::max()) || y_size > static_cast<unsigned int>(std::numeric_limits<int>::max())) { if (err) { (*err) = "Tile sizes were invalid."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; info->tiled = 1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!ReadChannelInfo(info->channels, data)) { if (err) { (*err) += "Failed to parse channel info.\n"; } return TINYEXR_ERROR_INVALID_DATA; } if (info->channels.size() < 1) { if 
(err) { (*err) += "# of channels is zero.\n"; } return TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { if (data.size() >= 16) { memcpy(&info->data_window.min_x, &data.at(0), sizeof(int)); memcpy(&info->data_window.min_y, &data.at(4), sizeof(int)); memcpy(&info->data_window.max_x, &data.at(8), sizeof(int)); memcpy(&info->data_window.max_y, &data.at(12), sizeof(int)); tinyexr::swap4(&info->data_window.min_x); tinyexr::swap4(&info->data_window.min_y); tinyexr::swap4(&info->data_window.max_x); tinyexr::swap4(&info->data_window.max_y); has_data_window = true; } } else if (attr_name.compare("displayWindow") == 0) { if (data.size() >= 16) { memcpy(&info->display_window.min_x, &data.at(0), sizeof(int)); memcpy(&info->display_window.min_y, &data.at(4), sizeof(int)); memcpy(&info->display_window.max_x, &data.at(8), sizeof(int)); memcpy(&info->display_window.max_y, &data.at(12), sizeof(int)); tinyexr::swap4(&info->display_window.min_x); tinyexr::swap4(&info->display_window.min_y); tinyexr::swap4(&info->display_window.max_x); tinyexr::swap4(&info->display_window.max_y); has_display_window = true; } } else if (attr_name.compare("lineOrder") == 0) { if (data.size() >= 1) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } } else if (attr_name.compare("pixelAspectRatio") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4(&info->pixel_aspect_ratio); has_pixel_aspect_ratio = true; } } else if (attr_name.compare("screenWindowCenter") == 0) { if (data.size() >= 8) { memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4(&info->screen_window_center[0]); tinyexr::swap4(&info->screen_window_center[1]); has_screen_window_center = true; } } else if (attr_name.compare("screenWindowWidth") == 0) { if (data.size() >= sizeof(float)) { 
memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4(&info->screen_window_width); has_screen_window_width = true; } } else if (attr_name.compare("chunkCount") == 0) { if (data.size() >= sizeof(int)) { memcpy(&info->chunk_count, &data.at(0), sizeof(int)); tinyexr::swap4(&info->chunk_count); } } else if (attr_name.compare("name") == 0) { if (!data.empty() && data[0]) { data.push_back(0); size_t len = strlen(reinterpret_cast<const char*>(&data[0])); info->name.resize(len); info->name.assign(reinterpret_cast<const char*>(&data[0]), len); has_name = true; } } else if (attr_name.compare("type") == 0) { if (!data.empty() && data[0]) { data.push_back(0); size_t len = strlen(reinterpret_cast<const char*>(&data[0])); info->type.resize(len); info->type.assign(reinterpret_cast<const char*>(&data[0]), len); has_type = true; } } else { // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) { EXRAttribute attrib; #ifdef _MSC_VER strncpy_s(attrib.name, attr_name.c_str(), 255); strncpy_s(attrib.type, attr_type.c_str(), 255); #else strncpy(attrib.name, attr_name.c_str(), 255); strncpy(attrib.type, attr_type.c_str(), 255); #endif attrib.name[255] = '\0'; attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." << std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." 
<< std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header or invalid." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." << std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." << std::endl; } if (version->multipart || version->non_image) { if (!has_name) { ss_err << "\"name\" attribute not found in the header." << std::endl; } if (!has_type) { ss_err << "\"type\" attribute not found in the header." << std::endl; } } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) { exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio; exr_header->screen_window_center[0] = info.screen_window_center[0]; exr_header->screen_window_center[1] = info.screen_window_center[1]; exr_header->screen_window_width = info.screen_window_width; exr_header->chunk_count = info.chunk_count; exr_header->display_window.min_x = info.display_window.min_x; exr_header->display_window.min_y = info.display_window.min_y; exr_header->display_window.max_x = info.display_window.max_x; exr_header->display_window.max_y = info.display_window.max_y; exr_header->data_window.min_x = info.data_window.min_x; exr_header->data_window.min_y = info.data_window.min_y; exr_header->data_window.max_x = info.data_window.max_x; exr_header->data_window.max_y = info.data_window.max_y; exr_header->line_order = info.line_order; exr_header->compression_type = info.compression_type; exr_header->tiled = info.tiled; exr_header->tile_size_x = info.tile_size_x; exr_header->tile_size_y = 
info.tile_size_y; exr_header->tile_level_mode = info.tile_level_mode; exr_header->tile_rounding_mode = info.tile_rounding_mode; EXRSetNameAttr(exr_header, info.name.c_str()); if (!info.type.empty()) { if (info.type == "scanlineimage") { assert(!exr_header->tiled); } else if (info.type == "tiledimage") { assert(exr_header->tiled); } else if (info.type == "deeptile") { exr_header->non_image = 1; assert(exr_header->tiled); } else if (info.type == "deepscanline") { exr_header->non_image = 1; assert(!exr_header->tiled); } else { assert(false); } } exr_header->num_channels = static_cast<int>(info.channels.size()); exr_header->channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { #ifdef _MSC_VER strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #else strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #endif // manually add '\0' for safety. 
exr_header->channels[c].name[255] = '\0'; exr_header->channels[c].pixel_type = info.channels[c].pixel_type; exr_header->channels[c].p_linear = info.channels[c].p_linear; exr_header->channels[c].x_sampling = info.channels[c].x_sampling; exr_header->channels[c].y_sampling = info.channels[c].y_sampling; } exr_header->pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->pixel_types[c] = info.channels[c].pixel_type; } // Initially fill with values of `pixel_types` exr_header->requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->requested_pixel_types[c] = info.channels[c].pixel_type; } exr_header->num_custom_attributes = static_cast<int>(info.attributes.size()); if (exr_header->num_custom_attributes > 0) { // TODO(syoyo): Report warning when # of attributes exceeds // `TINYEXR_MAX_CUSTOM_ATTRIBUTES` if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) { exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES; } exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc( sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes))); for (size_t i = 0; i < info.attributes.size(); i++) { memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256); memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256); exr_header->custom_attributes[i].size = info.attributes[i].size; // Just copy pointer exr_header->custom_attributes[i].value = info.attributes[i].value; } } else { exr_header->custom_attributes = NULL; } exr_header->header_len = info.header_len; } struct OffsetData { OffsetData() : num_x_levels(0), num_y_levels(0) {} std::vector<std::vector<std::vector <tinyexr::tinyexr_uint64> > > offsets; int num_x_levels; int 
num_y_levels; }; int LevelIndex(int lx, int ly, int tile_level_mode, int num_x_levels) { switch (tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: return 0; case TINYEXR_TILE_MIPMAP_LEVELS: return lx; case TINYEXR_TILE_RIPMAP_LEVELS: return lx + ly * num_x_levels; default: assert(false); } return 0; } static int LevelSize(int toplevel_size, int level, int tile_rounding_mode) { assert(level >= 0); int b = (int)(1u << (unsigned)level); int level_size = toplevel_size / b; if (tile_rounding_mode == TINYEXR_TILE_ROUND_UP && level_size * b < toplevel_size) level_size += 1; return std::max(level_size, 1); } static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header, const OffsetData& offset_data, const std::vector<size_t>& channel_offset_list, int pixel_data_size, const unsigned char* head, const size_t size, std::string* err) { int num_channels = exr_header->num_channels; int level_index = LevelIndex(exr_image->level_x, exr_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels); int num_y_tiles = (int)offset_data.offsets[level_index].size(); assert(num_y_tiles); int num_x_tiles = (int)offset_data.offsets[level_index][0].size(); assert(num_x_tiles); int num_tiles = num_x_tiles * num_y_tiles; int err_code = TINYEXR_SUCCESS; enum { EF_SUCCESS = 0, EF_INVALID_DATA = 1, EF_INSUFFICIENT_DATA = 2, EF_FAILED_TO_DECODE = 4 }; #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::atomic<unsigned> error_flag(EF_SUCCESS); #else unsigned error_flag(EF_SUCCESS); #endif // Although the spec says : "...the data window is subdivided into an array of smaller rectangles...", // the IlmImf library allows the dimensions of the tile to be larger (or equal) than the dimensions of the data window. 
#if 0
  // Disabled dimension check: IlmImf allows tiles larger than the data
  // window, so rejecting them here would break valid files.
  if ((exr_header->tile_size_x > exr_image->width || exr_header->tile_size_y > exr_image->height) &&
      exr_image->level_x == 0 && exr_image->level_y == 0) {
    if (err) {
      (*err) += "Failed to decode tile data.\n";
    }
    err_code = TINYEXR_ERROR_INVALID_DATA;
  }
#endif
  exr_image->tiles = static_cast<EXRTile*>(
    calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles)));

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
  // Worker pool: each thread pulls the next tile index from the shared
  // atomic counter until all tiles are claimed.
  std::vector<std::thread> workers;
  std::atomic<int> tile_count(0);

  int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
  if (num_threads > int(num_tiles)) {
    num_threads = int(num_tiles);
  }

  for (int t = 0; t < num_threads; t++) {
    workers.emplace_back(std::thread([&]() {
      int tile_idx = 0;
      while ((tile_idx = tile_count++) < num_tiles) {

#else
#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
  for (int tile_idx = 0; tile_idx < num_tiles; tile_idx++) {
#endif
    // Allocate memory for each tile.
    exr_image->tiles[tile_idx].images = tinyexr::AllocateImage(
      num_channels, exr_header->channels, exr_header->requested_pixel_types,
      exr_header->tile_size_x, exr_header->tile_size_y);

    int x_tile = tile_idx % num_x_tiles;
    int y_tile = tile_idx / num_x_tiles;
    // Chunk layout on disk:
    // 16 byte: tile coordinates
    // 4 byte : data size
    // ~      : data(uncompressed or compressed)
    tinyexr::tinyexr_uint64 offset = offset_data.offsets[level_index][y_tile][x_tile];
    if (offset + sizeof(int) * 5 > size) {
      // Insufficient data size.
      error_flag |= EF_INSUFFICIENT_DATA;
      continue;
    }

    size_t data_size = size_t(size - (offset + sizeof(int) * 5));
    const unsigned char* data_ptr = reinterpret_cast<const unsigned char*>(head + offset);

    int tile_coordinates[4];
    memcpy(tile_coordinates, data_ptr, sizeof(int) * 4);
    tinyexr::swap4(&tile_coordinates[0]);
    tinyexr::swap4(&tile_coordinates[1]);
    tinyexr::swap4(&tile_coordinates[2]);
    tinyexr::swap4(&tile_coordinates[3]);

    // The stored level must match the level being decoded.
    if (tile_coordinates[2] != exr_image->level_x) {
      // Invalid data.
      error_flag |= EF_INVALID_DATA;
      continue;
    }
    if (tile_coordinates[3] != exr_image->level_y) {
      // Invalid data.
      error_flag |= EF_INVALID_DATA;
      continue;
    }

    int data_len;
    memcpy(&data_len, data_ptr + 16, sizeof(int));  // 16 = sizeof(tile_coordinates)
    tinyexr::swap4(&data_len);
    if (data_len < 2 || size_t(data_len) > data_size) {
      // Insufficient data size.
      error_flag |= EF_INSUFFICIENT_DATA;
      continue;
    }

    // Move to data addr: 20 = 16 + 4;
    data_ptr += 20;

    bool ret = tinyexr::DecodeTiledPixelData(
      exr_image->tiles[tile_idx].images,
      &(exr_image->tiles[tile_idx].width), &(exr_image->tiles[tile_idx].height),
      exr_header->requested_pixel_types, data_ptr,
      static_cast<size_t>(data_len), exr_header->compression_type,
      exr_header->line_order,
      exr_image->width, exr_image->height,
      tile_coordinates[0], tile_coordinates[1],
      exr_header->tile_size_x, exr_header->tile_size_y,
      static_cast<size_t>(pixel_data_size),
      static_cast<size_t>(exr_header->num_custom_attributes),
      exr_header->custom_attributes,
      static_cast<size_t>(exr_header->num_channels),
      exr_header->channels, channel_offset_list);

    if (!ret) {
      // Failed to decode tile data.
      error_flag |= EF_FAILED_TO_DECODE;
    }

    exr_image->tiles[tile_idx].offset_x = tile_coordinates[0];
    exr_image->tiles[tile_idx].offset_y = tile_coordinates[1];
    exr_image->tiles[tile_idx].level_x = tile_coordinates[2];
    exr_image->tiles[tile_idx].level_y = tile_coordinates[3];

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
      }
    }));
  }  // num_thread loop

  for (auto& t : workers) {
    t.join();
  }

#else
  }  // parallel for
#endif

  // Even in the event of an error, the reserved memory may be freed.
exr_image->num_channels = num_channels; exr_image->num_tiles = static_cast<int>(num_tiles); if (error_flag) err_code = TINYEXR_ERROR_INVALID_DATA; if (err) { if (error_flag & EF_INSUFFICIENT_DATA) { (*err) += "Insufficient data length.\n"; } if (error_flag & EF_FAILED_TO_DECODE) { (*err) += "Failed to decode tile data.\n"; } } return err_code; } static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, const OffsetData& offset_data, const unsigned char *head, const size_t size, std::string *err) { int num_channels = exr_header->num_channels; int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; if (!FindZFPCompressionParam(&zfp_compression_param, exr_header->custom_attributes, int(exr_header->num_custom_attributes), err)) { return TINYEXR_ERROR_INVALID_HEADER; } #endif } if (exr_header->data_window.max_x < exr_header->data_window.min_x || exr_header->data_window.max_y < exr_header->data_window.min_y) { if (err) { (*err) += "Invalid data window.\n"; } return TINYEXR_ERROR_INVALID_DATA; } int data_width = exr_header->data_window.max_x - exr_header->data_window.min_x + 1; int data_height = exr_header->data_window.max_y - exr_header->data_window.min_y + 1; // Do not allow too large data_width and data_height. header invalid? { if ((data_width > TINYEXR_DIMENSION_THRESHOLD) || (data_height > TINYEXR_DIMENSION_THRESHOLD)) { if (err) { std::stringstream ss; ss << "data_with or data_height too large. 
data_width: " << data_width << ", " << "data_height = " << data_height << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } if (exr_header->tiled) { if ((exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) || (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD)) { if (err) { std::stringstream ss; ss << "tile with or tile height too large. tile width: " << exr_header->tile_size_x << ", " << "tile height = " << exr_header->tile_size_y << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } } } const std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0]; size_t num_blocks = offsets.size(); std::vector<size_t> channel_offset_list; int pixel_data_size = 0; size_t channel_offset = 0; if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size, &channel_offset, num_channels, exr_header->channels)) { if (err) { (*err) += "Failed to compute channel layout.\n"; } return TINYEXR_ERROR_INVALID_DATA; } #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::atomic<bool> invalid_data(false); #else bool invalid_data(false); #endif if (exr_header->tiled) { // value check if (exr_header->tile_size_x < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } if (exr_header->tile_size_y < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } if (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) { EXRImage* level_image = NULL; for (int level = 0; level < offset_data.num_x_levels; ++level) { if (!level_image) { level_image = exr_image; } else { level_image->next_level = new EXRImage; InitEXRImage(level_image->next_level); level_image = level_image->next_level; } level_image->width = LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level, 
exr_header->tile_rounding_mode);
        level_image->height =
          LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level,
                    exr_header->tile_rounding_mode);
        level_image->level_x = level;
        level_image->level_y = level;

        int ret = DecodeTiledLevel(level_image, exr_header,
          offset_data,
          channel_offset_list,
          pixel_data_size,
          head, size,
          err);
        if (ret != TINYEXR_SUCCESS) return ret;
      }
    } else {
      // RIPMAP: every (level_x, level_y) combination gets its own image.
      EXRImage* level_image = NULL;
      for (int level_y = 0; level_y < offset_data.num_y_levels; ++level_y)
        for (int level_x = 0; level_x < offset_data.num_x_levels; ++level_x) {
          if (!level_image) {
            level_image = exr_image;
          } else {
            level_image->next_level = new EXRImage;
            InitEXRImage(level_image->next_level);
            level_image = level_image->next_level;
          }

          level_image->width =
            LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level_x,
                      exr_header->tile_rounding_mode);
          level_image->height =
            LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level_y,
                      exr_header->tile_rounding_mode);
          level_image->level_x = level_x;
          level_image->level_y = level_y;

          int ret = DecodeTiledLevel(level_image, exr_header,
            offset_data,
            channel_offset_list,
            pixel_data_size,
            head, size,
            err);
          if (ret != TINYEXR_SUCCESS) return ret;
        }
    }
  } else {  // scanline format
    // Don't allow too large image(256GB * pixel_data_size or more). Workaround
    // for #104.
    size_t total_data_len =
        size_t(data_width) * size_t(data_height) * size_t(num_channels);
    const bool total_data_len_overflown =
        sizeof(void *) == 8 ?
        (total_data_len >= 0x4000000000) : false;
    if ((total_data_len == 0) || total_data_len_overflown) {
      if (err) {
        std::stringstream ss;
        ss << "Image data size is zero or too large: width = " << data_width
           << ", height = " << data_height << ", channels = " << num_channels
           << std::endl;
        (*err) += ss.str();
      }
      return TINYEXR_ERROR_INVALID_DATA;
    }

    exr_image->images = tinyexr::AllocateImage(
        num_channels, exr_header->channels, exr_header->requested_pixel_types,
        data_width, data_height);

#if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0)
    // Worker pool: each thread claims the next scanline block via the
    // shared atomic counter.
    std::vector<std::thread> workers;
    std::atomic<int> y_count(0);

    int num_threads = std::max(1, int(std::thread::hardware_concurrency()));
    if (num_threads > int(num_blocks)) {
      num_threads = int(num_blocks);
    }

    for (int t = 0; t < num_threads; t++) {
      workers.emplace_back(std::thread([&]() {
        int y = 0;
        while ((y = y_count++) < int(num_blocks)) {

#else

#if TINYEXR_USE_OPENMP
#pragma omp parallel for
#endif
    for (int y = 0; y < static_cast<int>(num_blocks); y++) {

#endif
          size_t y_idx = static_cast<size_t>(y);

          if (offsets[y_idx] + sizeof(int) * 2 > size) {
            invalid_data = true;
          } else {
            // Scanline chunk layout on disk:
            // 4 byte: scan line
            // 4 byte: data size
            // ~     : pixel data(uncompressed or compressed)
            size_t data_size =
                size_t(size - (offsets[y_idx] + sizeof(int) * 2));
            const unsigned char *data_ptr =
                reinterpret_cast<const unsigned char *>(head + offsets[y_idx]);

            int line_no;
            memcpy(&line_no, data_ptr, sizeof(int));
            int data_len;
            memcpy(&data_len, data_ptr + 4, sizeof(int));
            tinyexr::swap4(&line_no);
            tinyexr::swap4(&data_len);

            if (size_t(data_len) > data_size) {
              invalid_data = true;

            } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) {
              // Too large value. Assume this is invalid
              // 2**20 = 1048576 = heuristic value.
              invalid_data = true;
            } else if (data_len == 0) {
              // TODO(syoyo): May be ok to raise the threshold for example
              // `data_len < 4`
              invalid_data = true;
            } else {
              // line_no may be negative.
int end_line_no = (std::min)(line_no + num_scanline_blocks, (exr_header->data_window.max_y + 1)); int num_lines = end_line_no - line_no; if (num_lines <= 0) { invalid_data = true; } else { // Move to data addr: 8 = 4 + 4; data_ptr += 8; // Adjust line_no with data_window.bmin.y // overflow check tinyexr_int64 lno = static_cast<tinyexr_int64>(line_no) - static_cast<tinyexr_int64>(exr_header->data_window.min_y); if (lno > std::numeric_limits<int>::max()) { line_no = -1; // invalid } else if (lno < -std::numeric_limits<int>::max()) { line_no = -1; // invalid } else { line_no -= exr_header->data_window.min_y; } if (line_no < 0) { invalid_data = true; } else { if (!tinyexr::DecodePixelData( exr_image->images, exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, data_width, y, line_no, num_lines, static_cast<size_t>(pixel_data_size), static_cast<size_t>( exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list)) { invalid_data = true; } } } } } #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) } })); } for (auto &t : workers) { t.join(); } #else } // omp parallel #endif } if (invalid_data) { if (err) { std::stringstream ss; (*err) += "Invalid data found when decoding pixels.\n"; } return TINYEXR_ERROR_INVALID_DATA; } // Overwrite `pixel_type` with `requested_pixel_type`. 
{ for (int c = 0; c < exr_header->num_channels; c++) { exr_header->pixel_types[c] = exr_header->requested_pixel_types[c]; } } { exr_image->num_channels = num_channels; exr_image->width = data_width; exr_image->height = data_height; } return TINYEXR_SUCCESS; } static bool ReconstructLineOffsets( std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n, const unsigned char *head, const unsigned char *marker, const size_t size) { assert(head < marker); assert(offsets->size() == n); for (size_t i = 0; i < n; i++) { size_t offset = static_cast<size_t>(marker - head); // Offset should not exceed whole EXR file/data size. if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) { return false; } int y; unsigned int data_len; memcpy(&y, marker, sizeof(int)); memcpy(&data_len, marker + 4, sizeof(unsigned int)); if (data_len >= size) { return false; } tinyexr::swap4(&y); tinyexr::swap4(&data_len); (*offsets)[i] = offset; marker += data_len + 8; // 8 = 4 bytes(y) + 4 bytes(data_len) } return true; } static int FloorLog2(unsigned x) { // // For x > 0, floorLog2(y) returns floor(log(x)/log(2)). // int y = 0; while (x > 1) { y += 1; x >>= 1u; } return y; } static int CeilLog2(unsigned x) { // // For x > 0, ceilLog2(y) returns ceil(log(x)/log(2)). // int y = 0; int r = 0; while (x > 1) { if (x & 1) r = 1; y += 1; x >>= 1u; } return y + r; } static int RoundLog2(int x, int tile_rounding_mode) { return (tile_rounding_mode == TINYEXR_TILE_ROUND_DOWN) ? 
FloorLog2(static_cast<unsigned>(x)) : CeilLog2(static_cast<unsigned>(x)); } static int CalculateNumXLevels(const EXRHeader* exr_header) { int min_x = exr_header->data_window.min_x; int max_x = exr_header->data_window.max_x; int min_y = exr_header->data_window.min_y; int max_y = exr_header->data_window.max_y; int num = 0; switch (exr_header->tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: num = 1; break; case TINYEXR_TILE_MIPMAP_LEVELS: { int w = max_x - min_x + 1; int h = max_y - min_y + 1; num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1; } break; case TINYEXR_TILE_RIPMAP_LEVELS: { int w = max_x - min_x + 1; num = RoundLog2(w, exr_header->tile_rounding_mode) + 1; } break; default: assert(false); } return num; } static int CalculateNumYLevels(const EXRHeader* exr_header) { int min_x = exr_header->data_window.min_x; int max_x = exr_header->data_window.max_x; int min_y = exr_header->data_window.min_y; int max_y = exr_header->data_window.max_y; int num = 0; switch (exr_header->tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: num = 1; break; case TINYEXR_TILE_MIPMAP_LEVELS: { int w = max_x - min_x + 1; int h = max_y - min_y + 1; num = RoundLog2(std::max(w, h), exr_header->tile_rounding_mode) + 1; } break; case TINYEXR_TILE_RIPMAP_LEVELS: { int h = max_y - min_y + 1; num = RoundLog2(h, exr_header->tile_rounding_mode) + 1; } break; default: assert(false); } return num; } static void CalculateNumTiles(std::vector<int>& numTiles, int toplevel_size, int size, int tile_rounding_mode) { for (unsigned i = 0; i < numTiles.size(); i++) { int l = LevelSize(toplevel_size, i, tile_rounding_mode); assert(l <= std::numeric_limits<int>::max() - size + 1); numTiles[i] = (l + size - 1) / size; } } static void PrecalculateTileInfo(std::vector<int>& num_x_tiles, std::vector<int>& num_y_tiles, const EXRHeader* exr_header) { int min_x = exr_header->data_window.min_x; int max_x = exr_header->data_window.max_x; int min_y = exr_header->data_window.min_y; int max_y = 
exr_header->data_window.max_y;

  int num_x_levels = CalculateNumXLevels(exr_header);
  int num_y_levels = CalculateNumYLevels(exr_header);

  num_x_tiles.resize(num_x_levels);
  num_y_tiles.resize(num_y_levels);

  CalculateNumTiles(num_x_tiles,
    max_x - min_x + 1,
    exr_header->tile_size_x,
    exr_header->tile_rounding_mode);

  CalculateNumTiles(num_y_tiles,
    max_y - min_y + 1,
    exr_header->tile_size_y,
    exr_header->tile_rounding_mode);
}

// Shape the offset table for a scanline (single-resolution) part:
// one level holding `num_blocks` chunk offsets.
static void InitSingleResolutionOffsets(OffsetData& offset_data, size_t num_blocks) {
  offset_data.offsets.resize(1);
  offset_data.offsets[0].resize(1);
  offset_data.offsets[0][0].resize(num_blocks);
  offset_data.num_x_levels = 1;
  offset_data.num_y_levels = 1;
}

// Return sum of tile blocks.
static int InitTileOffsets(OffsetData& offset_data,
  const EXRHeader* exr_header,
  const std::vector<int>& num_x_tiles,
  const std::vector<int>& num_y_tiles) {
  int num_tile_blocks = 0;
  offset_data.num_x_levels = static_cast<int>(num_x_tiles.size());
  offset_data.num_y_levels = static_cast<int>(num_y_tiles.size());
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
    case TINYEXR_TILE_MIPMAP_LEVELS:
      // One offset plane per level; X and Y level counts must agree.
      assert(offset_data.num_x_levels == offset_data.num_y_levels);
      offset_data.offsets.resize(offset_data.num_x_levels);

      for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
        offset_data.offsets[l].resize(num_y_tiles[l]);

        for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
          offset_data.offsets[l][dy].resize(num_x_tiles[l]);
          num_tile_blocks += num_x_tiles[l];
        }
      }
      break;

    case TINYEXR_TILE_RIPMAP_LEVELS:
      // One offset plane per (lx, ly) pair, flattened row-major by ly.
      offset_data.offsets.resize(static_cast<size_t>(offset_data.num_x_levels) * static_cast<size_t>(offset_data.num_y_levels));

      for (int ly = 0; ly < offset_data.num_y_levels; ++ly) {
        for (int lx = 0; lx < offset_data.num_x_levels; ++lx) {
          int l = ly * offset_data.num_x_levels + lx;
          offset_data.offsets[l].resize(num_y_tiles[ly]);

          for (size_t dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
            offset_data.offsets[l][dy].resize(num_x_tiles[lx]);
            num_tile_blocks += num_x_tiles[lx];
          }
        }
      }
      break;

    default:
      assert(false);
  }
  return num_tile_blocks;
}

// True when any stored offset is unwritten/invalid (<= 0 as signed).
static bool IsAnyOffsetsAreInvalid(const OffsetData& offset_data) {
  for (unsigned int l = 0; l < offset_data.offsets.size(); ++l)
    for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy)
      for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx)
        if (reinterpret_cast<const tinyexr::tinyexr_int64&>(offset_data.offsets[l][dy][dx]) <= 0)
          return true;

  return false;
}

// Bounds-check a (dx, dy, lx, ly) tile coordinate against the offset table
// shape for the header's tile level mode.
static bool isValidTile(const EXRHeader* exr_header,
  const OffsetData& offset_data,
  int dx, int dy, int lx, int ly) {
  if (lx < 0 || ly < 0 || dx < 0 || dy < 0) return false;
  int num_x_levels = offset_data.num_x_levels;
  int num_y_levels = offset_data.num_y_levels;
  switch (exr_header->tile_level_mode) {
    case TINYEXR_TILE_ONE_LEVEL:
      if (lx == 0 &&
        ly == 0 &&
        offset_data.offsets.size() > 0 &&
        offset_data.offsets[0].size() > static_cast<size_t>(dy) &&
        offset_data.offsets[0][dy].size() > static_cast<size_t>(dx)) {
        return true;
      }
      break;

    case TINYEXR_TILE_MIPMAP_LEVELS:
      if (lx < num_x_levels &&
        ly < num_y_levels &&
        offset_data.offsets.size() > static_cast<size_t>(lx) &&
        offset_data.offsets[lx].size() > static_cast<size_t>(dy) &&
        offset_data.offsets[lx][dy].size() > static_cast<size_t>(dx)) {
        return true;
      }
      break;

    case TINYEXR_TILE_RIPMAP_LEVELS:
    {
      size_t idx = static_cast<size_t>(lx) + static_cast<size_t>(ly)* static_cast<size_t>(num_x_levels);
      if (lx < num_x_levels &&
        ly < num_y_levels &&
        (offset_data.offsets.size() > idx) &&
        offset_data.offsets[idx].size() > static_cast<size_t>(dy) &&
        offset_data.offsets[idx][dy].size() > static_cast<size_t>(dx)) {
        return true;
      }
    }
    break;

    default:
      return false;
  }

  return false;
}

// Rebuild the tile offset table by walking every tile chunk in file order
// and reading the (tileX, tileY, levelX, levelY) stored in each chunk
// header. Used when the stored offset table has invalid entries.
static void ReconstructTileOffsets(OffsetData& offset_data,
  const EXRHeader* exr_header,
  const unsigned char* head, const unsigned char* marker, const size_t /*size*/,
  bool isMultiPartFile,
  bool isDeep) {
  int numXLevels = offset_data.num_x_levels;
  for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
    for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) {
        tinyexr::tinyexr_uint64 tileOffset = marker - head;

        if (isMultiPartFile) {
          // Multi-part chunks are prefixed with a part number; skip it.
          //int partNumber;
          marker += sizeof(int);
        }

        int tileX;
        memcpy(&tileX, marker, sizeof(int));
        tinyexr::swap4(&tileX);
        marker += sizeof(int);

        int tileY;
        memcpy(&tileY, marker, sizeof(int));
        tinyexr::swap4(&tileY);
        marker += sizeof(int);

        int levelX;
        memcpy(&levelX, marker, sizeof(int));
        tinyexr::swap4(&levelX);
        marker += sizeof(int);

        int levelY;
        memcpy(&levelY, marker, sizeof(int));
        tinyexr::swap4(&levelY);
        marker += sizeof(int);

        if (isDeep) {
          tinyexr::tinyexr_int64 packed_offset_table_size;
          memcpy(&packed_offset_table_size, marker, sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_offset_table_size));
          marker += sizeof(tinyexr::tinyexr_int64);

          tinyexr::tinyexr_int64 packed_sample_size;
          memcpy(&packed_sample_size, marker, sizeof(tinyexr::tinyexr_int64));
          tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64*>(&packed_sample_size));
          marker += sizeof(tinyexr::tinyexr_int64);

          // next Int64 is unpacked sample size - skip that too
          marker += packed_offset_table_size + packed_sample_size + 8;

        } else {
          int dataSize;
          memcpy(&dataSize, marker, sizeof(int));
          tinyexr::swap4(&dataSize);
          marker += sizeof(int);
          marker += dataSize;
        }

        if (!isValidTile(exr_header, offset_data,
          tileX, tileY, levelX, levelY))
          return;

        int level_idx = LevelIndex(levelX, levelY, exr_header->tile_level_mode, numXLevels);
        offset_data.offsets[level_idx][tileY][tileX] = tileOffset;
      }
    }
  }
}

// marker output is also updated.
static int ReadOffsets(OffsetData& offset_data,
  const unsigned char* head,
  const unsigned char*& marker,
  const size_t size,
  const char** err) {
  for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) {
    for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) {
      for (unsigned int dx
< offset_data.offsets[l][dy].size(); ++dx) {
        tinyexr::tinyexr_uint64 offset;
        if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
          tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
        tinyexr::swap8(&offset);
        if (offset >= size) {
          tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
        marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
        offset_data.offsets[l][dy][dx] = offset;
      }
    }
  }
  return TINYEXR_SUCCESS;
}

// Decode a single-part EXR image: read (or reconstruct) the chunk offset
// table, then decode the chunks via DecodeChunk(). `marker` points just
// past the parsed header.
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
                          const unsigned char *head,
                          const unsigned char *marker, const size_t size,
                          const char **err) {
  if (exr_image == NULL || exr_header == NULL || head == NULL ||
      marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Scanlines per chunk depend on the compression scheme.
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  if (exr_header->data_window.max_x < exr_header->data_window.min_x ||
      exr_header->data_window.max_x - exr_header->data_window.min_x ==
          std::numeric_limits<int>::max()) {
    // Issue 63
    tinyexr::SetErrorMessage("Invalid data width value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  int data_width =
      exr_header->data_window.max_x - exr_header->data_window.min_x + 1;

  if (exr_header->data_window.max_y < exr_header->data_window.min_y ||
      exr_header->data_window.max_y - exr_header->data_window.min_y ==
          std::numeric_limits<int>::max()) {
    tinyexr::SetErrorMessage("Invalid data height value", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }
  int data_height =
      exr_header->data_window.max_y - exr_header->data_window.min_y + 1;

  // Do not allow too large data_width and data_height. header invalid?
  {
    if (data_width > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("data width too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (data_height > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("data height too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }

  if (exr_header->tiled) {
    if (exr_header->tile_size_x > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("tile width too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    if (exr_header->tile_size_y > TINYEXR_DIMENSION_THRESHOLD) {
      tinyexr::SetErrorMessage("tile height too large.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
  }

  // Read offset tables.
  OffsetData offset_data;
  size_t num_blocks = 0;
  // For a multi-resolution image, the size of the offset table will be calculated from the other attributes of the header.
  // If chunk_count > 0 then chunk_count must be equal to the calculated tile count.
  if (exr_header->tiled) {
    {
      std::vector<int> num_x_tiles, num_y_tiles;
      PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_header);
      num_blocks = InitTileOffsets(offset_data, exr_header, num_x_tiles, num_y_tiles);
      if (exr_header->chunk_count > 0) {
        if (exr_header->chunk_count != static_cast<int>(num_blocks)) {
          tinyexr::SetErrorMessage("Invalid offset table size.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
      }
    }

    int ret = ReadOffsets(offset_data, head, marker, size, err);
    if (ret != TINYEXR_SUCCESS) return ret;
    if (IsAnyOffsetsAreInvalid(offset_data)) {
      // Fall back to scanning the chunks themselves.
      ReconstructTileOffsets(offset_data, exr_header,
        head, marker, size,
        exr_header->multipart, exr_header->non_image);
    }
  } else if (exr_header->chunk_count > 0) {
    // Use `chunkCount` attribute.
    num_blocks = static_cast<size_t>(exr_header->chunk_count);
    InitSingleResolutionOffsets(offset_data, num_blocks);
  } else {
    // Derive the block count from the image height.
    num_blocks = static_cast<size_t>(data_height) /
      static_cast<size_t>(num_scanline_blocks);
    if (num_blocks * static_cast<size_t>(num_scanline_blocks) <
      static_cast<size_t>(data_height)) {
      num_blocks++;
    }

    InitSingleResolutionOffsets(offset_data, num_blocks);
  }

  if (!exr_header->tiled) {
    std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0];
    for (size_t y = 0; y < num_blocks; y++) {
      tinyexr::tinyexr_uint64 offset;
      // Issue #81
      if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) {
        tinyexr::SetErrorMessage("Insufficient data size in offset table.", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

      memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64));
      tinyexr::swap8(&offset);
      if (offset >= size) {
        tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
      marker += sizeof(tinyexr::tinyexr_uint64);  // = 8
      offsets[y] = offset;
    }

    // If line offsets are invalid, we try to reconstruct it.
    // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details.
    for (size_t y = 0; y < num_blocks; y++) {
      if (offsets[y] <= 0) {
        // TODO(syoyo) Report as warning?
        // if (err) {
        // stringstream ss;
        // ss << "Incomplete lineOffsets." << std::endl;
        // (*err) += ss.str();
        //}
        bool ret =
            ReconstructLineOffsets(&offsets, num_blocks, head, marker, size);
        if (ret) {
          // OK
          break;
        } else {
          tinyexr::SetErrorMessage(
              "Cannot reconstruct lineOffset table in DecodeEXRImage.", err);
          return TINYEXR_ERROR_INVALID_DATA;
        }
      }
    }
  }

  {
    std::string e;
    int ret = DecodeChunk(exr_image, exr_header, offset_data, head, size, &e);

    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }

#if 1
      FreeEXRImage(exr_image);
#else
      // release memory(if exists)
      if ((exr_header->num_channels > 0) && exr_image && exr_image->images) {
        for (size_t c = 0; c < size_t(exr_header->num_channels); c++) {
          if (exr_image->images[c]) {
            free(exr_image->images[c]);
            exr_image->images[c] = NULL;
          }
        }

        free(exr_image->images);
        exr_image->images = NULL;
      }
#endif
    }

    return ret;
  }
}

// Collect the unique layer prefixes ("layer" of "layer.channel") present
// in the header's channel names.
static void GetLayers(const EXRHeader &exr_header,
                      std::vector<std::string> &layer_names) {
  // Naive implementation
  // Group channels by layers
  // go over all channel names, split by periods
  // collect unique names
  layer_names.clear();
  for (int c = 0; c < exr_header.num_channels; c++) {
    std::string full_name(exr_header.channels[c].name);
    const size_t pos = full_name.find_last_of('.');
    if (pos != std::string::npos && pos != 0 && pos + 1 < full_name.size()) {
      full_name.erase(pos);
      if (std::find(layer_names.begin(), layer_names.end(), full_name) ==
          layer_names.end())
        layer_names.push_back(full_name);
    }
  }
}

// Channel index in the header plus its name with the layer prefix removed.
struct LayerChannel {
  explicit LayerChannel(size_t i, std::string n) : index(i), name(n) {}
  size_t index;
  std::string name;
};

// List the channels belonging to `layer_name` (all channels when empty),
// with names stripped of the layer prefix.
static void ChannelsInLayer(const EXRHeader &exr_header,
                            const std::string layer_name,
                            std::vector<LayerChannel> &channels) {
  channels.clear();
  for (int c = 0; c < exr_header.num_channels; c++) {
    std::string ch_name(exr_header.channels[c].name);
    if (layer_name.empty()) {
      const size_t pos = ch_name.find_last_of('.');
      if (pos != std::string::npos && pos < ch_name.size()) {
        ch_name = ch_name.substr(pos + 1);
      }
    } else {
      const size_t
pos = ch_name.find(layer_name + '.'); if (pos == std::string::npos) continue; if (pos == 0) { ch_name = ch_name.substr(layer_name.size() + 1); } } LayerChannel ch(size_t(c), ch_name); channels.push_back(ch); } } } // namespace tinyexr int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err) { EXRVersion exr_version; EXRHeader exr_header; InitEXRHeader(&exr_header); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage("Invalid EXR header.", err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } std::vector<std::string> layer_vec; tinyexr::GetLayers(exr_header, layer_vec); (*num_layers) = int(layer_vec.size()); (*layer_names) = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size()))); for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) { #ifdef _MSC_VER (*layer_names)[c] = _strdup(layer_vec[c].c_str()); #else (*layer_names)[c] = strdup(layer_vec[c].c_str()); #endif } FreeEXRHeader(&exr_header); return TINYEXR_SUCCESS; } int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err) { return LoadEXRWithLayer(out_rgba, width, height, filename, /* layername */ NULL, err); } int LoadEXRWithLayer(float **out_rgba, int *width, int *height, const char *filename, const char *layername, const char **err) { if (out_rgba == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); InitEXRImage(&exr_image); { int ret = 
ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { std::stringstream ss; ss << "Failed to open EXR file or read version info from EXR file. code(" << ret << ")"; tinyexr::SetErrorMessage(ss.str(), err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } { int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // Read HALF channel as FLOAT. for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } // TODO: Probably limit loading to layers (channels) selected by layer index { int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; std::vector<std::string> layer_names; tinyexr::GetLayers(exr_header, layer_names); std::vector<tinyexr::LayerChannel> channels; tinyexr::ChannelsInLayer( exr_header, layername == NULL ? "" : std::string(layername), channels); if (channels.size() < 1) { tinyexr::SetErrorMessage("Layer Not Found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_LAYER_NOT_FOUND; } size_t ch_count = channels.size() < 4 ? channels.size() : 4; for (size_t c = 0; c < ch_count; c++) { const tinyexr::LayerChannel &ch = channels[c]; if (ch.name == "R") { idxR = int(ch.index); } else if (ch.name == "G") { idxG = int(ch.index); } else if (ch.name == "B") { idxB = int(ch.index); } else if (ch.name == "A") { idxA = int(ch.index); } } if (channels.size() == 1) { int chIdx = int(channels.front().index); // Grayscale channel only. 
(*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * static_cast<int>(exr_header.tile_size_x) + i; const int jj = exr_image.tiles[it].offset_y * static_cast<int>(exr_header.tile_size_y) + j; const int idx = ii + jj * static_cast<int>(exr_image.width); // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[chIdx][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // Assume RGB(A) if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * 
static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int IsEXR(const char *filename) { EXRVersion exr_version; int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { return ret; } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_header == NULL) { tinyexr::SetErrorMessage( "Invalid argument. 
`memory` or `exr_header` argument is null in " "ParseEXRHeaderFromMemory()", err); // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Insufficient header/data size.\n", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; tinyexr::HeaderInfo info; info.clear(); std::string err_str; int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err && !err_str.empty()) { tinyexr::SetErrorMessage(err_str, err); } } ConvertHeader(exr_header, info); exr_header->multipart = version->multipart ? 1 : 0; exr_header->non_image = version->non_image ? 1 : 0; return ret; } int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err) { if (out_rgba == NULL || memory == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); int ret = ParseEXRVersionFromMemory(&exr_version, memory, size); if (ret != TINYEXR_SUCCESS) { std::stringstream ss; ss << "Failed to parse EXR version. code(" << ret << ")"; tinyexr::SetErrorMessage(ss.str(), err); return ret; } ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // Read HALF channel as FLOAT. 
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } InitEXRImage(&exr_image); ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } // TODO(syoyo): Refactor removing same code as used in LoadEXR(). if (exr_header.num_channels == 1) { // Grayscale channel only. (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[0][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // TODO(syoyo): Support non RGBA image. if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); // TODO(syoyo): return wfopen_s erro code return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t 
filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize < 16) { tinyexr::SetErrorMessage("File size too short " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize, err); } int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *memory, const size_t size, const char **err) { if (exr_image == NULL || memory == NULL || (size < tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } const unsigned char *head = memory; const unsigned char *marker = reinterpret_cast<const unsigned char *>( memory + exr_header->header_len + 8); // +8 for magic number + version header. 
return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size, err); } namespace tinyexr { // out_data must be allocated initially with the block-header size // of the current image(-part) type static bool EncodePixelData(/* out */ std::vector<unsigned char>& out_data, const unsigned char* const* images, int compression_type, int /*line_order*/, int width, // for tiled : tile.width int /*height*/, // for tiled : header.tile_size_y int x_stride, // for tiled : header.tile_size_x int line_no, // for tiled : 0 int num_lines, // for tiled : tile.height size_t pixel_data_size, const std::vector<ChannelInfo>& channels, const std::vector<size_t>& channel_offset_list, const void* compression_param = 0) // zfp compression param { size_t buf_size = static_cast<size_t>(width) * static_cast<size_t>(num_lines) * static_cast<size_t>(pixel_data_size); //int last2bit = (buf_size & 3); // buf_size must be multiple of four //if(last2bit) buf_size += 4 - last2bit; std::vector<unsigned char> buf(buf_size); size_t start_y = static_cast<size_t>(line_no); for (size_t c = 0; c < channels.size(); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < num_lines; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * width) + channel_offset_list[c] * static_cast<size_t>(width))); for (int x = 0; x < width; x++) { tinyexr::FP16 h16; h16.u = reinterpret_cast<const unsigned short * const *>( images)[c][(y + start_y) * x_stride + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(&f32.f); // line_ptr[x] = f32.f; tinyexr::cpy4(line_ptr + x, &(f32.f)); } } } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < num_lines; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * width) + 
channel_offset_list[c] * static_cast<size_t>(width))); for (int x = 0; x < width; x++) { unsigned short val = reinterpret_cast<const unsigned short * const *>( images)[c][(y + start_y) * x_stride + x]; tinyexr::swap2(&val); // line_ptr[x] = val; tinyexr::cpy2(line_ptr + x, &val); } } } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < num_lines; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * width) + channel_offset_list[c] * static_cast<size_t>(width))); for (int x = 0; x < width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<const float * const *>( images)[c][(y + start_y) * x_stride + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // line_ptr[x] = h16.u; tinyexr::cpy2(line_ptr + x, &(h16.u)); } } } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < num_lines; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * width) + channel_offset_list[c] * static_cast<size_t>(width))); for (int x = 0; x < width; x++) { float val = reinterpret_cast<const float * const *>( images)[c][(y + start_y) * x_stride + x]; tinyexr::swap4(&val); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < num_lines; y++) { // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * width) + channel_offset_list[c] * static_cast<size_t>(width))); for (int x = 0; x < width; x++) { unsigned int val = reinterpret_cast<const unsigned int * const *>( images)[c][(y + start_y) * x_stride + x]; tinyexr::swap4(&val); // 
line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } } if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed) out_data.insert(out_data.end(), buf.begin(), buf.end()); } else if ((compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block(mz_compressBound( static_cast<unsigned long>(buf.size()))); #else std::vector<unsigned char> block( compressBound(static_cast<uLong>(buf.size()))); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) unsigned int data_len = static_cast<unsigned int>(outSize); // truncate out_data.insert(out_data.end(), block.begin(), block.begin() + data_len); } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) unsigned int data_len = static_cast<unsigned int>(outSize); // truncate out_data.insert(out_data.end(), block.begin(), block.begin() + data_len); } else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 8192 + static_cast<unsigned int>( 2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. 
} std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), &outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, width, num_lines); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) unsigned int data_len = outSize; out_data.insert(out_data.end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP const ZFPCompressionParam* zfp_compression_param = reinterpret_cast<const ZFPCompressionParam*>(compression_param); std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), width, num_lines, static_cast<int>(channels.size()), *zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) unsigned int data_len = outSize; out_data.insert(out_data.end(), block.begin(), block.begin() + data_len); #else (void)compression_param; assert(0); #endif } else { assert(0); return false; } return true; } static int EncodeTiledLevel(const EXRImage* level_image, const EXRHeader* exr_header, const std::vector<tinyexr::ChannelInfo>& channels, std::vector<std::vector<unsigned char> >& data_list, size_t start_index, // for data_list int num_x_tiles, int num_y_tiles, const std::vector<size_t>& channel_offset_list, int pixel_data_size, const void* compression_param, // must be set if zfp compression is enabled std::string* err) { int num_tiles = num_x_tiles * num_y_tiles; assert(num_tiles == level_image->num_tiles); if ((exr_header->tile_size_x > level_image->width || exr_header->tile_size_y > level_image->height) && level_image->level_x == 0 && level_image->level_y == 0) { if (err) { (*err) += "Failed to encode tile data.\n"; } return TINYEXR_ERROR_INVALID_DATA; } #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::atomic<bool> invalid_data(false); #else 
bool invalid_data(false); #endif #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::vector<std::thread> workers; std::atomic<int> tile_count(0); int num_threads = std::max(1, int(std::thread::hardware_concurrency())); if (num_threads > int(num_tiles)) { num_threads = int(num_tiles); } for (int t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&]() { int i = 0; while ((i = tile_count++) < num_tiles) { #else // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_tiles; i++) { #endif size_t tile_idx = static_cast<size_t>(i); size_t data_idx = tile_idx + start_index; int x_tile = i % num_x_tiles; int y_tile = i / num_x_tiles; EXRTile& tile = level_image->tiles[tile_idx]; const unsigned char* const* images = static_cast<const unsigned char* const*>(tile.images); data_list[data_idx].resize(5*sizeof(int)); size_t data_header_size = data_list[data_idx].size(); bool ret = EncodePixelData(data_list[data_idx], images, exr_header->compression_type, 0, // increasing y tile.width, exr_header->tile_size_y, exr_header->tile_size_x, 0, tile.height, pixel_data_size, channels, channel_offset_list, compression_param); if (!ret) { invalid_data = true; continue; } assert(data_list[data_idx].size() > data_header_size); int data_len = static_cast<int>(data_list[data_idx].size() - data_header_size); //tileX, tileY, levelX, levelY // pixel_data_size(int) memcpy(&data_list[data_idx][0], &x_tile, sizeof(int)); memcpy(&data_list[data_idx][4], &y_tile, sizeof(int)); memcpy(&data_list[data_idx][8], &level_image->level_x, sizeof(int)); memcpy(&data_list[data_idx][12], &level_image->level_y, sizeof(int)); memcpy(&data_list[data_idx][16], &data_len, sizeof(int)); swap4(reinterpret_cast<int*>(&data_list[data_idx][0])); swap4(reinterpret_cast<int*>(&data_list[data_idx][4])); swap4(reinterpret_cast<int*>(&data_list[data_idx][8])); 
swap4(reinterpret_cast<int*>(&data_list[data_idx][12])); swap4(reinterpret_cast<int*>(&data_list[data_idx][16])); #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) } })); } for (auto &t : workers) { t.join(); } #else } // omp parallel #endif if (invalid_data) { if (err) { (*err) += "Failed to encode tile data.\n"; } return TINYEXR_ERROR_INVALID_DATA; } return TINYEXR_SUCCESS; } static int NumScanlines(int compression_type) { int num_scanlines = 1; if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanlines = 16; } else if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanlines = 32; } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanlines = 16; } return num_scanlines; } static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header, const std::vector<ChannelInfo>& channels, int num_blocks, tinyexr_uint64 chunk_offset, // starting offset of current chunk bool is_multipart, OffsetData& offset_data, // output block offsets, must be initialized std::vector<std::vector<unsigned char> >& data_list, // output tinyexr_uint64& total_size, // output: ending offset of current chunk std::string* err) { int num_scanlines = NumScanlines(exr_header->compression_type); data_list.resize(num_blocks); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; { size_t channel_offset = 0; for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += sizeof(unsigned short); channel_offset += sizeof(unsigned short); } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (channels[c].requested_pixel_type == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } } const void* 
compression_param = 0; #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { std::string e; bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes, &e); if (!ret) { // Use predefined compression parameter. zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } compression_param = &zfp_compression_param; } #endif tinyexr_uint64 offset = chunk_offset; tinyexr_uint64 doffset = is_multipart ? 4u : 0u; if (exr_image->tiles) { const EXRImage* level_image = exr_image; size_t block_idx = 0; tinyexr::tinyexr_uint64 block_data_size = 0; int num_levels = (exr_header->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ? offset_data.num_x_levels : (offset_data.num_x_levels * offset_data.num_y_levels); for (int level_index = 0; level_index < num_levels; ++level_index) { if (!level_image) { if (err) { (*err) += "Invalid number of tiled levels for EncodeChunk\n"; } return TINYEXR_ERROR_INVALID_DATA; } int level_index_from_image = LevelIndex(level_image->level_x, level_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels); if (level_index_from_image != level_index) { if (err) { (*err) += "Incorrect level ordering in tiled image\n"; } return TINYEXR_ERROR_INVALID_DATA; } int num_y_tiles = (int)offset_data.offsets[level_index].size(); assert(num_y_tiles); int num_x_tiles = (int)offset_data.offsets[level_index][0].size(); assert(num_x_tiles); std::string e; int ret = EncodeTiledLevel(level_image, exr_header, channels, data_list, block_idx, num_x_tiles, num_y_tiles, channel_offset_list, pixel_data_size, compression_param, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty() && err) { (*err) += e; } return ret; } for (size_t j = 0; j < static_cast<size_t>(num_y_tiles); ++j) for (size_t i = 0; i < static_cast<size_t>(num_x_tiles); ++i) { offset_data.offsets[level_index][j][i] = 
offset; swap8(reinterpret_cast<tinyexr_uint64*>(&offset_data.offsets[level_index][j][i])); offset += data_list[block_idx].size() + doffset; block_data_size += data_list[block_idx].size(); ++block_idx; } level_image = level_image->next_level; } assert(static_cast<int>(block_idx) == num_blocks); total_size = offset; } else { // scanlines std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data.offsets[0][0]; #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::atomic<bool> invalid_data(false); std::vector<std::thread> workers; std::atomic<int> block_count(0); int num_threads = std::min(std::max(1, int(std::thread::hardware_concurrency())), num_blocks); for (int t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&]() { int i = 0; while ((i = block_count++) < num_blocks) { #else bool invalid_data(false); #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_blocks; i++) { #endif int start_y = num_scanlines * i; int end_Y = (std::min)(num_scanlines * (i + 1), exr_image->height); int num_lines = end_Y - start_y; const unsigned char* const* images = static_cast<const unsigned char* const*>(exr_image->images); data_list[i].resize(2*sizeof(int)); size_t data_header_size = data_list[i].size(); bool ret = EncodePixelData(data_list[i], images, exr_header->compression_type, 0, // increasing y exr_image->width, exr_image->height, exr_image->width, start_y, num_lines, pixel_data_size, channels, channel_offset_list, compression_param); if (!ret) { invalid_data = true; continue; // "break" cannot be used with OpenMP } assert(data_list[i].size() > data_header_size); int data_len = static_cast<int>(data_list[i].size() - data_header_size); memcpy(&data_list[i][0], &start_y, sizeof(int)); memcpy(&data_list[i][4], &data_len, sizeof(int)); swap4(reinterpret_cast<int*>(&data_list[i][0])); swap4(reinterpret_cast<int*>(&data_list[i][4])); #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) } })); } for (auto &t : workers) { t.join(); } #else } // 
omp parallel #endif if (invalid_data) { if (err) { (*err) += "Failed to encode scanline data.\n"; } return TINYEXR_ERROR_INVALID_DATA; } for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size() + doffset; } total_size = static_cast<size_t>(offset); } return TINYEXR_SUCCESS; } // can save a single or multi-part image (no deep* formats) static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images, const EXRHeader** exr_headers, unsigned int num_parts, unsigned char** memory_out, const char** err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory_out == NULL) { SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory", err); return 0; } { for (unsigned int i = 0; i < num_parts; ++i) { if (exr_headers[i]->compression_type < 0) { SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory", err); return 0; } #if !TINYEXR_USE_PIZ if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { SetErrorMessage("PIZ compression is not supported in this build", err); return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { SetErrorMessage("ZFP compression is not supported in this build", err); return 0; } #else for (int c = 0; c < exr_header->num_channels; ++c) { if (exr_headers[i]->requested_pixel_types[c] != TINYEXR_PIXELTYPE_FLOAT) { SetErrorMessage("Pixel type must be FLOAT for ZFP compression", err); return 0; } } #endif } } std::vector<unsigned char> memory; // Header { const char header[] = { 0x76, 0x2f, 0x31, 0x01 }; memory.insert(memory.end(), header, header + 4); } // Version // using value from the first header int long_name = exr_headers[0]->long_name; { char marker[] = { 2, 0, 0, 0 }; /* @todo if (exr_header->non_image) { marker[1] |= 0x8; } */ // tiled if (num_parts == 1 && exr_images[0].tiles) { marker[1] |= 0x2; } // long_name 
if (long_name) { marker[1] |= 0x4; } // multipart if (num_parts > 1) { marker[1] |= 0x10; } memory.insert(memory.end(), marker, marker + 4); } int total_chunk_count = 0; std::vector<int> chunk_count(num_parts); std::vector<OffsetData> offset_data(num_parts); for (unsigned int i = 0; i < num_parts; ++i) { if (!exr_images[i].tiles) { int num_scanlines = NumScanlines(exr_headers[i]->compression_type); chunk_count[i] = (exr_images[i].height + num_scanlines - 1) / num_scanlines; InitSingleResolutionOffsets(offset_data[i], chunk_count[i]); total_chunk_count += chunk_count[i]; } else { { std::vector<int> num_x_tiles, num_y_tiles; PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]); chunk_count[i] = InitTileOffsets(offset_data[i], exr_headers[i], num_x_tiles, num_y_tiles); total_chunk_count += chunk_count[i]; } } } // Write attributes to memory buffer. std::vector< std::vector<tinyexr::ChannelInfo> > channels(num_parts); { std::set<std::string> partnames; for (unsigned int i = 0; i < num_parts; ++i) { //channels { std::vector<unsigned char> data; for (int c = 0; c < exr_headers[i]->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_headers[i]->pixel_types[c]; info.requested_pixel_type = exr_headers[i]->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_headers[i]->channels[c].name); channels[i].push_back(info); } tinyexr::WriteChannelInfo(data, channels[i]); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_headers[i]->compression_type; swap4(&comp); WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char*>(&comp), 1); } { int data[4] = { 0, 0, exr_images[i].width - 1, exr_images[i].height - 1 }; swap4(&data[0]); swap4(&data[1]); swap4(&data[2]); swap4(&data[3]); WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned 
char*>(data), sizeof(int) * 4); int data0[4] = { 0, 0, exr_images[0].width - 1, exr_images[0].height - 1 }; swap4(&data0[0]); swap4(&data0[1]); swap4(&data0[2]); swap4(&data0[3]); // Note: must be the same across parts (currently, using value from the first header) WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char*>(data0), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme { read line_order from EXRHeader } WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { // Note: must be the same across parts float aspectRatio = 1.0f; swap4(&aspectRatio); WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char*>(&aspectRatio), sizeof(float)); } { float center[2] = { 0.0f, 0.0f }; swap4(&center[0]); swap4(&center[1]); WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char*>(center), 2 * sizeof(float)); } { float w = 1.0f; swap4(&w); WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char*>(&w), sizeof(float)); } if (exr_images[i].tiles) { unsigned char tile_mode = static_cast<unsigned char>(exr_headers[i]->tile_level_mode & 0x3); if (exr_headers[i]->tile_rounding_mode) tile_mode |= (1u << 4u); //unsigned char data[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; unsigned int datai[3] = { 0, 0, 0 }; unsigned char* data = reinterpret_cast<unsigned char*>(&datai[0]); datai[0] = static_cast<unsigned int>(exr_headers[i]->tile_size_x); datai[1] = static_cast<unsigned int>(exr_headers[i]->tile_size_y); data[8] = tile_mode; swap4(reinterpret_cast<unsigned int*>(&data[0])); swap4(reinterpret_cast<unsigned int*>(&data[4])); WriteAttributeToMemory( &memory, "tiles", "tiledesc", reinterpret_cast<const unsigned char*>(data), 9); } // must be present for multi-part files - according to spec. 
if (num_parts > 1) { // name { size_t len = 0; if ((len = strlen(exr_headers[i]->name)) > 0) { partnames.insert(std::string(exr_headers[i]->name)); if (partnames.size() != i + 1) { SetErrorMessage("'name' attributes must be unique for a multi-part file", err); return 0; } WriteAttributeToMemory( &memory, "name", "string", reinterpret_cast<const unsigned char*>(exr_headers[i]->name), static_cast<int>(len)); } else { SetErrorMessage("Invalid 'name' attribute for a multi-part file", err); return 0; } } // type { const char* type = "scanlineimage"; if (exr_images[i].tiles) type = "tiledimage"; WriteAttributeToMemory( &memory, "type", "string", reinterpret_cast<const unsigned char*>(type), static_cast<int>(strlen(type))); } // chunkCount { WriteAttributeToMemory( &memory, "chunkCount", "int", reinterpret_cast<const unsigned char*>(&chunk_count[i]), 4); } } // Custom attributes if (exr_headers[i]->num_custom_attributes > 0) { for (int j = 0; j < exr_headers[i]->num_custom_attributes; j++) { tinyexr::WriteAttributeToMemory( &memory, exr_headers[i]->custom_attributes[j].name, exr_headers[i]->custom_attributes[j].type, reinterpret_cast<const unsigned char*>( exr_headers[i]->custom_attributes[j].value), exr_headers[i]->custom_attributes[j].size); } } { // end of header memory.push_back(0); } } } if (num_parts > 1) { // end of header list memory.push_back(0); } tinyexr_uint64 chunk_offset = memory.size() + size_t(total_chunk_count) * sizeof(tinyexr_uint64); tinyexr_uint64 total_size = 0; std::vector< std::vector< std::vector<unsigned char> > > data_lists(num_parts); for (unsigned int i = 0; i < num_parts; ++i) { std::string e; int ret = EncodeChunk(&exr_images[i], exr_headers[i], channels[i], chunk_count[i], // starting offset of current chunk after part-number chunk_offset, num_parts > 1, offset_data[i], // output: block offsets, must be initialized data_lists[i], // output total_size, // output &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, 
err); } return 0; } chunk_offset = total_size; } // Allocating required memory if (total_size == 0) { // something went wrong tinyexr::SetErrorMessage("Output memory size is zero", err); return 0; } (*memory_out) = static_cast<unsigned char*>(malloc(total_size)); // Writing header memcpy((*memory_out), &memory[0], memory.size()); unsigned char* memory_ptr = *memory_out + memory.size(); size_t sum = memory.size(); // Writing offset data for chunks for (unsigned int i = 0; i < num_parts; ++i) { if (exr_images[i].tiles) { const EXRImage* level_image = &exr_images[i]; int num_levels = (exr_headers[i]->tile_level_mode != TINYEXR_TILE_RIPMAP_LEVELS) ? offset_data[i].num_x_levels : (offset_data[i].num_x_levels * offset_data[i].num_y_levels); for (int level_index = 0; level_index < num_levels; ++level_index) { for (size_t j = 0; j < offset_data[i].offsets[level_index].size(); ++j) { size_t num_bytes = sizeof(tinyexr_uint64) * offset_data[i].offsets[level_index][j].size(); sum += num_bytes; assert(sum <= total_size); memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offset_data[i].offsets[level_index][j][0]), num_bytes); memory_ptr += num_bytes; } level_image = level_image->next_level; } } else { size_t num_bytes = sizeof(tinyexr::tinyexr_uint64) * static_cast<size_t>(chunk_count[i]); sum += num_bytes; assert(sum <= total_size); std::vector<tinyexr::tinyexr_uint64>& offsets = offset_data[i].offsets[0][0]; memcpy(memory_ptr, reinterpret_cast<unsigned char*>(&offsets[0]), num_bytes); memory_ptr += num_bytes; } } // Writing chunk data for (unsigned int i = 0; i < num_parts; ++i) { for (size_t j = 0; j < static_cast<size_t>(chunk_count[i]); ++j) { if (num_parts > 1) { sum += 4; assert(sum <= total_size); unsigned int part_number = i; swap4(&part_number); memcpy(memory_ptr, &part_number, 4); memory_ptr += 4; } sum += data_lists[i][j].size(); assert(sum <= total_size); memcpy(memory_ptr, &data_lists[i][j][0], data_lists[i][j].size()); memory_ptr += data_lists[i][j].size(); } } 
assert(sum == total_size); return total_size; // OK } } // tinyexr size_t SaveEXRImageToMemory(const EXRImage* exr_image, const EXRHeader* exr_header, unsigned char** memory_out, const char** err) { return tinyexr::SaveEXRNPartImageToMemory(exr_image, &exr_header, 1, memory_out, err); } int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL || filename == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return TINYEXR_ERROR_UNSUPPORTED_FEATURE; } #endif FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } #else // Unknown compiler fp = fopen(filename, "wb"); #endif #else fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if (mem_size == 0) { return TINYEXR_ERROR_SERIALZATION_FAILED; } size_t written_size = 0; if ((mem_size > 0) && mem) { written_size = fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); if (written_size != mem_size) { tinyexr::SetErrorMessage("Cannot write a file", err); return 
TINYEXR_ERROR_CANT_WRITE_FILE; } return TINYEXR_SUCCESS; } size_t SaveEXRMultipartImageToMemory(const EXRImage* exr_images, const EXRHeader** exr_headers, unsigned int num_parts, unsigned char** memory_out, const char** err) { if (exr_images == NULL || exr_headers == NULL || num_parts < 2 || memory_out == NULL) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRNPartImageToMemory", err); return 0; } return tinyexr::SaveEXRNPartImageToMemory(exr_images, exr_headers, num_parts, memory_out, err); } int SaveEXRMultipartImageToFile(const EXRImage* exr_images, const EXRHeader** exr_headers, unsigned int num_parts, const char* filename, const char** err) { if (exr_images == NULL || exr_headers == NULL || num_parts < 2) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRMultipartImageToFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"wb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } #else // Unknown compiler fp = fopen(filename, "wb"); #endif #else fp = fopen(filename, "wb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot write a file: " + std::string(filename), err); return TINYEXR_ERROR_CANT_WRITE_FILE; } unsigned char *mem = NULL; size_t mem_size = SaveEXRMultipartImageToMemory(exr_images, exr_headers, num_parts, &mem, err); if (mem_size == 0) { return TINYEXR_ERROR_SERIALZATION_FAILED; } size_t written_size = 0; if ((mem_size > 0) && mem) { written_size = fwrite(mem, 1, mem_size, fp); } free(mem); fclose(fp); if (written_size != mem_size) { tinyexr::SetErrorMessage("Cannot write a file", err); return TINYEXR_ERROR_CANT_WRITE_FILE; } return TINYEXR_SUCCESS; } int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { if (deep_image == NULL) { 
tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else FILE *fp = fopen(filename, "rb"); if (!fp) { tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #endif size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize == 0) { fclose(fp); tinyexr::SetErrorMessage("File size is zero : " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); (void)ret; } fclose(fp); const char *head = &buf[0]; const char *marker = &buf[0]; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { tinyexr::SetErrorMessage("Invalid magic number", err); return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } // Version, scanline. { // ver 2.0, scanline, deep bit on(0x800) // must be [2, 0, 0, 0] if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) { tinyexr::SetErrorMessage("Unsupported version or scanline", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } marker += 4; } int dx = -1; int dy = -1; int dw = -1; int dh = -1; int num_scanline_blocks = 1; // 16 for ZIP compression. 
int compression_type = -1; int num_channels = -1; std::vector<tinyexr::ChannelInfo> channels; // Read attributes size_t size = filesize - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { marker++; size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { std::stringstream ss; ss << "Failed to parse attribute\n"; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (attr_name.compare("compression") == 0) { compression_type = data[0]; if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) { std::stringstream ss; ss << "Unsupported compression type : " << compression_type; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!tinyexr::ReadChannelInfo(channels, data)) { tinyexr::SetErrorMessage("Failed to parse channel info", err); return TINYEXR_ERROR_INVALID_DATA; } num_channels = static_cast<int>(channels.size()); if (num_channels < 1) { tinyexr::SetErrorMessage("Invalid channels format", err); return TINYEXR_ERROR_INVALID_DATA; } } else if (attr_name.compare("dataWindow") == 0) { memcpy(&dx, &data.at(0), sizeof(int)); memcpy(&dy, &data.at(4), sizeof(int)); memcpy(&dw, &data.at(8), sizeof(int)); memcpy(&dh, &data.at(12), sizeof(int)); tinyexr::swap4(&dx); tinyexr::swap4(&dy); tinyexr::swap4(&dw); tinyexr::swap4(&dh); } else if (attr_name.compare("displayWindow") == 
0) { int x; int y; int w; int h; memcpy(&x, &data.at(0), sizeof(int)); memcpy(&y, &data.at(4), sizeof(int)); memcpy(&w, &data.at(8), sizeof(int)); memcpy(&h, &data.at(12), sizeof(int)); tinyexr::swap4(&x); tinyexr::swap4(&y); tinyexr::swap4(&w); tinyexr::swap4(&h); } } assert(dx >= 0); assert(dy >= 0); assert(dw >= 0); assert(dh >= 0); assert(num_channels >= 1); int data_width = dw - dx + 1; int data_height = dh - dy + 1; std::vector<float> image( static_cast<size_t>(data_width * data_height * 4)); // 4 = RGBA // Read offset tables. int num_blocks = data_height / num_scanline_blocks; if (num_blocks * num_scanline_blocks < data_height) { num_blocks++; } std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks)); for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { tinyexr::tinyexr_int64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset)); marker += sizeof(tinyexr::tinyexr_int64); // = 8 offsets[y] = offset; } #if TINYEXR_USE_PIZ if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) || (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) { #else if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) || (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #endif // OK } else { tinyexr::SetErrorMessage("Unsupported compression format", err); return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } deep_image->image = static_cast<float ***>( malloc(sizeof(float **) * static_cast<size_t>(num_channels))); for (int c = 0; c < num_channels; c++) { deep_image->image[c] = static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { } } 
deep_image->offset_table = static_cast<int **>( malloc(sizeof(int *) * static_cast<size_t>(data_height))); for (int y = 0; y < data_height; y++) { deep_image->offset_table[y] = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(data_width))); } for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) { const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y]); // int: y coordinate // int64: packed size of pixel offset table // int64: packed size of sample data // int64: unpacked size of sample data // compressed pixel offset table // compressed sample data int line_no; tinyexr::tinyexr_int64 packedOffsetTableSize; tinyexr::tinyexr_int64 packedSampleDataSize; tinyexr::tinyexr_int64 unpackedSampleDataSize; memcpy(&line_no, data_ptr, sizeof(int)); memcpy(&packedOffsetTableSize, data_ptr + 4, sizeof(tinyexr::tinyexr_int64)); memcpy(&packedSampleDataSize, data_ptr + 12, sizeof(tinyexr::tinyexr_int64)); memcpy(&unpackedSampleDataSize, data_ptr + 20, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap4(&line_no); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize)); tinyexr::swap8( reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize)); std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width)); // decode pixel offset table. { unsigned long dstLen = static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int)); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)), &dstLen, data_ptr + 28, static_cast<unsigned long>(packedOffsetTableSize))) { return false; } assert(dstLen == pixelOffsetTable.size() * sizeof(int)); for (size_t i = 0; i < static_cast<size_t>(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } } std::vector<unsigned char> sample_data( static_cast<size_t>(unpackedSampleDataSize)); // decode sample data. 
{ unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize); if (dstLen) { if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen, data_ptr + 28 + packedOffsetTableSize, static_cast<unsigned long>(packedSampleDataSize))) { return false; } assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize)); } } // decode sample int sampleSize = -1; std::vector<int> channel_offset_list(static_cast<size_t>(num_channels)); { int channel_offset = 0; for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) { channel_offset_list[i] = channel_offset; if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) { // UINT channel_offset += 4; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) { // half channel_offset += 2; } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { assert(0); } } sampleSize = channel_offset; } assert(sampleSize >= 2); assert(static_cast<size_t>( pixelOffsetTable[static_cast<size_t>(data_width - 1)] * sampleSize) == sample_data.size()); int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize; // // Alloc memory // // // pixel data is stored as image[channels][pixel_samples] // { tinyexr::tinyexr_uint64 data_offset = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { deep_image->image[c][y] = static_cast<float *>( malloc(sizeof(float) * static_cast<size_t>(samples_per_line))); if (channels[c].pixel_type == 0) { // UINT for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { unsigned int ui; unsigned int *src_ptr = reinterpret_cast<unsigned int *>( &sample_data.at(size_t(data_offset) + x * sizeof(int))); tinyexr::cpy4(&ui, src_ptr); deep_image->image[c][y][x] = static_cast<float>(ui); // @fixme } data_offset += sizeof(unsigned int) * static_cast<size_t>(samples_per_line); } else if (channels[c].pixel_type == 1) { // half for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) { 
            // Widen each 16-bit half sample to 32-bit float.
            tinyexr::FP16 f16;
            const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(short)));
            tinyexr::cpy2(&(f16.u), src_ptr);
            tinyexr::FP32 f32 = half_to_float(f16);
            deep_image->image[c][y][x] = f32.f;
          }
          data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
        } else {  // float
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            float f;
            const float *src_ptr = reinterpret_cast<float *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(float)));
            // cpy4 avoids unaligned-load UB from dereferencing src_ptr.
            tinyexr::cpy4(&f, src_ptr);
            deep_image->image[c][y][x] = f;
          }
          data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
        }
      }
    }
  }  // y

  deep_image->width = data_width;
  deep_image->height = data_height;

  // Duplicate channel names; caller owns the strdup'ed strings.
  deep_image->channel_names = static_cast<const char **>(
      malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
    deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
    deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
  }
  deep_image->num_channels = num_channels;

  return TINYEXR_SUCCESS;
}

// Resets all fields of an EXRImage to an empty state. Call before first use.
void InitEXRImage(EXRImage *exr_image) {
  if (exr_image == NULL) {
    return;
  }

  exr_image->width = 0;
  exr_image->height = 0;
  exr_image->num_channels = 0;

  exr_image->images = NULL;
  exr_image->tiles = NULL;
  exr_image->next_level = NULL;
  exr_image->level_x = 0;
  exr_image->level_y = 0;

  exr_image->num_tiles = 0;
}

// Frees an error string previously returned via a `const char **err`
// out-parameter. Safe to call with NULL.
void FreeEXRErrorMessage(const char *msg) {
  if (msg) {
    free(reinterpret_cast<void *>(const_cast<char *>(msg)));
  }
  return;
}

// Zero-initializes an EXRHeader. Call before first use.
void InitEXRHeader(EXRHeader *exr_header) {
  if (exr_header == NULL) {
    return;
  }

  memset(exr_header, 0, sizeof(EXRHeader));
}

// Frees all heap allocations owned by an EXRHeader (channels, pixel type
// arrays, custom attributes). Does not free the EXRHeader itself.
int FreeEXRHeader(EXRHeader *exr_header) {
  if (exr_header == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (exr_header->channels) {
    free(exr_header->channels);
  }

  if (exr_header->pixel_types) {
    free(exr_header->pixel_types);
  }

  if (exr_header->requested_pixel_types) {
    free(exr_header->requested_pixel_types);
  }

  for (int i = 0; i < exr_header->num_custom_attributes; i++) {
    if (exr_header->custom_attributes[i].value) {
      free(exr_header->custom_attributes[i].value);
    }
  }

  if (exr_header->custom_attributes) {
    free(exr_header->custom_attributes);
  }

  // Clears the embedded `name` buffer (no heap allocation involved).
  EXRSetNameAttr(exr_header, NULL);

  return TINYEXR_SUCCESS;
}

// Copies `name` into the header's fixed-size name field, truncating to 255
// characters; NULL clears the field. The buffer stays NUL-terminated because
// it is zeroed first. (Assumes EXRHeader::name is 256 bytes — declared
// outside this view; the 256/255 constants here rely on that.)
void EXRSetNameAttr(EXRHeader* exr_header, const char* name) {
  if (exr_header == NULL) {
    return;
  }
  memset(exr_header->name, 0, 256);
  if (name != NULL) {
    size_t len = std::min(strlen(name), (size_t)255);
    if (len) {
      memcpy(exr_header->name, name, len);
    }
  }
}

// Returns the number of mip/rip levels by walking the next_level chain.
// A scanline image (images != NULL) is always a single level.
int EXRNumLevels(const EXRImage* exr_image) {
  if (exr_image == NULL) return 0;
  if(exr_image->images) return 1; // scanlines
  int levels = 1;
  const EXRImage* level_image = exr_image;
  while((level_image = level_image->next_level)) ++levels;
  return levels;
}

// Frees all pixel storage owned by an EXRImage (per-channel scanline buffers
// or per-tile buffers), recursing through chained mip/rip levels.
// Does not free the top-level EXRImage struct itself; chained levels were
// allocated with `new` and are deleted here.
int FreeEXRImage(EXRImage *exr_image) {
  if (exr_image == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (exr_image->next_level) {
    FreeEXRImage(exr_image->next_level);
    delete exr_image->next_level;
  }

  for (int i = 0; i < exr_image->num_channels; i++) {
    if (exr_image->images && exr_image->images[i]) {
      free(exr_image->images[i]);
    }
  }

  if (exr_image->images) {
    free(exr_image->images);
  }

  if (exr_image->tiles) {
    for (int tid = 0; tid < exr_image->num_tiles; tid++) {
      for (int i = 0; i < exr_image->num_channels; i++) {
        if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) {
          free(exr_image->tiles[tid].images[i]);
        }
      }
      if (exr_image->tiles[tid].images) {
        free(exr_image->tiles[tid].images);
      }
    }
    free(exr_image->tiles);
  }

  return TINYEXR_SUCCESS;
}

// Reads `filename` fully into memory and parses its (single-part) header.
int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version,
                           const char *filename, const char **err) {
  if (exr_header == NULL || exr_version == NULL || filename == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile",
                             err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  FILE *fp = NULL;
#ifdef _WIN32
#if defined(_MSC_VER) || defined(__MINGW32__)
// MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("fread() error on " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, err); } int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_headers == NULL || num_headers == NULL || exr_version == NULL) { // Invalid argument tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Data size too short", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; std::vector<tinyexr::HeaderInfo> infos; for (;;) { tinyexr::HeaderInfo info; info.clear(); std::string err_str; bool empty_header = false; int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { tinyexr::SetErrorMessage(err_str, err); return ret; } if (empty_header) { marker += 1; // skip '\0' 
break; } // `chunkCount` must exist in the header. if (info.chunk_count == 0) { tinyexr::SetErrorMessage( "`chunkCount' attribute is not found in the header.", err); return TINYEXR_ERROR_INVALID_DATA; } infos.push_back(info); // move to next header. marker += info.header_len; size -= info.header_len; } // allocate memory for EXRHeader and create array of EXRHeader pointers. (*exr_headers) = static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size())); for (size_t i = 0; i < infos.size(); i++) { EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader))); memset(exr_header, 0, sizeof(EXRHeader)); ConvertHeader(exr_header, infos[i]); exr_header->multipart = exr_version->multipart ? 1 : 0; (*exr_headers)[i] = exr_header; } (*num_headers) = static_cast<int>(infos.size()); return TINYEXR_SUCCESS; } int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_headers == NULL || num_headers == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage( "Invalid argument for ParseEXRMultipartHeaderFromFile()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); 
assert(ret == filesize); fclose(fp); if (ret != filesize) { tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err); return TINYEXR_ERROR_INVALID_FILE; } } return ParseEXRMultipartHeaderFromMemory( exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size) { if (version == NULL || memory == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory; // Header check. { const char header[] = {0x76, 0x2f, 0x31, 0x01}; if (memcmp(marker, header, 4) != 0) { return TINYEXR_ERROR_INVALID_MAGIC_NUMBER; } marker += 4; } version->tiled = false; version->long_name = false; version->non_image = false; version->multipart = false; // Parse version header. { // must be 2 if (marker[0] != 2) { return TINYEXR_ERROR_INVALID_EXR_VERSION; } if (version == NULL) { return TINYEXR_SUCCESS; // May OK } version->version = 2; if (marker[1] & 0x2) { // 9th bit version->tiled = true; } if (marker[1] & 0x4) { // 10th bit version->long_name = true; } if (marker[1] & 0x8) { // 11th bit version->non_image = true; // (deep image) } if (marker[1] & 0x10) { // 12th bit version->multipart = true; } } return TINYEXR_SUCCESS; } int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) { if (filename == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t err = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (err != 0) { // TODO(syoyo): return wfopen_s erro code return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t file_size; // Compute size fseek(fp, 0, SEEK_END); file_size = static_cast<size_t>(ftell(fp)); 
fseek(fp, 0, SEEK_SET); if (file_size < tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); if (ret != tinyexr::kEXRVersionSize) { return TINYEXR_ERROR_INVALID_FILE; } return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize); } int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0 || memory == NULL || (size <= tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromMemory()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } // compute total header size. size_t total_header_size = 0; for (unsigned int i = 0; i < num_parts; i++) { if (exr_headers[i]->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } total_header_size += exr_headers[i]->header_len; } const char *marker = reinterpret_cast<const char *>( memory + total_header_size + 4 + 4); // +8 for magic number and version header. marker += 1; // Skip empty header. // NOTE 1: // In multipart image, There is 'part number' before chunk data. // 4 byte : part number // 4+ : chunk // // NOTE 2: // EXR spec says 'part number' is 'unsigned long' but actually this is // 'unsigned int(4 bytes)' in OpenEXR implementation... // http://www.openexr.com/openexrfilelayout.pdf // Load chunk offset table. 
std::vector<tinyexr::OffsetData> chunk_offset_table_list; chunk_offset_table_list.reserve(num_parts); for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { chunk_offset_table_list.resize(chunk_offset_table_list.size() + 1); tinyexr::OffsetData& offset_data = chunk_offset_table_list.back(); if (!exr_headers[i]->tiled || exr_headers[i]->tile_level_mode == TINYEXR_TILE_ONE_LEVEL) { tinyexr::InitSingleResolutionOffsets(offset_data, exr_headers[i]->chunk_count); std::vector<tinyexr::tinyexr_uint64>& offset_table = offset_data.offsets[0][0]; for (size_t c = 0; c < offset_table.size(); c++) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, 8); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_table[c] = offset + 4; // +4 to skip 'part number' marker += 8; } } else { { std::vector<int> num_x_tiles, num_y_tiles; tinyexr::PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]); int num_blocks = InitTileOffsets(offset_data, exr_headers[i], num_x_tiles, num_y_tiles); if (num_blocks != exr_headers[i]->chunk_count) { tinyexr::SetErrorMessage("Invalid offset table size.", err); return TINYEXR_ERROR_INVALID_DATA; } } for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) { for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) { for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) { tinyexr::tinyexr_uint64 offset; memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } offset_data.offsets[l][dy][dx] = offset + 4; // +4 to skip 'part number' marker += sizeof(tinyexr::tinyexr_uint64); // = 8 } } } } } // Decode image. 
for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) { tinyexr::OffsetData &offset_data = chunk_offset_table_list[i]; // First check 'part number' is identitical to 'i' for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) for (unsigned int dy = 0; dy < offset_data.offsets[l].size(); ++dy) for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) { const unsigned char *part_number_addr = memory + offset_data.offsets[l][dy][dx] - 4; // -4 to move to 'part number' field. unsigned int part_no; memcpy(&part_no, part_number_addr, sizeof(unsigned int)); // 4 tinyexr::swap4(&part_no); if (part_no != i) { tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.", err); return TINYEXR_ERROR_INVALID_DATA; } } std::string e; int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_data, memory, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } return ret; } } return TINYEXR_SUCCESS; } int LoadEXRMultipartImageFromFile(EXRImage *exr_images, const EXRHeader **exr_headers, unsigned int num_parts, const char *filename, const char **err) { if (exr_images == NULL || exr_headers == NULL || num_parts == 0) { tinyexr::SetErrorMessage( "Invalid argument for LoadEXRMultipartImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } FILE *fp = NULL; #ifdef _WIN32 #if defined(_MSC_VER) || defined(__MINGW32__) // MSVC, MinGW gcc or clang errno_t errcode = _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); if (errcode != 0) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } #else // Unknown compiler fp = fopen(filename, "rb"); #endif #else fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, 
SEEK_SET); std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, &buf.at(0), filesize, err); } int SaveEXR(const float *data, int width, int height, int components, const int save_as_fp16, const char *outfilename, const char **err) { if ((components == 1) || components == 3 || components == 4) { // OK } else { std::stringstream ss; ss << "Unsupported component value : " << components << std::endl; tinyexr::SetErrorMessage(ss.str(), err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRHeader header; InitEXRHeader(&header); if ((width < 16) && (height < 16)) { // No compression for small image. header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE; } else { header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP; } EXRImage image; InitEXRImage(&image); image.num_channels = components; std::vector<float> images[4]; if (components == 1) { images[0].resize(static_cast<size_t>(width * height)); memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); } else { images[0].resize(static_cast<size_t>(width * height)); images[1].resize(static_cast<size_t>(width * height)); images[2].resize(static_cast<size_t>(width * height)); images[3].resize(static_cast<size_t>(width * height)); // Split RGB(A)RGB(A)RGB(A)... 
into R, G and B(and A) layers for (size_t i = 0; i < static_cast<size_t>(width * height); i++) { images[0][i] = data[static_cast<size_t>(components) * i + 0]; images[1][i] = data[static_cast<size_t>(components) * i + 1]; images[2][i] = data[static_cast<size_t>(components) * i + 2]; if (components == 4) { images[3][i] = data[static_cast<size_t>(components) * i + 3]; } } } float *image_ptr[4] = {0, 0, 0, 0}; if (components == 4) { image_ptr[0] = &(images[3].at(0)); // A image_ptr[1] = &(images[2].at(0)); // B image_ptr[2] = &(images[1].at(0)); // G image_ptr[3] = &(images[0].at(0)); // R } else if (components == 3) { image_ptr[0] = &(images[2].at(0)); // B image_ptr[1] = &(images[1].at(0)); // G image_ptr[2] = &(images[0].at(0)); // R } else if (components == 1) { image_ptr[0] = &(images[0].at(0)); // A } image.images = reinterpret_cast<unsigned char **>(image_ptr); image.width = width; image.height = height; header.num_channels = components; header.channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels))); // Must be (A)BGR order, since most of EXR viewers expect this channel order. 
if (components == 4) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); strncpy_s(header.channels[1].name, "B", 255); strncpy_s(header.channels[2].name, "G", 255); strncpy_s(header.channels[3].name, "R", 255); #else strncpy(header.channels[0].name, "A", 255); strncpy(header.channels[1].name, "B", 255); strncpy(header.channels[2].name, "G", 255); strncpy(header.channels[3].name, "R", 255); #endif header.channels[0].name[strlen("A")] = '\0'; header.channels[1].name[strlen("B")] = '\0'; header.channels[2].name[strlen("G")] = '\0'; header.channels[3].name[strlen("R")] = '\0'; } else if (components == 3) { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "B", 255); strncpy_s(header.channels[1].name, "G", 255); strncpy_s(header.channels[2].name, "R", 255); #else strncpy(header.channels[0].name, "B", 255); strncpy(header.channels[1].name, "G", 255); strncpy(header.channels[2].name, "R", 255); #endif header.channels[0].name[strlen("B")] = '\0'; header.channels[1].name[strlen("G")] = '\0'; header.channels[2].name[strlen("R")] = '\0'; } else { #ifdef _MSC_VER strncpy_s(header.channels[0].name, "A", 255); #else strncpy(header.channels[0].name, "A", 255); #endif header.channels[0].name[strlen("A")] = '\0'; } header.pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); header.requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(header.num_channels))); for (int i = 0; i < header.num_channels; i++) { header.pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // pixel type of input image if (save_as_fp16 > 0) { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_HALF; // save with half(fp16) pixel format } else { header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; // save with float(fp32) pixel format(i.e. 
// no precision reduction) } } int ret = SaveEXRImageToFile(&image, &header, outfilename, err); if (ret != TINYEXR_SUCCESS) { return ret; } free(header.channels); free(header.pixel_types); free(header.requested_pixel_types); return ret; } #ifdef __clang__ // zero-as-null-ppinter-constant #pragma clang diagnostic pop #endif #endif // TINYEXR_IMPLEMENTATION_DEFINED #endif // TINYEXR_IMPLEMENTATION
untied_task.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// XFAIL: gcc-4
// gcc-4 manages frame pointers for parallel regions differently than other APIs. the parallel region's enter_frame.ptr
// matches the implicit task's exit_frame.ptr. for that reason, this test will fail.
#define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN
#include "callback.h"
#include <omp.h>

// OMPT regression test for untied explicit tasks: verifies the task-create /
// task-schedule callbacks and the task frame (exit/reenter) addresses reported
// at each task level. The lines beginning with "RUN:"/"CHECK" are live
// FileCheck directives, not ordinary comments — do not edit them casually.
int main() {
  int condition = 0;
  omp_set_nested(0);
  print_frame(0);
#pragma omp parallel num_threads(2)
  {
    print_frame_from_outlined_fn(1);
    print_ids(0);
    print_ids(1);
    print_frame(0);
#pragma omp master
    {
      print_ids(0);
      // Untied task created on the master thread; the worker thread picks it
      // up (the OMPT_SIGNAL/OMPT_WAIT pair below enforces that overlap).
#pragma omp task untied shared(condition)
      {
        OMPT_SIGNAL(condition);
        print_frame(1);
        print_ids(0);
        print_ids(1);
        print_ids(2);
        // if(0): undeferred nested task, executed immediately by the
        // encountering thread.
#pragma omp task if (0)
        {
          print_ids(0);
          print_ids(1);
          print_ids(2);
        }
        print_ids(0);
        print_ids(1);
        print_ids(2);
      }
      OMPT_WAIT(condition, 1);
      print_ids(0);
    }
#pragma omp barrier
    print_ids(0);
  }

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_schedule'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'

  // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
  // make sure initial data pointers are null
  // CHECK-NOT: 0: new_task_data initially not null

  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: __builtin_frame_address(0)=[[MAIN_REENTER:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=0x{{[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2, codeptr_ra=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]

  // nested parallel masters
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=0x{{[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
  // <- ompt_event_task_create would be expected here
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit=[[EXIT]], parent_task_frame.reenter=0x{{[0-f]+}}, new_task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[TASK_FUNCTION:0x[0-f]+]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]

  // explicit barrier after master
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=0x{{[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]

  // implicit barrier parallel
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]

  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=0x{{[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=0x{{[0-f]+}}
  // this is expected to come earlier and at MASTER:
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_schedule: first_task_id=[[IMPLICIT_TASK_ID]], second_task_id=[[TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(1)=[[TASK_EXIT:0x[0-f]+]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]], exit_frame=[[TASK_EXIT]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=0x{{[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=0x{{[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_schedule: first_task_id=[[TASK_ID]], second_task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_end: task_id=[[TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]

  return 0;
}
_hypre_utilities.h
/*** DO NOT EDIT THIS FILE DIRECTLY (use 'headers' to generate) ***/

#ifndef hypre_UTILITIES_HEADER
#define hypre_UTILITIES_HEADER

#include "HYPRE_utilities.h"

#ifdef HYPRE_USING_OPENMP
#include <omp.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * General structures and values
 *
 *****************************************************************************/

#ifndef hypre_GENERAL_HEADER
#define hypre_GENERAL_HEADER

/* This allows us to consistently avoid 'int' throughout hypre */
typedef int hypre_int;
typedef long int hypre_longint;
typedef unsigned int hypre_uint;
typedef unsigned long int hypre_ulongint;
typedef unsigned long long int hypre_ulonglongint;

/* This allows us to consistently avoid 'double' throughout hypre */
typedef double hypre_double;

/*--------------------------------------------------------------------------
 * Define various functions
 *
 * NOTE(review): these are classic unparenthesized-argument macros; arguments
 * with side effects (e.g. hypre_max(i++, j)) are evaluated more than once.
 *--------------------------------------------------------------------------*/

#ifndef hypre_max
#define hypre_max(a,b)  (((a)<(b)) ? (b) : (a))
#endif
#ifndef hypre_min
#define hypre_min(a,b)  (((a)<(b)) ? (a) : (b))
#endif
#ifndef hypre_abs
#define hypre_abs(a)  (((a)>0) ? (a) : -(a))
#endif
#ifndef hypre_round
#define hypre_round(x)  ( ((x) < 0.0) ? ((HYPRE_Int)(x - 0.5)) : ((HYPRE_Int)(x + 0.5)) )
#endif
#ifndef hypre_pow2
#define hypre_pow2(i)  ( 1 << (i) )
#endif

#endif /* hypre_GENERAL_HEADER */

/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#ifndef hypre_PRINTF_HEADER
#define hypre_PRINTF_HEADER

#include <stdio.h>

/* printf.c */
/* printf-style wrappers so hypre's HYPRE_Int/HYPRE_BigInt formatting is
 * handled consistently regardless of the configured integer width. */
// #ifdef HYPRE_BIGINT
HYPRE_Int hypre_ndigits( HYPRE_BigInt number );
HYPRE_Int hypre_printf( const char *format, ... );
HYPRE_Int hypre_fprintf( FILE *stream, const char *format, ... );
HYPRE_Int hypre_sprintf( char *s, const char *format, ... );
HYPRE_Int hypre_scanf( const char *format, ... );
HYPRE_Int hypre_fscanf( FILE *stream, const char *format, ... );
HYPRE_Int hypre_sscanf( char *s, const char *format, ... );
// #else
// #define hypre_printf  printf
// #define hypre_fprintf fprintf
// #define hypre_sprintf sprintf
// #define hypre_scanf   scanf
// #define hypre_fscanf  fscanf
// #define hypre_sscanf  sscanf
// #endif

#endif

/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#ifndef hypre_ERROR_HEADER
#define hypre_ERROR_HEADER

#include <assert.h>

/*--------------------------------------------------------------------------
 * Global variable used in hypre error checking
 *--------------------------------------------------------------------------*/

extern HYPRE_Int hypre__global_error;
#define hypre_error_flag  hypre__global_error

/*--------------------------------------------------------------------------
 * HYPRE error macros
 *--------------------------------------------------------------------------*/

void hypre_error_handler(const char *filename, HYPRE_Int line, HYPRE_Int ierr, const char *msg);
#define hypre_error(IERR)  hypre_error_handler(__FILE__, __LINE__, IERR, NULL)
#define hypre_error_w_msg(IERR, msg)  hypre_error_handler(__FILE__, __LINE__, IERR, msg)
#define hypre_error_in_arg(IARG)  hypre_error(HYPRE_ERROR_ARG | IARG<<3)

#if defined(HYPRE_DEBUG)
/* host assert */
#define hypre_assert(EX) do { if (!(EX)) { fprintf(stderr, "[%s, %d] hypre_assert failed: %s\n", __FILE__, __LINE__, #EX); hypre_error(1); assert(0); } } while (0)
/* device assert */
#if defined(HYPRE_USING_CUDA)
#define hypre_device_assert(EX) assert(EX)
#elif defined(HYPRE_USING_HIP)
/* FIXME: Currently, asserts in device kernels in HIP do not behave well */
#define hypre_device_assert(EX)
#endif
#else /* #ifdef HYPRE_DEBUG */
/* this is to silence compiler's unused variable warnings */
#ifdef __cplusplus
#define hypre_assert(EX) do { if (0) { static_cast<void> (EX); } } while (0)
#else
#define hypre_assert(EX) do { if (0) { (void) (EX); } } while (0)
#endif
#define hypre_device_assert(EX)
#endif

#endif /* hypre_ERROR_HEADER */

/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Fake mpi stubs to generate serial codes without mpi
 *
 *****************************************************************************/

#ifndef hypre_MPISTUBS
#define hypre_MPISTUBS

#ifdef __cplusplus
extern "C" {
#endif

#ifdef HYPRE_SEQUENTIAL

/******************************************************************************
 * MPI stubs to generate serial codes without mpi
 *****************************************************************************/

/*--------------------------------------------------------------------------
 * Change all MPI names to hypre_MPI names to avoid link conflicts.
 *
 * NOTE: MPI_Comm is the only MPI symbol in the HYPRE user interface,
 * and is defined in `HYPRE_utilities.h'.
 *--------------------------------------------------------------------------*/

#define MPI_Comm            hypre_MPI_Comm
#define MPI_Group           hypre_MPI_Group
#define MPI_Request         hypre_MPI_Request
#define MPI_Datatype        hypre_MPI_Datatype
#define MPI_Status          hypre_MPI_Status
#define MPI_Op              hypre_MPI_Op
#define MPI_Aint            hypre_MPI_Aint
#define MPI_Info            hypre_MPI_Info

#define MPI_COMM_WORLD        hypre_MPI_COMM_WORLD
#define MPI_COMM_NULL         hypre_MPI_COMM_NULL
#define MPI_COMM_SELF         hypre_MPI_COMM_SELF
#define MPI_COMM_TYPE_SHARED  hypre_MPI_COMM_TYPE_SHARED

#define MPI_BOTTOM            hypre_MPI_BOTTOM

#define MPI_FLOAT             hypre_MPI_FLOAT
#define MPI_DOUBLE            hypre_MPI_DOUBLE
#define MPI_LONG_DOUBLE       hypre_MPI_LONG_DOUBLE
#define MPI_INT               hypre_MPI_INT
#define MPI_LONG_LONG_INT     hypre_MPI_LONG_LONG_INT
#define MPI_CHAR              hypre_MPI_CHAR
#define MPI_LONG              hypre_MPI_LONG
#define MPI_BYTE              hypre_MPI_BYTE
#define MPI_C_DOUBLE_COMPLEX  hypre_MPI_COMPLEX

#define MPI_SUM               hypre_MPI_SUM
#define MPI_MIN               hypre_MPI_MIN
#define MPI_MAX               hypre_MPI_MAX
#define MPI_LOR               hypre_MPI_LOR
#define MPI_LAND              hypre_MPI_LAND
#define MPI_SUCCESS           hypre_MPI_SUCCESS
#define MPI_STATUSES_IGNORE   hypre_MPI_STATUSES_IGNORE

#define MPI_UNDEFINED         hypre_MPI_UNDEFINED
#define MPI_REQUEST_NULL      hypre_MPI_REQUEST_NULL
#define MPI_INFO_NULL         hypre_MPI_INFO_NULL
#define MPI_ANY_SOURCE        hypre_MPI_ANY_SOURCE
#define MPI_ANY_TAG           hypre_MPI_ANY_TAG
#define MPI_SOURCE            hypre_MPI_SOURCE
#define MPI_TAG               hypre_MPI_TAG

#define MPI_Init              hypre_MPI_Init
#define MPI_Finalize          hypre_MPI_Finalize
#define MPI_Abort             hypre_MPI_Abort
#define MPI_Wtime             hypre_MPI_Wtime
#define MPI_Wtick             hypre_MPI_Wtick
#define MPI_Barrier           hypre_MPI_Barrier
#define MPI_Comm_create       hypre_MPI_Comm_create
#define MPI_Comm_dup          hypre_MPI_Comm_dup
#define MPI_Comm_f2c          hypre_MPI_Comm_f2c
#define MPI_Comm_group        hypre_MPI_Comm_group
#define MPI_Comm_size         hypre_MPI_Comm_size
#define MPI_Comm_rank         hypre_MPI_Comm_rank
#define MPI_Comm_free         hypre_MPI_Comm_free
#define MPI_Comm_split        hypre_MPI_Comm_split
#define MPI_Comm_split_type   hypre_MPI_Comm_split_type
#define MPI_Group_incl        hypre_MPI_Group_incl
#define MPI_Group_free        hypre_MPI_Group_free
#define MPI_Address           hypre_MPI_Address
#define MPI_Get_count         hypre_MPI_Get_count
#define MPI_Alltoall          hypre_MPI_Alltoall
#define MPI_Allgather         hypre_MPI_Allgather
#define MPI_Allgatherv        hypre_MPI_Allgatherv
#define MPI_Gather            hypre_MPI_Gather
#define MPI_Gatherv           hypre_MPI_Gatherv
#define MPI_Scatter           hypre_MPI_Scatter
#define MPI_Scatterv          hypre_MPI_Scatterv
#define MPI_Bcast             hypre_MPI_Bcast
#define MPI_Send              hypre_MPI_Send
#define MPI_Recv              hypre_MPI_Recv
#define MPI_Isend             hypre_MPI_Isend
#define MPI_Irecv             hypre_MPI_Irecv
#define MPI_Send_init         hypre_MPI_Send_init
#define MPI_Recv_init         hypre_MPI_Recv_init
#define MPI_Irsend            hypre_MPI_Irsend
#define MPI_Startall          hypre_MPI_Startall
#define MPI_Probe             hypre_MPI_Probe
#define MPI_Iprobe            hypre_MPI_Iprobe
#define MPI_Test              hypre_MPI_Test
#define MPI_Testall           hypre_MPI_Testall
#define MPI_Wait              hypre_MPI_Wait
#define MPI_Waitall           hypre_MPI_Waitall
#define MPI_Waitany           hypre_MPI_Waitany
#define MPI_Allreduce         hypre_MPI_Allreduce
#define MPI_Reduce            hypre_MPI_Reduce
#define MPI_Scan              hypre_MPI_Scan
#define MPI_Request_free      hypre_MPI_Request_free
#define MPI_Type_contiguous   hypre_MPI_Type_contiguous
#define MPI_Type_vector       hypre_MPI_Type_vector
#define MPI_Type_hvector      hypre_MPI_Type_hvector
#define MPI_Type_struct       hypre_MPI_Type_struct
#define MPI_Type_commit       hypre_MPI_Type_commit
#define MPI_Type_free         hypre_MPI_Type_free
#define MPI_Op_free           hypre_MPI_Op_free
#define MPI_Op_create         hypre_MPI_Op_create
#define MPI_User_function     hypre_MPI_User_function
#define MPI_Info_create       hypre_MPI_Info_create

/*--------------------------------------------------------------------------
 * Types, etc.
 *--------------------------------------------------------------------------*/

/* These types have associated creation and destruction routines */
typedef HYPRE_Int hypre_MPI_Comm;
typedef HYPRE_Int hypre_MPI_Group;
typedef HYPRE_Int hypre_MPI_Request;
typedef HYPRE_Int hypre_MPI_Datatype;
typedef void (hypre_MPI_User_function) ();

typedef struct
{
   HYPRE_Int hypre_MPI_SOURCE;
   HYPRE_Int hypre_MPI_TAG;
} hypre_MPI_Status;

typedef HYPRE_Int hypre_MPI_Op;
typedef HYPRE_Int hypre_MPI_Aint;
typedef HYPRE_Int hypre_MPI_Info;

#define hypre_MPI_COMM_SELF   1
#define hypre_MPI_COMM_WORLD  0
#define hypre_MPI_COMM_NULL  -1

#define hypre_MPI_COMM_TYPE_SHARED 0

#define hypre_MPI_BOTTOM  0x0

#define hypre_MPI_FLOAT 0
#define hypre_MPI_DOUBLE 1
#define hypre_MPI_LONG_DOUBLE 2
#define hypre_MPI_INT 3
#define hypre_MPI_CHAR 4
#define hypre_MPI_LONG 5
#define hypre_MPI_BYTE 6
#define hypre_MPI_REAL 7
#define hypre_MPI_COMPLEX 8
#define hypre_MPI_LONG_LONG_INT 9

#define hypre_MPI_SUM 0
#define hypre_MPI_MIN 1
#define hypre_MPI_MAX 2
#define hypre_MPI_LOR 3
#define hypre_MPI_LAND 4
#define hypre_MPI_SUCCESS 0
#define hypre_MPI_STATUSES_IGNORE 0

#define hypre_MPI_UNDEFINED -9999
#define hypre_MPI_REQUEST_NULL  0
#define hypre_MPI_INFO_NULL     0
#define hypre_MPI_ANY_SOURCE    1
#define hypre_MPI_ANY_TAG       1

#else

/******************************************************************************
 * MPI stubs to do casting of HYPRE_Int and hypre_int correctly
 *****************************************************************************/

typedef MPI_Comm hypre_MPI_Comm;
typedef MPI_Group hypre_MPI_Group;
typedef MPI_Request hypre_MPI_Request;
typedef MPI_Datatype hypre_MPI_Datatype;
typedef MPI_Status hypre_MPI_Status;
typedef MPI_Op hypre_MPI_Op;
typedef MPI_Aint hypre_MPI_Aint;
typedef MPI_Info hypre_MPI_Info;
typedef MPI_User_function hypre_MPI_User_function;

#define hypre_MPI_COMM_WORLD         MPI_COMM_WORLD
#define hypre_MPI_COMM_NULL          MPI_COMM_NULL
#define hypre_MPI_BOTTOM             MPI_BOTTOM
#define hypre_MPI_COMM_SELF          MPI_COMM_SELF
#define hypre_MPI_COMM_TYPE_SHARED   MPI_COMM_TYPE_SHARED

#define hypre_MPI_FLOAT   MPI_FLOAT
#define hypre_MPI_DOUBLE  MPI_DOUBLE
#define hypre_MPI_LONG_DOUBLE  MPI_LONG_DOUBLE
/* HYPRE_MPI_INT is defined in HYPRE_utilities.h */
#define hypre_MPI_INT     HYPRE_MPI_INT
#define hypre_MPI_CHAR    MPI_CHAR
#define hypre_MPI_LONG    MPI_LONG
#define hypre_MPI_BYTE    MPI_BYTE
/* HYPRE_MPI_REAL is defined in HYPRE_utilities.h */
#define hypre_MPI_REAL    HYPRE_MPI_REAL
/* HYPRE_MPI_COMPLEX is defined in HYPRE_utilities.h */
#define hypre_MPI_COMPLEX HYPRE_MPI_COMPLEX

#define hypre_MPI_SUM MPI_SUM
#define hypre_MPI_MIN MPI_MIN
#define hypre_MPI_MAX MPI_MAX
#define hypre_MPI_LOR MPI_LOR
#define hypre_MPI_SUCCESS MPI_SUCCESS
#define hypre_MPI_STATUSES_IGNORE MPI_STATUSES_IGNORE

#define hypre_MPI_UNDEFINED       MPI_UNDEFINED
#define hypre_MPI_REQUEST_NULL    MPI_REQUEST_NULL
#define hypre_MPI_INFO_NULL       MPI_INFO_NULL
#define hypre_MPI_ANY_SOURCE      MPI_ANY_SOURCE
#define hypre_MPI_ANY_TAG         MPI_ANY_TAG
#define hypre_MPI_SOURCE          MPI_SOURCE
#define hypre_MPI_TAG             MPI_TAG
#define hypre_MPI_LAND            MPI_LAND

#endif

/******************************************************************************
 * Everything below this applies to both ifdef cases above
 *****************************************************************************/

/*--------------------------------------------------------------------------
 * Prototypes
 *--------------------------------------------------------------------------*/

/* mpistubs.c */
HYPRE_Int hypre_MPI_Init( hypre_int *argc, char ***argv );
HYPRE_Int hypre_MPI_Finalize( void );
HYPRE_Int hypre_MPI_Abort( hypre_MPI_Comm comm, HYPRE_Int errorcode );
HYPRE_Real hypre_MPI_Wtime( void );
HYPRE_Real hypre_MPI_Wtick( void );
HYPRE_Int hypre_MPI_Barrier( hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Comm_create( hypre_MPI_Comm comm, hypre_MPI_Group group, hypre_MPI_Comm *newcomm );
HYPRE_Int hypre_MPI_Comm_dup( hypre_MPI_Comm comm, hypre_MPI_Comm *newcomm );
hypre_MPI_Comm hypre_MPI_Comm_f2c( hypre_int comm );
HYPRE_Int hypre_MPI_Comm_size( hypre_MPI_Comm comm, HYPRE_Int *size );
HYPRE_Int hypre_MPI_Comm_rank( hypre_MPI_Comm comm, HYPRE_Int *rank );
HYPRE_Int hypre_MPI_Comm_free( hypre_MPI_Comm *comm );
HYPRE_Int hypre_MPI_Comm_group( hypre_MPI_Comm comm, hypre_MPI_Group *group );
HYPRE_Int hypre_MPI_Comm_split( hypre_MPI_Comm comm, HYPRE_Int n, HYPRE_Int m, hypre_MPI_Comm * comms );
HYPRE_Int hypre_MPI_Group_incl( hypre_MPI_Group group, HYPRE_Int n, HYPRE_Int *ranks, hypre_MPI_Group *newgroup );
HYPRE_Int hypre_MPI_Group_free( hypre_MPI_Group *group );
HYPRE_Int hypre_MPI_Address( void *location, hypre_MPI_Aint *address );
HYPRE_Int hypre_MPI_Get_count( hypre_MPI_Status *status, hypre_MPI_Datatype datatype, HYPRE_Int *count );
HYPRE_Int hypre_MPI_Alltoall( void *sendbuf, HYPRE_Int sendcount, hypre_MPI_Datatype sendtype, void *recvbuf, HYPRE_Int recvcount, hypre_MPI_Datatype recvtype, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Allgather( void *sendbuf, HYPRE_Int sendcount, hypre_MPI_Datatype sendtype, void *recvbuf, HYPRE_Int recvcount, hypre_MPI_Datatype recvtype, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Allgatherv( void *sendbuf, HYPRE_Int sendcount, hypre_MPI_Datatype sendtype, void *recvbuf, HYPRE_Int *recvcounts, HYPRE_Int *displs, hypre_MPI_Datatype recvtype, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Gather( void *sendbuf, HYPRE_Int sendcount, hypre_MPI_Datatype sendtype, void *recvbuf, HYPRE_Int recvcount, hypre_MPI_Datatype recvtype, HYPRE_Int root, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Gatherv( void *sendbuf, HYPRE_Int sendcount, hypre_MPI_Datatype sendtype, void *recvbuf, HYPRE_Int *recvcounts, HYPRE_Int *displs, hypre_MPI_Datatype recvtype, HYPRE_Int root, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Scatter( void *sendbuf, HYPRE_Int sendcount, hypre_MPI_Datatype sendtype, void *recvbuf, HYPRE_Int recvcount, hypre_MPI_Datatype recvtype, HYPRE_Int root, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Scatterv( void *sendbuf, HYPRE_Int *sendcounts, HYPRE_Int *displs, hypre_MPI_Datatype sendtype, void *recvbuf, HYPRE_Int recvcount, hypre_MPI_Datatype recvtype, HYPRE_Int root, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Bcast( void *buffer, HYPRE_Int count, hypre_MPI_Datatype datatype, HYPRE_Int root, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Send( void *buf, HYPRE_Int count, hypre_MPI_Datatype datatype, HYPRE_Int dest, HYPRE_Int tag, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Recv( void *buf, HYPRE_Int count, hypre_MPI_Datatype datatype, HYPRE_Int source, HYPRE_Int tag, hypre_MPI_Comm comm, hypre_MPI_Status *status );
HYPRE_Int hypre_MPI_Isend( void *buf, HYPRE_Int count, hypre_MPI_Datatype datatype, HYPRE_Int dest, HYPRE_Int tag, hypre_MPI_Comm comm, hypre_MPI_Request *request );
HYPRE_Int hypre_MPI_Irecv( void *buf, HYPRE_Int count, hypre_MPI_Datatype datatype, HYPRE_Int source, HYPRE_Int tag, hypre_MPI_Comm comm, hypre_MPI_Request *request );
HYPRE_Int hypre_MPI_Send_init( void *buf, HYPRE_Int count, hypre_MPI_Datatype datatype, HYPRE_Int dest, HYPRE_Int tag, hypre_MPI_Comm comm, hypre_MPI_Request *request );
HYPRE_Int hypre_MPI_Recv_init( void *buf, HYPRE_Int count, hypre_MPI_Datatype datatype, HYPRE_Int dest, HYPRE_Int tag, hypre_MPI_Comm comm, hypre_MPI_Request *request );
HYPRE_Int hypre_MPI_Irsend( void *buf, HYPRE_Int count, hypre_MPI_Datatype datatype, HYPRE_Int dest, HYPRE_Int tag, hypre_MPI_Comm comm, hypre_MPI_Request *request );
HYPRE_Int hypre_MPI_Startall( HYPRE_Int count, hypre_MPI_Request *array_of_requests );
HYPRE_Int hypre_MPI_Probe( HYPRE_Int source, HYPRE_Int tag, hypre_MPI_Comm comm, hypre_MPI_Status *status );
HYPRE_Int hypre_MPI_Iprobe( HYPRE_Int source, HYPRE_Int tag, hypre_MPI_Comm comm, HYPRE_Int *flag, hypre_MPI_Status *status );
HYPRE_Int hypre_MPI_Test( hypre_MPI_Request *request, HYPRE_Int *flag, hypre_MPI_Status *status );
HYPRE_Int hypre_MPI_Testall( HYPRE_Int count, hypre_MPI_Request *array_of_requests, HYPRE_Int *flag, hypre_MPI_Status *array_of_statuses );
HYPRE_Int hypre_MPI_Wait( hypre_MPI_Request *request, hypre_MPI_Status *status );
HYPRE_Int hypre_MPI_Waitall( HYPRE_Int count, hypre_MPI_Request *array_of_requests, hypre_MPI_Status *array_of_statuses );
HYPRE_Int hypre_MPI_Waitany( HYPRE_Int count, hypre_MPI_Request *array_of_requests, HYPRE_Int *index, hypre_MPI_Status *status );
HYPRE_Int hypre_MPI_Allreduce( void *sendbuf, void *recvbuf, HYPRE_Int count, hypre_MPI_Datatype datatype, hypre_MPI_Op op, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Reduce( void *sendbuf, void *recvbuf, HYPRE_Int count, hypre_MPI_Datatype datatype, hypre_MPI_Op op, HYPRE_Int root, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Scan( void *sendbuf, void *recvbuf, HYPRE_Int count, hypre_MPI_Datatype datatype, hypre_MPI_Op op, hypre_MPI_Comm comm );
HYPRE_Int hypre_MPI_Request_free( hypre_MPI_Request *request );
HYPRE_Int hypre_MPI_Type_contiguous( HYPRE_Int count, hypre_MPI_Datatype oldtype, hypre_MPI_Datatype *newtype );
HYPRE_Int hypre_MPI_Type_vector( HYPRE_Int count, HYPRE_Int blocklength, HYPRE_Int stride, hypre_MPI_Datatype oldtype, hypre_MPI_Datatype *newtype );
HYPRE_Int hypre_MPI_Type_hvector( HYPRE_Int count, HYPRE_Int blocklength, hypre_MPI_Aint stride, hypre_MPI_Datatype oldtype, hypre_MPI_Datatype *newtype );
HYPRE_Int hypre_MPI_Type_struct( HYPRE_Int count, HYPRE_Int *array_of_blocklengths, hypre_MPI_Aint *array_of_displacements, hypre_MPI_Datatype *array_of_types, hypre_MPI_Datatype *newtype );
HYPRE_Int hypre_MPI_Type_commit( hypre_MPI_Datatype *datatype );
HYPRE_Int hypre_MPI_Type_free( hypre_MPI_Datatype *datatype );
HYPRE_Int hypre_MPI_Op_free( hypre_MPI_Op *op );
HYPRE_Int hypre_MPI_Op_create( hypre_MPI_User_function *function, hypre_int commute, hypre_MPI_Op *op );
#if defined(HYPRE_USING_GPU)
HYPRE_Int hypre_MPI_Comm_split_type(hypre_MPI_Comm comm, HYPRE_Int split_type, HYPRE_Int key, hypre_MPI_Info info, hypre_MPI_Comm *newcomm);
HYPRE_Int hypre_MPI_Info_create(hypre_MPI_Info *info);
HYPRE_Int hypre_MPI_Info_free( hypre_MPI_Info *info );
#endif

#ifdef __cplusplus
}
#endif

#endif

/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#ifndef HYPRE_SMP_HEADER
#define HYPRE_SMP_HEADER
#endif

#define HYPRE_SMP_SCHEDULE schedule(static)

/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Header file for memory management utilities
 *
 * The abstract memory model has a Host (think CPU) and a Device (think GPU) and
 * three basic types of memory management utilities:
 *
 * 1.
Malloc(..., location) * location=LOCATION_DEVICE - malloc memory on the device * location=LOCATION_HOST - malloc memory on the host * 2. MemCopy(..., method) * method=HOST_TO_DEVICE - copy from host to device * method=DEVICE_TO_HOST - copy from device to host * method=DEVICE_TO_DEVICE - copy from device to device * 3. SetExecutionMode * location=LOCATION_DEVICE - execute on the device * location=LOCATION_HOST - execute on the host * * Although the abstract model does not explicitly reflect a managed memory * model (i.e., unified memory), it can support it. Here is a summary of how * the abstract model would be mapped to specific hardware scenarios: * * Not using a device, not using managed memory * Malloc(..., location) * location=LOCATION_DEVICE - host malloc e.g., malloc * location=LOCATION_HOST - host malloc e.g., malloc * MemoryCopy(..., locTo,locFrom) * locTo=LOCATION_HOST, locFrom=LOCATION_DEVICE - copy from host to host e.g., memcpy * locTo=LOCATION_DEVICE, locFrom=LOCATION_HOST - copy from host to host e.g., memcpy * locTo=LOCATION_DEVICE, locFrom=LOCATION_DEVICE - copy from host to host e.g., memcpy * SetExecutionMode * location=LOCATION_DEVICE - execute on the host * location=LOCATION_HOST - execute on the host * * Using a device, not using managed memory * Malloc(..., location) * location=LOCATION_DEVICE - device malloc e.g., cudaMalloc * location=LOCATION_HOST - host malloc e.g., malloc * MemoryCopy(..., locTo,locFrom) * locTo=LOCATION_HOST, locFrom=LOCATION_DEVICE - copy from device to host e.g., cudaMemcpy * locTo=LOCATION_DEVICE, locFrom=LOCATION_HOST - copy from host to device e.g., cudaMemcpy * locTo=LOCATION_DEVICE, locFrom=LOCATION_DEVICE - copy from device to device e.g., cudaMemcpy * SetExecutionMode * location=LOCATION_DEVICE - execute on the device * location=LOCATION_HOST - execute on the host * * Using a device, using managed memory * Malloc(..., location) * location=LOCATION_DEVICE - managed malloc e.g., cudaMallocManaged * 
location=LOCATION_HOST - host malloc e.g., malloc * MemoryCopy(..., locTo,locFrom) * locTo=LOCATION_HOST, locFrom=LOCATION_DEVICE - copy from device to host e.g., cudaMallocManaged * locTo=LOCATION_DEVICE, locFrom=LOCATION_HOST - copy from host to device e.g., cudaMallocManaged * locTo=LOCATION_DEVICE, locFrom=LOCATION_DEVICE - copy from device to device e.g., cudaMallocManaged * SetExecutionMode * location=LOCATION_DEVICE - execute on the device * location=LOCATION_HOST - execute on the host * *****************************************************************************/ #ifndef hypre_MEMORY_HEADER #define hypre_MEMORY_HEADER #include <stdio.h> #include <stdlib.h> #if defined(HYPRE_USING_UMPIRE) #include "umpire/interface/umpire.h" #define HYPRE_UMPIRE_POOL_NAME_MAX_LEN 1024 #endif /* stringification: * _Pragma(string-literal), so we need to cast argument to a string * The three dots as last argument of the macro tells compiler that this is a variadic macro. * I.e. this is a macro that receives variable number of arguments. */ #define HYPRE_STR(...) #__VA_ARGS__ #define HYPRE_XSTR(...) HYPRE_STR(__VA_ARGS__) #ifdef __cplusplus extern "C" { #endif typedef enum _hypre_MemoryLocation { hypre_MEMORY_UNDEFINED = -1, hypre_MEMORY_HOST, hypre_MEMORY_HOST_PINNED, hypre_MEMORY_DEVICE, hypre_MEMORY_UNIFIED } hypre_MemoryLocation; /*------------------------------------------------------- * hypre_GetActualMemLocation * return actual location based on the selected memory model *-------------------------------------------------------*/ static inline hypre_MemoryLocation hypre_GetActualMemLocation(HYPRE_MemoryLocation location) { if (location == HYPRE_MEMORY_HOST) { return hypre_MEMORY_HOST; } if (location == HYPRE_MEMORY_DEVICE) { #if defined(HYPRE_USING_HOST_MEMORY) return hypre_MEMORY_HOST; #elif defined(HYPRE_USING_DEVICE_MEMORY) return hypre_MEMORY_DEVICE; #elif defined(HYPRE_USING_UNIFIED_MEMORY) return hypre_MEMORY_UNIFIED; #else #error Wrong HYPRE memory setting. 
#endif } return hypre_MEMORY_UNDEFINED; } #ifdef HYPRE_USING_MEMORY_TRACKER typedef struct { char _action[16]; void *_ptr; size_t _nbytes; hypre_MemoryLocation _memory_location; char _filename[256]; char _function[256]; HYPRE_Int _line; size_t _pair; } hypre_MemoryTrackerEntry; typedef struct { size_t actual_size; size_t alloced_size; size_t prev_end; hypre_MemoryTrackerEntry *data; } hypre_MemoryTracker; /* These Allocs are with memory tracker, for debug */ #define hypre_TAlloc(type, count, location) \ ( \ { \ void *ptr = hypre_MAlloc((size_t)(sizeof(type) * (count)), location); \ hypre_MemoryTrackerInsert("malloc", ptr, sizeof(type)*(count), hypre_GetActualMemLocation(location), __FILE__, __func__, __LINE__);\ (type *) ptr; \ } \ ) #define _hypre_TAlloc(type, count, location) \ ( \ { \ void *ptr = _hypre_MAlloc((size_t)(sizeof(type) * (count)), location); \ hypre_MemoryTrackerInsert("malloc", ptr, sizeof(type)*(count), location, __FILE__, __func__, __LINE__); \ (type *) ptr; \ } \ ) #define hypre_CTAlloc(type, count, location) \ ( \ { \ void *ptr = hypre_CAlloc((size_t)(count), (size_t)sizeof(type), location); \ hypre_MemoryTrackerInsert("calloc", ptr, sizeof(type)*(count), hypre_GetActualMemLocation(location), __FILE__, __func__, __LINE__);\ (type *) ptr; \ } \ ) #define hypre_TReAlloc(ptr, type, count, location) \ ( \ { \ hypre_MemoryTrackerInsert("rfree", ptr, (size_t) -1, hypre_GetActualMemLocation(location), __FILE__, __func__, __LINE__); \ void *new_ptr = hypre_ReAlloc((char *)ptr, (size_t)(sizeof(type) * (count)), location); \ hypre_MemoryTrackerInsert("rmalloc", new_ptr, sizeof(type)*(count), hypre_GetActualMemLocation(location), __FILE__, __func__, __LINE__);\ (type *) new_ptr; \ } \ ) #define hypre_TReAlloc_v2(ptr, old_type, old_count, new_type, new_count, location) \ ( \ { \ hypre_MemoryTrackerInsert("rfree", ptr, sizeof(old_type)*(old_count), hypre_GetActualMemLocation(location), __FILE__, __func__, __LINE__); \ void *new_ptr = hypre_ReAlloc_v2((char 
*)ptr, (size_t)(sizeof(old_type)*(old_count)), (size_t)(sizeof(new_type)*(new_count)), location); \ hypre_MemoryTrackerInsert("rmalloc", new_ptr, sizeof(new_type)*(new_count), hypre_GetActualMemLocation(location), __FILE__, __func__, __LINE__);\ (new_type *) new_ptr; \ } \ ) #define hypre_TMemcpy(dst, src, type, count, locdst, locsrc) \ ( \ { \ hypre_Memcpy((void *)(dst), (void *)(src), (size_t)(sizeof(type) * (count)), locdst, locsrc); \ } \ ) #define hypre_TFree(ptr, location) \ ( \ { \ hypre_MemoryTrackerInsert("free", ptr, (size_t) -1, hypre_GetActualMemLocation(location), __FILE__, __func__, __LINE__); \ hypre_Free((void *)ptr, location); \ ptr = NULL; \ } \ ) #define _hypre_TFree(ptr, location) \ ( \ { \ hypre_MemoryTrackerInsert("free", ptr, (size_t) -1, location, __FILE__, __func__, __LINE__); \ _hypre_Free((void *)ptr, location); \ ptr = NULL; \ } \ ) #else /* #ifdef HYPRE_USING_MEMORY_TRACKER */ #define hypre_TAlloc(type, count, location) \ ( (type *) hypre_MAlloc((size_t)(sizeof(type) * (count)), location) ) #define _hypre_TAlloc(type, count, location) \ ( (type *) _hypre_MAlloc((size_t)(sizeof(type) * (count)), location) ) #define hypre_CTAlloc(type, count, location) \ ( (type *) hypre_CAlloc((size_t)(count), (size_t)sizeof(type), location) ) #define hypre_TReAlloc(ptr, type, count, location) \ ( (type *) hypre_ReAlloc((char *)ptr, (size_t)(sizeof(type) * (count)), location) ) #define hypre_TReAlloc_v2(ptr, old_type, old_count, new_type, new_count, location) \ ( (new_type *) hypre_ReAlloc_v2((char *)ptr, (size_t)(sizeof(old_type)*(old_count)), (size_t)(sizeof(new_type)*(new_count)), location) ) #define hypre_TMemcpy(dst, src, type, count, locdst, locsrc) \ (hypre_Memcpy((void *)(dst), (void *)(src), (size_t)(sizeof(type) * (count)), locdst, locsrc)) #define hypre_TFree(ptr, location) \ ( hypre_Free((void *)ptr, location), ptr = NULL ) #define _hypre_TFree(ptr, location) \ ( _hypre_Free((void *)ptr, location), ptr = NULL ) #endif /* #ifdef 
HYPRE_USING_MEMORY_TRACKER */ /*-------------------------------------------------------------------------- * Prototypes *--------------------------------------------------------------------------*/ /* memory.c */ void * hypre_Memset(void *ptr, HYPRE_Int value, size_t num, HYPRE_MemoryLocation location); void hypre_MemPrefetch(void *ptr, size_t size, HYPRE_MemoryLocation location); void * hypre_MAlloc(size_t size, HYPRE_MemoryLocation location); void * hypre_CAlloc( size_t count, size_t elt_size, HYPRE_MemoryLocation location); void hypre_Free(void *ptr, HYPRE_MemoryLocation location); void hypre_Memcpy(void *dst, void *src, size_t size, HYPRE_MemoryLocation loc_dst, HYPRE_MemoryLocation loc_src); void * hypre_ReAlloc(void *ptr, size_t size, HYPRE_MemoryLocation location); void * hypre_ReAlloc_v2(void *ptr, size_t old_size, size_t new_size, HYPRE_MemoryLocation location); void * _hypre_MAlloc(size_t size, hypre_MemoryLocation location); void _hypre_Free(void *ptr, hypre_MemoryLocation location); HYPRE_ExecutionPolicy hypre_GetExecPolicy1(HYPRE_MemoryLocation location); HYPRE_ExecutionPolicy hypre_GetExecPolicy2(HYPRE_MemoryLocation location1, HYPRE_MemoryLocation location2); HYPRE_Int hypre_GetPointerLocation(const void *ptr, hypre_MemoryLocation *memory_location); HYPRE_Int hypre_PrintMemoryTracker(); HYPRE_Int hypre_SetCubMemPoolSize( hypre_uint bin_growth, hypre_uint min_bin, hypre_uint max_bin, size_t max_cached_bytes ); HYPRE_Int hypre_umpire_host_pooled_allocate(void **ptr, size_t nbytes); HYPRE_Int hypre_umpire_host_pooled_free(void *ptr); void *hypre_umpire_host_pooled_realloc(void *ptr, size_t size); HYPRE_Int hypre_umpire_device_pooled_allocate(void **ptr, size_t nbytes); HYPRE_Int hypre_umpire_device_pooled_free(void *ptr); HYPRE_Int hypre_umpire_um_pooled_allocate(void **ptr, size_t nbytes); HYPRE_Int hypre_umpire_um_pooled_free(void *ptr); HYPRE_Int hypre_umpire_pinned_pooled_allocate(void **ptr, size_t nbytes); HYPRE_Int 
hypre_umpire_pinned_pooled_free(void *ptr); #ifdef HYPRE_USING_MEMORY_TRACKER hypre_MemoryTracker * hypre_MemoryTrackerCreate(); void hypre_MemoryTrackerDestroy(hypre_MemoryTracker *tracker); void hypre_MemoryTrackerInsert(const char *action, void *ptr, size_t nbytes, hypre_MemoryLocation memory_location, const char *filename, const char *function, HYPRE_Int line); HYPRE_Int hypre_PrintMemoryTracker(); #endif /* memory_dmalloc.c */ HYPRE_Int hypre_InitMemoryDebugDML( HYPRE_Int id ); HYPRE_Int hypre_FinalizeMemoryDebugDML( void ); char *hypre_MAllocDML( HYPRE_Int size, char *file, HYPRE_Int line ); char *hypre_CAllocDML( HYPRE_Int count, HYPRE_Int elt_size, char *file, HYPRE_Int line ); char *hypre_ReAllocDML( char *ptr, HYPRE_Int size, char *file, HYPRE_Int line ); void hypre_FreeDML( char *ptr, char *file, HYPRE_Int line ); /* GPU malloc prototype */ typedef void (*GPUMallocFunc)(void **, size_t); typedef void (*GPUMfreeFunc)(void *); #ifdef __cplusplus } #endif #endif /****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. 
* * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #ifndef HYPRE_OMP_DEVICE_H #define HYPRE_OMP_DEVICE_H #if defined(HYPRE_USING_DEVICE_OPENMP) #include "omp.h" /* OpenMP 4.5 device memory management */ extern HYPRE_Int hypre__global_offload; extern HYPRE_Int hypre__offload_device_num; extern HYPRE_Int hypre__offload_host_num; /* stats */ extern size_t hypre__target_allc_count; extern size_t hypre__target_free_count; extern size_t hypre__target_allc_bytes; extern size_t hypre__target_free_bytes; extern size_t hypre__target_htod_count; extern size_t hypre__target_dtoh_count; extern size_t hypre__target_htod_bytes; extern size_t hypre__target_dtoh_bytes; /* CHECK MODE: check if offloading has effect (turned on when configured with --enable-debug) * if we ``enter'' an address, it should not exist in device [o.w NO EFFECT] * if we ``exit'' or ''update'' an address, it should exist in device [o.w ERROR] * hypre__offload_flag: 0 == OK; 1 == WRONG */ #ifdef HYPRE_DEVICE_OPENMP_CHECK #define HYPRE_OFFLOAD_FLAG(devnum, hptr, type) HYPRE_Int hypre__offload_flag = (type[1] == 'n') == omp_target_is_present(hptr, devnum); #else #define HYPRE_OFFLOAD_FLAG(...) 
HYPRE_Int hypre__offload_flag = 0; /* non-debug mode, always OK */ #endif /* OMP 4.5 offloading macro */ #define hypre_omp_device_offload(devnum, hptr, datatype, offset, count, type1, type2) \ {\ /* devnum: device number \ * hptr: host poiter \ * datatype \ * type1: ``e(n)ter'', ''e(x)it'', or ``u(p)date'' \ * type2: ``(a)lloc'', ``(t)o'', ``(d)elete'', ''(f)rom'' \ */ \ datatype *hypre__offload_hptr = (datatype *) hptr; \ /* if hypre__global_offload == 0, or * hptr (host pointer) == NULL, * this offload will be IGNORED */ \ if (hypre__global_offload && hypre__offload_hptr != NULL) { \ /* offloading offset and size (in datatype) */ \ size_t hypre__offload_offset = offset, hypre__offload_size = count; \ /* in the CHECK mode, we test if this offload has effect */ \ HYPRE_OFFLOAD_FLAG(devnum, hypre__offload_hptr, type1) \ if (hypre__offload_flag) { \ printf("[!NO Effect! %s %d] device %d target: %6s %6s, data %p, [%ld:%ld]\n", __FILE__, __LINE__, devnum, type1, type2, (void *)hypre__offload_hptr, hypre__offload_offset, hypre__offload_size); exit(0); \ } else { \ size_t offload_bytes = count * sizeof(datatype); \ /* printf("[ %s %d] device %d target: %6s %6s, data %p, [%d:%d]\n", __FILE__, __LINE__, devnum, type1, type2, (void *)hypre__offload_hptr, hypre__offload_offset, hypre__offload_size); */ \ if (type1[1] == 'n' && type2[0] == 't') { \ /* enter to */\ hypre__target_allc_count ++; \ hypre__target_allc_bytes += offload_bytes; \ hypre__target_htod_count ++; \ hypre__target_htod_bytes += offload_bytes; \ _Pragma (HYPRE_XSTR(omp target enter data map(to:hypre__offload_hptr[hypre__offload_offset:hypre__offload_size]))) \ } else if (type1[1] == 'n' && type2[0] == 'a') { \ /* enter alloc */ \ hypre__target_allc_count ++; \ hypre__target_allc_bytes += offload_bytes; \ _Pragma (HYPRE_XSTR(omp target enter data map(alloc:hypre__offload_hptr[hypre__offload_offset:hypre__offload_size]))) \ } else if (type1[1] == 'x' && type2[0] == 'd') { \ /* exit delete */\ 
hypre__target_free_count ++; \ hypre__target_free_bytes += offload_bytes; \ _Pragma (HYPRE_XSTR(omp target exit data map(delete:hypre__offload_hptr[hypre__offload_offset:hypre__offload_size]))) \ } else if (type1[1] == 'x' && type2[0] == 'f') {\ /* exit from */ \ hypre__target_free_count ++; \ hypre__target_free_bytes += offload_bytes; \ hypre__target_dtoh_count ++; \ hypre__target_dtoh_bytes += offload_bytes; \ _Pragma (HYPRE_XSTR(omp target exit data map(from:hypre__offload_hptr[hypre__offload_offset:hypre__offload_size]))) \ } else if (type1[1] == 'p' && type2[0] == 't') { \ /* update to */ \ hypre__target_htod_count ++; \ hypre__target_htod_bytes += offload_bytes; \ _Pragma (HYPRE_XSTR(omp target update to(hypre__offload_hptr[hypre__offload_offset:hypre__offload_size]))) \ } else if (type1[1] == 'p' && type2[0] == 'f') {\ /* update from */ \ hypre__target_dtoh_count ++; \ hypre__target_dtoh_bytes += offload_bytes; \ _Pragma (HYPRE_XSTR(omp target update from(hypre__offload_hptr[hypre__offload_offset:hypre__offload_size]))) \ } else {\ printf("error: unrecognized offloading type combination!\n"); exit(-1); \ } \ } \ } \ } HYPRE_Int HYPRE_OMPOffload(HYPRE_Int device, void *ptr, size_t num, const char *type1, const char *type2); HYPRE_Int HYPRE_OMPPtrIsMapped(void *p, HYPRE_Int device_num); HYPRE_Int HYPRE_OMPOffloadOn(); HYPRE_Int HYPRE_OMPOffloadOff(); HYPRE_Int HYPRE_OMPOffloadStatPrint(); #endif /* HYPRE_USING_DEVICE_OPENMP */ #endif /* HYPRE_OMP_DEVICE_H */ /****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. 
* * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #ifndef hypre_THREADING_HEADER #define hypre_THREADING_HEADER #ifdef HYPRE_USING_OPENMP HYPRE_Int hypre_NumThreads( void ); HYPRE_Int hypre_NumActiveThreads( void ); HYPRE_Int hypre_GetThreadNum( void ); void hypre_SetNumThreads(HYPRE_Int nt); #else #define hypre_NumThreads() 1 #define hypre_NumActiveThreads() 1 #define hypre_GetThreadNum() 0 #define hypre_SetNumThreads(x) #endif void hypre_GetSimpleThreadPartition( HYPRE_Int *begin, HYPRE_Int *end, HYPRE_Int n ); #endif /****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Header file for doing timing * *****************************************************************************/ #ifndef HYPRE_TIMING_HEADER #define HYPRE_TIMING_HEADER #include <stdlib.h> #include <stdio.h> #include <string.h> #ifdef __cplusplus extern "C" { #endif /*-------------------------------------------------------------------------- * Prototypes for low-level timing routines *--------------------------------------------------------------------------*/ /* timer.c */ HYPRE_Real time_getWallclockSeconds( void ); HYPRE_Real time_getCPUSeconds( void ); HYPRE_Real time_get_wallclock_seconds_( void ); HYPRE_Real time_get_cpu_seconds_( void ); /*-------------------------------------------------------------------------- * With timing off *--------------------------------------------------------------------------*/ #ifndef HYPRE_TIMING #define hypre_InitializeTiming(name) 0 #define hypre_FinalizeTiming(index) #define hypre_IncFLOPCount(inc) 
#define hypre_BeginTiming(i) #define hypre_EndTiming(i) #define hypre_PrintTiming(heading, comm) #define hypre_ClearTiming() /*-------------------------------------------------------------------------- * With timing on *--------------------------------------------------------------------------*/ #else /*------------------------------------------------------- * Global timing structure *-------------------------------------------------------*/ typedef struct { HYPRE_Real *wall_time; HYPRE_Real *cpu_time; HYPRE_Real *flops; char **name; HYPRE_Int *state; /* boolean flag to allow for recursive timing */ HYPRE_Int *num_regs; /* count of how many times a name is registered */ HYPRE_Int num_names; HYPRE_Int size; HYPRE_Real wall_count; HYPRE_Real CPU_count; HYPRE_Real FLOP_count; } hypre_TimingType; #ifdef HYPRE_TIMING_GLOBALS hypre_TimingType *hypre_global_timing = NULL; #else extern hypre_TimingType *hypre_global_timing; #endif /*------------------------------------------------------- * Accessor functions *-------------------------------------------------------*/ #define hypre_TimingWallTime(i) (hypre_global_timing -> wall_time[(i)]) #define hypre_TimingCPUTime(i) (hypre_global_timing -> cpu_time[(i)]) #define hypre_TimingFLOPS(i) (hypre_global_timing -> flops[(i)]) #define hypre_TimingName(i) (hypre_global_timing -> name[(i)]) #define hypre_TimingState(i) (hypre_global_timing -> state[(i)]) #define hypre_TimingNumRegs(i) (hypre_global_timing -> num_regs[(i)]) #define hypre_TimingWallCount (hypre_global_timing -> wall_count) #define hypre_TimingCPUCount (hypre_global_timing -> CPU_count) #define hypre_TimingFLOPCount (hypre_global_timing -> FLOP_count) /*------------------------------------------------------- * Prototypes *-------------------------------------------------------*/ /* timing.c */ HYPRE_Int hypre_InitializeTiming( const char *name ); HYPRE_Int hypre_FinalizeTiming( HYPRE_Int time_index ); HYPRE_Int hypre_IncFLOPCount( HYPRE_BigInt inc ); HYPRE_Int 
hypre_BeginTiming( HYPRE_Int time_index ); HYPRE_Int hypre_EndTiming( HYPRE_Int time_index ); HYPRE_Int hypre_ClearTiming( void ); HYPRE_Int hypre_PrintTiming( const char *heading, MPI_Comm comm ); #endif #ifdef __cplusplus } #endif #endif /****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Header file link lists * *****************************************************************************/ #ifndef HYPRE_LINKLIST_HEADER #define HYPRE_LINKLIST_HEADER #include <stdlib.h> #include <stdio.h> #include <string.h> #ifdef __cplusplus extern "C" { #endif struct double_linked_list { HYPRE_Int data; struct double_linked_list *next_elt; struct double_linked_list *prev_elt; HYPRE_Int head; HYPRE_Int tail; }; typedef struct double_linked_list hypre_ListElement; typedef hypre_ListElement *hypre_LinkList; #ifdef __cplusplus } #endif #endif /****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. 
* * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #ifndef hypre_EXCHANGE_DATA_HEADER #define hypre_EXCHANGE_DATA_HEADER #define hypre_BinaryTreeParentId(tree) (tree->parent_id) #define hypre_BinaryTreeNumChild(tree) (tree->num_child) #define hypre_BinaryTreeChildIds(tree) (tree->child_id) #define hypre_BinaryTreeChildId(tree, i) (tree->child_id[i]) typedef struct { HYPRE_Int parent_id; HYPRE_Int num_child; HYPRE_Int *child_id; } hypre_BinaryTree; /* In the fill_response() function the user needs to set the recv__buf and the response_message_size. Memory of size send_response_storage has been alllocated for the send_buf (in exchange_data) - if more is needed, then realloc and adjust the send_response_storage. The realloc amount should be storage+overhead. If the response is an empty "confirmation" message, then set response_message_size =0 (and do not modify the send_buf) */ typedef struct { HYPRE_Int (*fill_response)(void* recv_buf, HYPRE_Int contact_size, HYPRE_Int contact_proc, void* response_obj, MPI_Comm comm, void** response_buf, HYPRE_Int* response_message_size); HYPRE_Int send_response_overhead; /*set by exchange data */ HYPRE_Int send_response_storage; /*storage allocated for send_response_buf*/ void *data1; /*data fields user may want to access in fill_response */ void *data2; } hypre_DataExchangeResponse; HYPRE_Int hypre_CreateBinaryTree(HYPRE_Int, HYPRE_Int, hypre_BinaryTree*); HYPRE_Int hypre_DestroyBinaryTree(hypre_BinaryTree*); HYPRE_Int hypre_DataExchangeList(HYPRE_Int num_contacts, HYPRE_Int *contact_proc_list, void *contact_send_buf, HYPRE_Int *contact_send_buf_starts, HYPRE_Int contact_obj_size, HYPRE_Int response_obj_size, hypre_DataExchangeResponse *response_obj, HYPRE_Int max_response_size, HYPRE_Int rnum, MPI_Comm comm, void **p_response_recv_buf, HYPRE_Int **p_response_recv_buf_starts); #endif /* end of header */ 
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Header file for Caliper instrumentation macros * *****************************************************************************/ #ifndef CALIPER_INSTRUMENTATION_HEADER #define CALIPER_INSTRUMENTATION_HEADER #include "HYPRE_config.h" #ifdef HYPRE_USING_CALIPER #ifdef __cplusplus extern "C++" { #endif #include <caliper/cali.h> #ifdef __cplusplus } #endif static char hypre__levelname[16]; static char hypre__markname[1024]; #define HYPRE_ANNOTATE_FUNC_BEGIN CALI_MARK_FUNCTION_BEGIN #define HYPRE_ANNOTATE_FUNC_END CALI_MARK_FUNCTION_END #define HYPRE_ANNOTATE_LOOP_BEGIN(id, str) CALI_MARK_LOOP_BEGIN(id, str) #define HYPRE_ANNOTATE_LOOP_END(id) CALI_MARK_LOOP_END(id) #define HYPRE_ANNOTATE_ITER_BEGIN(id, it) CALI_MARK_ITERATION_BEGIN(id, it) #define HYPRE_ANNOTATE_ITER_END(id) CALI_MARK_ITERATION_END(id) #define HYPRE_ANNOTATE_REGION_BEGIN(...)\ {\ hypre_sprintf(hypre__markname, __VA_ARGS__);\ CALI_MARK_BEGIN(hypre__markname);\ } #define HYPRE_ANNOTATE_REGION_END(...)\ {\ hypre_sprintf(hypre__markname, __VA_ARGS__);\ CALI_MARK_END(hypre__markname);\ } #define HYPRE_ANNOTATE_MGLEVEL_BEGIN(lvl)\ {\ hypre_sprintf(hypre__levelname, "MG level %d", lvl);\ CALI_MARK_BEGIN(hypre__levelname);\ } #define HYPRE_ANNOTATE_MGLEVEL_END(lvl)\ {\ hypre_sprintf(hypre__levelname, "MG level %d", lvl);\ CALI_MARK_END(hypre__levelname);\ } #else #define HYPRE_ANNOTATE_FUNC_BEGIN #define HYPRE_ANNOTATE_FUNC_END #define HYPRE_ANNOTATE_LOOP_BEGIN(id, str) #define HYPRE_ANNOTATE_LOOP_END(id) #define HYPRE_ANNOTATE_ITER_BEGIN(id, it) #define 
HYPRE_ANNOTATE_ITER_END(id)
#define HYPRE_ANNOTATE_REGION_BEGIN(...)
#define HYPRE_ANNOTATE_REGION_END(...)
#define HYPRE_ANNOTATE_MAX_MGLEVEL(lvl)
#define HYPRE_ANNOTATE_MGLEVEL_BEGIN(lvl)
#define HYPRE_ANNOTATE_MGLEVEL_END(lvl)
#endif

#endif /* CALIPER_INSTRUMENTATION_HEADER */

/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * General structures and values
 *
 *****************************************************************************/

#ifndef HYPRE_HANDLE_H
#define HYPRE_HANDLE_H

/* Opaque device-side state; the full definition lives elsewhere so this
 * header stays usable in host-only builds. */
struct hypre_DeviceData;
typedef struct hypre_DeviceData hypre_DeviceData;

/* hypre_Handle: library-wide state object.  Holds the last error code, the
 * default memory location and execution policies, and — when the matching
 * features are compiled in — GPU device data and Umpire memory-pool
 * configuration, plus user-supplied device malloc/free hooks. */
typedef struct
{
   HYPRE_Int              hypre_error;
   HYPRE_MemoryLocation   memory_location;
   HYPRE_ExecutionPolicy  default_exec_policy;
   HYPRE_ExecutionPolicy  struct_exec_policy;

#if defined(HYPRE_USING_GPU)
   hypre_DeviceData      *device_data;
   /* device G-S options */
   HYPRE_Int              device_gs_method;
#endif

#if defined(HYPRE_USING_UMPIRE)
   /* Umpire pool names, sizes, and ownership flags for the four memory
    * spaces (device, unified, host, pinned). */
   char                   umpire_device_pool_name[HYPRE_UMPIRE_POOL_NAME_MAX_LEN];
   char                   umpire_um_pool_name[HYPRE_UMPIRE_POOL_NAME_MAX_LEN];
   char                   umpire_host_pool_name[HYPRE_UMPIRE_POOL_NAME_MAX_LEN];
   char                   umpire_pinned_pool_name[HYPRE_UMPIRE_POOL_NAME_MAX_LEN];
   size_t                 umpire_device_pool_size;
   size_t                 umpire_um_pool_size;
   size_t                 umpire_host_pool_size;
   size_t                 umpire_pinned_pool_size;
   size_t                 umpire_block_size;
   HYPRE_Int              own_umpire_device_pool;
   HYPRE_Int              own_umpire_um_pool;
   HYPRE_Int              own_umpire_host_pool;
   HYPRE_Int              own_umpire_pinned_pool;
   umpire_resourcemanager umpire_rm;
#endif

   /* user malloc/free function pointers */
   GPUMallocFunc          user_device_malloc;
   GPUMfreeFunc           user_device_free;
} hypre_Handle;

/* accessor macros to hypre_Handle */
#define hypre_HandleMemoryLocation(hypre_handle)                 ((hypre_handle) -> memory_location)
#define hypre_HandleDefaultExecPolicy(hypre_handle)              ((hypre_handle) -> default_exec_policy)
#define hypre_HandleStructExecPolicy(hypre_handle)               ((hypre_handle) -> struct_exec_policy)
#define hypre_HandleDeviceData(hypre_handle)                     ((hypre_handle) -> device_data)
#define hypre_HandleDeviceGSMethod(hypre_handle)                 ((hypre_handle) -> device_gs_method)

/* The following accessors forward into the hypre_DeviceData object; they are
 * only meaningful when device_data is populated (GPU builds). */
#define hypre_HandleCurandGenerator(hypre_handle)                hypre_DeviceDataCurandGenerator(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleCublasHandle(hypre_handle)                   hypre_DeviceDataCublasHandle(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleCusparseHandle(hypre_handle)                 hypre_DeviceDataCusparseHandle(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleComputeStream(hypre_handle)                  hypre_DeviceDataComputeStream(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleCubBinGrowth(hypre_handle)                   hypre_DeviceDataCubBinGrowth(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleCubMinBin(hypre_handle)                      hypre_DeviceDataCubMinBin(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleCubMaxBin(hypre_handle)                      hypre_DeviceDataCubMaxBin(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleCubMaxCachedBytes(hypre_handle)              hypre_DeviceDataCubMaxCachedBytes(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleCubDevAllocator(hypre_handle)                hypre_DeviceDataCubDevAllocator(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleCubUvmAllocator(hypre_handle)                hypre_DeviceDataCubUvmAllocator(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleDevice(hypre_handle)                         hypre_DeviceDataDevice(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleDeviceMaxWorkGroupSize(hypre_handle)         hypre_DeviceDataDeviceMaxWorkGroupSize(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleComputeStreamNum(hypre_handle)               hypre_DeviceDataComputeStreamNum(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleReduceBuffer(hypre_handle)                   hypre_DeviceDataReduceBuffer(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleStructCommRecvBuffer(hypre_handle)           hypre_DeviceDataStructCommRecvBuffer(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleStructCommSendBuffer(hypre_handle)           hypre_DeviceDataStructCommSendBuffer(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleStructCommRecvBufferSize(hypre_handle)       hypre_DeviceDataStructCommRecvBufferSize(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleStructCommSendBufferSize(hypre_handle)       hypre_DeviceDataStructCommSendBufferSize(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleSpgemmUseCusparse(hypre_handle)              hypre_DeviceDataSpgemmUseCusparse(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleSpgemmAlgorithm(hypre_handle)                hypre_DeviceDataSpgemmAlgorithm(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleSpgemmRownnzEstimateMethod(hypre_handle)     hypre_DeviceDataSpgemmRownnzEstimateMethod(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleSpgemmRownnzEstimateNsamples(hypre_handle)   hypre_DeviceDataSpgemmRownnzEstimateNsamples(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleSpgemmRownnzEstimateMultFactor(hypre_handle) hypre_DeviceDataSpgemmRownnzEstimateMultFactor(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleSpgemmHashType(hypre_handle)                 hypre_DeviceDataSpgemmHashType(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleDeviceAllocator(hypre_handle)                hypre_DeviceDataDeviceAllocator(hypre_HandleDeviceData(hypre_handle))
#define hypre_HandleUseGpuRand(hypre_handle)                     hypre_DeviceDataUseGpuRand(hypre_HandleDeviceData(hypre_handle))

/* Direct-member accessors (valid in all builds that define the members). */
#define hypre_HandleUserDeviceMalloc(hypre_handle)               ((hypre_handle) -> user_device_malloc)
#define hypre_HandleUserDeviceMfree(hypre_handle)                ((hypre_handle) -> user_device_free)
#define hypre_HandleUmpireResourceMan(hypre_handle)              ((hypre_handle) -> umpire_rm)
#define hypre_HandleUmpireDevicePoolSize(hypre_handle)           ((hypre_handle) -> umpire_device_pool_size)
#define hypre_HandleUmpireUMPoolSize(hypre_handle)               ((hypre_handle) -> umpire_um_pool_size)
#define hypre_HandleUmpireHostPoolSize(hypre_handle)             ((hypre_handle) -> umpire_host_pool_size)
#define hypre_HandleUmpirePinnedPoolSize(hypre_handle)           ((hypre_handle) -> umpire_pinned_pool_size)
#define hypre_HandleUmpireBlockSize(hypre_handle)                ((hypre_handle) -> umpire_block_size)
#define hypre_HandleUmpireDevicePoolName(hypre_handle)           ((hypre_handle) -> umpire_device_pool_name)
#define hypre_HandleUmpireUMPoolName(hypre_handle)               ((hypre_handle) -> umpire_um_pool_name)
#define hypre_HandleUmpireHostPoolName(hypre_handle)             ((hypre_handle) -> umpire_host_pool_name)
#define hypre_HandleUmpirePinnedPoolName(hypre_handle)           ((hypre_handle) -> umpire_pinned_pool_name)
#define hypre_HandleOwnUmpireDevicePool(hypre_handle)            ((hypre_handle) -> own_umpire_device_pool)
#define hypre_HandleOwnUmpireUMPool(hypre_handle)                ((hypre_handle) -> own_umpire_um_pool)
#define hypre_HandleOwnUmpireHostPool(hypre_handle)              ((hypre_handle) -> own_umpire_host_pool)
#define hypre_HandleOwnUmpirePinnedPool(hypre_handle)            ((hypre_handle) -> own_umpire_pinned_pool)

#endif

/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#ifndef HYPRE_GSELIM_H
#define HYPRE_GSELIM_H

/* hypre_gselim(A, x, n, error):
 * In-place Gaussian elimination (no pivoting) solving the dense n x n system
 * A * sol = x, where A is stored row-major in a flat array of length n*n.
 * On exit x holds the solution and A has been overwritten by the elimination.
 * `error` is set to 0 and incremented only in the 1x1 case when A[0] == 0.
 * NOTE(review): in the n x n path a zero pivot A[k*n+k] is silently skipped
 * rather than reported — callers must ensure the system is nonsingular. */
#define hypre_gselim(A,x,n,error) \
{ \
   HYPRE_Int j,k,m; \
   HYPRE_Real factor; \
   HYPRE_Real divA; \
   error = 0; \
   if (n == 1) /* A is 1x1 */ \
   { \
      if (A[0] != 0.0) \
      { \
         x[0] = x[0]/A[0]; \
      } \
      else \
      { \
         error++; \
      } \
   } \
   else/* A is nxn. Forward elimination */ \
   { \
      for (k = 0; k < n-1; k++) \
      { \
         if (A[k*n+k] != 0.0) \
         { \
            divA = 1.0/A[k*n+k]; \
            for (j = k+1; j < n; j++) \
            { \
               if (A[j*n+k] != 0.0) \
               { \
                  factor = A[j*n+k]*divA; \
                  for (m = k+1; m < n; m++) \
                  { \
                     A[j*n+m] -= factor * A[k*n+m]; \
                  } \
                  x[j] -= factor * x[k]; \
               } \
            } \
         } \
      } \
      /* Back Substitution */ \
      for (k = n-1; k > 0; --k) \
      { \
         if (A[k*n+k] != 0.0) \
         { \
            x[k] /= A[k*n+k]; \
            for (j = 0; j < k; j++) \
            { \
               if (A[j*n+k] != 0.0) \
               { \
                  x[j] -= x[k] * A[j*n+k]; \
               } \
            } \
         } \
      } \
      if (A[0] != 0.0) x[0] /= A[0]; \
   } \
}

#endif /* #ifndef HYPRE_GSELIM_H */

/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Header file for hypre_IntArray struct for holding an array of integers
 *
 *****************************************************************************/

#ifndef hypre_INTARRAY_HEADER
#define hypre_INTARRAY_HEADER

/*--------------------------------------------------------------------------
 * hypre_IntArray
 *--------------------------------------------------------------------------*/

typedef struct
{
   /* pointer to data and size of data */
   HYPRE_Int            *data;
   HYPRE_Int             size;

   /* memory location of array data */
   HYPRE_MemoryLocation  memory_location;
} hypre_IntArray;

/*--------------------------------------------------------------------------
 * Accessor functions for the IntArray structure
 *--------------------------------------------------------------------------*/

#define hypre_IntArrayData(array)           ((array) -> data)
#define hypre_IntArraySize(array)           ((array) -> size)
#define hypre_IntArrayMemoryLocation(array) ((array) -> memory_location)

#endif
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/* Forward declarations for the hypre utility routines; each section names the
 * .c file holding the definitions. */

/* amg_linklist.c */
void hypre_dispose_elt ( hypre_LinkList element_ptr );
void hypre_remove_point ( hypre_LinkList *LoL_head_ptr, hypre_LinkList *LoL_tail_ptr, HYPRE_Int measure, HYPRE_Int index, HYPRE_Int *lists, HYPRE_Int *where );
hypre_LinkList hypre_create_elt ( HYPRE_Int Item );
void hypre_enter_on_lists ( hypre_LinkList *LoL_head_ptr, hypre_LinkList *LoL_tail_ptr, HYPRE_Int measure, HYPRE_Int index, HYPRE_Int *lists, HYPRE_Int *where );

/* binsearch.c */
HYPRE_Int hypre_BinarySearch ( HYPRE_Int *list, HYPRE_Int value, HYPRE_Int list_length );
HYPRE_Int hypre_BigBinarySearch ( HYPRE_BigInt *list, HYPRE_BigInt value, HYPRE_Int list_length );
HYPRE_Int hypre_BinarySearch2 ( HYPRE_Int *list, HYPRE_Int value, HYPRE_Int low, HYPRE_Int high, HYPRE_Int *spot );
HYPRE_Int *hypre_LowerBound( HYPRE_Int *first, HYPRE_Int *last, HYPRE_Int value );
HYPRE_BigInt *hypre_BigLowerBound( HYPRE_BigInt *first, HYPRE_BigInt *last, HYPRE_BigInt value );

/* log.c */
HYPRE_Int hypre_Log2( HYPRE_Int p );

/* complex.c */
#ifdef HYPRE_COMPLEX
HYPRE_Complex hypre_conj( HYPRE_Complex value );
HYPRE_Real    hypre_cabs( HYPRE_Complex value );
HYPRE_Real    hypre_creal( HYPRE_Complex value );
HYPRE_Real    hypre_cimag( HYPRE_Complex value );
#else
/* Real-valued builds: the complex helpers degenerate to identity/fabs/0. */
#define hypre_conj(value)  value
#define hypre_cabs(value)  fabs(value)
#define hypre_creal(value) value
#define hypre_cimag(value) 0.0
#endif

/* general.c */
#ifdef HYPRE_USING_MEMORY_TRACKER
hypre_MemoryTracker* hypre_memory_tracker();
#endif
hypre_Handle* hypre_handle();
hypre_Handle* hypre_HandleCreate();
HYPRE_Int hypre_HandleDestroy(hypre_Handle *hypre_handle_);
HYPRE_Int hypre_SetDevice(hypre_int device_id, hypre_Handle *hypre_handle_);
HYPRE_Int hypre_GetDevice(hypre_int *device_id);
HYPRE_Int hypre_GetDeviceCount(hypre_int *device_count);
HYPRE_Int hypre_GetDeviceLastError();
HYPRE_Int hypre_UmpireInit(hypre_Handle *hypre_handle_);
HYPRE_Int hypre_UmpireFinalize(hypre_Handle *hypre_handle_);

/* qsort.c */
void hypre_swap ( HYPRE_Int *v, HYPRE_Int i, HYPRE_Int j );
void hypre_swap_c ( HYPRE_Complex *v, HYPRE_Int i, HYPRE_Int j );
void hypre_swap2 ( HYPRE_Int *v, HYPRE_Real *w, HYPRE_Int i, HYPRE_Int j );
void hypre_BigSwap2 ( HYPRE_BigInt *v, HYPRE_Real *w, HYPRE_Int i, HYPRE_Int j );
void hypre_swap2i ( HYPRE_Int *v, HYPRE_Int *w, HYPRE_Int i, HYPRE_Int j );
void hypre_BigSwap2i ( HYPRE_BigInt *v, HYPRE_Int *w, HYPRE_Int i, HYPRE_Int j );
void hypre_swap3i ( HYPRE_Int *v, HYPRE_Int *w, HYPRE_Int *z, HYPRE_Int i, HYPRE_Int j );
void hypre_swap3_d ( HYPRE_Real *v, HYPRE_Int *w, HYPRE_Int *z, HYPRE_Int i, HYPRE_Int j );
void hypre_swap3_d_perm(HYPRE_Int *v, HYPRE_Real *w, HYPRE_Int *z, HYPRE_Int i, HYPRE_Int j );
void hypre_BigSwap4_d ( HYPRE_Real *v, HYPRE_BigInt *w, HYPRE_Int *z, HYPRE_Int *y, HYPRE_Int i, HYPRE_Int j );
void hypre_swap_d ( HYPRE_Real *v, HYPRE_Int i, HYPRE_Int j );
void hypre_qsort0 ( HYPRE_Int *v, HYPRE_Int left, HYPRE_Int right );
void hypre_qsort1 ( HYPRE_Int *v, HYPRE_Real *w, HYPRE_Int left, HYPRE_Int right );
void hypre_BigQsort1 ( HYPRE_BigInt *v, HYPRE_Real *w, HYPRE_Int left, HYPRE_Int right );
void hypre_qsort2i ( HYPRE_Int *v, HYPRE_Int *w, HYPRE_Int left, HYPRE_Int right );
void hypre_BigQsort2i( HYPRE_BigInt *v, HYPRE_Int *w, HYPRE_Int left, HYPRE_Int right );
void hypre_qsort2 ( HYPRE_Int *v, HYPRE_Real *w, HYPRE_Int left, HYPRE_Int right );
void hypre_qsort2_abs ( HYPRE_Int *v, HYPRE_Real *w, HYPRE_Int left, HYPRE_Int right );
void hypre_qsort3i ( HYPRE_Int *v, HYPRE_Int *w, HYPRE_Int *z, HYPRE_Int left, HYPRE_Int right );
void hypre_qsort3ir ( HYPRE_Int *v, HYPRE_Real *w, HYPRE_Int *z, HYPRE_Int left, HYPRE_Int right );
void hypre_qsort3( HYPRE_Real *v, HYPRE_Int *w, HYPRE_Int *z, HYPRE_Int left, HYPRE_Int right );
void hypre_qsort3_abs ( HYPRE_Real *v, HYPRE_Int *w, HYPRE_Int *z, HYPRE_Int left, HYPRE_Int right );
void hypre_BigQsort4_abs ( HYPRE_Real *v, HYPRE_BigInt *w, HYPRE_Int *z, HYPRE_Int *y, HYPRE_Int left, HYPRE_Int right );
void hypre_qsort_abs ( HYPRE_Real *w, HYPRE_Int left, HYPRE_Int right );
void hypre_BigSwapbi(HYPRE_BigInt *v, HYPRE_Int *w, HYPRE_Int i, HYPRE_Int j );
void hypre_BigQsortbi( HYPRE_BigInt *v, HYPRE_Int *w, HYPRE_Int left, HYPRE_Int right );
void hypre_BigSwapLoc(HYPRE_BigInt *v, HYPRE_Int *w, HYPRE_Int i, HYPRE_Int j );
void hypre_BigQsortbLoc( HYPRE_BigInt *v, HYPRE_Int *w, HYPRE_Int left, HYPRE_Int right );
void hypre_BigSwapb2i(HYPRE_BigInt *v, HYPRE_Int *w, HYPRE_Int *z, HYPRE_Int i, HYPRE_Int j );
void hypre_BigQsortb2i( HYPRE_BigInt *v, HYPRE_Int *w, HYPRE_Int *z, HYPRE_Int left, HYPRE_Int right );
void hypre_BigSwap( HYPRE_BigInt *v, HYPRE_Int i, HYPRE_Int j );
void hypre_BigQsort0( HYPRE_BigInt *v, HYPRE_Int left, HYPRE_Int right );
void hypre_topo_sort(const HYPRE_Int *row_ptr, const HYPRE_Int *col_inds, const HYPRE_Complex *data, HYPRE_Int *ordering, HYPRE_Int n);
void hypre_dense_topo_sort(const HYPRE_Complex *L, HYPRE_Int *ordering, HYPRE_Int n, HYPRE_Int is_col_major);

/* qsplit.c */
HYPRE_Int hypre_DoubleQuickSplit ( HYPRE_Real *values, HYPRE_Int *indices, HYPRE_Int list_length, HYPRE_Int NumberKept );

/* random.c */
/* HYPRE_CUDA_GLOBAL */ void hypre_SeedRand ( HYPRE_Int seed );
/* HYPRE_CUDA_GLOBAL */ HYPRE_Int hypre_RandI ( void );
/* HYPRE_CUDA_GLOBAL */ HYPRE_Real hypre_Rand ( void );

/* prefix_sum.c */
/**
 * Assumed to be called within an omp region.
 * Let x_i be the input of ith thread.
 * The output of ith thread y_i = x_0 + x_1 + ... + x_{i-1}
 * Additionally, sum = x_0 + x_1 + ... + x_{nthreads - 1}
 * Note that always y_0 = 0
 *
 * @param workspace at least with length (nthreads+1)
 *        workspace[tid] will contain result for tid
 *        workspace[nthreads] will contain sum
 */
void hypre_prefix_sum(HYPRE_Int *in_out, HYPRE_Int *sum, HYPRE_Int *workspace);
/**
 * This version does prefix sum in pair.
 * Useful when we prefix sum of diag and offd in tandem.
 *
 * @param workspace at least with length 2*(nthreads+1)
 *        workspace[2*tid] and workspace[2*tid+1] will contain results for tid
 *        workspace[2*nthreads] and workspace[2*nthreads + 1] will contain sums
 *        (NOTE(review): original comment said 3*nthreads, which exceeds the
 *        stated workspace length; 2*nthreads matches it — confirm in
 *        prefix_sum.c)
 */
void hypre_prefix_sum_pair(HYPRE_Int *in_out1, HYPRE_Int *sum1, HYPRE_Int *in_out2, HYPRE_Int *sum2, HYPRE_Int *workspace);
/**
 * @param workspace at least with length 3*(nthreads+1)
 *        workspace[3*tid:3*tid+3) will contain results for tid
 */
void hypre_prefix_sum_triple(HYPRE_Int *in_out1, HYPRE_Int *sum1, HYPRE_Int *in_out2, HYPRE_Int *sum2, HYPRE_Int *in_out3, HYPRE_Int *sum3, HYPRE_Int *workspace);
/**
 * n prefix-sums together.
 * workspace[n*tid:n*(tid+1)) will contain results for tid
 * workspace[n*nthreads:n*(nthreads+1)) will contain sums
 * (NOTE(review): original comment said sums live at
 * workspace[nthreads*tid:nthreads*(tid+1)), which overlaps the per-thread
 * results; confirm against prefix_sum.c)
 *
 * @param workspace at least with length n*(nthreads+1)
 */
void hypre_prefix_sum_multiple(HYPRE_Int *in_out, HYPRE_Int *sum, HYPRE_Int n, HYPRE_Int *workspace);

/* hopscotch_hash.c */

#ifdef HYPRE_USING_OPENMP

/* Check if atomic operations are available to use concurrent hopscotch hash table */
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
#define HYPRE_USING_ATOMIC
//#elif defined _MSC_VER
// JSP: haven't tested, so comment out for now
//#define HYPRE_USING_ATOMIC
//#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
// JSP: not many compilers have implemented this, so comment out for now
//#define HYPRE_USING_ATOMIC
//#include <stdatomic.h>
#endif

#endif // HYPRE_USING_OPENMP

#ifdef HYPRE_HOPSCOTCH
#ifdef HYPRE_USING_ATOMIC
// concurrent hopscotch hashing is possible only with atomic supports
#define HYPRE_CONCURRENT_HOPSCOTCH
#endif
#endif

#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/* One lock + modification timestamp per segment of the concurrent table. */
typedef struct
{
   HYPRE_Int volatile timestamp;
   omp_lock_t         lock;
} hypre_HopscotchSegment;
#endif

/**
 * The current typical use case of unordered set is putting input sequence
 * with lots of duplication (putting all colidx received from other ranks),
 * followed by one sweep of enumeration.
 * Since the capacity is set to the number of inputs, which is much larger
 * than the number of unique elements, we optimize for initialization and
 * enumeration whose time is proportional to the capacity.
 * For initialization and enumeration, structure of array (SoA) is better
 * for vectorization, cache line utilization, and so on.
 */
typedef struct
{
   HYPRE_Int  volatile              segmentMask;
   HYPRE_Int  volatile              bucketMask;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment* volatile segments;
#endif
   HYPRE_Int *volatile              key;
   hypre_uint *volatile             hopInfo;
   HYPRE_Int *volatile              hash;
} hypre_UnorderedIntSet;

typedef struct
{
   HYPRE_Int volatile               segmentMask;
   HYPRE_Int volatile               bucketMask;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment* volatile segments;
#endif
   HYPRE_BigInt *volatile           key;
   hypre_uint *volatile             hopInfo;
   HYPRE_BigInt *volatile           hash;
} hypre_UnorderedBigIntSet;

typedef struct
{
   hypre_uint volatile hopInfo;
   HYPRE_Int  volatile hash;
   HYPRE_Int  volatile key;
   HYPRE_Int  volatile data;
} hypre_HopscotchBucket;

typedef struct
{
   hypre_uint volatile   hopInfo;
   HYPRE_BigInt volatile hash;
   HYPRE_BigInt volatile key;
   HYPRE_Int volatile    data;
} hypre_BigHopscotchBucket;

/**
 * The current typical use case of unordered map is putting input sequence
 * with no duplication (inverse map of a bijective mapping) followed by
 * lots of lookups.
 * For lookup, array of structure (AoS) gives better cache line utilization.
 */
typedef struct
{
   HYPRE_Int  volatile              segmentMask;
   HYPRE_Int  volatile              bucketMask;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment* volatile segments;
#endif
   hypre_HopscotchBucket* volatile  table;
} hypre_UnorderedIntMap;

typedef struct
{
   HYPRE_Int volatile                 segmentMask;
   HYPRE_Int volatile                 bucketMask;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
   hypre_HopscotchSegment* volatile   segments;
#endif
   hypre_BigHopscotchBucket* volatile table;
} hypre_UnorderedBigIntMap;

/* merge_sort.c */
/**
 * Why merge sort?
 * 1) Merge sort can take advantage of eliminating duplicates.
 * 2) Merge sort is more efficiently parallelizable than qsort
 */
HYPRE_Int hypre_MergeOrderedArrays( HYPRE_Int size1, HYPRE_Int *array1, HYPRE_Int size2, HYPRE_Int *array2, HYPRE_Int *size3_ptr, HYPRE_Int **array3_ptr);
void hypre_union2(HYPRE_Int n1, HYPRE_BigInt *arr1, HYPRE_Int n2, HYPRE_BigInt *arr2, HYPRE_Int *n3, HYPRE_BigInt *arr3, HYPRE_Int *map1, HYPRE_Int *map2);
void hypre_merge_sort(HYPRE_Int *in, HYPRE_Int *temp, HYPRE_Int len, HYPRE_Int **sorted);
void hypre_big_merge_sort(HYPRE_BigInt *in, HYPRE_BigInt *temp, HYPRE_Int len, HYPRE_BigInt **sorted);
void hypre_sort_and_create_inverse_map(HYPRE_Int *in, HYPRE_Int len, HYPRE_Int **out, hypre_UnorderedIntMap *inverse_map);
void hypre_big_sort_and_create_inverse_map(HYPRE_BigInt *in, HYPRE_Int len, HYPRE_BigInt **out, hypre_UnorderedBigIntMap *inverse_map);

#if defined(HYPRE_USING_GPU)
HYPRE_Int hypre_SyncCudaComputeStream(hypre_Handle *hypre_handle);
HYPRE_Int hypre_SyncCudaDevice(hypre_Handle *hypre_handle);
HYPRE_Int hypre_ResetCudaDevice(hypre_Handle *hypre_handle);
HYPRE_Int hypreDevice_DiagScaleVector(HYPRE_Int n, HYPRE_Int *A_i, HYPRE_Complex *A_data, HYPRE_Complex *x, HYPRE_Complex beta, HYPRE_Complex *y);
HYPRE_Int hypreDevice_DiagScaleVector2(HYPRE_Int n, HYPRE_Int *A_i, HYPRE_Complex *A_data, HYPRE_Complex *x, HYPRE_Complex beta, HYPRE_Complex *y, HYPRE_Complex *z);
HYPRE_Int hypreDevice_IVAXPY(HYPRE_Int n, HYPRE_Complex *a, HYPRE_Complex *x, HYPRE_Complex *y);
HYPRE_Int hypreDevice_IVAXPYMarked(HYPRE_Int n, HYPRE_Complex *a, HYPRE_Complex *x, HYPRE_Complex *y, HYPRE_Int *marker, HYPRE_Int marker_val);
HYPRE_Int hypreDevice_BigIntFilln(HYPRE_BigInt *d_x, size_t n, HYPRE_BigInt v);
HYPRE_Int hypreDevice_Filln(HYPRE_Complex *d_x, size_t n, HYPRE_Complex v);
HYPRE_Int hypreDevice_Scalen(HYPRE_Complex *d_x, size_t n, HYPRE_Complex v);
#endif

HYPRE_Int hypre_CurandUniform( HYPRE_Int n, HYPRE_Real *urand, HYPRE_Int set_seed, hypre_ulonglongint seed, HYPRE_Int set_offset, hypre_ulonglongint offset);
HYPRE_Int hypre_CurandUniformSingle( HYPRE_Int n, float *urand, HYPRE_Int set_seed, hypre_ulonglongint seed, HYPRE_Int set_offset, hypre_ulonglongint offset);

HYPRE_Int hypre_bind_device(HYPRE_Int myid, HYPRE_Int nproc, MPI_Comm comm);

/* nvtx.c */
void hypre_GpuProfilingPushRangeColor(const char *name, HYPRE_Int cid);
void hypre_GpuProfilingPushRange(const char *name);
void hypre_GpuProfilingPopRange();

/* utilities.c */
HYPRE_Int hypre_multmod(HYPRE_Int a, HYPRE_Int b, HYPRE_Int mod);
void hypre_partition1D(HYPRE_Int n, HYPRE_Int p, HYPRE_Int j, HYPRE_Int *s, HYPRE_Int *e);
char *hypre_strcpy(char *destination, const char *source);

HYPRE_Int hypre_SetSyncCudaCompute(HYPRE_Int action);
HYPRE_Int hypre_RestoreSyncCudaCompute();
HYPRE_Int hypre_GetSyncCudaCompute(HYPRE_Int *cuda_compute_stream_sync_ptr);
/* NOTE(review): hypre_SyncCudaComputeStream is also declared above under
 * HYPRE_USING_GPU; this repeated declaration is harmless. */
HYPRE_Int hypre_SyncCudaComputeStream(hypre_Handle *hypre_handle);

/* handle.c */
HYPRE_Int hypre_SetSpGemmUseCusparse( HYPRE_Int use_cusparse );
HYPRE_Int hypre_SetSpGemmAlgorithm( HYPRE_Int value );
HYPRE_Int hypre_SetSpGemmRownnzEstimateMethod( HYPRE_Int value );
HYPRE_Int hypre_SetSpGemmRownnzEstimateNSamples( HYPRE_Int value );
HYPRE_Int hypre_SetSpGemmRownnzEstimateMultFactor( HYPRE_Real value );
HYPRE_Int hypre_SetSpGemmHashType( char value );
HYPRE_Int hypre_SetUseGpuRand( HYPRE_Int use_gpurand );
HYPRE_Int hypre_SetGaussSeidelMethod( HYPRE_Int gs_method );
HYPRE_Int hypre_SetUserDeviceMalloc(GPUMallocFunc func);
HYPRE_Int hypre_SetUserDeviceMfree(GPUMfreeFunc func);

/* int_array.c */
hypre_IntArray* hypre_IntArrayCreate( HYPRE_Int size );
HYPRE_Int hypre_IntArrayDestroy( hypre_IntArray *array );
HYPRE_Int hypre_IntArrayInitialize_v2( hypre_IntArray *array, HYPRE_MemoryLocation memory_location );
HYPRE_Int hypre_IntArrayInitialize( hypre_IntArray *array );
HYPRE_Int hypre_IntArrayCopy( hypre_IntArray *x, hypre_IntArray *y );
hypre_IntArray* hypre_IntArrayCloneDeep_v2( hypre_IntArray *x, HYPRE_MemoryLocation memory_location );
hypre_IntArray* hypre_IntArrayCloneDeep( hypre_IntArray *x );
HYPRE_Int hypre_IntArraySetConstantValues( hypre_IntArray *v, HYPRE_Int value );

/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/**
 * Hopscotch hash is modified from the code downloaded from
 * https://sites.google.com/site/cconcurrencypackage/hopscotch-hashing
 * with the following terms of usage
 */

////////////////////////////////////////////////////////////////////////////////
//TERMS OF USAGE
//------------------------------------------------------------------------------
//
//  Permission to use, copy, modify and distribute this software and
//  its documentation for any purpose is hereby granted without fee,
//  provided that due acknowledgments to the authors are provided and
//  this permission notice appears in all copies of the software.
//  The software is provided "as is". There is no warranty of any kind.
//
//Authors:
//  Maurice Herlihy
//  Brown University
//  and
//  Nir Shavit
//  Tel-Aviv University
//  and
//  Moran Tzafrir
//  Tel-Aviv University
//
//  Date: July 15, 2008.
//
////////////////////////////////////////////////////////////////////////////////
// Programmer : Moran Tzafrir (MoranTza@gmail.com)
// Modified   : Jongsoo Park (jongsoo.park@intel.com)
//              Oct 1, 2015.
//
////////////////////////////////////////////////////////////////////////////////

#ifndef hypre_HOPSCOTCH_HASH_HEADER
#define hypre_HOPSCOTCH_HASH_HEADER

//#include <strings.h>
#include <string.h>
#include <stdio.h>
#include <limits.h>
#include <math.h>

#ifdef HYPRE_USING_OPENMP
#include <omp.h>
#endif

#include "_hypre_utilities.h"

// Potentially architecture specific features used here:
// __sync_val_compare_and_swap

#ifdef __cplusplus
extern "C" {
#endif

/******************************************************************************
 * This next section of code is here instead of in _hypre_utilities.h to get
 * around some portability issues with Visual Studio.  By putting it here, we
 * can explicitly include this '.h' file in a few files in hypre and compile
 * them with C++ instead of C (VS does not support C99 'inline').
 ******************************************************************************/

#ifdef HYPRE_USING_ATOMIC

/* Atomically: if *ptr == oldval then *ptr = newval; always returns the value
 * *ptr held before the call (GCC __sync builtin semantics). */
static inline HYPRE_Int hypre_compare_and_swap( HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval )
{
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
   return __sync_val_compare_and_swap(ptr, oldval, newval);
//#elif defined _MSC_VER
//return _InterlockedCompareExchange((long *)ptr, newval, oldval);
//#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
// JSP: not many compilers have implemented this, so comment out for now
//_Atomic HYPRE_Int *atomic_ptr = ptr;
//atomic_compare_exchange_strong(atomic_ptr, &oldval, newval);
//return oldval;
#endif
}

/* Atomically adds value to *ptr; returns the value *ptr held before the add. */
static inline HYPRE_Int hypre_fetch_and_add( HYPRE_Int *ptr, HYPRE_Int value )
{
#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100
   return __sync_fetch_and_add(ptr, value);
//#elif defined _MSC_VER
//return _InterlockedExchangeAdd((long *)ptr, value);
//#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__)
// JSP: not many compilers have implemented this, so comment out for now
//_Atomic HYPRE_Int *atomic_ptr = ptr;
//return atomic_fetch_add(atomic_ptr, value);
#endif
}

#else // !HYPRE_USING_ATOMIC

/* Sequential fallbacks: same interface, but plain (non-atomic) reads/writes —
 * only valid when the table is used single-threaded. */
static inline HYPRE_Int hypre_compare_and_swap( HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval )
{
   if (*ptr == oldval)
   {
      *ptr = newval;
      return oldval;
   }
   else
   {
      return *ptr;
   }
}

static inline HYPRE_Int hypre_fetch_and_add( HYPRE_Int *ptr, HYPRE_Int value )
{
   HYPRE_Int oldval = *ptr;
   *ptr += value;
   return oldval;
}

#endif // !HYPRE_USING_ATOMIC

/******************************************************************************/

// Constants ................................................................
#define HYPRE_HOPSCOTCH_HASH_HOP_RANGE    (32)
#define HYPRE_HOPSCOTCH_HASH_INSERT_RANGE (4*1024)
#define HYPRE_HOPSCOTCH_HASH_EMPTY        (0)
#define HYPRE_HOPSCOTCH_HASH_BUSY         (1)

// Small Utilities ..........................................................

/* Index of the least-significant set bit of x (0-based); returns -1 when
 * x == 0 (both the manual loop and ffs() yield that via the pos - 1). */
static inline HYPRE_Int first_lsb_bit_indx( hypre_uint x )
{
   HYPRE_Int pos;
#if defined(_MSC_VER) || defined(__MINGW64__)
   if (x == 0)
   {
      pos = 0;
   }
   else
   {
      for (pos = 1; !(x & 1); ++pos)
      {
         x >>= 1;
      }
   }
#else
   pos = ffs(x);
#endif
   return (pos - 1);
}

/**
 * hypre_Hash is adapted from xxHash with the following license.
 */
/*
   xxHash - Extremely Fast Hash algorithm
   Header File
   Copyright (C) 2012-2015, Yann Collet.
   BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:
   * Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
   copyright notice, this list of conditions and the following disclaimer
   in the documentation and/or other materials provided with the
   distribution.
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   You can contact the author at :
   - xxHash source repository : https://github.com/Cyan4973/xxHash
*/

/***************************************
 * Constants
 ***************************************/
#define HYPRE_XXH_PRIME32_1 2654435761U
#define HYPRE_XXH_PRIME32_2 2246822519U
#define HYPRE_XXH_PRIME32_3 3266489917U
#define HYPRE_XXH_PRIME32_4 668265263U
#define HYPRE_XXH_PRIME32_5 374761393U

#define HYPRE_XXH_PRIME64_1 11400714785074694791ULL
#define HYPRE_XXH_PRIME64_2 14029467366897019727ULL
#define HYPRE_XXH_PRIME64_3 1609587929392839161ULL
#define HYPRE_XXH_PRIME64_4 9650029242287828579ULL
#define HYPRE_XXH_PRIME64_5 2870177450012600261ULL

#define HYPRE_XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#define HYPRE_XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))

#if defined(HYPRE_MIXEDINT) || defined(HYPRE_BIGINT)
/* 64-bit variant: xxHash64-style single-lane mix + avalanche finalizer.
 * Used when big/mixed integers are enabled. */
static inline HYPRE_BigInt hypre_BigHash( HYPRE_BigInt input )
{
   hypre_ulongint h64 = HYPRE_XXH_PRIME64_5 + sizeof(input);

   hypre_ulongint k1 = input;
   k1 *= HYPRE_XXH_PRIME64_2;
   k1 = HYPRE_XXH_rotl64(k1, 31);
   k1 *= HYPRE_XXH_PRIME64_1;
   h64 ^= k1;
   h64 = HYPRE_XXH_rotl64(h64, 27) * HYPRE_XXH_PRIME64_1 + HYPRE_XXH_PRIME64_4;

   h64 ^= h64 >> 33;
   h64 *= HYPRE_XXH_PRIME64_2;
   h64 ^= h64 >> 29;
   h64 *= HYPRE_XXH_PRIME64_3;
   h64 ^= h64 >> 32;

#ifndef NDEBUG
   /* The table uses HYPRE_HOPSCOTCH_HASH_EMPTY as its "no entry" marker, so a
    * hash equal to it would be ambiguous — trap that in debug builds. */
   if (HYPRE_HOPSCOTCH_HASH_EMPTY == h64)
   {
      hypre_printf("hash(%lld) = %d\n", h64, HYPRE_HOPSCOTCH_HASH_EMPTY);
      hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h64);
   }
#endif

   return h64;
}
#else
/* 32-bit variant: xxHash32-style finalizer. */
static inline HYPRE_Int hypre_BigHash(HYPRE_Int input)
{
   hypre_uint h32 = HYPRE_XXH_PRIME32_5 + sizeof(input);

   // 1665863975 is added to input so that
   // only -1073741824 gives HYPRE_HOPSCOTCH_HASH_EMPTY.
   // Hence, we're fine as long as key is non-negative.
   h32 += (input + 1665863975) * HYPRE_XXH_PRIME32_3;
   h32 = HYPRE_XXH_rotl32(h32, 17) * HYPRE_XXH_PRIME32_4;

   h32 ^= h32 >> 15;
   h32 *= HYPRE_XXH_PRIME32_2;
   h32 ^= h32 >> 13;
   h32 *= HYPRE_XXH_PRIME32_3;
   h32 ^= h32 >> 16;

   //hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h32);

   return h32;
}
#endif

#ifdef HYPRE_BIGINT
/* Same mixing scheme as hypre_BigHash above, for plain HYPRE_Int keys when
 * HYPRE_Int itself is 64-bit. */
static inline HYPRE_Int hypre_Hash(HYPRE_Int input)
{
   hypre_ulongint h64 = HYPRE_XXH_PRIME64_5 + sizeof(input);

   hypre_ulongint k1 = input;
   k1 *= HYPRE_XXH_PRIME64_2;
   k1 = HYPRE_XXH_rotl64(k1, 31);
   k1 *= HYPRE_XXH_PRIME64_1;
   h64 ^= k1;
   h64 = HYPRE_XXH_rotl64(h64, 27) * HYPRE_XXH_PRIME64_1 + HYPRE_XXH_PRIME64_4;

   h64 ^= h64 >> 33;
   h64 *= HYPRE_XXH_PRIME64_2;
   h64 ^= h64 >> 29;
   h64 *= HYPRE_XXH_PRIME64_3;
   h64 ^= h64 >> 32;

#ifndef NDEBUG
   if (HYPRE_HOPSCOTCH_HASH_EMPTY == h64)
   {
      hypre_printf("hash(%lld) = %d\n", h64, HYPRE_HOPSCOTCH_HASH_EMPTY);
      hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h64);
   }
#endif

   return h64;
}
#else
static inline HYPRE_Int hypre_Hash(HYPRE_Int input)
{
   hypre_uint h32 = HYPRE_XXH_PRIME32_5 + sizeof(input);

   // 1665863975 is added to input so that
   // only -1073741824 gives HYPRE_HOPSCOTCH_HASH_EMPTY.
   // Hence, we're fine as long as key is non-negative.
   h32 += (input + 1665863975) * HYPRE_XXH_PRIME32_3;
   h32 = HYPRE_XXH_rotl32(h32, 17) * HYPRE_XXH_PRIME32_4;

   h32 ^= h32 >> 15;
   h32 *= HYPRE_XXH_PRIME32_2;
   h32 ^= h32 >> 13;
   h32 *= HYPRE_XXH_PRIME32_3;
   h32 ^= h32 >> 16;

   //hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h32);

   return h32;
}
#endif

/* Hopscotch displacement for the SoA int set: scan the HOP_RANGE-1 buckets
 * before *free_bucket for an occupied entry that may legally move into the
 * free slot, copy it there, update the owner's hopInfo bitmap, and report the
 * vacated bucket back through *free_bucket/*free_dist.  On failure sets
 * *free_bucket = -1 and *free_dist = 0.  In concurrent builds the owner's
 * segment lock is taken (unless it is the caller's start_seg) and its
 * timestamp is bumped before hopInfo changes so readers can detect the move. */
static inline void
hypre_UnorderedIntSetFindCloserFreeBucket( hypre_UnorderedIntSet *s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                           hypre_HopscotchSegment *start_seg,
#endif
                                           HYPRE_Int *free_bucket,
                                           HYPRE_Int *free_dist )
{
   HYPRE_Int move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
   HYPRE_Int move_free_dist;
   for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
   {
      hypre_uint start_hop_info = s->hopInfo[move_bucket];
      HYPRE_Int move_new_free_dist = -1;
      hypre_uint mask = 1;
      HYPRE_Int i;
      /* first entry in move_bucket's neighborhood that sits before the free slot */
      for (i = 0; i < move_free_dist; ++i, mask <<= 1)
      {
         if (mask & start_hop_info)
         {
            move_new_free_dist = i;
            break;
         }
      }
      if (-1 != move_new_free_dist)
      {
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         hypre_HopscotchSegment* move_segment = &(s->segments[move_bucket & s->segmentMask]);
         if (start_seg != move_segment)
         {
            omp_set_lock(&move_segment->lock);
         }
#endif
         /* re-check under the lock that nobody displaced the entry meanwhile */
         if (start_hop_info == s->hopInfo[move_bucket])
         {
            // new_free_bucket -> free_bucket and empty new_free_bucket
            HYPRE_Int new_free_bucket = move_bucket + move_new_free_dist;
            s->key[*free_bucket]  = s->key[new_free_bucket];
            s->hash[*free_bucket] = s->hash[new_free_bucket];
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            ++move_segment->timestamp;
            #pragma omp flush
#endif
            s->hopInfo[move_bucket] |= (1U << move_free_dist);
            s->hopInfo[move_bucket] &= ~(1U << move_new_free_dist);

            *free_bucket = new_free_bucket;
            *free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            if (start_seg != move_segment)
            {
               omp_unset_lock(&move_segment->lock);
            }
#endif
            return;
         }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         if (start_seg != move_segment)
         {
            omp_unset_lock(&move_segment->lock);
         }
#endif
      }
      ++move_bucket;
   }
   *free_bucket = -1;
   *free_dist = 0;
}

/* Same displacement logic as above for the HYPRE_BigInt set. */
static inline void
hypre_UnorderedBigIntSetFindCloserFreeBucket( hypre_UnorderedBigIntSet *s,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                              hypre_HopscotchSegment *start_seg,
#endif
                                              HYPRE_Int *free_bucket,
                                              HYPRE_Int *free_dist )
{
   HYPRE_Int move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
   HYPRE_Int move_free_dist;
   for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
   {
      hypre_uint start_hop_info = s->hopInfo[move_bucket];
      HYPRE_Int move_new_free_dist = -1;
      hypre_uint mask = 1;
      HYPRE_Int i;
      for (i = 0; i < move_free_dist; ++i, mask <<= 1)
      {
         if (mask & start_hop_info)
         {
            move_new_free_dist = i;
            break;
         }
      }
      if (-1 != move_new_free_dist)
      {
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         hypre_HopscotchSegment* move_segment = &(s->segments[move_bucket & s->segmentMask]);
         if (start_seg != move_segment)
         {
            omp_set_lock(&move_segment->lock);
         }
#endif
         if (start_hop_info == s->hopInfo[move_bucket])
         {
            // new_free_bucket -> free_bucket and empty new_free_bucket
            HYPRE_Int new_free_bucket = move_bucket + move_new_free_dist;
            s->key[*free_bucket]  = s->key[new_free_bucket];
            s->hash[*free_bucket] = s->hash[new_free_bucket];
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            ++move_segment->timestamp;
            #pragma omp flush
#endif
            s->hopInfo[move_bucket] |= (1U << move_free_dist);
            s->hopInfo[move_bucket] &= ~(1U << move_new_free_dist);

            *free_bucket = new_free_bucket;
            *free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            if (start_seg != move_segment)
            {
               omp_unset_lock(&move_segment->lock);
            }
#endif
            return;
         }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         if (start_seg != move_segment)
         {
            omp_unset_lock(&move_segment->lock);
         }
#endif
      }
      ++move_bucket;
   }
   *free_bucket = -1;
   *free_dist = 0;
}

/* AoS map variant: buckets are addressed by pointer, the segment is located
 * from the bucket's offset in m->table, and the entry's data field moves
 * along with key/hash.  Failure is signalled with *free_bucket = NULL. */
static inline void
hypre_UnorderedIntMapFindCloserFreeBucket( hypre_UnorderedIntMap *m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                           hypre_HopscotchSegment *start_seg,
#endif
                                           hypre_HopscotchBucket **free_bucket,
                                           HYPRE_Int *free_dist)
{
   hypre_HopscotchBucket* move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
   HYPRE_Int move_free_dist;
   for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
   {
      hypre_uint start_hop_info = move_bucket->hopInfo;
      HYPRE_Int move_new_free_dist = -1;
      hypre_uint mask = 1;
      HYPRE_Int i;
      for (i = 0; i < move_free_dist; ++i, mask <<= 1)
      {
         if (mask & start_hop_info)
         {
            move_new_free_dist = i;
            break;
         }
      }
      if (-1 != move_new_free_dist)
      {
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         hypre_HopscotchSegment* move_segment = &(m->segments[(move_bucket - m->table) & m->segmentMask]);
         if (start_seg != move_segment)
         {
            omp_set_lock(&move_segment->lock);
         }
#endif
         if (start_hop_info == move_bucket->hopInfo)
         {
            // new_free_bucket -> free_bucket and empty new_free_bucket
            hypre_HopscotchBucket* new_free_bucket = move_bucket + move_new_free_dist;
            (*free_bucket)->data = new_free_bucket->data;
            (*free_bucket)->key  = new_free_bucket->key;
            (*free_bucket)->hash = new_free_bucket->hash;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            ++move_segment->timestamp;
            #pragma omp flush
#endif
            move_bucket->hopInfo |= (1U << move_free_dist);
            move_bucket->hopInfo &= ~(1U << move_new_free_dist);

            *free_bucket = new_free_bucket;
            *free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            if (start_seg != move_segment)
            {
               omp_unset_lock(&move_segment->lock);
            }
#endif
            return;
         }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         if (start_seg != move_segment)
         {
            omp_unset_lock(&move_segment->lock);
         }
#endif
      }
      ++move_bucket;
   }
   *free_bucket = NULL;
   *free_dist = 0;
}

/* Same as the int-map variant, for HYPRE_BigInt keys/hashes. */
static inline void
hypre_UnorderedBigIntMapFindCloserFreeBucket( hypre_UnorderedBigIntMap *m,
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
                                              hypre_HopscotchSegment *start_seg,
#endif
                                              hypre_BigHopscotchBucket **free_bucket,
                                              HYPRE_Int *free_dist)
{
   hypre_BigHopscotchBucket* move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1);
   HYPRE_Int move_free_dist;
   for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist)
   {
      hypre_uint start_hop_info = move_bucket->hopInfo;
      HYPRE_Int move_new_free_dist = -1;
      hypre_uint mask = 1;
      HYPRE_Int i;
      for (i = 0; i < move_free_dist; ++i, mask <<= 1)
      {
         if (mask & start_hop_info)
         {
            move_new_free_dist = i;
            break;
         }
      }
      if (-1 != move_new_free_dist)
      {
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         hypre_HopscotchSegment* move_segment = &(m->segments[(move_bucket - m->table) & m->segmentMask]);
         if (start_seg != move_segment)
         {
            omp_set_lock(&move_segment->lock);
         }
#endif
         if (start_hop_info == move_bucket->hopInfo)
         {
            // new_free_bucket -> free_bucket and empty new_free_bucket
            hypre_BigHopscotchBucket* new_free_bucket = move_bucket + move_new_free_dist;
            (*free_bucket)->data = new_free_bucket->data;
            (*free_bucket)->key  = new_free_bucket->key;
            (*free_bucket)->hash = new_free_bucket->hash;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            ++move_segment->timestamp;
            #pragma omp flush
#endif
            move_bucket->hopInfo |= (1U << move_free_dist);
            move_bucket->hopInfo &= ~(1U << move_new_free_dist);

            *free_bucket = new_free_bucket;
            *free_dist -= move_free_dist - move_new_free_dist;
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
            if (start_seg != move_segment)
            {
               omp_unset_lock(&move_segment->lock);
            }
#endif
            return;
         }
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
         if (start_seg != move_segment)
         {
            omp_unset_lock(&move_segment->lock);
         }
#endif
      }
      ++move_bucket;
   }
   *free_bucket = NULL;
   *free_dist = 0;
}

/* Construction / destruction (defined in hopscotch_hash.c). */
void hypre_UnorderedIntSetCreate( hypre_UnorderedIntSet *s, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel);
void hypre_UnorderedBigIntSetCreate( hypre_UnorderedBigIntSet *s, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel);
void hypre_UnorderedIntMapCreate( hypre_UnorderedIntMap *m, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel);
void hypre_UnorderedBigIntMapCreate( hypre_UnorderedBigIntMap *m, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel);

void hypre_UnorderedIntSetDestroy( hypre_UnorderedIntSet *s );
void hypre_UnorderedBigIntSetDestroy( hypre_UnorderedBigIntSet *s );
void hypre_UnorderedIntMapDestroy( hypre_UnorderedIntMap *m );
void hypre_UnorderedBigIntMapDestroy( hypre_UnorderedBigIntMap *m ); // Query Operations ......................................................... static inline HYPRE_Int hypre_UnorderedIntSetContains( hypre_UnorderedIntSet *s, HYPRE_Int key ) { //CALCULATE HASH .......................... #ifdef HYPRE_BIGINT HYPRE_Int hash = hypre_BigHash(key); #else HYPRE_Int hash = hypre_Hash(key); #endif //CHECK IF ALREADY CONTAIN ................ #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask]; #endif HYPRE_Int bucket = hash & s->bucketMask; hypre_uint hopInfo = s->hopInfo[bucket]; if (0 == hopInfo) { return 0; } else if (1 == hopInfo ) { if (hash == s->hash[bucket] && key == s->key[bucket]) { return 1; } else { return 0; } } #ifdef HYPRE_CONCURRENT_HOPSCOTCH HYPRE_Int startTimestamp = segment->timestamp; #endif while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); HYPRE_Int currElm = bucket + i; if (hash == s->hash[currElm] && key == s->key[currElm]) { return 1; } hopInfo &= ~(1U << i); } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (segment->timestamp == startTimestamp) { return 0; } #endif HYPRE_Int i; for (i = 0; i < HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i) { if (hash == s->hash[bucket + i] && key == s->key[bucket + i]) { return 1; } } return 0; } static inline HYPRE_Int hypre_UnorderedBigIntSetContains( hypre_UnorderedBigIntSet *s, HYPRE_BigInt key ) { //CALCULATE HASH .......................... #if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT) HYPRE_BigInt hash = hypre_BigHash(key); #else HYPRE_BigInt hash = hypre_Hash(key); #endif //CHECK IF ALREADY CONTAIN ................ 
#ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment *segment = &s->segments[(HYPRE_Int)(hash & s->segmentMask)]; #endif HYPRE_Int bucket = (HYPRE_Int)(hash & s->bucketMask); hypre_uint hopInfo = s->hopInfo[bucket]; if (0 == hopInfo) { return 0; } else if (1 == hopInfo ) { if (hash == s->hash[bucket] && key == s->key[bucket]) { return 1; } else { return 0; } } #ifdef HYPRE_CONCURRENT_HOPSCOTCH HYPRE_Int startTimestamp = segment->timestamp; #endif while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); HYPRE_Int currElm = bucket + i; if (hash == s->hash[currElm] && key == s->key[currElm]) { return 1; } hopInfo &= ~(1U << i); } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (segment->timestamp == startTimestamp) { return 0; } #endif HYPRE_Int i; for (i = 0; i < HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i) { if (hash == s->hash[bucket + i] && key == s->key[bucket + i]) { return 1; } } return 0; } /** * @ret -1 if key doesn't exist */ static inline HYPRE_Int hypre_UnorderedIntMapGet( hypre_UnorderedIntMap *m, HYPRE_Int key ) { //CALCULATE HASH .......................... #ifdef HYPRE_BIGINT HYPRE_Int hash = hypre_BigHash(key); #else HYPRE_Int hash = hypre_Hash(key); #endif //CHECK IF ALREADY CONTAIN ................ 
#ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask]; #endif hypre_HopscotchBucket *elmAry = &(m->table[hash & m->bucketMask]); hypre_uint hopInfo = elmAry->hopInfo; if (0 == hopInfo) { return -1; } else if (1 == hopInfo ) { if (hash == elmAry->hash && key == elmAry->key) { return elmAry->data; } else { return -1; } } #ifdef HYPRE_CONCURRENT_HOPSCOTCH HYPRE_Int startTimestamp = segment->timestamp; #endif while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); hypre_HopscotchBucket* currElm = elmAry + i; if (hash == currElm->hash && key == currElm->key) { return currElm->data; } hopInfo &= ~(1U << i); } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (segment->timestamp == startTimestamp) { return -1; } #endif hypre_HopscotchBucket *currBucket = &(m->table[hash & m->bucketMask]); HYPRE_Int i; for (i = 0; i < HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i, ++currBucket) { if (hash == currBucket->hash && key == currBucket->key) { return currBucket->data; } } return -1; } static inline HYPRE_Int hypre_UnorderedBigIntMapGet( hypre_UnorderedBigIntMap *m, HYPRE_BigInt key ) { //CALCULATE HASH .......................... #if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT) HYPRE_BigInt hash = hypre_BigHash(key); #else HYPRE_BigInt hash = hypre_Hash(key); #endif //CHECK IF ALREADY CONTAIN ................ 
#ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment *segment = &m->segments[(HYPRE_Int)(hash & m->segmentMask)]; #endif hypre_BigHopscotchBucket *elmAry = &(m->table[(HYPRE_Int)(hash & m->bucketMask)]); hypre_uint hopInfo = elmAry->hopInfo; if (0 == hopInfo) { return -1; } else if (1 == hopInfo ) { if (hash == elmAry->hash && key == elmAry->key) { return elmAry->data; } else { return -1; } } #ifdef HYPRE_CONCURRENT_HOPSCOTCH HYPRE_Int startTimestamp = segment->timestamp; #endif while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); hypre_BigHopscotchBucket* currElm = elmAry + i; if (hash == currElm->hash && key == currElm->key) { return currElm->data; } hopInfo &= ~(1U << i); } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if (segment->timestamp == startTimestamp) { return -1; } #endif hypre_BigHopscotchBucket *currBucket = &(m->table[hash & m->bucketMask]); HYPRE_Int i; for (i = 0; i < HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i, ++currBucket) { if (hash == currBucket->hash && key == currBucket->key) { return currBucket->data; } } return -1; } //status Operations ......................................................... 
static inline HYPRE_Int hypre_UnorderedIntSetSize( hypre_UnorderedIntSet *s ) { HYPRE_Int counter = 0; HYPRE_Int n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; HYPRE_Int i; for (i = 0; i < n; ++i) { if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) { ++counter; } } return counter; } static inline HYPRE_Int hypre_UnorderedBigIntSetSize( hypre_UnorderedBigIntSet *s ) { HYPRE_Int counter = 0; HYPRE_BigInt n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; HYPRE_Int i; for (i = 0; i < n; ++i) { if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) { ++counter; } } return counter; } static inline HYPRE_Int hypre_UnorderedIntMapSize( hypre_UnorderedIntMap *m ) { HYPRE_Int counter = 0; HYPRE_Int n = m->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; HYPRE_Int i; for (i = 0; i < n; ++i) { if ( HYPRE_HOPSCOTCH_HASH_EMPTY != m->table[i].hash ) { ++counter; } } return counter; } static inline HYPRE_Int hypre_UnorderedBigIntMapSize( hypre_UnorderedBigIntMap *m ) { HYPRE_Int counter = 0; HYPRE_Int n = m->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; HYPRE_Int i; for (i = 0; i < n; ++i) { if ( HYPRE_HOPSCOTCH_HASH_EMPTY != m->table[i].hash ) { ++counter; } } return counter; } HYPRE_Int *hypre_UnorderedIntSetCopyToArray( hypre_UnorderedIntSet *s, HYPRE_Int *len ); HYPRE_BigInt *hypre_UnorderedBigIntSetCopyToArray( hypre_UnorderedBigIntSet *s, HYPRE_Int *len ); //modification Operations ................................................... static inline void hypre_UnorderedIntSetPut( hypre_UnorderedIntSet *s, HYPRE_Int key ) { //CALCULATE HASH .......................... #ifdef HYPRE_BIGINT HYPRE_Int hash = hypre_BigHash(key); #else HYPRE_Int hash = hypre_Hash(key); #endif //LOCK KEY HASH ENTERY .................... #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask]; omp_set_lock(&segment->lock); #endif HYPRE_Int bucket = hash & s->bucketMask; //CHECK IF ALREADY CONTAIN ................ 
hypre_uint hopInfo = s->hopInfo[bucket]; while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); HYPRE_Int currElm = bucket + i; if (hash == s->hash[currElm] && key == s->key[currElm]) { #ifdef HYPRE_CONCURRENT_HOPSCOTCH omp_unset_lock(&segment->lock); #endif return; } hopInfo &= ~(1U << i); } //LOOK FOR FREE BUCKET .................... HYPRE_Int free_bucket = bucket; HYPRE_Int free_dist = 0; for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket) { if ( (HYPRE_HOPSCOTCH_HASH_EMPTY == s->hash[free_bucket]) && (HYPRE_HOPSCOTCH_HASH_EMPTY == hypre_compare_and_swap((HYPRE_Int *)&s->hash[free_bucket], (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) ) { break; } } //PLACE THE NEW KEY ....................... if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE) { do { if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE) { s->key[free_bucket] = key; s->hash[free_bucket] = hash; s->hopInfo[bucket] |= 1U << free_dist; #ifdef HYPRE_CONCURRENT_HOPSCOTCH omp_unset_lock(&segment->lock); #endif return; } hypre_UnorderedIntSetFindCloserFreeBucket(s, #ifdef HYPRE_CONCURRENT_HOPSCOTCH segment, #endif &free_bucket, &free_dist); } while (-1 != free_bucket); } //NEED TO RESIZE .......................... hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ERROR - RESIZE is not implemented\n"); /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/ exit(1); return; } static inline void hypre_UnorderedBigIntSetPut( hypre_UnorderedBigIntSet *s, HYPRE_BigInt key ) { //CALCULATE HASH .......................... #if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT) HYPRE_BigInt hash = hypre_BigHash(key); #else HYPRE_BigInt hash = hypre_Hash(key); #endif //LOCK KEY HASH ENTERY .................... #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask]; omp_set_lock(&segment->lock); #endif HYPRE_Int bucket = (HYPRE_Int)(hash & s->bucketMask); //CHECK IF ALREADY CONTAIN ................ 
hypre_uint hopInfo = s->hopInfo[bucket]; while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); HYPRE_Int currElm = bucket + i; if (hash == s->hash[currElm] && key == s->key[currElm]) { #ifdef HYPRE_CONCURRENT_HOPSCOTCH omp_unset_lock(&segment->lock); #endif return; } hopInfo &= ~(1U << i); } //LOOK FOR FREE BUCKET .................... HYPRE_Int free_bucket = bucket; HYPRE_Int free_dist = 0; for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket) { if ( (HYPRE_HOPSCOTCH_HASH_EMPTY == s->hash[free_bucket]) && (HYPRE_HOPSCOTCH_HASH_EMPTY == hypre_compare_and_swap((HYPRE_Int *)&s->hash[free_bucket], (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) ) { break; } } //PLACE THE NEW KEY ....................... if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE) { do { if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE) { s->key[free_bucket] = key; s->hash[free_bucket] = hash; s->hopInfo[bucket] |= 1U << free_dist; #ifdef HYPRE_CONCURRENT_HOPSCOTCH omp_unset_lock(&segment->lock); #endif return; } hypre_UnorderedBigIntSetFindCloserFreeBucket(s, #ifdef HYPRE_CONCURRENT_HOPSCOTCH segment, #endif &free_bucket, &free_dist); } while (-1 != free_bucket); } //NEED TO RESIZE .......................... hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ERROR - RESIZE is not implemented\n"); /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/ exit(1); return; } static inline HYPRE_Int hypre_UnorderedIntMapPutIfAbsent( hypre_UnorderedIntMap *m, HYPRE_Int key, HYPRE_Int data ) { //CALCULATE HASH .......................... #ifdef HYPRE_BIGINT HYPRE_Int hash = hypre_BigHash(key); #else HYPRE_Int hash = hypre_Hash(key); #endif //LOCK KEY HASH ENTERY .................... #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask]; omp_set_lock(&segment->lock); #endif hypre_HopscotchBucket* startBucket = &(m->table[hash & m->bucketMask]); //CHECK IF ALREADY CONTAIN ................ 
hypre_uint hopInfo = startBucket->hopInfo; while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); hypre_HopscotchBucket* currElm = startBucket + i; if (hash == currElm->hash && key == currElm->key) { HYPRE_Int rc = currElm->data; #ifdef HYPRE_CONCURRENT_HOPSCOTCH omp_unset_lock(&segment->lock); #endif return rc; } hopInfo &= ~(1U << i); } //LOOK FOR FREE BUCKET .................... hypre_HopscotchBucket* free_bucket = startBucket; HYPRE_Int free_dist = 0; for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket) { if ( (HYPRE_HOPSCOTCH_HASH_EMPTY == free_bucket->hash) && (HYPRE_HOPSCOTCH_HASH_EMPTY == hypre_compare_and_swap((HYPRE_Int *)&free_bucket->hash, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) ) { break; } } //PLACE THE NEW KEY ....................... if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE) { do { if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE) { free_bucket->data = data; free_bucket->key = key; free_bucket->hash = hash; startBucket->hopInfo |= 1U << free_dist; #ifdef HYPRE_CONCURRENT_HOPSCOTCH omp_unset_lock(&segment->lock); #endif return HYPRE_HOPSCOTCH_HASH_EMPTY; } hypre_UnorderedIntMapFindCloserFreeBucket(m, #ifdef HYPRE_CONCURRENT_HOPSCOTCH segment, #endif &free_bucket, &free_dist); } while (NULL != free_bucket); } //NEED TO RESIZE .......................... hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ERROR - RESIZE is not implemented\n"); /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/ exit(1); return HYPRE_HOPSCOTCH_HASH_EMPTY; } static inline HYPRE_Int hypre_UnorderedBigIntMapPutIfAbsent( hypre_UnorderedBigIntMap *m, HYPRE_BigInt key, HYPRE_Int data) { //CALCULATE HASH .......................... #if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT) HYPRE_BigInt hash = hypre_BigHash(key); #else HYPRE_BigInt hash = hypre_Hash(key); #endif //LOCK KEY HASH ENTERY .................... 
#ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask]; omp_set_lock(&segment->lock); #endif hypre_BigHopscotchBucket* startBucket = &(m->table[hash & m->bucketMask]); //CHECK IF ALREADY CONTAIN ................ hypre_uint hopInfo = startBucket->hopInfo; while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); hypre_BigHopscotchBucket* currElm = startBucket + i; if (hash == currElm->hash && key == currElm->key) { HYPRE_Int rc = currElm->data; #ifdef HYPRE_CONCURRENT_HOPSCOTCH omp_unset_lock(&segment->lock); #endif return rc; } hopInfo &= ~(1U << i); } //LOOK FOR FREE BUCKET .................... hypre_BigHopscotchBucket* free_bucket = startBucket; HYPRE_Int free_dist = 0; for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket) { if ( (HYPRE_HOPSCOTCH_HASH_EMPTY == free_bucket->hash) && (HYPRE_HOPSCOTCH_HASH_EMPTY == hypre_compare_and_swap((HYPRE_Int *)&free_bucket->hash, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) ) { break; } } //PLACE THE NEW KEY ....................... if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE) { do { if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE) { free_bucket->data = data; free_bucket->key = key; free_bucket->hash = hash; startBucket->hopInfo |= 1U << free_dist; #ifdef HYPRE_CONCURRENT_HOPSCOTCH omp_unset_lock(&segment->lock); #endif return HYPRE_HOPSCOTCH_HASH_EMPTY; } hypre_UnorderedBigIntMapFindCloserFreeBucket(m, #ifdef HYPRE_CONCURRENT_HOPSCOTCH segment, #endif &free_bucket, &free_dist); } while (NULL != free_bucket); } //NEED TO RESIZE .......................... hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ERROR - RESIZE is not implemented\n"); /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/ exit(1); return HYPRE_HOPSCOTCH_HASH_EMPTY; } #ifdef __cplusplus } // extern "C" #endif #endif // hypre_HOPSCOTCH_HASH_HEADER #ifdef __cplusplus } #endif #endif
collatzStatic.c
// test file to execute the collatz conjecture on 1 proc
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

typedef unsigned long long ullong;

// One "half or triple plus one" step of the Collatz map.
ullong hotpo(ullong currn);

int main(int argc, char** argv)
{
  ullong high,                           // highest number recorded across all seeds
         // strtoull (not atoi): atoi truncates through int, silently breaking
         // bounds above INT_MAX for a ullong variable.
         nmax = (argc > 1) ? strtoull(argv[1], NULL, 10) : 50,
         imax = 2000000;                 // max number of iterations for a seed n

  #pragma omp parallel
  {
    printf("worker %d/%d ready to roll\n", omp_get_thread_num(), omp_get_num_threads());
  }

  /* timers */
  double startTime = omp_get_wtime(), endTime;

  high = 0;

  // 'n' is declared inside the loop body so each iteration (and thread) gets
  // its own copy; the original declared it at function scope, which made it
  // shared across threads and produced a data race.
  #pragma omp parallel for schedule(static, 50) reduction(max:high)
  for (ullong j = 1; j <= nmax; ++j) {
    ullong n = j;                        // current value of the Collatz sequence for seed j
    for (ullong i = 1; i <= imax; ++i) {
      n = hotpo(n);
      if (n > high) high = n;
      if (n == 1) break;                 // stop once the sequence reaches 1
    }
  }

  // %llu: 'high' is unsigned long long, so %lld would be the wrong specifier.
  printf("\nHigh: %llu\n", high);

  endTime = omp_get_wtime();
  printf("\nruntime = %.16e\n", endTime - startTime);

  return 0;
}

ullong hotpo(ullong currn)
{
  return ( (currn % 2 == 0) ? currn/2 : 3*currn + 1 );
}
z_solve.c
//-------------------------------------------------------------------------//
//                                                                         //
//  This benchmark is an OpenMP C version of the NPB BT code. This OpenMP  //
//  C version is developed by the Center for Manycore Programming at Seoul //
//  National University and derived from the OpenMP Fortran versions in    //
//  "NPB3.3-OMP" developed by NAS.                                         //
//                                                                         //
//  Permission to use, copy, distribute and modify this software for any   //
//  purpose with or without fee is hereby granted. This software is        //
//  provided "as is" without express or implied warranty.                  //
//                                                                         //
//  Information on NPB 3.3, including the technical report, the original   //
//  specifications, source code, results and information on how to submit  //
//  new results, is available at:                                          //
//                                                                         //
//           http://www.nas.nasa.gov/Software/NPB/                         //
//                                                                         //
//  Send comments or suggestions for this OpenMP C version to              //
//  cmp@aces.snu.ac.kr                                                     //
//                                                                         //
//          Center for Manycore Programming                                //
//          School of Computer Science and Engineering                     //
//          Seoul National University                                      //
//          Seoul 151-744, Korea                                           //
//                                                                         //
//          E-mail:  cmp@aces.snu.ac.kr                                    //
//                                                                         //
//-------------------------------------------------------------------------//

//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo,    //
//          and Jaejin Lee                                                 //
//-------------------------------------------------------------------------//

#include "header.h"
#include "work_lhs.h"
#include "timers.h"

//---------------------------------------------------------------------
// Performs line solves in Z direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//--------------------------------------------------------------------- void z_solve() { // printf("zzzzzzzzz\n"); int i, j, k, m, n, ksize; //kai // int k14; //consistent_data(&k14, "int", 1); //--------------------------------------------------------------------- //--------------------------------------------------------------------- if (timeron) timer_start(t_zsolve); //--------------------------------------------------------------------- //--------------------------------------------------------------------- //--------------------------------------------------------------------- // This function computes the left hand side for the three z-factors //--------------------------------------------------------------------- ksize = grid_points[2]-1; //--------------------------------------------------------------------- // Compute the indices for storing the block-diagonal matrix; // determine c (labeled f) and s jacobians //--------------------------------------------------------------------- #pragma omp parallel for default(shared) shared(ksize) private(i,j,k,m,n) for (j = k14+1; j <= grid_points[1]-2; j++) { for (i = 1; i <= grid_points[0]-2; i++) { for (k = 0; k <= ksize; k++) { tmp1 = 1.0 / u[k][j][i][0]; tmp2 = tmp1 * tmp1; tmp3 = tmp1 * tmp2; fjac[k][0][0] = 0.0; fjac[k][1][0] = 0.0; fjac[k][2][0] = 0.0; fjac[k][3][0] = 1.0; fjac[k][4][0] = 0.0; fjac[k][0][1] = - ( u[k][j][i][1]*u[k][j][i][3] ) * tmp2; fjac[k][1][1] = u[k][j][i][3] * tmp1; fjac[k][2][1] = 0.0; fjac[k][3][1] = u[k][j][i][1] * tmp1; fjac[k][4][1] = 0.0; fjac[k][0][2] = - ( u[k][j][i][2]*u[k][j][i][3] ) * tmp2; fjac[k][1][2] = 0.0; fjac[k][2][2] = u[k][j][i][3] * tmp1; fjac[k][3][2] = u[k][j][i][2] * tmp1; fjac[k][4][2] = 0.0; fjac[k][0][3] = - (u[k][j][i][3]*u[k][j][i][3] * tmp2 ) + c2 * qs[k][j][i]; fjac[k][1][3] = - c2 * u[k][j][i][1] * tmp1; fjac[k][2][3] = - c2 * u[k][j][i][2] * tmp1; fjac[k][3][3] = ( 2.0 - c2 ) * u[k][j][i][3] * tmp1; fjac[k][4][3] = c2; fjac[k][0][4] = ( c2 * 2.0 * 
square[k][j][i] - c1 * u[k][j][i][4] ) * u[k][j][i][3] * tmp2; fjac[k][1][4] = - c2 * ( u[k][j][i][1]*u[k][j][i][3] ) * tmp2; fjac[k][2][4] = - c2 * ( u[k][j][i][2]*u[k][j][i][3] ) * tmp2; fjac[k][3][4] = c1 * ( u[k][j][i][4] * tmp1 ) - c2 * ( qs[k][j][i] + u[k][j][i][3]*u[k][j][i][3] * tmp2 ); fjac[k][4][4] = c1 * u[k][j][i][3] * tmp1; njac[k][0][0] = 0.0; njac[k][1][0] = 0.0; njac[k][2][0] = 0.0; njac[k][3][0] = 0.0; njac[k][4][0] = 0.0; njac[k][0][1] = - c3c4 * tmp2 * u[k][j][i][1]; njac[k][1][1] = c3c4 * tmp1; njac[k][2][1] = 0.0; njac[k][3][1] = 0.0; njac[k][4][1] = 0.0; njac[k][0][2] = - c3c4 * tmp2 * u[k][j][i][2]; njac[k][1][2] = 0.0; njac[k][2][2] = c3c4 * tmp1; njac[k][3][2] = 0.0; njac[k][4][2] = 0.0; njac[k][0][3] = - con43 * c3c4 * tmp2 * u[k][j][i][3]; njac[k][1][3] = 0.0; njac[k][2][3] = 0.0; njac[k][3][3] = con43 * c3 * c4 * tmp1; njac[k][4][3] = 0.0; njac[k][0][4] = - ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][1]*u[k][j][i][1]) - ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][2]*u[k][j][i][2]) - ( con43 * c3c4 - c1345 ) * tmp3 * (u[k][j][i][3]*u[k][j][i][3]) - c1345 * tmp2 * u[k][j][i][4]; njac[k][1][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][1]; njac[k][2][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][2]; njac[k][3][4] = ( con43 * c3c4 - c1345 ) * tmp2 * u[k][j][i][3]; njac[k][4][4] = ( c1345 )* tmp1; } //--------------------------------------------------------------------- // now jacobians set, so form left hand side in z direction //--------------------------------------------------------------------- lhsinit(lhs, ksize); for (k = 1; k <= ksize-1; k++) { tmp1 = dt * tz1; tmp2 = dt * tz2; lhs[k][AA][0][0] = - tmp2 * fjac[k-1][0][0] - tmp1 * njac[k-1][0][0] - tmp1 * dz1; lhs[k][AA][1][0] = - tmp2 * fjac[k-1][1][0] - tmp1 * njac[k-1][1][0]; lhs[k][AA][2][0] = - tmp2 * fjac[k-1][2][0] - tmp1 * njac[k-1][2][0]; lhs[k][AA][3][0] = - tmp2 * fjac[k-1][3][0] - tmp1 * njac[k-1][3][0]; lhs[k][AA][4][0] = - tmp2 * fjac[k-1][4][0] - tmp1 * njac[k-1][4][0]; lhs[k][AA][0][1] = - 
tmp2 * fjac[k-1][0][1] - tmp1 * njac[k-1][0][1]; lhs[k][AA][1][1] = - tmp2 * fjac[k-1][1][1] - tmp1 * njac[k-1][1][1] - tmp1 * dz2; lhs[k][AA][2][1] = - tmp2 * fjac[k-1][2][1] - tmp1 * njac[k-1][2][1]; lhs[k][AA][3][1] = - tmp2 * fjac[k-1][3][1] - tmp1 * njac[k-1][3][1]; lhs[k][AA][4][1] = - tmp2 * fjac[k-1][4][1] - tmp1 * njac[k-1][4][1]; lhs[k][AA][0][2] = - tmp2 * fjac[k-1][0][2] - tmp1 * njac[k-1][0][2]; lhs[k][AA][1][2] = - tmp2 * fjac[k-1][1][2] - tmp1 * njac[k-1][1][2]; lhs[k][AA][2][2] = - tmp2 * fjac[k-1][2][2] - tmp1 * njac[k-1][2][2] - tmp1 * dz3; lhs[k][AA][3][2] = - tmp2 * fjac[k-1][3][2] - tmp1 * njac[k-1][3][2]; lhs[k][AA][4][2] = - tmp2 * fjac[k-1][4][2] - tmp1 * njac[k-1][4][2]; lhs[k][AA][0][3] = - tmp2 * fjac[k-1][0][3] - tmp1 * njac[k-1][0][3]; lhs[k][AA][1][3] = - tmp2 * fjac[k-1][1][3] - tmp1 * njac[k-1][1][3]; lhs[k][AA][2][3] = - tmp2 * fjac[k-1][2][3] - tmp1 * njac[k-1][2][3]; lhs[k][AA][3][3] = - tmp2 * fjac[k-1][3][3] - tmp1 * njac[k-1][3][3] - tmp1 * dz4; lhs[k][AA][4][3] = - tmp2 * fjac[k-1][4][3] - tmp1 * njac[k-1][4][3]; lhs[k][AA][0][4] = - tmp2 * fjac[k-1][0][4] - tmp1 * njac[k-1][0][4]; lhs[k][AA][1][4] = - tmp2 * fjac[k-1][1][4] - tmp1 * njac[k-1][1][4]; lhs[k][AA][2][4] = - tmp2 * fjac[k-1][2][4] - tmp1 * njac[k-1][2][4]; lhs[k][AA][3][4] = - tmp2 * fjac[k-1][3][4] - tmp1 * njac[k-1][3][4]; lhs[k][AA][4][4] = - tmp2 * fjac[k-1][4][4] - tmp1 * njac[k-1][4][4] - tmp1 * dz5; lhs[k][BB][0][0] = 1.0 + tmp1 * 2.0 * njac[k][0][0] + tmp1 * 2.0 * dz1; lhs[k][BB][1][0] = tmp1 * 2.0 * njac[k][1][0]; lhs[k][BB][2][0] = tmp1 * 2.0 * njac[k][2][0]; lhs[k][BB][3][0] = tmp1 * 2.0 * njac[k][3][0]; lhs[k][BB][4][0] = tmp1 * 2.0 * njac[k][4][0]; lhs[k][BB][0][1] = tmp1 * 2.0 * njac[k][0][1]; lhs[k][BB][1][1] = 1.0 + tmp1 * 2.0 * njac[k][1][1] + tmp1 * 2.0 * dz2; lhs[k][BB][2][1] = tmp1 * 2.0 * njac[k][2][1]; lhs[k][BB][3][1] = tmp1 * 2.0 * njac[k][3][1]; lhs[k][BB][4][1] = tmp1 * 2.0 * njac[k][4][1]; lhs[k][BB][0][2] = tmp1 * 2.0 * njac[k][0][2]; 
lhs[k][BB][1][2] = tmp1 * 2.0 * njac[k][1][2]; lhs[k][BB][2][2] = 1.0 + tmp1 * 2.0 * njac[k][2][2] + tmp1 * 2.0 * dz3; lhs[k][BB][3][2] = tmp1 * 2.0 * njac[k][3][2]; lhs[k][BB][4][2] = tmp1 * 2.0 * njac[k][4][2]; lhs[k][BB][0][3] = tmp1 * 2.0 * njac[k][0][3]; lhs[k][BB][1][3] = tmp1 * 2.0 * njac[k][1][3]; lhs[k][BB][2][3] = tmp1 * 2.0 * njac[k][2][3]; lhs[k][BB][3][3] = 1.0 + tmp1 * 2.0 * njac[k][3][3] + tmp1 * 2.0 * dz4; lhs[k][BB][4][3] = tmp1 * 2.0 * njac[k][4][3]; lhs[k][BB][0][4] = tmp1 * 2.0 * njac[k][0][4]; lhs[k][BB][1][4] = tmp1 * 2.0 * njac[k][1][4]; lhs[k][BB][2][4] = tmp1 * 2.0 * njac[k][2][4]; lhs[k][BB][3][4] = tmp1 * 2.0 * njac[k][3][4]; lhs[k][BB][4][4] = 1.0 + tmp1 * 2.0 * njac[k][4][4] + tmp1 * 2.0 * dz5; lhs[k][CC][0][0] = tmp2 * fjac[k+1][0][0] - tmp1 * njac[k+1][0][0] - tmp1 * dz1; lhs[k][CC][1][0] = tmp2 * fjac[k+1][1][0] - tmp1 * njac[k+1][1][0]; lhs[k][CC][2][0] = tmp2 * fjac[k+1][2][0] - tmp1 * njac[k+1][2][0]; lhs[k][CC][3][0] = tmp2 * fjac[k+1][3][0] - tmp1 * njac[k+1][3][0]; lhs[k][CC][4][0] = tmp2 * fjac[k+1][4][0] - tmp1 * njac[k+1][4][0]; lhs[k][CC][0][1] = tmp2 * fjac[k+1][0][1] - tmp1 * njac[k+1][0][1]; lhs[k][CC][1][1] = tmp2 * fjac[k+1][1][1] - tmp1 * njac[k+1][1][1] - tmp1 * dz2; lhs[k][CC][2][1] = tmp2 * fjac[k+1][2][1] - tmp1 * njac[k+1][2][1]; lhs[k][CC][3][1] = tmp2 * fjac[k+1][3][1] - tmp1 * njac[k+1][3][1]; lhs[k][CC][4][1] = tmp2 * fjac[k+1][4][1] - tmp1 * njac[k+1][4][1]; lhs[k][CC][0][2] = tmp2 * fjac[k+1][0][2] - tmp1 * njac[k+1][0][2]; lhs[k][CC][1][2] = tmp2 * fjac[k+1][1][2] - tmp1 * njac[k+1][1][2]; lhs[k][CC][2][2] = tmp2 * fjac[k+1][2][2] - tmp1 * njac[k+1][2][2] - tmp1 * dz3; lhs[k][CC][3][2] = tmp2 * fjac[k+1][3][2] - tmp1 * njac[k+1][3][2]; lhs[k][CC][4][2] = tmp2 * fjac[k+1][4][2] - tmp1 * njac[k+1][4][2]; lhs[k][CC][0][3] = tmp2 * fjac[k+1][0][3] - tmp1 * njac[k+1][0][3]; lhs[k][CC][1][3] = tmp2 * fjac[k+1][1][3] - tmp1 * njac[k+1][1][3]; lhs[k][CC][2][3] = tmp2 * fjac[k+1][2][3] - tmp1 * njac[k+1][2][3]; 
lhs[k][CC][3][3] = tmp2 * fjac[k+1][3][3] - tmp1 * njac[k+1][3][3] - tmp1 * dz4; lhs[k][CC][4][3] = tmp2 * fjac[k+1][4][3] - tmp1 * njac[k+1][4][3]; lhs[k][CC][0][4] = tmp2 * fjac[k+1][0][4] - tmp1 * njac[k+1][0][4]; lhs[k][CC][1][4] = tmp2 * fjac[k+1][1][4] - tmp1 * njac[k+1][1][4]; lhs[k][CC][2][4] = tmp2 * fjac[k+1][2][4] - tmp1 * njac[k+1][2][4]; lhs[k][CC][3][4] = tmp2 * fjac[k+1][3][4] - tmp1 * njac[k+1][3][4]; lhs[k][CC][4][4] = tmp2 * fjac[k+1][4][4] - tmp1 * njac[k+1][4][4] - tmp1 * dz5; } //--------------------------------------------------------------------- //--------------------------------------------------------------------- //--------------------------------------------------------------------- // performs guaussian elimination on this cell. // // assumes that unpacking routines for non-first cells // preload C' and rhs' from previous cell. // // assumed send happens outside this routine, but that // c'(KMAX) and rhs'(KMAX) will be sent to next cell. //--------------------------------------------------------------------- //--------------------------------------------------------------------- // outer most do loops - sweeping in i direction //--------------------------------------------------------------------- //--------------------------------------------------------------------- // multiply c[0][j][i] by b_inverse and copy back to c // multiply rhs(0) by b_inverse(0) and copy to rhs //--------------------------------------------------------------------- binvcrhs( lhs[0][BB], lhs[0][CC], rhs[0][j][i] ); //--------------------------------------------------------------------- // begin inner most do loop // do all the elements of the cell unless last //--------------------------------------------------------------------- for (k = 1; k <= ksize-1; k++) { //------------------------------------------------------------------- // subtract A*lhs_vector(k-1) from lhs_vector(k) // // rhs(k) = rhs(k) - A*rhs(k-1) 
//------------------------------------------------------------------- matvec_sub(lhs[k][AA], rhs[k-1][j][i], rhs[k][j][i]); //------------------------------------------------------------------- // B(k) = B(k) - C(k-1)*A(k) // matmul_sub(AA,i,j,k,c,CC,i,j,k-1,c,BB,i,j,k) //------------------------------------------------------------------- matmul_sub(lhs[k][AA], lhs[k-1][CC], lhs[k][BB]); //------------------------------------------------------------------- // multiply c[k][j][i] by b_inverse and copy back to c // multiply rhs[0][j][i] by b_inverse[0][j][i] and copy to rhs //------------------------------------------------------------------- binvcrhs( lhs[k][BB], lhs[k][CC], rhs[k][j][i] ); } //--------------------------------------------------------------------- // Now finish up special cases for last cell //--------------------------------------------------------------------- //--------------------------------------------------------------------- // rhs(ksize) = rhs(ksize) - A*rhs(ksize-1) //--------------------------------------------------------------------- matvec_sub(lhs[ksize][AA], rhs[ksize-1][j][i], rhs[ksize][j][i]); //--------------------------------------------------------------------- // B(ksize) = B(ksize) - C(ksize-1)*A(ksize) // matmul_sub(AA,i,j,ksize,c, // $ CC,i,j,ksize-1,c,BB,i,j,ksize) //--------------------------------------------------------------------- matmul_sub(lhs[ksize][AA], lhs[ksize-1][CC], lhs[ksize][BB]); //--------------------------------------------------------------------- // multiply rhs(ksize) by b_inverse(ksize) and copy to rhs //--------------------------------------------------------------------- binvrhs( lhs[ksize][BB], rhs[ksize][j][i] ); //--------------------------------------------------------------------- //--------------------------------------------------------------------- //--------------------------------------------------------------------- // back solve: if last cell, then generate U(ksize)=rhs(ksize) // else 
assume U(ksize) is loaded in un pack backsub_info // so just use it // after u(kstart) will be sent to next cell //--------------------------------------------------------------------- for (k = ksize-1; k >= 0; k--) { for (m = 0; m < BLOCK_SIZE; m++) { for (n = 0; n < BLOCK_SIZE; n++) { rhs[k][j][i][m] = rhs[k][j][i][m] - lhs[k][CC][n][m]*rhs[k+1][j][i][n]; } } } } //kai k14 = 0; // printf("k14=%p\n",&k14); } if (timeron) timer_stop(t_zsolve); }
utils.h
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <assert.h>
#include "pixman-private.h" /* For 'inline' definition */

/* Number of elements in a statically-sized array. */
#define ARRAY_LENGTH(A) ((int) (sizeof (A) / sizeof ((A) [0])))

/* A primitive pseudorandom number generator,
 * taken from POSIX.1-2001 example
 */

/* Global LCG state. Made thread-private under OpenMP so each worker
 * thread gets its own independent, race-free random stream. */
extern uint32_t lcg_seed;
#ifdef USE_OPENMP
#pragma omp threadprivate(lcg_seed)
#endif

/* Advance the generator state and return a 15-bit value in [0, 32768). */
static inline uint32_t
lcg_rand (void)
{
    lcg_seed = lcg_seed * 1103515245 + 12345;
    return ((uint32_t)(lcg_seed / 65536) % 32768);
}

/* Reset the generator to a known seed (for reproducible test runs). */
static inline void
lcg_srand (uint32_t seed)
{
    lcg_seed = seed;
}

/* Random value in [0, max). NOTE: plain modulo, so the result is
 * slightly biased when max does not divide 32768 - acceptable for
 * test/fuzzing purposes. Behavior is undefined for max <= 0. */
static inline uint32_t
lcg_rand_n (int max)
{
    return lcg_rand () % max;
}

/* Random value in [0, max) built from 30 bits (two 15-bit draws),
 * for max values too large for a single lcg_rand() draw. */
static inline uint32_t
lcg_rand_N (int max)
{
    uint32_t lo = lcg_rand ();
    uint32_t hi = lcg_rand () << 15;
    return (lo | hi) % max;
}

/* Random full 32-bit value assembled from three 15-bit draws. */
static inline uint32_t
lcg_rand_u32 (void)
{
    /* This uses the 10/11 most significant bits from the 3 lcg results
     * (and mixes them with the low from the adjacent one).
     * The negated shift expression evaluates to >> 5. */
    uint32_t lo = lcg_rand() >> -(32 - 15 - 11 * 2);
    uint32_t mid = lcg_rand() << (32 - 15 - 11 * 1);
    uint32_t hi = lcg_rand() << (32 - 15 - 11 * 0);

    return (hi ^ mid ^ lo);
}

/* CRC 32 computation */
uint32_t
compute_crc32 (uint32_t    in_crc32,
	       const void *buf,
	       size_t      buf_len);

/* Returns TRUE if running on a little endian system */
pixman_bool_t
is_little_endian (void);

/* perform endian conversion of pixel data */
void
image_endian_swap (pixman_image_t *img);

/* Allocate memory that is bounded by protected pages,
 * so that out-of-bounds access will cause segfaults */
void *
fence_malloc (int64_t len);

void
fence_free (void *data);

/* Generate n_bytes random bytes in fence_malloced memory */
uint8_t *
make_random_bytes (int n_bytes);

/* Return current time in seconds */
double
gettime (void);

uint32_t
get_random_seed (void);

/* main body of the fuzzer test */
int
fuzzer_test_main (const char *test_name,
		  int default_number_of_iterations,
		  uint32_t expected_checksum,
		  uint32_t (*test_function)(int testnum, int verbose),
		  int argc, const char *argv[]);

void
fail_after (int seconds, const char *msg);

/* If possible, enable traps for floating point exceptions */
void enable_fp_exceptions(void);

/* Converts a8r8g8b8 pixels to pixels that
 * - are not premultiplied,
 * - are stored in this order in memory: R, G, B, A, regardless of
 *   the endianness of the computer.
 * It is allowed for @src and @dst to point to the same memory buffer.
 */
void
a8r8g8b8_to_rgba_np (uint32_t *dst, uint32_t *src, int n_pixels);

pixman_bool_t
write_png (pixman_image_t *image, const char *filename);

/* A pair of macros which can help to detect corruption of
 * floating point registers after a function call. This may
 * happen if _mm_empty() call is forgotten in MMX/SSE2 fast
 * path code, or ARM NEON assembly optimized function forgets
 * to save/restore d8-d15 registers before use. */
#define FLOAT_REGS_CORRUPTION_DETECTOR_START()                 \
    static volatile double frcd_volatile_constant1 = 123451;   \
    static volatile double frcd_volatile_constant2 = 123452;   \
    static volatile double frcd_volatile_constant3 = 123453;   \
    static volatile double frcd_volatile_constant4 = 123454;   \
    static volatile double frcd_volatile_constant5 = 123455;   \
    static volatile double frcd_volatile_constant6 = 123456;   \
    static volatile double frcd_volatile_constant7 = 123457;   \
    static volatile double frcd_volatile_constant8 = 123458;   \
    double frcd_canary_variable1 = frcd_volatile_constant1;    \
    double frcd_canary_variable2 = frcd_volatile_constant2;    \
    double frcd_canary_variable3 = frcd_volatile_constant3;    \
    double frcd_canary_variable4 = frcd_volatile_constant4;    \
    double frcd_canary_variable5 = frcd_volatile_constant5;    \
    double frcd_canary_variable6 = frcd_volatile_constant6;    \
    double frcd_canary_variable7 = frcd_volatile_constant7;    \
    double frcd_canary_variable8 = frcd_volatile_constant8;

/* Verifies that none of the double canaries set up by the START()
 * macro were clobbered between the two macro invocations. */
#define FLOAT_REGS_CORRUPTION_DETECTOR_FINISH()                \
    assert (frcd_canary_variable1 == frcd_volatile_constant1); \
    assert (frcd_canary_variable2 == frcd_volatile_constant2); \
    assert (frcd_canary_variable3 == frcd_volatile_constant3); \
    assert (frcd_canary_variable4 == frcd_volatile_constant4); \
    assert (frcd_canary_variable5 == frcd_volatile_constant5); \
    assert (frcd_canary_variable6 == frcd_volatile_constant6); \
    assert (frcd_canary_variable7 == frcd_volatile_constant7); \
    assert (frcd_canary_variable8 == frcd_volatile_constant8);

/* Try to get an aligned memory chunk */
void *
aligned_malloc (size_t align, size_t size);

void
initialize_palette (pixman_indexed_t *palette, uint32_t depth, int is_rgb);

/* RGBA color with double-precision components. */
typedef struct
{
    double r, g, b, a;
} color_t;

void
round_color (pixman_format_code_t format, color_t *color);

/* Per-format pixel decomposition state; fields are presumably the
 * per-channel mask/shift/width (m/s/w) for a/r/g/b - verify against
 * pixel_checker_init in utils.c. */
typedef struct
{
    pixman_format_code_t format;
    uint32_t am, rm, gm, bm;
    uint32_t as, rs, gs, bs;
    uint32_t aw, rw, gw, bw;
} pixel_checker_t;

void
pixel_checker_init (pixel_checker_t *checker, pixman_format_code_t format);

void
pixel_checker_split_pixel (const pixel_checker_t *checker, uint32_t pixel,
			   int *a, int *r, int *g, int *b);

void
pixel_checker_get_max (const pixel_checker_t *checker, color_t *color,
		       int *a, int *r, int *g, int *b);

void
pixel_checker_get_min (const pixel_checker_t *checker, color_t *color,
		       int *a, int *r, int *g, int *b);

pixman_bool_t
pixel_checker_check (const pixel_checker_t *checker,
		     uint32_t pixel, color_t *color);
convolution_1x1_packn_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 1x1 convolution, stride 1, packn layout, fp16 storage+arithmetic (RVV).
// A 1x1/s1 convolution is a plain matrix multiply, so "im2col" here is a
// pure reshape: the WxH plane is viewed as a single row of w*h columns.
// No pixel data is copied before handing off to the sgemm kernel.
static void conv1x1s1_sgemm_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    const int size = w * h;

    // Shallow copy: only the logical shape changes (w*h x 1).
    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = size;
    bottom_im2col.h = 1;

    im2col_sgemm_packn_fp16sa_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
}

// 1x1 convolution, stride 2: first subsample the input (keep every second
// pixel in x and y), then reuse the stride-1 sgemm path above.
static void conv1x1s2_sgemm_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // packn = fp16 lanes per packed element: vector register length in
    // bytes (csrr vlenb) divided by sizeof(__fp16).
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // Advance from the end of one consumed input row to the start of the
    // next sampled row: skip the unread tail of this row plus one whole
    // row (stride 2 in y), in packed fp16 elements.
    const int tailstep = (w - 2 * outw + w) * packn;

    // NOTE(review): create() result is not checked; presumably allocation
    // failure is handled by the caller's convention - confirm.
    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const __fp16* r0 = bottom_blob.channel(p);
        __fp16* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Copy one packed element, then skip the next (stride 2 in x).
                vfloat16m1_t _val = vle16_v_f16m1(r0, vl);
                vse16_v_f16m1(outptr, _val, vl);

                r0 += packn * 2;
                outptr += packn;
            }

            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_packn_fp16sa_rvv(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
process.h
/* * @Author: Philippe Dales * @Date: 2018-07-26 14:26:23 * @Last Modified by: Philippe Dales * @Last Modified time: 2018-07-26 14:26:23 */ /* Signal processing functions. */ #ifndef PROCESS_H #define PROCESS_H #define _USE_MATH_DEFINES #include <cmath> #include <iostream> #include <vector> #include <array> #include <algorithm> #include <fftw3.h> // #include <queue> // #include <random> #include <thread> #include <functional> #include <chrono> // #include <omp.h> #include "xseis/structures.h" // typedef std::pair<float, std::array<float, 3> > vpair; // typedef std::priority_queue<vpair, std::vector<vpair>, std::greater<vpair>> fe_queue; namespace process { // template<typename T> // T max(T* begin, T* end) { // return *std::max_element(begin, end); // } // template<typename T> // T min(T* begin, T* end) { // return *std::min_element(begin, end); // } // template<typename T> // size_t argmax(T* begin, T* end) { // return std::distance(begin, std::max_element(begin, end)); // } template<typename Container> float max(Container& data) { return *std::max_element(data.begin(), data.end()); } template<typename Container> float min(Container& data) { return *std::min_element(data.begin(), data.end()); } // template<typename T> // T min(T* begin, T* end) { // return *std::min_element(begin, end); // } template<typename Container> size_t argmax(Container& data) { return std::distance(data.begin(), std::max_element(data.begin(), data.end())); } inline float AngleBetweenPoints(float* a, float*b) { return std::atan((a[1] - b[1]) / (a[0] - b[0])); // return std::atan2(a[1] - b[1], a[0] - b[0]); } inline float DistCartesian(float* a, float* b) { float dx = a[0] - b[0]; float dy = a[1] - b[1]; float dz = a[2] - b[2]; return std::sqrt(dx * dx + dy * dy + dz * dz); } inline float DistCartesian2D(float* a, float* b) { float dx = a[0] - b[0]; float dy = a[1] - b[1]; return std::sqrt(dx * dx + dy * dy); } float DistDiff(float* a, float* b, float* c) { return DistCartesian(a, c) - 
DistCartesian(b, c); } uint mod_floor(int a, int n) { return ((a % n) + n) % n; } Vector<fftwf_complex> BuildPhaseShiftVec(size_t const nfreq, int const nshift) { auto v = Vector<fftwf_complex>(nfreq); // std::vector<fftwf_complex> v(nfreq); float const fstep = 0.5 / (nfreq - 1); float const factor = nshift * 2 * M_PI * fstep; for(size_t i = 0; i < nfreq; ++i) { v[i][0] = std::cos(i * factor); v[i][1] = std::sin(i * factor); } return v; } // Mutiply sig1 by sig2 (x + yi)(u + vi) = (xu-yv) + (xv+yu)i // x + yi = s1[0] + s1[1]i // u + vi = s2[0] + s2[1]i #pragma omp declare simd aligned(sig1, sig2:MEM_ALIGNMENT) void Convolve(fftwf_complex const* const sig2, fftwf_complex* const sig1, uint32_t const nfreq) { float tmp; #pragma omp simd aligned(sig1, sig2:MEM_ALIGNMENT) for (uint32_t i = 0; i < nfreq; ++i){ tmp = sig1[i][0] * sig2[i][0] - sig1[i][1] * sig2[i][1]; sig1[i][1] = sig1[i][0] * sig2[i][1] + sig1[i][1] * sig2[i][0]; sig1[i][0] = tmp; } } #pragma omp declare simd aligned(sig1, sig2, out:MEM_ALIGNMENT) inline void Convolve(fftwf_complex const* const sig1, fftwf_complex const* const sig2, fftwf_complex* const out, uint32_t const nfreq) { #pragma omp simd aligned(sig1, sig2, out:MEM_ALIGNMENT) for (uint32_t i = 0; i < nfreq; ++i){ out[i][0] = sig1[i][0] * sig2[i][0] - sig1[i][1] * sig2[i][1]; out[i][1] = sig1[i][0] * sig2[i][1] + sig1[i][1] * sig2[i][0]; } } #pragma omp declare simd aligned(data, stack:MEM_ALIGNMENT) inline void Accumulate(fftwf_complex const* const data, fftwf_complex* const stack, uint32_t const npts) { #pragma omp simd aligned(data, stack:MEM_ALIGNMENT) for(uint32_t i = 0; i < npts; ++i) { stack[i][0] += data[i][0]; stack[i][1] += data[i][1]; } } #pragma omp declare simd aligned(data, stack:MEM_ALIGNMENT) inline void Accumulate(float const* const data, float* const stack, uint32_t const npts) { #pragma omp simd aligned(data, stack:MEM_ALIGNMENT) for(uint32_t i = 0; i < npts; ++i) { stack[i] += data[i]; } } #pragma omp declare simd 
aligned(sig:MEM_ALIGNMENT) void Whiten(fftwf_complex* const sig, uint32_t const npts) { #pragma omp simd aligned(sig:MEM_ALIGNMENT) for(uint32_t i = 0; i < npts; ++i) { float abs = std::sqrt(sig[i][0] * sig[i][0] + sig[i][1] * sig[i][1]); sig[i][0] /= abs; sig[i][1] /= abs; } } #pragma omp declare simd aligned(sig, out:MEM_ALIGNMENT) void Absolute(fftwf_complex const* const sig, float* out, uint32_t const npts) { #pragma omp simd aligned(sig, out:MEM_ALIGNMENT) for(uint32_t i = 0; i < npts; ++i) { out[i] = std::sqrt(sig[i][0] * sig[i][0] + sig[i][1] * sig[i][1]); } } #pragma omp declare simd aligned(sig:MEM_ALIGNMENT) void Absolute(float* sig, uint32_t const npts) { #pragma omp simd aligned(sig:MEM_ALIGNMENT) for(uint32_t i = 0; i < npts; ++i) { sig[i] = std::abs(sig[i]); } } // #pragma omp declare simd aligned(sig1, sig2:MEM_ALIGNMENT) // void Convolve(fftwf_complex* sig1, fftwf_complex* sig2, uint32_t const nfreq) // { // float tmp; // #pragma omp simd aligned(sig1, sig2:MEM_ALIGNMENT) // for (uint32_t i = 0; i < nfreq; ++i){ // tmp = sig1[i][0] * sig2[i][0] - sig1[i][1] * sig2[i][1]; // sig1[i][1] = sig1[i][0] * sig2[i][1] + sig1[i][1] * sig2[i][0]; // sig1[i][0] = tmp; // } // } // Cross-correlate complex signals, cc(f) = s1(f) x s2*(f) #pragma omp declare simd aligned(sig1, sig2, out:MEM_ALIGNMENT) void XCorr(fftwf_complex const* const sig1, fftwf_complex const* const sig2, fftwf_complex* const out, uint32_t const nfreq) { #pragma omp simd aligned(sig1, sig2, out:MEM_ALIGNMENT) for (uint32_t i = 0; i < nfreq; ++i){ out[i][0] = sig1[i][0] * sig2[i][0] + sig1[i][1] * sig2[i][1]; out[i][1] = sig1[i][0] * sig2[i][1] - sig1[i][1] * sig2[i][0]; } } #pragma omp declare simd aligned(sig1, sig2:MEM_ALIGNMENT) float DotProductEnergy(float const* const sig1, float const* const sig2, uint32_t const npts) { float result = 0; #pragma omp simd aligned(sig1, sig2:MEM_ALIGNMENT) for (uint32_t i = 0; i < npts; ++i){ // result += sig1[0] * sig2[0]; result += (sig1[0] * sig2[0]) 
* (sig1[0] * sig2[0]); } return result; } #pragma omp declare simd aligned(sig1, sig2:MEM_ALIGNMENT) float DotProduct(float const* const sig1, float const* const sig2, uint32_t const npts) { float result = 0; #pragma omp simd aligned(sig1, sig2:MEM_ALIGNMENT) for (uint32_t i = 0; i < npts; ++i){ result += sig1[0] * sig2[0]; } return result; } // // Cross-correlate signal pairs of fdata and output to fdata_cc // void XCorrPairs(Array2D<fftwf_complex>& fdata, Array2D<uint16_t>& ckeys, Array2D<fftwf_complex>& fdata_cc) // { // uint32_t nfreq = fdata.ncol_; // #pragma omp for // for (size_t i = 0; i < ckeys.nrow_; ++i) // { // // std::cout << "npair: " << i << '\n'; // XCorr(fdata.row(ckeys(i, 0)), fdata.row(ckeys(i, 1)), // fdata_cc.row(i), nfreq); // } // } // #pragma omp declare simd aligned(data:MEM_ALIGNMENT) template <typename T, typename F> void ApplyFuncToRows(T *__restrict__ data, size_t nsig, size_t npts, F* func){ // Generic map function // #pragma omp for simd aligned(data:MEM_ALIGNMENT) for (size_t i = 0; i < nsig; i++) { (*func)(data + (i * npts), npts); } } template <typename T, typename F> void ApplyFuncToRows(Array2D<T>& data, F* func){ ApplyFuncToRows(data.data_, data.nrow_, data.ncol_, func); } Vector<float> BuildFreqFilter(std::vector<float>& corner_freqs, uint nfreq, float sr) { float fsr = (nfreq * 2 - 1) / sr; // printf("nfreq: %u, FSR: %.4f\n", nfreq, fsr); std::vector<uint32_t> cx; for(auto&& cf : corner_freqs) { cx.push_back(static_cast<uint32_t>(cf * fsr + 0.5)); // printf("cf/fsr %.2f, %.5f\n", cf, fsr); } // printf("filt corner indexes \n"); // for(auto&& c : cx) { // // printf("cx/ cast: %.3f, %u\n", cx, (uint32_t)cx); // printf("--%u--", c); // } // printf("\n"); // whiten corners: cutmin--porte1---porte2--cutmax auto filter = Vector<float>(nfreq); filter.fill(0); // int wlen = porte1 - cutmin; float cosm_left = M_PI / (2. 
* (cx[1] - cx[0])); // left hand taper for (uint i = cx[0]; i < cx[1]; ++i) { filter[i] = std::pow(std::cos((cx[1] - (i + 1) ) * cosm_left), 2.0); } // setin middle freqs amp = 1 for (uint i = cx[1]; i < cx[2]; ++i) { filter[i] = 1; } float cosm_right = M_PI / (2. * (cx[3] - cx[2])); // right hand taper for (uint i = cx[2]; i < cx[3]; ++i) { filter[i] = std::pow(std::cos((i - cx[2]) * cosm_right), 2.0); } return filter; } void ApplyFreqFilterReplace(float (*fdata)[2], uint const nfreq, Vector<float>& filter) { float angle; for (uint i = 0; i < filter.size_; ++i) { if(filter[i] == 0) { fdata[i][0] = 0; fdata[i][1] = 0; } else { angle = std::atan2(fdata[i][1], fdata[i][0]); fdata[i][0] = filter[i] * std::cos(angle); fdata[i][1] = filter[i] * std::sin(angle); } } } void ApplyFreqFilterMultiply(float (*fdata)[2], uint nfreq, Vector<float>& filter) { float angle; for (uint i = 0; i < filter.size_; ++i) { if(filter[i] == 0) { fdata[i][0] = 0; fdata[i][1] = 0; } else { angle = std::atan2(fdata[i][1], fdata[i][0]); fdata[i][0] *= filter[i] * std::cos(angle); fdata[i][1] *= filter[i] * std::sin(angle); } } } void square_signal(float *sig, size_t npts) { for (size_t i = 0; i < npts; ++i) { sig[i] = sig[i] * sig[i]; } } void root_signal(float *sig, size_t npts) { for (size_t i = 0; i < npts; ++i) { sig[i] = std::sqrt(sig[i]); } } float rms_energy(float *sig, size_t npts) { // np.sqrt(np.mean(data ** 2, axis=axis)) float square_sum = 0; for (size_t i = 0; i < npts; ++i){ square_sum += sig[i] * sig[i]; } return std::sqrt(square_sum / npts); } void clip(float *sig, size_t npts, float thresh){ for (size_t i = 0; i < npts; ++i){ if (sig[i] > thresh){sig[i] = thresh;} else if (sig[i] < -thresh){sig[i] = -thresh;} } } void demean(float *sig, size_t npts) { float mean = 0; for (size_t i = 0; i < npts; ++i){ mean += sig[i]; } mean /= npts; for (size_t i = 0; i < npts; ++i){ sig[i] -= mean; } } void norm_one_bit(float *sig, size_t npts) { for (size_t i = 0; i < npts; ++i){ sig[i] = 
(sig[i] > 0) - (sig[i] < 0); } } void norm_one_or_zero(float *sig, size_t npts) { for (size_t i = 0; i < npts; ++i){ if(sig[i] <= 0) { sig[i] = 0; } else{ sig[i] = 1; } } } void ExpMovingAverage(float *sig, size_t npts, uint wlen, bool both_ways=false) { float alpha = 2 / (static_cast<float>(wlen) + 1); float beta = 1 - alpha; sig[0] = std::abs(sig[0]); for (size_t i = 1; i < npts; ++i){ sig[i] = alpha * std::abs(sig[i]) + beta * sig[i - 1]; } if(both_ways == true) { for (long i = npts - 2; i >= 0; --i){ sig[i] = alpha * std::abs(sig[i]) + beta * sig[i + 1]; } } } void EMA_NoAbs(float *sig, size_t npts, uint wlen, bool both_ways=false) { float alpha = 2 / (static_cast<float>(wlen) + 1); float beta = 1 - alpha; for (size_t i = 1; i < npts; ++i){ sig[i] = alpha * sig[i] + beta * sig[i - 1]; } if(both_ways == true) { for (long i = npts - 2; i >= 0; --i){ sig[i] = alpha * sig[i] + beta * sig[i + 1]; } } } float median(float *sig, size_t npts) { size_t half = npts / 2; std::nth_element(sig, sig + half, sig + npts); return sig[half]; } // void ExpMovingAverageSquare(float *sig, size_t npts, uint wlen) // { // float alpha = 2 / (static_cast<float>(wlen) + 1); // float beta = 1 - alpha; // sig[0] = sig[0] * sig[0]; // for (size_t i = 1; i < npts; ++i){ // sig[i] = alpha * sig[i] * sig[i] + beta * sig[i - 1]; // } // } // esig[i] = alpha * esig[i] + (1 - alpha) * esig[i - 1] // template<typename T> // bool abs_compare(T a, T b) bool abs_compare(float a, float b) { return (std::abs(a) < std::abs(b)); } void norm_max_abs(float *sig, size_t npts) { float max = *std::max_element(sig, sig + npts, abs_compare); if (max != 0){ for (size_t i = 0; i < npts; ++i){ sig[i] /= max; } } } void zero_around_max(float *sig, size_t npts, size_t wlen) { // size_t amax = std::distance(sig, std::max_element(sig, sig + npts)); // size_t hlen = wlen / 2; // size_t cutmin = std::max(amax - hlen, (size_t) 0); // size_t cutmax = std::min(amax + hlen, (size_t) npts); // for(size_t i = cutmin; i < 
cutmax; ++i) { // sig[i] = 0; // } long amax = std::distance(sig, std::max_element(sig, sig + npts)); long hlen = wlen / 2; long cutmin = amax - hlen; long cutmax = amax + hlen; if(cutmin >= 0 && cutmax <= npts) { for(size_t i = cutmin; i < cutmax; ++i) { sig[i] = 0; } } else if (cutmin < 0){ for(size_t i = npts + cutmin; i < npts; ++i) { sig[i] = 0; } for(size_t i = 0; i < cutmax; ++i) { sig[i] = 0; } } else if (cutmax > npts){ for(size_t i = cutmin; i < npts; ++i) { sig[i] = 0; } for(size_t i = 0; i < cutmax - npts; ++i) { sig[i] = 0; } } } void absolute(float *sig, size_t npts) { for (size_t i = 0; i < npts; ++i){ sig[i] = std::abs(sig[i]); } } void Roll(float* sig, size_t npts, long nroll) { std::rotate(sig, sig + nroll, sig + npts); } void taper(float *sig, size_t npts, uint len_taper) { float factor = (2 * M_PI) / ((len_taper * 2) - 1); float *sig_end = sig + npts - len_taper; for (size_t i = 0; i < len_taper; ++i) { sig[i] *= 0.5 - 0.5 * std::cos(i * factor); } for (size_t i = 0; i < len_taper; ++i) { sig_end[i] *= 0.5 - 0.5 * std::cos((i + len_taper) * factor); } } template<typename T> float standard_deviation(T *data, size_t size) { float mean = 0; for(size_t i = 0; i < size; ++i) { mean += data[i]; } mean /= size; float var = 0; for(size_t i = 0; i < size; ++i) { var += (data[i] - mean) * (data[i] - mean); } return std::sqrt(var / size); } template<typename T> float mean(T *data, size_t size) { float mean = 0; for(size_t i = 0; i < size; ++i) { mean += data[i]; } mean /= size; return mean; } // void norm_energy(float (*sig)[2], int npts) // { // int nfreq = npts / 2 + 1; // float energy = 0; // for (int i = 0; i < nfreq; ++i) { // energy += (sig[i][0] * sig[i][0] + sig[i][1] * sig[i][1]); // } // // printf("energy = %.5f \n", energy); // // printf("nfreq = %d \n", nfreq); // for (int i = 0; i < nfreq; ++i) { // sig[i][0] /= energy; // sig[i][1] /= energy; // } // } void SlidingWinMax(float *sig, size_t npts, size_t wlen) { // Sliding window max abs val 
smoothin (horribly slow) absolute(sig, npts); if (wlen % 2 == 0){wlen += 1;} size_t hlen = wlen / 2 + 1; float buf[wlen]; size_t buf_idx = 0; // Fill buffer with last WLEN vals of sig std::copy(&sig[npts - wlen], &sig[npts], buf); // Handle edge case with index wrapin via mod function for (size_t i = npts - hlen; i < npts + hlen; ++i) { sig[i % npts] = *std::max_element(buf, buf + wlen); buf[buf_idx] = sig[(i + hlen) % npts]; buf_idx = (buf_idx + 1) % wlen; } // handle non-edge case for (size_t i = hlen; i < npts - hlen; ++i) { sig[i] = *std::max_element(buf, buf + wlen); buf[buf_idx] = sig[i + hlen]; buf_idx = (buf_idx + 1) % wlen; } } void Multiply(float *sig, size_t npts, float val){ for (size_t i = 0; i < npts; ++i){ sig[i] *= val; } } void Multiply(fftwf_complex* data, size_t npts, float val) { for(size_t i = 0; i < npts; ++i) { data[i][0] *= val; data[i][1] *= val; } } template<typename Container> void Multiply(Container& data, float val) { Multiply(data.data_, data.size_, val); } void Fill(fftwf_complex* data, size_t npts, float val) { for(size_t i = 0; i < npts; ++i) { data[i][0] = val; data[i][1] = val; } } void Fill(float* data, size_t npts, float val) { for(size_t i = 0; i < npts; ++i) { data[i] = val; } } void Fill(Vector<fftwf_complex>& data, float val) { for(size_t i = 0; i < data.size_; ++i) { data[i][0] = val; data[i][1] = val; } } void Fill(Vector<float>& data, float val) { for(size_t i = 0; i < data.size_; ++i) { data[i] = val; } } void Copy(fftwf_complex const *in, size_t npts, fftwf_complex *out) { std::copy(&(in)[0][0], &(in + npts)[0][0], &out[0][0]); } void Copy(float const *in, size_t npts, float *out) { std::copy(in, in + npts, out); } void Subtract(fftwf_complex const *data, fftwf_complex *data_mod, size_t npts) { for(size_t i = 0; i < npts; ++i) { data_mod[i][0] -= data[i][0]; data_mod[i][1] -= data[i][1]; } } #pragma omp declare simd aligned(sig:MEM_ALIGNMENT) float Energy(const fftwf_complex *sig, uint32_t const nfreq) { // E = 1/N 
sum(|x(f)**2|) float tmp = 0; #pragma omp simd aligned(sig:MEM_ALIGNMENT) for (uint32_t i = 0; i < nfreq; ++i){ tmp += sig[i][0] * sig[i][0] + sig[i][1] * sig[i][1]; } return tmp / static_cast<float>(nfreq); } // Cross-correlate complex signals, cc(f) = s1(f) x s2*(f) #pragma omp declare simd aligned(sig1, sig2:MEM_ALIGNMENT) float XCorrEnergy(fftwf_complex const *sig1, fftwf_complex const *sig2, uint32_t const nfreq) { float a, b; float sum = 0; #pragma omp simd aligned(sig1, sig2:MEM_ALIGNMENT) for (uint32_t i = 0; i < nfreq; ++i){ a = (sig1[i][0] * sig2[i][0]) + (sig1[i][1] * sig2[i][1]); b = (sig1[i][0] * sig2[i][1]) - (sig1[i][1] * sig2[i][0]); sum += (a * a) + (b * b); // a = sig1[i][0] * sig2[i][0] + sig1[i][1] * sig2[i][1]; // b = sig1[i][0] * sig2[i][1] - sig1[i][1] * sig2[i][0]; // sum += (sig1[i][0] * sig2[i][0] + sig1[i][1] * sig2[i][1]) * (sig1[i][0] * sig2[i][0] + sig1[i][1] * sig2[i][1]) + (sig1[i][0] * sig2[i][1] - sig1[i][1] * sig2[i][0]) * (sig1[i][0] * sig2[i][1] - sig1[i][1] * sig2[i][0]); } return sum; } // def get_pt(index, shape, spacing, origin): // nx, ny, nz = shape // # nx, ny, nz = spacing // iz = index % nz // iy = ((index - iz) / nz) % ny // ix = index / (nz * ny) // loc = np.array([ix, iy, iz], dtype=np.float32) * spacing + origin // return loc std::vector<float> get_point(size_t index, int* gdef){ int* shape = &gdef[0]; int* origin = &gdef[3]; int spacing = gdef[6]; int nx = shape[0]; int ny = shape[1]; int nz = shape[2]; int iz = index % nz; int iy = ((index - iz) / nz) % ny; int ix = index / (nz * ny); std::vector<float> v(3); v[0] = ix * spacing + origin[0]; v[1] = iy * spacing + origin[1]; v[2] = iz * spacing + origin[2]; return v; } } #endif
3.norace5.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N][N]; for (int i = 1; i < N; i++) for (int j = 1; j < N; j++) #pragma omp parallel for for (int k = 1; k < N; k++) A[i][j][k] = A[i - 1][j][k]; } // CHECK: Region is Data Race Free. // END
GB_unaryop__ainv_fp32_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_fp32_fp64 // op(A') function: GB_tran__ainv_fp32_fp64 // C type: float // A type: double // cast: float cij = (float) aij // unaryop: cij = -aij #define GB_ATYPE \ double #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ float z = (float) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_fp32_fp64 ( float *restrict Cx, const double *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_fp32_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__gt_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// This family of kernels implements the GT (greater-than) comparator for
// int16 inputs: cij = (aij > bij), with a bool result.  The GB_* macros
// defined below are the operator-specific pieces consumed by the generic
// template files #include'd inside the function bodies.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__gt_int16)
// A.*B function (eWiseMult):       GB (_AemultB_08__gt_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__gt_int16)
// A.*B function (eWiseMult):       GB (_AemultB_04__gt_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__gt_int16)
// A*D function (colscale):         GB (_AxD__gt_int16)
// D*A function (rowscale):         GB (_DxB__gt_int16)
// C+=B function (dense accum):     GB (_Cdense_accumB__gt_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__gt_int16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__gt_int16)
// C=scalar+B                       GB (_bind1st__gt_int16)
// C=scalar+B'                      GB (_bind1st_tran__gt_int16)
// C=A+scalar                       GB (_bind2nd__gt_int16)
// C=A'+scalar                      GB (_bind2nd_tran__gt_int16)

// C type:     bool
// A type:     int16_t
// A pattern?  0
// B type:     int16_t
// B pattern?  0

// BinaryOp: cij = (aij > bij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

// entry of C at position p
#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x > y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_GT || GxB_NO_INT16 || GxB_NO_GT_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// GT is a comparator, so this dense-accum kernel is not generated.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__gt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Disabled (#if 0) for GT: a comparator cannot serve as an accumulator here,
// so this variant always reports success without doing any work.
GrB_Info GB (_Cdense_accumB__gt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Disabled (#if 0) for GT, as above.
GrB_Info GB (_Cdense_accumb__gt_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if 0
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__gt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__gt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// The alpha/beta scalars are only read when is_eWiseUnion is true (GxB
// eWiseUnion semantics); for plain eWiseAdd they are left uninitialized and
// unused by the template.
GrB_Info GB (_AaddB__gt_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__gt_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// GB_BINOP_FLIP is 0 for GT: the flipped case z=gt(y,x) was rewritten by the
// caller as z=lt(x,y), so only the non-flipped branch below is compiled.
GrB_Info GB (_AemultB_02__gt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__gt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__gt_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = (x > Bx [p]) for every entry present in the bitmap Bb (Bb == NULL
// means all bnz entries are present).
GrB_Info GB (_bind1st__gt_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int16_t   x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = (Ax [p] > y) for every entry present in the bitmap Ab.
GrB_Info GB (_bind2nd__gt_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t   y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x > aij) ; \
}

GrB_Info GB (_bind1st_tran__gt_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij > y) ; \
}

GrB_Info GB (_bind2nd_tran__gt_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
conv_dw_hcl_x86.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 */

/*
 * x86 HCL implementation of depthwise 3x3 convolution (stride 1 and 2) for
 * int8 tensors, plus the node-ops glue (run/score/register) that plugs it
 * into the Tengine CPU device.  Tensors are indexed as dims[1..3] = C,H,W
 * (dims[0] is checked as the batch dimension in score() below).
 */

#include "convolution_param.h"

#include "conv_dw_kernel_x86.h"

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>
#include <string.h>

/* Copy one in_h x in_w int8 plane into the center of an out_h x out_w plane,
 * filling a border of `top` rows above, `left` columns on the left, and the
 * remainder on the right/bottom with the constant `v` (0 for zero-padding).
 * Caller guarantees out_h >= top + in_h and out_w >= left + in_w. */
static void pad_int8(int8_t* input, int8_t* output, int in_h, int in_w, int out_h, int out_w, int top, int left,
                     int8_t v)
{
    int8_t* ptr = input;
    int8_t* outptr = output;

    int y = 0;
    // fill top
    for (; y < top; y++)
    {
        int x = 0;
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        outptr += out_w;
    }
    // fill center
    for (; y < (top + in_h); y++)
    {
        int x = 0;
        for (; x < left; x++)
        {
            outptr[x] = v;
        }
        // for narrow rows a scalar loop is used; wider rows use memcpy
        if (in_w < 12)
        {
            for (; x < (left + in_w); x++)
            {
                outptr[x] = ptr[x - left];
            }
        }
        else
        {
            memcpy(outptr + left, ptr, in_w * sizeof(int8_t));
            x += in_w;
        }
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        ptr += in_w;
        outptr += out_w;
    }
    // fill bottom
    for (; y < out_h; y++)
    {
        int x = 0;
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        outptr += out_w;
    }
}

/* Depthwise 3x3, stride 1, int8 pipeline:
 * zero-pad input -> per-channel int32 convolution -> add bias and dequantize
 * to fp32 -> optional ReLU (activation == 0) or ReLU6 (activation > 0) ->
 * requantize to int8 with symmetric clamping to [-127, 127].
 * Returns 0 on success.
 * NOTE(review): sys_malloc results are not checked for NULL. */
static int convdw3x3s1_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor,
                                struct tensor* output_tensor, struct conv_param* param, int num_thread)
{
    int inch = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int in_hw = inh * inw;

    int outch = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_size = output_tensor->elem_num;

    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;

    /* int32 accumulator and fp32 staging buffers for the whole output */
    int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
    memset(output_int32, 0, out_size * sizeof(int32_t));
    float* output_fp32 = (float*)sys_malloc(out_size * sizeof(float));

    int8_t* output_int8 = output_tensor->data;
    int8_t* input_int8 = input_tensor->data;
    int32_t* bias_int32 = NULL;
    if(bias_tensor)
        bias_int32 = bias_tensor->data;

    /* get scale value of quantization (one kernel scale per channel) */
    float input_scale = input_tensor->scale;
    float* kernel_scales = weight_tensor->scale_list;
    float output_scale = output_tensor->scale;

    const signed char* kernel = weight_tensor->data;

    /* padding: build a zero-padded copy of the input unless padding is 0 */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;
    int8_t* input_tmp = NULL;
    if (inh_tmp == inh && inw_tmp == inw)
        input_tmp = input_int8;
    else
    {
        input_tmp = ( int8_t* )sys_malloc(inh_tmp * inw_tmp * inch * sizeof(int8_t));
#pragma omp parallel for num_threads(num_thread)
        for (int g = 0; g < inch; g++)
        {
            int8_t* pad_in = input_int8 + g * inh * inw;
            int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
            pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
        }
    }

    /* per-channel 3x3 stride-1 convolution, int32 accumulation */
#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < outch; p++)
    {
        int32_t* out0 = output_int32 + p * out_hw;
        int8_t* kernel0 = (int8_t* )kernel + p * 9;   /* 9 weights per channel */

        int* outptr0 = out0;

        int8_t* img0 = input_tmp + p * inw_tmp * inh_tmp;
        int8_t* r0 = img0;                 /* three consecutive input rows */
        int8_t* r1 = img0 + inw_tmp;
        int8_t* r2 = img0 + inw_tmp * 2;

        for (int i = 0; i < outh; i++)
        {
            int remain = outw;

            for (; remain > 0; remain--)
            {
                int sum0 = 0;
                sum0 += ( int )r0[0] * kernel0[0];
                sum0 += ( int )r0[1] * kernel0[1];
                sum0 += ( int )r0[2] * kernel0[2];
                sum0 += ( int )r1[0] * kernel0[3];
                sum0 += ( int )r1[1] * kernel0[4];
                sum0 += ( int )r1[2] * kernel0[5];
                sum0 += ( int )r2[0] * kernel0[6];
                sum0 += ( int )r2[1] * kernel0[7];
                sum0 += ( int )r2[2] * kernel0[8];

                *outptr0 += sum0;

                r0++;
                r1++;
                r2++;
                outptr0++;
            }

            /* skip the 2 right/left padding columns to reach the next row */
            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
        kernel0 += 9;   /* NOTE(review): dead statement; kernel0 is recomputed each iteration */
    }

    /* process bias and dequant output from int32 to fp32 */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outch; i++)
    {
        for (int j = 0; j < outh * outw; j++)
        {
            int output_off = i * (outh * outw) + j;
            if (bias_tensor)
                output_fp32[output_off] = (float )(output_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i];
            else
                output_fp32[output_off] = (float )output_int32[output_off] * input_scale * kernel_scales[i];
        }
    }

    /* process activation relu (activation == 0 means plain ReLU) */
    if (param->activation == 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outch; i++)
        {
            for (int j = 0; j < outh * outw; j++)
            {
                int output_off = i * (outh * outw) + j;

                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
            }
        }
    }

    /* process activation relu6 (any positive activation value clamps to [0,6]) */
    if (param->activation > 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outch; i++)
        {
            for (int j = 0; j < outh * outw; j++)
            {
                int output_off = i * (outh * outw) + j;

                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
                if (output_fp32[output_off] > 6)
                    output_fp32[output_off] = 6;
            }
        }
    }

    /* quant from fp32 to int8, round-to-nearest, symmetric clamp to +/-127 */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outch; i++)
    {
        for (int j = 0; j < outh * outw; j++)
        {
            int output_off = i * (outh * outw) + j;

            int32_t data_i32 = ( int32_t )(round(output_fp32[output_off] / output_scale));
            if (data_i32 > 127)
                data_i32 = 127;
            else if (data_i32 < -127)
                data_i32 = -127;
            output_int8[output_off] = (int8_t)data_i32;
        }
    }

    sys_free(output_int32);
    sys_free(output_fp32);
    if (!(inh_tmp == inh && inw_tmp == inw))
        sys_free(input_tmp);
    return 0;
}

/* Depthwise 3x3, stride 2, int8.  Same pipeline as the stride-1 variant
 * above; the input pointers advance by 2 per output column and by `tailstep`
 * at the end of each output row (skipping the odd row the stride jumps over).
 * Returns 0 on success. */
static int convdw3x3s2_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor,
                                struct tensor* output_tensor, struct conv_param* param, int num_thread)
{
    int inch = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];
    int in_hw = inh * inw;

    int outch = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_size = output_tensor->elem_num;

    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;

    /* int32 accumulator and fp32 staging buffers for the whole output */
    int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
    memset(output_int32, 0, out_size * sizeof(int32_t));
    float* output_fp32 = (float*)sys_malloc(out_size * sizeof(float));

    int8_t* output_int8 = output_tensor->data;
    int8_t* input_int8 = input_tensor->data;
    int32_t* bias_int32 = NULL;
    if(bias_tensor)
        bias_int32 = bias_tensor->data;

    /* get scale value of quantization (one kernel scale per channel) */
    float input_scale = input_tensor->scale;
    float* kernel_scales = weight_tensor->scale_list;
    float output_scale = output_tensor->scale;

    const signed char* kernel = weight_tensor->data;

    /* padding: build a zero-padded copy of the input unless padding is 0 */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;
    int8_t* input_tmp = NULL;
    if (inh_tmp == inh && inw_tmp == inw)
        input_tmp = input_int8;
    else
    {
        input_tmp = ( int8_t* )sys_malloc(inh_tmp * inw_tmp * inch * sizeof(int8_t));
#pragma omp parallel for num_threads(num_thread)
        for (int g = 0; g < inch; g++)
        {
            int8_t* pad_in = input_int8 + g * inh * inw;
            int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
            pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
        }
    }

    /* advance from the end of one output row to the start of the next input
     * row pair: remaining columns plus one full skipped row */
    int tailstep = inw_tmp - 2 * outw + inw_tmp;

    /* per-channel 3x3 stride-2 convolution, int32 accumulation */
#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < outch; p++)
    {
        int32_t* out0 = output_int32 + p * out_hw;
        int8_t* kernel0 = (int8_t* )kernel + p * 9;   /* 9 weights per channel */

        int* outptr0 = out0;

        int8_t* img0 = input_tmp + p * inw_tmp * inh_tmp;
        int8_t* r0 = img0;                 /* three consecutive input rows */
        int8_t* r1 = img0 + inw_tmp;
        int8_t* r2 = img0 + inw_tmp * 2;

        for (int i = 0; i < outh; i++)
        {
            int remain = outw;

            for (; remain > 0; remain--)
            {
                int sum0 = 0;
                sum0 += ( int )r0[0] * kernel0[0];
                sum0 += ( int )r0[1] * kernel0[1];
                sum0 += ( int )r0[2] * kernel0[2];
                sum0 += ( int )r1[0] * kernel0[3];
                sum0 += ( int )r1[1] * kernel0[4];
                sum0 += ( int )r1[2] * kernel0[5];
                sum0 += ( int )r2[0] * kernel0[6];
                sum0 += ( int )r2[1] * kernel0[7];
                sum0 += ( int )r2[2] * kernel0[8];

                *outptr0 += sum0;

                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr0++;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
        kernel0 += 9;   /* NOTE(review): dead statement; kernel0 is recomputed each iteration */
    }

    /* process bias and dequant output from int32 to fp32 */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outch; i++)
    {
        for (int j = 0; j < outh * outw; j++)
        {
            int output_off = i * (outh * outw) + j;
            if (bias_tensor)
                output_fp32[output_off] = (float )(output_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i];
            else
                output_fp32[output_off] = (float )output_int32[output_off] * input_scale * kernel_scales[i];
        }
    }

    /* process activation relu (activation == 0 means plain ReLU) */
    if (param->activation == 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outch; i++)
        {
            for (int j = 0; j < outh * outw; j++)
            {
                int output_off = i * (outh * outw) + j;

                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
            }
        }
    }

    /* process activation relu6 (any positive activation value clamps to [0,6]) */
    if (param->activation > 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outch; i++)
        {
            for (int j = 0; j < outh * outw; j++)
            {
                int output_off = i * (outh * outw) + j;

                if (output_fp32[output_off] < 0)
                    output_fp32[output_off] = 0;
                if (output_fp32[output_off] > 6)
                    output_fp32[output_off] = 6;
            }
        }
    }

    /* quant from fp32 to int8, round-to-nearest, symmetric clamp to +/-127 */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outch; i++)
    {
        for (int j = 0; j < outh * outw; j++)
        {
            int output_off = i * (outh * outw) + j;

            int32_t data_i32 = ( int32_t )(round(output_fp32[output_off] / output_scale));
            if (data_i32 > 127)
                data_i32 = 127;
            else if (data_i32 < -127)
                data_i32 = -127;
            output_int8[output_off] = (int8_t)data_i32;
        }
    }

    sys_free(output_int32);
    sys_free(output_fp32);
    if (!(inh_tmp == inh && inw_tmp == inw))
        sys_free(input_tmp);
    return 0;
}

/* Dispatch the int8 depthwise conv by stride.  Only strides 1 and 2 are
 * supported (score() below guarantees this); returns -1 otherwise. */
static int conv_dw_run_int8(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor,
                            struct tensor* output_tensor, struct conv_param* param, int num_thread)
{
    int ret = -1;
    switch(param->stride_h)
    {
        case 1:
            ret = convdw3x3s1_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, param, num_thread);
            break;
        case 2:
            ret = convdw3x3s2_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, param, num_thread);
            break;
        default:
            TLOG_ERR("Direct Convolution Int8 not support the stride %d\n", param->stride_h);
    }

    return ret;
}

/* node_ops run hook: fetch the node's tensors and dispatch to the fp32 or
 * int8 implementation depending on the graph's execution mode. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* weight_tensor;
    struct tensor* bias_tensor = NULL;
    struct tensor* output_tensor = NULL;
    int num_thread = exec_graph->num_thread;
    int cpu_affinity = exec_graph->cpu_affinity;

    /* set the input data and shape again, in case of reshape or dynamic shape */
    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    weight_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    if (ir_node->input_num > 2)
        bias_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[2]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    struct conv_param* conv_param = ( struct conv_param* )ir_node->op.param_mem;
    struct conv_priv_info* conv_priv_info = ( struct conv_priv_info* )exec_node->ops_priv;

    int ret = -1;
    if (exec_graph->mode == TENGINE_MODE_FP32)
        ret = conv_dw_run(input_tensor, weight_tensor, bias_tensor, output_tensor, conv_priv_info, conv_param, num_thread, cpu_affinity);
    else if (exec_graph->mode == TENGINE_MODE_INT8)
        ret = conv_dw_run_int8(input_tensor, weight_tensor, bias_tensor, output_tensor, conv_param, num_thread);
    else
    {
        TLOG_ERR("hcl conv run failed\n");
        return -1;
    }

    return ret;
}

/* node_ops init hook: no per-node state needed for this implementation. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* node_ops release hook: nothing allocated in init_node, nothing to free. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* node_ops score hook: claim OPS_SCORE_BEST only for genuine depthwise conv
 * (group > 1, one in/out channel per group), 3x3 kernel, symmetric padding,
 * dilation 1, stride 1 or 2, batch 1, fp32 or int8 data; otherwise 0 so a
 * generic implementation is chosen instead. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    struct conv_param* param = ( struct conv_param* )exec_node->op.param_mem;
    struct node* ir_node = exec_node;
    struct graph* ir_graph = ir_node->graph;

    struct tensor* input_tensor;
    struct tensor* output_tensor;

    int group = param->group;
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int pad_h0 = param->pad_h0;
    int pad_w0 = param->pad_w0;
    int pad_h1 = param->pad_h1;
    int pad_w1 = param->pad_w1;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int in_c = input_tensor->dims[1] / group;
    int out_c = output_tensor->dims[1] / group;

    /* todo support uint8 */
    if (!(input_tensor->data_type == TENGINE_DT_FP32 || input_tensor->data_type == TENGINE_DT_INT8))
        return 0;

    if (kernel_h != kernel_w || input_tensor->dims[0] > 1)
        return 0;

    if (param->group > 1 && in_c == 1 && out_c == 1 && pad_h0 == pad_h1 && pad_w0 == pad_w1 && dilation_h == 1 && dilation_w == 1 && kernel_h == 3 && kernel_w == 3
        && ((stride_h == 1 && stride_w == 1) || (stride_h == 2 && stride_w == 2)))
        return OPS_SCORE_BEST;
    else
        return 0;
}

/* node_ops vtable for this depthwise-conv implementation */
static struct node_ops hcl_node_ops = {.prerun = NULL,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

int register_conv_dw_hcl_x86_op()
{
    return register_builtin_node_ops(OP_CONV, &hcl_node_ops);
}

int unregister_conv_dw_hcl_x86_op()
{
    unregister_builtin_node_ops(OP_CONV, &hcl_node_ops);
    return 0;
}
couple_waves_omp45.c
/* * couple_waves_omp45.c * * Couple 3 waves contained in C99 complex arrays. * * OpenMP 4.5 version intended for use with Nvidia boards and similar devices. * */ #include <stdio.h> #ifdef _OPENMP #include <omp.h> #endif #include "mytypes.h" #include "light.h" #include "pf3dbench.h" #include "util.h" #include "runparm.h" #include "pf3dbenchvars.h" void couple_omp45(rcomplex * restrict t0, rcomplex * restrict t2, rcomplex * restrict denp) { start_omp_time(); #pragma omp target map(to:denp[0:ngtot]) map(tofrom:t0[0:ngtot],t2[0:ngtot]) #pragma omp teams num_teams(num_teams) { real c20, cslamt, snlamt, r_zlam, r, fratio; real r_fratio, cratio, zac2; double zlam, c2re, c2im; rcomplex a0t, a2t, c2, z3; int ix, iy, iz, myteam; long it0; cratio= 1.0e3; fratio = SQRT(0.9); r_fratio = ONE/fratio; c20 = 0.25 * cratio * r_fratio; myteam = omp_get_team_num(); /* the static directive is to encourage coalesced loads */ #ifdef _OPENMP #pragma omp distribute parallel for COLLAPSE(3) schedule(static,1) private(c2, a0t, a2t, zlam, r_zlam, snlamt, cslamt, r, z3, ix, iy, iz, it0, zac2, c2re, c2im) #endif for (iz= 0; iz < nzl; iz++) { for (iy=0; iy<nyl; iy++) { for (ix=0; ix<nxl; ix++) { it0= CELTNDX(ix,iy,iz); c2 = c20 * denp[it0]; c2re = CREAL(c2); c2im = CIMAG(c2); /* compute lamda = sqrt(|c2|^2) using doubles to avoid underflow. 
*/ zlam = c2re*c2re + c2im*c2im + 1.0e-34; zlam = sqrt(zlam); snlamt = SIN(zlam * dt * HALF); cslamt = COS(zlam * dt * HALF); a0t = t0[it0]; a2t = t2[it0] * fratio; /* normalize c2 */ r_zlam= ONE/(real)zlam; c2 *= r_zlam; /* compute the square of c2 after scaling */ zac2 = zabs2(c2); /* compute new A0 */ z3 = c2 * a2t * snlamt ; t0[it0] = a0t * cslamt - IREAL * z3; /* compute new A2 */ r = zac2 * cslamt; z3 = CONJ(c2) * a0t * snlamt; t2[it0] = ( a2t * r - IREAL * z3 ) * r_fratio; } /* end x for-loop */ } /* end y for-loop */ } /* end of distribute z loop */ } /* end of OMP target */ stop_omp_time(); } void couple_premap(rcomplex * restrict t0, rcomplex * restrict t2, rcomplex * restrict denp) { #pragma omp target enter data map(to:denp[0:ngtot],t0[0:ngtot],t2[0:ngtot]) } void couple_unmap(rcomplex * restrict t0, rcomplex * restrict t2, rcomplex * restrict denp) { #pragma omp target exit data map(release:denp[0:ngtot]) map(from:t0[0:ngtot],t2[0:ngtot]) } void couple_omp45_pre(rcomplex * restrict t0, rcomplex * restrict t2, rcomplex * restrict denp) { start_omp_time(); #pragma omp target teams num_teams(num_teams) { real c20, cslamt, snlamt, r_zlam, r, fratio; real r_fratio, cratio, zac2; double zlam, c2re, c2im; rcomplex a0t, a2t, c2, z3, z4; int ix, iy, iz, myteam; long it0; cratio= 1.0e3; fratio = SQRT(0.9); r_fratio = ONE/fratio; c20 = 0.25 * cratio * r_fratio; myteam = omp_get_team_num(); /* the static directive is to encourage coalesced loads */ #ifdef _OPENMP #pragma omp distribute parallel for COLLAPSE(3) schedule(static,1) private(c2, a0t, a2t, zlam, r_zlam, snlamt, cslamt, r, z3, ix, iy, iz, it0, zac2, c2re, c2im) #endif for (iz= 0; iz < nzl; iz++) { for (iy=0; iy<nyl; iy++) { for (ix=0; ix<nxl; ix++) { it0= CELTNDX(ix,iy,iz); c2 = c20 * denp[it0]; c2re = CREAL(c2); c2im = CIMAG(c2); /* compute lamda = sqrt(|c2|^2) using doubles to avoid underflow. 
*/ zlam = c2re*c2re + c2im*c2im + 1.0e-34; zlam = sqrt(zlam); snlamt = SIN(zlam * dt * HALF); cslamt = COS(zlam * dt * HALF); a0t = t0[it0]; a2t = t2[it0] * fratio; /* normalize c2 */ r_zlam= ONE/(real)zlam; c2 *= r_zlam; /* compute the square of c2 after scaling */ zac2 = zabs2(c2); /* compute new A0 */ z3 = c2 * a2t * snlamt ; t0[it0] = a0t * cslamt - IREAL * z3; /* compute new A2 */ r = zac2 * cslamt; z3 = CONJ(c2) * a0t * snlamt; t2[it0] = ( a2t * r - IREAL * z3 ) * r_fratio; } /* end x for-loop */ } /* end y for-loop */ } /* end of distribute z loop */ } /* end of OMP target */ stop_omp_time(); } void couple_omp45_pre_simd(rcomplex * restrict t0, rcomplex * restrict t2, rcomplex * restrict denp) { start_omp_time(); #pragma omp target teams num_teams(num_teams) { real c20, cslamt, snlamt, r_zlam, r, fratio; real r_fratio, cratio, zac2; double zlam, c2re, c2im; rcomplex a0t, a2t, c2, z3; int ix, iy, iz, myteam; long it0; cratio= 1.0e3; fratio = SQRT(0.9); r_fratio = ONE/fratio; c20 = 0.25 * cratio * r_fratio; myteam = omp_get_team_num(); #ifdef _OPENMP #pragma omp distribute private(iz) #endif for (iz= 0; iz < nzl; iz++) { /* the static directive is to encourage coalesced loads */ #pragma omp parallel for simd COLLAPSE(2) schedule(static,1) private(c2, a0t, a2t, zlam, r_zlam, snlamt, cslamt, r, z3, it0, zac2, c2re, c2im) for (iy=0; iy<nyl; iy++) { for (ix=0; ix<nxl; ix++) { it0= CELTNDX(ix,iy,iz); c2 = c20 * denlw[it0]; c2re = CREAL(c2); c2im = CIMAG(c2); /* compute lamda = sqrt(|c2|^2) using doubles to avoid underflow. 
*/ zlam = c2re*c2re + c2im*c2im + 1.0e-34; zlam = sqrt(zlam); snlamt = SIN(zlam * dt * HALF); cslamt = COS(zlam * dt * HALF); a0t = t0[it0]; a2t = t2[it0] * fratio; /* normalize c2 */ r_zlam= ONE/(real)zlam; c2 *= r_zlam; /* compute the square of c2 after scaling */ zac2 = zabs2(c2); /* compute new A0 */ z3 = c2 * a2t * snlamt ; t0[it0] = a0t * cslamt - IREAL * z3; /* compute new A2 */ r = zac2 * cslamt; z3 = CONJ(c2) * a0t * snlamt; t2[it0] = ( a2t * r - IREAL * z3 ) * r_fratio; } /* end x for-loop */ } /* end y for-loop */ } /* end of distribute z loop */ } /* end of OMP target */ stop_omp_time(); }
GB_unop__creal_fp64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__creal_fp64_fc64) // op(A') function: GB (_unop_tran__creal_fp64_fc64) // C type: double // A type: GxB_FC64_t // cast: GxB_FC64_t cij = (aij) // unaryop: cij = creal (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = creal (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = (aij) ; \ Cx [pC] = creal (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_CREAL || GxB_NO_FP64 || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__creal_fp64_fc64) ( double *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE 
return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = (aij) ; Cx [p] = creal (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = (aij) ; Cx [p] = creal (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__creal_fp64_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
mrcore.c
/*************************************************************************** * Copyright 2013 CertiVox UK Ltd. * * This file is part of CertiVox MIRACL Crypto SDK. * * The CertiVox MIRACL Crypto SDK provides developers with an * extensive and efficient set of cryptographic functions. * For further information about its features and functionalities please * refer to http://www.certivox.com * * * The CertiVox MIRACL Crypto SDK is free software: you can * redistribute it and/or modify it under the terms of the * GNU Affero General Public License as published by the * Free Software Foundation, either version 3 of the License, * or (at your option) any later version. * * * The CertiVox MIRACL Crypto SDK is distributed in the hope * that it will be useful, but WITHOUT ANY WARRANTY; without even the * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Affero General Public License for more details. * * * You should have received a copy of the GNU Affero General Public * License along with CertiVox MIRACL Crypto SDK. * If not, see <http://www.gnu.org/licenses/>. * * You can be released from the requirements of the license by purchasing * a commercial license. Buying such a license is mandatory as soon as you * develop commercial activities involving the CertiVox MIRACL Crypto SDK * without disclosing the source code of your own applications, or shipping * the CertiVox MIRACL Crypto SDK with a closed source product. * * ***************************************************************************/ /* * * MIRACL Core module - contains initialisation code and general purpose * utilities * mrcore.c * * Space can be saved by removing unneeded functions (mr_and ?) 
* */ #include "miracl.h" #include <stdlib.h> #include <string.h> #ifdef MR_FP #include <math.h> #endif /*** Multi-Threaded Support ***/ #ifndef MR_GENERIC_MT #ifdef MR_OPENMP_MT #include <omp.h> #define MR_MIP_EXISTS miracl *mr_mip; #pragma omp threadprivate(mr_mip) miracl *get_mip() { return mr_mip; } void mr_init_threading() { } void mr_end_threading() { } #endif #ifdef MR_WINDOWS_MT #include <windows.h> DWORD mr_key; miracl *get_mip() { return (miracl *)TlsGetValue(mr_key); } void mr_init_threading() { mr_key=TlsAlloc(); } void mr_end_threading() { TlsFree(mr_key); } #endif #ifdef MR_UNIX_MT #include <pthread.h> pthread_key_t mr_key; miracl *get_mip() { return (miracl *)pthread_getspecific(mr_key); } void mr_init_threading() { pthread_key_create(&mr_key,(void(*)(void *))NULL); } void mr_end_threading() { pthread_key_delete(mr_key); } #endif #ifndef MR_WINDOWS_MT #ifndef MR_UNIX_MT #ifndef MR_OPENMP_MT #ifdef MR_STATIC miracl mip; miracl *mr_mip=&mip; #else miracl *mr_mip=NULL; /* MIRACL's one and only global variable */ #endif #define MR_MIP_EXISTS miracl *get_mip() { return (miracl *)mr_mip; } #endif #endif #endif #ifdef MR_MIP_EXISTS void set_mip(miracl *mip) { mr_mip=mip; } #endif #endif /* See Advanced Windows by Jeffrey Richter, Chapter 12 for methods for creating different instances of this global for each executing thread when using Windows '95/NT */ #ifdef MR_STATIC #if MIRACL==8 static const int mr_small_primes[]= {2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103, 107,109,113,127,0}; #else static const int mr_small_primes[]= {2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103, 107,109,113,127,131,137,139,149,151,157,163,167,173,179,181,191,193,197,199,211, 223,227,229,233,239,241,251,257,263,269,271,277,281,283,293,307,311,313,317,331, 337,347,349,353,359,367,373,379,383,389,397,401,409,419,421,431,433,439,443,449, 457,461,463,467,479,487,491,499,503,509,521,523,541,547,557,563,569,571,577,587, 
593,599,601,607,613,617,619,631,641,643,647,653,659,661,673,677,683,691,701,709, 719,727,733,739,743,751,757,761,769,773,787,797,809,811,821,823,827,829,839,853, 857,859,863,877,881,883,887,907,911,919,929,937,941,947,953,967,971,977,983,991, 997,0}; #endif #endif #ifndef MR_STRIPPED_DOWN #ifndef MR_NO_STANDARD_IO static char *names[] = {(char *)"your program",(char *)"innum",(char *)"otnum",(char *)"jack",(char *)"normalise", (char *)"multiply",(char *)"divide",(char *)"incr",(char *)"decr",(char *)"premult", (char *)"subdiv",(char *)"fdsize",(char *)"egcd",(char *)"cbase", (char *)"cinnum",(char *)"cotnum",(char *)"nroot",(char *)"power", (char *)"powmod",(char *)"bigdig",(char *)"bigrand",(char *)"nxprime",(char *)"isprime", (char *)"mirvar",(char *)"mad",(char *)"multi_inverse",(char *)"putdig", (char *)"add",(char *)"subtract",(char *)"mirsys",(char *)"xgcd", (char *)"fpack",(char *)"dconv",(char *)"mr_shift",(char *)"mround",(char *)"fmul", (char *)"fdiv",(char *)"fadd",(char *)"fsub",(char *)"fcomp",(char *)"fconv", (char *)"frecip",(char *)"fpmul",(char *)"fincr",(char *)"",(char *)"ftrunc", (char *)"frand",(char *)"sftbit",(char *)"build",(char *)"logb2",(char *)"expint", (char *)"fpower",(char *)"froot",(char *)"fpi",(char *)"fexp",(char *)"flog",(char *)"fpowf", (char *)"ftan",(char *)"fatan",(char *)"fsin",(char *)"fasin",(char *)"fcos",(char *)"facos", (char *)"ftanh",(char *)"fatanh",(char *)"fsinh",(char *)"fasinh",(char *)"fcosh", (char *)"facosh",(char *)"flop",(char *)"gprime",(char *)"powltr",(char *)"fft_mult", (char *)"crt_init",(char *)"crt",(char *)"otstr",(char *)"instr",(char *)"cotstr",(char *)"cinstr",(char *)"powmod2", (char *)"prepare_monty",(char *)"nres",(char *)"redc",(char *)"nres_modmult",(char *)"nres_powmod", (char *)"nres_moddiv",(char *)"nres_powltr",(char *)"divisible",(char *)"remain", (char *)"fmodulo",(char *)"nres_modadd",(char *)"nres_modsub",(char *)"nres_negate", (char *)"ecurve_init",(char *)"ecurve_add",(char 
*)"ecurve_mult", (char *)"epoint_init",(char *)"epoint_set",(char *)"epoint_get",(char *)"nres_powmod2", (char *)"nres_sqroot",(char *)"sqroot",(char *)"nres_premult",(char *)"ecurve_mult2", (char *)"ecurve_sub",(char *)"trial_division",(char *)"nxsafeprime",(char *)"nres_lucas",(char *)"lucas", (char *)"brick_init",(char *)"pow_brick",(char *)"set_user_function", (char *)"nres_powmodn",(char *)"powmodn",(char *)"ecurve_multn", (char *)"ebrick_init",(char *)"mul_brick",(char *)"epoint_norm",(char *)"nres_multi_inverse",(char *)"", (char *)"nres_dotprod",(char *)"epoint_negate",(char *)"ecurve_multi_add", (char *)"ecurve2_init",(char *)"",(char *)"epoint2_set",(char *)"epoint2_norm",(char *)"epoint2_get", (char *)"epoint2_comp",(char *)"ecurve2_add",(char *)"epoint2_negate",(char *)"ecurve2_sub", (char *)"ecurve2_multi_add",(char *)"ecurve2_mult",(char *)"ecurve2_multn",(char *)"ecurve2_mult2", (char *)"ebrick2_init",(char *)"mul2_brick",(char *)"prepare_basis",(char *)"strong_bigrand", (char *)"bytes_to_big",(char *)"big_to_bytes",(char *)"set_io_buffer_size", (char *)"epoint_getxyz",(char *)"epoint_double_add",(char *)"nres_double_inverse", (char *)"double_inverse",(char *)"epoint_x",(char *)"hamming",(char *)"expb2",(char *)"bigbits", (char *)"nres_lazy",(char *)"zzn2_imul",(char *)"nres_double_modadd",(char *)"nres_double_modsub", /*155*/(char *)"",(char *)"zzn2_from_int",(char *)"zzn2_negate",(char *)"zzn2_conj",(char *)"zzn2_add", (char *)"zzn2_sub",(char *)"zzn2_smul",(char *)"zzn2_mul",(char *)"zzn2_inv",(char *)"zzn2_timesi",(char *)"zzn2_powl", (char *)"zzn2_from_bigs",(char *)"zzn2_from_big",(char *)"zzn2_from_ints", (char *)"zzn2_sadd",(char *)"zzn2_ssub",(char *)"zzn2_times_irp",(char *)"zzn2_div2", (char *)"zzn3_from_int",(char *)"zzn3_from_ints",(char *)"zzn3_from_bigs", (char *)"zzn3_from_big",(char *)"zzn3_negate",(char *)"zzn3_powq",(char *)"zzn3_init", (char *)"zzn3_add",(char *)"zzn3_sadd",(char *)"zzn3_sub",(char *)"zzn3_ssub",(char 
*)"zzn3_smul", (char *)"zzn3_imul",(char *)"zzn3_mul",(char *)"zzn3_inv",(char *)"zzn3_div2",(char *)"zzn3_timesi", (char *)"epoint_multi_norm",(char *)"mr_jsf",(char *)"epoint2_multi_norm", (char *)"ecn2_compare",(char *)"ecn2_norm",(char *)"ecn2_set",(char *)"zzn2_txx", (char *)"zzn2_txd",(char *)"nres_div2",(char *)"nres_div3",(char *)"zzn2_div3", (char *)"ecn2_setx",(char *)"ecn2_rhs",(char *)"zzn2_qr",(char *)"zzn2_sqrt",(char *)"ecn2_add",(char *)"ecn2_mul2_jsf",(char *)"ecn2_mul", (char *)"nres_div5",(char *)"zzn2_div5",(char *)"zzn2_sqr",(char *)"ecn2_add_sub",(char *)"ecn2_psi",(char *)"invmodp", (char *)"zzn2_multi_inverse",(char *)"ecn2_multi_norm",(char *)"ecn2_precomp",(char *)"ecn2_mul4_gls_v", (char *)"ecn2_mul2",(char *)"ecn2_precomp_gls",(char *)"ecn2_mul2_gls", (char *)"ecn2_brick_init",(char *)"ecn2_mul_brick_gls",(char *)"ecn2_multn",(char *)"zzn3_timesi2", (char *)"nres_complex",(char *)"zzn4_from_int",(char *)"zzn4_negate",(char *)"zzn4_conj",(char *)"zzn4_add",(char *)"zzn4_sadd",(char *)"zzn4_sub",(char *)"zzn4_ssub",(char *)"zzn4_smul",(char *)"zzn4_sqr", (char *)"zzn4_mul",(char *)"zzn4_inv",(char *)"zzn4_div2",(char *)"zzn4_powq",(char *)"zzn4_tx",(char *)"zzn4_imul",(char *)"zzn4_lmul",(char *)"zzn4_from_big", (char *)"ecn2_mult4"}; /* 0 - 243 (244 in all) */ #endif #endif #ifdef MR_NOASM /* C only versions of muldiv/muldvd/muldvd2/muldvm */ /* Note that mr_large should be twice the size of mr_small */ mr_small muldiv(mr_small a,mr_small b,mr_small c,mr_small m,mr_small *rp) { mr_small q; mr_large ldres,p=(mr_large)a*b+c; q=(mr_small)(MR_LROUND(p/m)); *rp=(mr_small)(p-(mr_large)q*m); return q; } #ifdef MR_FP_ROUNDING mr_small imuldiv(mr_small a,mr_small b,mr_small c,mr_small m,mr_large im,mr_small *rp) { mr_small q; mr_large ldres,p=(mr_large)a*b+c; q=(mr_small)MR_LROUND(p*im); *rp=(mr_small)(p-(mr_large)q*m); return q; } #endif #ifndef MR_NOFULLWIDTH mr_small muldvm(mr_small a,mr_small c,mr_small m,mr_small *rp) { mr_small q; union 
doubleword dble; dble.h[MR_BOT]=c; dble.h[MR_TOP]=a; q=(mr_small)(dble.d/m); *rp=(mr_small)(dble.d-(mr_large)q*m); return q; } mr_small muldvd(mr_small a,mr_small b,mr_small c,mr_small *rp) { union doubleword dble; dble.d=(mr_large)a*b+c; *rp=dble.h[MR_BOT]; return dble.h[MR_TOP]; } void muldvd2(mr_small a,mr_small b,mr_small *c,mr_small *rp) { union doubleword dble; dble.d=(mr_large)a*b+*c+*rp; *rp=dble.h[MR_BOT]; *c=dble.h[MR_TOP]; } #endif #endif #ifdef MR_NOFULLWIDTH /* no FULLWIDTH working, so supply dummies */ /* mr_small muldvd(mr_small a,mr_small b,mr_small c,mr_small *rp) { return (mr_small)0; } mr_small muldvm(mr_small a,mr_small c,mr_small m,mr_small *rp) { return (mr_small)0; } void muldvd2(mr_small a,mr_small b,mr_small *c,mr_small *rp) { } */ #endif #ifndef MR_NO_STANDARD_IO static void mputs(char *s) { /* output a string */ int i=0; while (s[i]!=0) fputc((int)s[i++],stdout); } #endif void mr_berror(_MIPD_ int nerr) { /* Big number error routine */ #ifndef MR_STRIPPED_DOWN int i; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERCON) { mr_mip->ERNUM=nerr; return; } #ifndef MR_NO_STANDARD_IO #ifndef MR_STRIPPED_DOWN mputs((char *)"\nMIRACL error from routine "); if (mr_mip->depth<MR_MAXDEPTH) mputs(names[mr_mip->trace[mr_mip->depth]]); else mputs((char *)"???"); fputc('\n',stdout); for (i=mr_mip->depth-1;i>=0;i--) { mputs((char *)" called from "); if (i<MR_MAXDEPTH) mputs(names[mr_mip->trace[i]]); else mputs((char *)"???"); fputc('\n',stdout); } switch (nerr) { case 1 : mputs((char *)"Number base too big for representation\n"); break; case 2 : mputs((char *)"Division by zero attempted\n"); break; case 3 : mputs((char *)"Overflow - Number too big\n"); break; case 4 : mputs((char *)"Internal result is negative\n"); break; case 5 : mputs((char *)"Input format error\n"); break; case 6 : mputs((char *)"Illegal number base\n"); break; case 7 : mputs((char *)"Illegal parameter usage\n"); break; case 8 : mputs((char *)"Out of 
space\n"); break; case 9 : mputs((char *)"Even root of a negative number\n"); break; case 10: mputs((char *)"Raising integer to negative power\n"); break; case 11: mputs((char *)"Attempt to take illegal root\n"); break; case 12: mputs((char *)"Integer operation attempted on Flash number\n"); break; case 13: mputs((char *)"Flash overflow\n"); break; case 14: mputs((char *)"Numbers too big\n"); break; case 15: mputs((char *)"Log of a non-positive number\n"); break; case 16: mputs((char *)"Flash to double conversion failure\n"); break; case 17: mputs((char *)"I/O buffer overflow\n"); break; case 18: mputs((char *)"MIRACL not initialised - no call to mirsys()\n"); break; case 19: mputs((char *)"Illegal modulus \n"); break; case 20: mputs((char *)"No modulus defined\n"); break; case 21: mputs((char *)"Exponent too big\n"); break; case 22: mputs((char *)"Unsupported Feature - check mirdef.h\n"); break; case 23: mputs((char *)"Specified double length type isn't double length\n"); break; case 24: mputs((char *)"Specified basis is NOT irreducible\n"); break; case 25: mputs((char *)"Unable to control Floating-point rounding\n"); break; case 26: mputs((char *)"Base must be binary (MR_ALWAYS_BINARY defined in mirdef.h ?)\n"); break; case 27: mputs((char *)"No irreducible basis defined\n"); break; case 28: mputs((char *)"Composite modulus\n"); break; case 29: mputs((char *)"Input/output error when reading from RNG device node\n"); break; default: mputs((char *)"Undefined error\n"); break; } exit(0); #else mputs((char *)"MIRACL error\n"); exit(0); #endif #endif } #ifndef MR_STRIPPED_DOWN void mr_track(_MIPDO_ ) { /* track course of program execution * * through the MIRACL routines */ #ifndef MR_NO_STANDARD_IO int i; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif for (i=0;i<mr_mip->depth;i++) fputc('-',stdout); fputc('>',stdout); mputs(names[mr_mip->trace[mr_mip->depth]]); fputc('\n',stdout); #endif } #endif #ifndef MR_NO_RAND mr_small brand(_MIPDO_ ) { /* Marsaglia & 
Zaman random number generator */ int i,k; mr_unsign32 pdiff,t; mr_small r; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->lg2b>32) { /* underlying type is > 32 bits. Assume <= 64 bits */ mr_mip->rndptr+=2; if (mr_mip->rndptr<NK-1) { r=(mr_small)mr_mip->ira[mr_mip->rndptr]; r=mr_shiftbits(r,mr_mip->lg2b-32); r+=(mr_small)mr_mip->ira[mr_mip->rndptr+1]; return r; } } else { mr_mip->rndptr++; if (mr_mip->rndptr<NK) return (mr_small)mr_mip->ira[mr_mip->rndptr]; } mr_mip->rndptr=0; for (i=0,k=NK-NJ;i<NK;i++,k++) { /* calculate next NK values */ if (k==NK) k=0; t=mr_mip->ira[k]; pdiff=t - mr_mip->ira[i] - mr_mip->borrow; if (pdiff<t) mr_mip->borrow=0; if (pdiff>t) mr_mip->borrow=1; mr_mip->ira[i]=pdiff; } if (mr_mip->lg2b>32) { /* double up */ r=(mr_small)mr_mip->ira[0]; r=mr_shiftbits(r,mr_mip->lg2b-32); r+=(mr_small)mr_mip->ira[1]; return r; } else return (mr_small)(mr_mip->ira[0]); } void irand(_MIPD_ mr_unsign32 seed) { /* initialise random number system */ int i,in; mr_unsign32 t,m=1L; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif mr_mip->borrow=0L; mr_mip->rndptr=0; mr_mip->ira[0]=seed; for (i=1;i<NK;i++) { /* fill initialisation vector */ in=(NV*i)%NK; mr_mip->ira[in]=m; t=m; m=seed-m; seed=t; } for (i=0;i<1000;i++) brand(_MIPPO_ ); /* "warm-up" & stir the generator */ } #endif mr_small mr_shiftbits(mr_small x,int n) { #ifdef MR_FP int i; mr_small dres; if (n==0) return x; if (n>0) { for (i=0;i<n;i++) x=x+x; return x; } n=-n; for (i=0;i<n;i++) x=MR_DIV(x,2.0); return x; #else if (n==0) return x; if (n>0) x<<=n; else x>>=(-n); return x; #endif } mr_small mr_setbase(_MIPD_ mr_small nb) { /* set base. 
Pack as many digits as * * possible into each computer word */ mr_small temp; #ifdef MR_FP mr_small dres; #endif #ifndef MR_NOFULLWIDTH BOOL fits; int bits; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif fits=FALSE; bits=MIRACL; while (bits>1) { bits/=2; temp=((mr_small)1<<bits); if (temp==nb) { fits=TRUE; break; } if (temp<nb || (bits%2)!=0) break; } if (fits) { mr_mip->apbase=nb; mr_mip->pack=MIRACL/bits; mr_mip->base=0; return 0; } #endif mr_mip->apbase=nb; mr_mip->pack=1; mr_mip->base=nb; #ifdef MR_SIMPLE_BASE return 0; #else if (mr_mip->base==0) return 0; temp=MR_DIV(MAXBASE,nb); while (temp>=nb) { temp=MR_DIV(temp,nb); mr_mip->base*=nb; mr_mip->pack++; } #ifdef MR_FP_ROUNDING mr_mip->inverse_base=mr_invert(mr_mip->base); return mr_mip->inverse_base; #else return 0; #endif #endif } #ifdef MR_FLASH BOOL fit(big x,big y,int f) { /* returns TRUE if x/y would fit flash format of length f */ int n,d; n=(int)(x->len&(MR_OBITS)); d=(int)(y->len&(MR_OBITS)); if (n==1 && x->w[0]==1) n=0; if (d==1 && y->w[0]==1) d=0; if (n+d<=f) return TRUE; return FALSE; } #endif int mr_lent(flash x) { /* return length of big or flash in words */ mr_lentype lx; lx=(x->len&(MR_OBITS)); #ifdef MR_FLASH return (int)((lx&(MR_MSK))+((lx>>(MR_BTS))&(MR_MSK))); #else return (int)lx; #endif } void zero(flash x) { /* set big/flash number to zero */ int i,n; mr_small *g; if (x==NULL) return; #ifdef MR_FLASH n=mr_lent(x); #else n=(x->len&MR_OBITS); #endif g=x->w; for (i=0;i<n;i++) g[i]=0; x->len=0; } void uconvert(_MIPD_ unsigned int n ,big x) { /* convert unsigned integer n to big number format */ int m; #ifdef MR_FP mr_small dres; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif zero(x); if (n==0) return; m=0; #ifndef MR_SIMPLE_BASE if (mr_mip->base==0) { #endif #ifndef MR_NOFULLWIDTH #if MR_IBITS > MIRACL while (n>0) { x->w[m++]=(mr_small)(n%((mr_small)1<<(MIRACL))); n/=((mr_small)1<<(MIRACL)); } #else x->w[m++]=(mr_small)n; #endif #endif #ifndef MR_SIMPLE_BASE } else while 
(n>0) { x->w[m++]=MR_REMAIN((mr_small)n,mr_mip->base); n=(unsigned int)((mr_small)n/mr_mip->base); } #endif x->len=m; } void tconvert(_MIPD_ mr_utype n,big x) { mr_lentype s; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (n==0) {zero(x); return;} s=0; if (n<0) { s=MR_MSBIT; n=(-n); } x->w[0]=n; x->len=1; x->len|=s; } void convert(_MIPD_ int n ,big x) { /* convert signed integer n to big number format */ mr_lentype s; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (n==0) {zero(x); return;} s=0; if (n<0) { s=MR_MSBIT; n=(-n); } uconvert(_MIPP_ (unsigned int)n,x); x->len|=s; } #ifndef MR_STATIC #ifdef mr_dltype void dlconv(_MIPD_ mr_dltype n,big x) { /* convert double length integer to big number format - rarely needed */ int m; mr_lentype s; #ifdef MR_FP mr_small dres; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif zero(x); if (n==0) return; s=0; if (n<0) { s=MR_MSBIT; n=(-n); } m=0; #ifndef MR_SIMPLE_BASE if (mr_mip->base==0) { #endif #ifndef MR_NOFULLWIDTH while (n>0) { x->w[m++]=(mr_small)(n%((mr_dltype)1<<(MIRACL))); n/=((mr_dltype)1<<(MIRACL)); } #endif #ifndef MR_SIMPLE_BASE } else while (n>0) { x->w[m++]=(mr_small)MR_REMAIN(n,mr_mip->base); n/=mr_mip->base; } #endif x->len=(m|s); } #endif void ulgconv(_MIPD_ unsigned long n,big x) { /* convert unsigned long integer to big number format - rarely needed */ int m; #ifdef MR_FP mr_small dres; #endif #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif zero(x); if (n==0) return; m=0; #ifndef MR_SIMPLE_BASE if (mr_mip->base==0) { #endif #ifndef MR_NOFULLWIDTH #if MR_LBITS > MIRACL while (n>0) { x->w[m++]=(mr_small)(n%(1L<<(MIRACL))); n/=(1L<<(MIRACL)); } #else x->w[m++]=(mr_small)n; #endif #endif #ifndef MR_SIMPLE_BASE } else while (n>0) { x->w[m++]=MR_REMAIN(n,mr_mip->base); n=(unsigned long)((mr_small)n/mr_mip->base); } #endif x->len=m; } void lgconv(_MIPD_ long n,big x) { /* convert signed long integer to big number format - rarely needed */ mr_lentype s; #ifdef MR_OS_THREADS 
miracl *mr_mip=get_mip(); #endif if (n==0) {zero(x); return;} s=0; if (n<0) { s=MR_MSBIT; n=(-n); } ulgconv(_MIPP_ (unsigned long)n,x); x->len|=s; } flash mirvar(_MIPD_ int iv) { /* initialize big/flash number */ flash x; int align; char *ptr; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return NULL; MR_IN(23); if (!(mr_mip->active)) { mr_berror(_MIPP_ MR_ERR_NO_MIRSYS); MR_OUT return NULL; } /* OK, now I control alignment.... */ /* Allocate space for big, the length, the pointer, and the array */ /* Do it all in one memory allocation - this is quicker */ /* Ensure that the array has correct alignment */ x=(big)mr_alloc(_MIPP_ mr_size(mr_mip->nib-1),1); if (x==NULL) { MR_OUT return x; } ptr=(char *)&x->w; align=(unsigned long)(ptr+sizeof(mr_small *))%sizeof(mr_small); x->w=(mr_small *)(ptr+sizeof(mr_small *)+sizeof(mr_small)-align); if (iv!=0) convert(_MIPP_ iv,x); MR_OUT return x; } #endif flash mirvar_mem_variable(char *mem,int index,int sz) { flash x; int align; char *ptr; int offset,r; /* alignment */ offset=0; r=(unsigned long)mem%MR_SL; if (r>0) offset=MR_SL-r; x=(big)&mem[offset+mr_size(sz)*index]; ptr=(char *)&x->w; align=(unsigned long)(ptr+sizeof(mr_small *))%sizeof(mr_small); x->w=(mr_small *)(ptr+sizeof(mr_small *)+sizeof(mr_small)-align); return x; } flash mirvar_mem(_MIPD_ char *mem,int index) { /* initialize big/flash number from pre-allocated memory */ #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return NULL; return mirvar_mem_variable(mem,index,mr_mip->nib-1); } void set_user_function(_MIPD_ BOOL (*user)(void)) { #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (mr_mip->ERNUM) return; MR_IN(111) if (!(mr_mip->active)) { mr_berror(_MIPP_ MR_ERR_NO_MIRSYS); MR_OUT return; } mr_mip->user=user; MR_OUT } #ifndef MR_STATIC #ifndef MR_SIMPLE_IO void set_io_buffer_size(_MIPD_ int len) { int i; #ifdef MR_OS_THREADS miracl *mr_mip=get_mip(); #endif if (len<0) return; MR_IN(142) for 
(i=0;i<mr_mip->IOBSIZ;i++) mr_mip->IOBUFF[i]=0; mr_free(mr_mip->IOBUFF); if (len==0) { MR_OUT return; } mr_mip->IOBSIZ=len; mr_mip->IOBUFF=(char *)mr_alloc(_MIPP_ len+1,1); mr_mip->IOBUFF[0]='\0'; MR_OUT } #endif #endif /* Initialise a big from ROM given its fixed length */ BOOL init_big_from_rom(big x,int len,const mr_small *rom,int romsize,int *romptr) { int i; zero(x); x->len=len; for (i=0;i<len;i++) { if (*romptr>=romsize) return FALSE; #ifdef MR_AVR x->w[i]=pgm_read_byte_near(&rom[*romptr]); #else x->w[i]=rom[*romptr]; #endif (*romptr)++; } mr_lzero(x); return TRUE; } /* Initialise an elliptic curve point from ROM */ BOOL init_point_from_rom(epoint *P,int len,const mr_small *rom,int romsize,int *romptr) { if (!init_big_from_rom(P->X,len,rom,romsize,romptr)) return FALSE; if (!init_big_from_rom(P->Y,len,rom,romsize,romptr)) return FALSE; P->marker=MR_EPOINT_NORMALIZED; return TRUE; } #ifdef MR_GENERIC_AND_STATIC miracl *mirsys(miracl *mr_mip,int nd,mr_small nb) #else miracl *mirsys(int nd,mr_small nb) #endif { /* Initialize MIRACL system to * * use numbers to base nb, and * * nd digits or (-nd) bytes long */ /* In these cases mr_mip is passed as the first parameter */ #ifdef MR_GENERIC_AND_STATIC return mirsys_basic(mr_mip,nd,nb); #endif #ifdef MR_GENERIC_MT #ifndef MR_STATIC miracl *mr_mip=mr_first_alloc(); return mirsys_basic(mr_mip,nd,nb); #endif #endif /* In these cases mr_mip is a "global" pointer and the mip itself is allocated from the heap. 
In fact mr_mip (and mip) may be thread specific if some multi-threading scheme is implemented */ #ifndef MR_STATIC #ifdef MR_WINDOWS_MT miracl *mr_mip=mr_first_alloc(); TlsSetValue(mr_key,mr_mip); #endif #ifdef MR_UNIX_MT miracl *mr_mip=mr_first_alloc(); pthread_setspecific(mr_key,mr_mip); #endif #ifdef MR_OPENMP_MT mr_mip=mr_first_alloc(); #endif #ifndef MR_WINDOWS_MT #ifndef MR_UNIX_MT #ifndef MR_OPENMP_MT mr_mip=mr_first_alloc(); #endif #endif #endif #endif #ifndef MR_GENERIC_MT mr_mip=get_mip(); #endif return mirsys_basic(mr_mip,nd,nb); } miracl *mirsys_basic(miracl *mr_mip,int nd,mr_small nb) { #ifndef MR_NO_RAND int i; #endif mr_small b,nw; #ifdef MR_FP mr_small dres; #endif if (mr_mip==NULL) return NULL; #ifndef MR_STRIPPED_DOWN mr_mip->depth=0; mr_mip->trace[0]=0; mr_mip->depth++; mr_mip->trace[mr_mip->depth]=29; #endif /* digest hardware configuration */ #ifdef MR_NO_STANDARD_IO mr_mip->ERCON=TRUE; #else mr_mip->ERCON=FALSE; #endif #ifndef MR_STATIC mr_mip->logN=0; mr_mip->degree=0; mr_mip->chin.NP=0; #endif mr_mip->user=NULL; mr_mip->same=FALSE; mr_mip->first_one=FALSE; mr_mip->debug=FALSE; mr_mip->AA=0; #ifndef MR_AFFINE_ONLY mr_mip->coord=MR_NOTSET; #endif #ifdef MR_NOFULLWIDTH if (nb==0) { mr_berror(_MIPP_ MR_ERR_BAD_BASE); MR_OUT return mr_mip; } #endif #ifndef MR_FP #ifdef mr_dltype #ifndef MR_NOFULLWIDTH if (sizeof(mr_dltype)<2*sizeof(mr_utype)) { /* double length type, isn't */ mr_berror(_MIPP_ MR_ERR_NOT_DOUBLE_LEN); MR_OUT return mr_mip; } #endif #endif #endif if (nb==1 || nb>MAXBASE) { mr_berror(_MIPP_ MR_ERR_BAD_BASE); MR_OUT return mr_mip; } #ifdef MR_FP_ROUNDING if (mr_setbase(_MIPP_ nb)==0) { /* unable in fact to control FP rounding */ mr_berror(_MIPP_ MR_ERR_NO_ROUNDING); MR_OUT return mr_mip; } #else mr_setbase(_MIPP_ nb); #endif b=mr_mip->base; #ifdef MR_SIMPLE_BASE if (b!=0) { mr_berror(_MIPP_ MR_ERR_BAD_BASE); MR_OUT return mr_mip; } #endif mr_mip->lg2b=0; mr_mip->base2=1; #ifndef MR_SIMPLE_BASE if (b==0) { #endif mr_mip->lg2b=MIRACL; 
mr_mip->base2=0; #ifndef MR_SIMPLE_BASE } else while (b>1) { b=MR_DIV(b,2); mr_mip->lg2b++; mr_mip->base2*=2; } #endif #ifdef MR_ALWAYS_BINARY if (mr_mip->base!=mr_mip->base2) { mr_berror(_MIPP_ MR_ERR_NOT_BINARY); MR_OUT return mr_mip; } #endif /* calculate total space for bigs */ /* big -> |int len|small *ptr| alignment space | size in words +1| alignment up to multiple of 4 | */ if (nd>0) nw=MR_ROUNDUP(nd,mr_mip->pack); else nw=MR_ROUNDUP(8*(-nd),mr_mip->lg2b); if (nw<1) nw=1; mr_mip->nib=(int)(nw+1); /* add one extra word for small overflows */ #ifdef MR_STATIC if (nw>MR_STATIC) { mr_berror(_MIPP_ MR_ERR_TOO_BIG); MR_OUT return mr_mip; } #endif /* mr_mip->nib=(int)(nw+1); add one extra word for small overflows */ #ifdef MR_FLASH mr_mip->workprec=mr_mip->nib; mr_mip->stprec=mr_mip->nib; while (mr_mip->stprec>2 && mr_mip->stprec>MR_FLASH/mr_mip->lg2b) mr_mip->stprec=(mr_mip->stprec+1)/2; if (mr_mip->stprec<2) mr_mip->stprec=2; #endif #ifndef MR_DOUBLE_BIG mr_mip->check=ON; #else mr_mip->check=OFF; #endif #ifndef MR_SIMPLE_BASE #ifndef MR_SIMPLE_IO mr_mip->IOBASE=10; /* defaults */ #endif #endif mr_mip->ERNUM=0; mr_mip->NTRY=6; mr_mip->MONTY=ON; #ifdef MR_FLASH mr_mip->EXACT=TRUE; mr_mip->RPOINT=OFF; #endif #ifndef MR_STRIPPED_DOWN mr_mip->TRACER=OFF; #endif #ifndef MR_SIMPLE_IO mr_mip->INPLEN=0; mr_mip->IOBSIZ=MR_DEFAULT_BUFFER_SIZE; #endif #ifdef MR_STATIC mr_mip->PRIMES=mr_small_primes; #else mr_mip->PRIMES=NULL; #ifndef MR_SIMPLE_IO mr_mip->IOBUFF=(char *)mr_alloc(_MIPP_ MR_DEFAULT_BUFFER_SIZE+1,1); #endif #endif #ifndef MR_SIMPLE_IO mr_mip->IOBUFF[0]='\0'; #endif mr_mip->qnr=0; mr_mip->cnr=0; mr_mip->TWIST=0; mr_mip->pmod8=0; mr_mip->pmod9=0; /* quick start for rng. irand(.) should be called first before serious use.. 
*/
/* --- tail of the miracl instance initialisation (function opens before this
   chunk): seeds the RNG state, validates the requested precision, grabs the
   workspace and carves it into the w0..w15 scratch bigs --- */
#ifndef MR_NO_RAND
    /* seed the random number state; ira[] filled from two fixed constants */
    mr_mip->ira[0]=0x55555555;
    mr_mip->ira[1]=0x12345678;
    for (i=2;i<NK;i++)
        mr_mip->ira[i]=mr_mip->ira[i-1]+mr_mip->ira[i-2]+0x1379BDF1;
    mr_mip->rndptr=NK;
    mr_mip->borrow=0;
#endif
    mr_mip->nib=2*mr_mip->nib+1;
#ifdef MR_FLASH
    if (mr_mip->nib!=(mr_mip->nib&(MR_MSK)))
#else
    if (mr_mip->nib!=(int)(mr_mip->nib&(MR_OBITS)))
#endif
    { /* requested size does not fit in the length field - report and bail out */
        mr_berror(_MIPP_ MR_ERR_TOO_BIG);
        mr_mip->nib=(mr_mip->nib-1)/2;
        MR_OUT
        return mr_mip;
    }
#ifndef MR_STATIC
    mr_mip->workspace=(char *)memalloc(_MIPP_ MR_SPACES); /* grab workspace */
#else
    memset(mr_mip->workspace,0,MR_BIG_RESERVE(MR_SPACES));
#endif
    mr_mip->M=0;
    mr_mip->fin=FALSE;
    mr_mip->fout=FALSE;
    mr_mip->active=ON;
    mr_mip->nib=(mr_mip->nib-1)/2;
/* allocate memory for workspace variables; the second index is the slot
   offset into the workspace, double/quad-length variables skip slots */
#ifndef MR_DOUBLE_BIG
    mr_mip->w0=mirvar_mem(_MIPP_ mr_mip->workspace,0);    /* double length */
    mr_mip->w1=mirvar_mem(_MIPP_ mr_mip->workspace,2);
    mr_mip->w2=mirvar_mem(_MIPP_ mr_mip->workspace,3);
    mr_mip->w3=mirvar_mem(_MIPP_ mr_mip->workspace,4);
    mr_mip->w4=mirvar_mem(_MIPP_ mr_mip->workspace,5);
    mr_mip->w5=mirvar_mem(_MIPP_ mr_mip->workspace,6);    /* double length */
    mr_mip->w6=mirvar_mem(_MIPP_ mr_mip->workspace,8);    /* double length */
    mr_mip->w7=mirvar_mem(_MIPP_ mr_mip->workspace,10);   /* double length */
    mr_mip->w8=mirvar_mem(_MIPP_ mr_mip->workspace,12);
    mr_mip->w9=mirvar_mem(_MIPP_ mr_mip->workspace,13);
    mr_mip->w10=mirvar_mem(_MIPP_ mr_mip->workspace,14);
    mr_mip->w11=mirvar_mem(_MIPP_ mr_mip->workspace,15);
    mr_mip->w12=mirvar_mem(_MIPP_ mr_mip->workspace,16);
    mr_mip->w13=mirvar_mem(_MIPP_ mr_mip->workspace,17);
    mr_mip->w14=mirvar_mem(_MIPP_ mr_mip->workspace,18);
    mr_mip->w15=mirvar_mem(_MIPP_ mr_mip->workspace,19);
    mr_mip->sru=mirvar_mem(_MIPP_ mr_mip->workspace,20);
    mr_mip->modulus=mirvar_mem(_MIPP_ mr_mip->workspace,21);
    mr_mip->pR=mirvar_mem(_MIPP_ mr_mip->workspace,22);   /* double length */
    mr_mip->A=mirvar_mem(_MIPP_ mr_mip->workspace,24);
    mr_mip->B=mirvar_mem(_MIPP_ mr_mip->workspace,25);
    mr_mip->one=mirvar_mem(_MIPP_ mr_mip->workspace,26);
#ifdef MR_KCM
    mr_mip->big_ndash=mirvar_mem(_MIPP_ mr_mip->workspace,27);
    mr_mip->ws=mirvar_mem(_MIPP_ mr_mip->workspace,28);
    mr_mip->wt=mirvar_mem(_MIPP_ mr_mip->workspace,29);   /* double length */
#endif
#ifdef MR_FLASH
#ifdef MR_KCM
    mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,31);
#else
    mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,27);
#endif
#endif
#else
    /* w0-w7 are double normal length */
    mr_mip->w0=mirvar_mem(_MIPP_ mr_mip->workspace,0);    /* quad length */
    mr_mip->w1=mirvar_mem(_MIPP_ mr_mip->workspace,4);    /* double length */
    mr_mip->w2=mirvar_mem(_MIPP_ mr_mip->workspace,6);
    mr_mip->w3=mirvar_mem(_MIPP_ mr_mip->workspace,8);
    mr_mip->w4=mirvar_mem(_MIPP_ mr_mip->workspace,10);
    mr_mip->w5=mirvar_mem(_MIPP_ mr_mip->workspace,12);   /* quad length */
    mr_mip->w6=mirvar_mem(_MIPP_ mr_mip->workspace,16);   /* quad length */
    mr_mip->w7=mirvar_mem(_MIPP_ mr_mip->workspace,20);   /* quad length */
    mr_mip->w8=mirvar_mem(_MIPP_ mr_mip->workspace,24);
    mr_mip->w9=mirvar_mem(_MIPP_ mr_mip->workspace,25);
    mr_mip->w10=mirvar_mem(_MIPP_ mr_mip->workspace,26);
    mr_mip->w11=mirvar_mem(_MIPP_ mr_mip->workspace,27);
    mr_mip->w12=mirvar_mem(_MIPP_ mr_mip->workspace,28);
    mr_mip->w13=mirvar_mem(_MIPP_ mr_mip->workspace,29);
    mr_mip->w14=mirvar_mem(_MIPP_ mr_mip->workspace,30);
    mr_mip->w15=mirvar_mem(_MIPP_ mr_mip->workspace,31);
    mr_mip->sru=mirvar_mem(_MIPP_ mr_mip->workspace,32);
    mr_mip->modulus=mirvar_mem(_MIPP_ mr_mip->workspace,33);
    mr_mip->pR=mirvar_mem(_MIPP_ mr_mip->workspace,34);   /* double length */
    mr_mip->A=mirvar_mem(_MIPP_ mr_mip->workspace,36);
    mr_mip->B=mirvar_mem(_MIPP_ mr_mip->workspace,37);
    mr_mip->one=mirvar_mem(_MIPP_ mr_mip->workspace,38);
#ifdef MR_KCM
    mr_mip->big_ndash=mirvar_mem(_MIPP_ mr_mip->workspace,39);
    mr_mip->ws=mirvar_mem(_MIPP_ mr_mip->workspace,40);
    mr_mip->wt=mirvar_mem(_MIPP_ mr_mip->workspace,41);   /* double length */
#endif
#ifdef MR_FLASH
#ifdef MR_KCM
    mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,43);
#else
    mr_mip->pi=mirvar_mem(_MIPP_ mr_mip->workspace,39);
#endif
#endif
#endif
    MR_OUT
    return mr_mip;
}

#ifndef MR_STATIC
/* allocate space for a number of bigs from the heap */
void *memalloc(_MIPD_ int num)
{
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    return mr_alloc(_MIPP_ mr_big_reserve(num,mr_mip->nib-1),1);
}
#endif

/* zero out (and, for heap builds, free) a block of bigs previously
   obtained from memalloc; zeroing first avoids leaving key material behind */
void memkill(_MIPD_ char *mem,int len)
{
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mem==NULL) return;
    memset(mem,0,mr_big_reserve(len,mr_mip->nib-1));
#ifndef MR_STATIC
    mr_free(mem);
#endif
}

#ifndef MR_STATIC
void mirkill(big x)
{ /* kill a big/flash variable, that is set it to zero and free its memory */
    if (x==NULL) return;
    zero(x);
    mr_free(x);
}
#endif

void mirexit(_MIPDO_ )
{ /* clean up after miracl: scrub RNG state and I/O buffers, release the
     workspace, the prime table and (heap builds) the instance itself */
    int i;
#ifdef MR_WINDOWS_MT
    miracl *mr_mip=get_mip();
#endif
#ifdef MR_UNIX_MT
    miracl *mr_mip=get_mip();
#endif
#ifdef MR_OPENMP_MT
    miracl *mr_mip=get_mip();
#endif
    mr_mip->ERCON=FALSE;
    mr_mip->active=OFF;
    memkill(_MIPP_ mr_mip->workspace,MR_SPACES);
#ifndef MR_NO_RAND
    for (i=0;i<NK;i++) mr_mip->ira[i]=0L;
#endif
#ifndef MR_STATIC
#ifndef MR_SIMPLE_IO
    set_io_buffer_size(_MIPP_ 0);
#endif
    if (mr_mip->PRIMES!=NULL) mr_free(mr_mip->PRIMES);
#else
#ifndef MR_SIMPLE_IO
    for (i=0;i<=MR_DEFAULT_BUFFER_SIZE;i++) mr_mip->IOBUFF[i]=0;
#endif
#endif
#ifndef MR_STATIC
    mr_free(mr_mip);
#ifdef MR_WINDOWS_MT
    TlsSetValue(mr_key, NULL); /* Thank you Thales */
#endif
#endif
#ifndef MR_GENERIC_MT
#ifndef MR_WINDOWS_MT
#ifndef MR_UNIX_MT
#ifndef MR_STATIC
    mr_mip=NULL;
#endif
#endif
#endif
#endif
#ifdef MR_OPENMP_MT
    mr_mip=NULL;
#endif
}

int exsign(flash x)
{ /* extract sign of big/flash number */
    if ((x->len&(MR_MSBIT))==0) return PLUS;
    else return MINUS;
}

void insign(int s,flash x)
{ /* assert sign of big/flash number; zero stays unsigned */
    if (x->len==0) return;
    if (s<0) x->len|=MR_MSBIT;
    else x->len&=MR_OBITS;
}

void mr_lzero(big x)
{ /* strip leading zeros from big number, preserving the sign bit */
    mr_lentype s;
    int m;
    s=(x->len&(MR_MSBIT));
    m=(int)(x->len&(MR_OBITS));
    while (m>0 && x->w[m-1]==0) m--;
    x->len=m;
    if (m>0) x->len|=s;
}

#ifndef MR_SIMPLE_IO
int getdig(_MIPD_ big x,int i)
{ /* extract a packed digit; i is 1-based, digits packed per word */
    int k;
    mr_small n;
#ifdef MR_FP
    mr_small dres;
#endif
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    i--;
    n=x->w[i/mr_mip->pack];
    if (mr_mip->pack==1) return (int)n;
    k=i%mr_mip->pack;
    for (i=1;i<=k;i++) n=MR_DIV(n,mr_mip->apbase);
    return (int)MR_REMAIN(n,mr_mip->apbase);
}

int numdig(_MIPD_ big x)
{ /* returns number of digits in x */
    int nd;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (x->len==0) return 0;
    nd=(int)(x->len&(MR_OBITS))*mr_mip->pack;
    while (getdig(_MIPP_ x,nd)==0) nd--;
    return nd;
}

void putdig(_MIPD_ int n,big x,int i)
{ /* insert a digit into a packed word: subtract the old digit m and add the
     new one p, both scaled to the digit's position inside the word */
    int j,k,lx;
    mr_small m,p;
    mr_lentype s;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return;
    MR_IN(26)
    s=(x->len&(MR_MSBIT));
    lx=(int)(x->len&(MR_OBITS));
    m=getdig(_MIPP_ x,i);
    p=n;
    i--;
    j=i/mr_mip->pack;
    k=i%mr_mip->pack;
    for (i=1;i<=k;i++)
    {
        m*=mr_mip->apbase;
        p*=mr_mip->apbase;
    }
    if (j>=mr_mip->nib && (mr_mip->check || j>=2*mr_mip->nib))
    {
        mr_berror(_MIPP_ MR_ERR_OVERFLOW);
        MR_OUT
        return;
    }
    x->w[j]=(x->w[j]-m)+p;
    if (j>=lx) x->len=((j+1)|s);
    mr_lzero(x);
    MR_OUT
}
#endif

#ifndef MR_FP
void mr_and(big x,big y,big z)
{ /* z= bitwise logical AND of x and y */
    int i,nx,ny,nz,nr;
    if (x==y)
    { /* x AND x == x */
        copy(x,z);
        return;
    }
#ifdef MR_FLASH
    nx=mr_lent(x);
    ny=mr_lent(y);
    nz=mr_lent(z);
#else
    ny=(y->len&(MR_OBITS));
    nx=(x->len&(MR_OBITS));
    nz=(z->len&(MR_OBITS));
#endif
    if (ny<nx) nr=ny;
    else nr=nx;
    for (i=0;i<nr;i++) z->w[i]=x->w[i]&y->w[i];
    for (i=nr;i<nz;i++) z->w[i]=0;
    z->len=nr;
    mr_lzero(z);
}

void mr_xor(big x,big y,big z)
{ /* z= bitwise logical XOR of x and y.
     NOTE(review): the x==y fast path copies x into z, but x XOR x is zero
     (the element-wise loop below would indeed yield zero for x==y) -
     confirm against upstream MIRACL before relying on aliased arguments */
    int i,nx,ny,nz,nr;
    if (x==y)
    {
        copy(x,z);
        return;
    }
#ifdef MR_FLASH
    nx=mr_lent(x);
    ny=mr_lent(y);
    nz=mr_lent(z);
#else
    ny=(y->len&(MR_OBITS));
    nx=(x->len&(MR_OBITS));
    nz=(z->len&(MR_OBITS));
#endif
    if (ny<nx) nr=nx;
    else nr=ny;
    for (i=0;i<nr;i++) z->w[i]=x->w[i]^y->w[i];
    for (i=nr;i<nz;i++) z->w[i]=0;
    z->len=nr;
    mr_lzero(z);
}
#endif

void copy(flash x,flash y)
{ /* copy x to y: y=x; NULL source zeroes the destination */
    int i,nx,ny;
    mr_small *gx,*gy;
    if (x==y || y==NULL) return;
    if (x==NULL)
    {
        zero(y);
        return;
    }
#ifdef MR_FLASH
    ny=mr_lent(y);
    nx=mr_lent(x);
#else
    ny=(y->len&(MR_OBITS));
    nx=(x->len&(MR_OBITS));
#endif
    gx=x->w;
    gy=y->w;
    for (i=nx;i<ny;i++) gy[i]=0;
    for (i=0;i<nx;i++) gy[i]=gx[i];
    y->len=x->len;
}

void negify(flash x,flash y)
{ /* negate a big/flash variable: y=-x */
    copy(x,y);
    if (y->len!=0) y->len^=MR_MSBIT;
}

void absol(flash x,flash y)
{ /* y=abs(x) */
    copy(x,y);
    y->len&=MR_OBITS;
}

BOOL mr_notint(flash x)
{ /* returns TRUE if x is Flash, i.e. has a non-zero denominator field */
#ifdef MR_FLASH
    if ((((x->len&(MR_OBITS))>>(MR_BTS))&(MR_MSK))!=0) return TRUE;
#endif
    return FALSE;
}

void mr_shift(_MIPD_ big x,int n,big w)
{ /* set w=x.(mr_base^n) by shifting whole words left (n>0) or right (n<0) */
    mr_lentype s;
    int i,bl;
    mr_small *gw=w->w;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return;
    copy(x,w);
    if (w->len==0 || n==0) return;
    MR_IN(33)
    if (mr_notint(w)) mr_berror(_MIPP_ MR_ERR_INT_OP);
    s=(w->len&(MR_MSBIT));
    bl=(int)(w->len&(MR_OBITS))+n;
    if (bl<=0)
    { /* shifted completely out - result is zero */
        zero(w);
        MR_OUT
        return;
    }
    if (bl>mr_mip->nib && mr_mip->check) mr_berror(_MIPP_ MR_ERR_OVERFLOW);
    if (mr_mip->ERNUM)
    {
        MR_OUT
        return;
    }
    if (n>0)
    {
        for (i=bl-1;i>=n;i--) gw[i]=gw[i-n];
        for (i=0;i<n;i++) gw[i]=0;
    }
    else
    {
        n=(-n);
        for (i=0;i<bl;i++) gw[i]=gw[i+n];
        for (i=0;i<n;i++) gw[bl+i]=0;
    }
    w->len=(bl|s);
    MR_OUT
}

int size(big x)
{ /* get size of big number; convert to
     integer - if possible, else return +/- MR_TOOBIG */
    int n,m;
    mr_lentype s;
    if (x==NULL) return 0;
    s=(x->len&MR_MSBIT);
    m=(int)(x->len&MR_OBITS);
    if (m==0) return 0;
    if (m==1 && x->w[0]<(mr_small)MR_TOOBIG) n=(int)x->w[0];
    else n=MR_TOOBIG;
    if (s==MR_MSBIT) return (-n);
    return n;
}

int mr_compare(big x,big y)
{ /* compare x and y: =1 if x>y
     =-1 if x<y
     =0 if x=y */
    int m,n,sig;
    mr_lentype sx,sy;
    if (x==y) return 0;
    sx=(x->len&MR_MSBIT);
    sy=(y->len&MR_MSBIT);
    if (sx==0) sig=PLUS;
    else sig=MINUS;
    if (sx!=sy) return sig;
    m=(int)(x->len&MR_OBITS);
    n=(int)(y->len&MR_OBITS);
    if (m>n) return sig;
    if (m<n) return -sig;
    while (m>0)
    { /* check digit by digit */
        m--;
        if (x->w[m]>y->w[m]) return sig;
        if (x->w[m]<y->w[m]) return -sig;
    }
    return 0;
}

#ifdef MR_FLASH
void fpack(_MIPD_ big n,big d,flash x)
{ /* create floating-slash number x=n/d from
     big integer numerator and denominator */
    mr_lentype s;
    int i,ld,ln;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return;
    MR_IN(31)
    ld=(int)(d->len&MR_OBITS);
    if (ld==0) mr_berror(_MIPP_ MR_ERR_FLASH_OVERFLOW);
    if (ld==1 && d->w[0]==1) ld=0;   /* denominator of 1 is stored implicitly */
    if (x==d) mr_berror(_MIPP_ MR_ERR_BAD_PARAMETERS);
    if (mr_notint(n) || mr_notint(d)) mr_berror(_MIPP_ MR_ERR_INT_OP);
    s=(n->len&MR_MSBIT);
    ln=(int)(n->len&MR_OBITS);
    if (ln==1 && n->w[0]==1) ln=0;
    if ((ld+ln>mr_mip->nib) && (mr_mip->check || ld+ln>2*mr_mip->nib))
        mr_berror(_MIPP_ MR_ERR_FLASH_OVERFLOW);
    if (mr_mip->ERNUM)
    {
        MR_OUT
        return;
    }
    copy(n,x);
    if (n->len==0)
    {
        MR_OUT
        return;
    }
    s^=(d->len&MR_MSBIT);   /* result sign = XOR of the two signs */
    if (ld==0)
    {
        if (x->len!=0) x->len|=s;
        MR_OUT
        return;
    }
    for (i=0;i<ld;i++) x->w[ln+i]=d->w[i];
    x->len=(s|(ln+((mr_lentype)ld<<MR_BTS)));
    MR_OUT
}

void numer(_MIPD_ flash x,big y)
{ /* extract numerator of x */
    int i,ln,ld;
    mr_lentype s,ly;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return;
    if (mr_notint(x))
    {
        s=(x->len&MR_MSBIT);
        ly=(x->len&MR_OBITS);
        ln=(int)(ly&MR_MSK);
        if (ln==0)
        { /* implicit numerator of +/-1 */
            if(s==MR_MSBIT) convert(_MIPP_ (-1),y);
            else convert(_MIPP_ 1,y);
            return;
        }
        ld=(int)((ly>>MR_BTS)&MR_MSK);
        if (x!=y)
        {
            for (i=0;i<ln;i++) y->w[i]=x->w[i];
            for (i=ln;i<mr_lent(y);i++) y->w[i]=0;
        }
        else for (i=0;i<ld;i++) y->w[ln+i]=0;
        y->len=(ln|s);
    }
    else copy(x,y);
}

void denom(_MIPD_ flash x,big y)
{ /* extract denominator of x; plain integers have denominator 1 */
    int i,ln,ld;
    mr_lentype ly;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return;
    if (!mr_notint(x))
    {
        convert(_MIPP_ 1,y);
        return;
    }
    ly=(x->len&MR_OBITS);
    ln=(int)(ly&MR_MSK);
    ld=(int)((ly>>MR_BTS)&MR_MSK);
    for (i=0;i<ld;i++) y->w[i]=x->w[ln+i];
    if (x==y) for (i=0;i<ln;i++) y->w[ld+i]=0;
    else for (i=ld;i<mr_lent(y);i++) y->w[i]=0;
    y->len=ld;
}
#endif

unsigned int igcd(unsigned int x,unsigned int y)
{ /* integer GCD, returns GCD of x and y (Euclid's algorithm) */
    unsigned int r;
    if (y==0) return x;
    while ((r=x%y)!=0) x=y,y=r;
    return y;
}

unsigned long lgcd(unsigned long x,unsigned long y)
{ /* long GCD, returns GCD of x and y */
    unsigned long r;
    if (y==0) return x;
    while ((r=x%y)!=0) x=y,y=r;
    return y;
}

unsigned int isqrt(unsigned int num,unsigned int guess)
{ /* square root of an integer; iterates from the caller-supplied guess */
    unsigned int sqr;
    unsigned int oldguess=guess;
    if (num==0) return 0;
    if (num<4) return 1;
    for (;;)
    { /* Newtons iteration */
        /* sqr=guess+(((num/guess)-guess)/2); */
        sqr=((num/guess)+guess)/2;
        if (sqr==guess || sqr==oldguess)
        { /* converged (or oscillating between two values) - round down */
            if (sqr*sqr>num) sqr--;
            return sqr;
        }
        oldguess=guess;
        guess=sqr;
    }
}

unsigned long mr_lsqrt(unsigned long num,unsigned long guess)
{ /* square root of a long; same Newton iteration as isqrt */
    unsigned long sqr;
    unsigned long oldguess=guess;
    if (num==0) return 0;
    if (num<4) return 1;
    for (;;)
    { /* Newtons iteration */
        /* sqr=guess+(((num/guess)-guess)/2); */
        sqr=((num/guess)+guess)/2;
        if (sqr==guess || sqr==oldguess)
        {
            if (sqr*sqr>num) sqr--;
            return sqr;
        }
        oldguess=guess;
        guess=sqr;
    }
}

mr_small sgcd(mr_small x,mr_small y)
{ /* integer GCD, returns GCD of x and y */
    mr_small r;
#ifdef MR_FP
    mr_small dres;
#endif
    if (y==(mr_small)0) return x;
    while ((r=MR_REMAIN(x,y))!=(mr_small)0) x=y,y=r;
    return y;
}

/* routines to support sliding-windows exponentiation
   in various contexts */

int mr_testbit(_MIPD_ big x,int n)
{ /* return value of n-th bit of big */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
#ifdef MR_FP
    mr_small m,a,dres;
    m=mr_shiftbits((mr_small)1,n%mr_mip->lg2b);
    a=x->w[n/mr_mip->lg2b];
    a=MR_DIV(a,m);
    if ((MR_DIV(a,2.0)*2.0) != a) return 1;
#else
    if ((x->w[n/mr_mip->lg2b] & ((mr_small)1<<(n%mr_mip->lg2b))) >0) return 1;
#endif
    return 0;
}

void mr_addbit(_MIPD_ big x,int n)
{ /* add 2^n to positive x - where you know that bit is zero. Use with care! */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    mr_lentype m=n/mr_mip->lg2b;
    x->w[m]+=mr_shiftbits((mr_small)1,n%mr_mip->lg2b);
    if (x->len<m+1) x->len=m+1;
}

int recode(_MIPD_ big e,int t,int w,int i)
{ /* recode exponent for Comb method: gathers w bits of e, spaced t apart */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    int j,r;
    r=0;
    for (j=w-1;j>=0;j--)
    {
        r<<=1;
        r|=mr_testbit(_MIPP_ e,i+j*t);
    }
    return r;
}

int mr_window(_MIPD_ big x,int i,int *nbs,int * nzs,int window_size)
{ /* returns sliding window value, max. of 5 bits,
     (Note from version 5.23 this can be changed by
     setting parameter window_size. This can be
     a useful space-saver) starting at i-th bit of big x.
     nbs is number of bits processed, nzs is the number of
     additional trailing zeros detected. Returns valid bit
     pattern 1x..x1 with no two adjacent 0's. So 10101
     will return 21 with nbs=5, nzs=0. 11001 will return 3,
     with nbs=2, nzs=2, having stopped after the first 11.. */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    int j,r,w;
    w=window_size;
    /* check for leading 0 bit */
    *nbs=1;
    *nzs=0;
    if (!mr_testbit(_MIPP_ x,i)) return 0;
    /* adjust window size if not enough bits left */
    if (i-w+1<0) w=i+1;
    r=1;
    for (j=i-1;j>i-w;j--)
    { /* accumulate bits. Abort if two 0's in a row */
        (*nbs)++;
        r*=2;
        if (mr_testbit(_MIPP_ x,j)) r+=1;
        if (r%4==0)
        { /* oops - too many zeros - shorten window */
            r/=4;
            *nbs-=2;
            *nzs=2;
            break;
        }
    }
    if (r%2==0)
    { /* remove trailing 0 */
        r/=2;
        *nzs=1;
        (*nbs)--;
    }
    return r;
}

int mr_window2(_MIPD_ big x,big y,int i,int *nbs,int *nzs)
{ /* two bit window for double exponentiation */
    int r,w;
    BOOL a,b,c,d;
    w=2;
    *nbs=1;
    *nzs=0;
    /* check for two leading 0's */
    a=mr_testbit(_MIPP_ x,i);
    b=mr_testbit(_MIPP_ y,i);
    if (!a && !b) return 0;
    if (i<1) w=1;   /* only one bit left */
    if (a)
    {
        if (b) r=3;
        else r=2;
    }
    else r=1;
    if (w==1) return r;
    c=mr_testbit(_MIPP_ x,i-1);
    d=mr_testbit(_MIPP_ y,i-1);
    if (!c && !d)
    {
        *nzs=1;
        return r;
    }
    *nbs=2;
    r*=4;
    if (c)
    {
        if (d) r+=3;
        else r+=2;
    }
    else r+=1;
    return r;
}

int mr_naf_window(_MIPD_ big x,big x3,int i,int *nbs,int *nzs,int store)
{ /* returns sliding window value, using fractional windows
     where "store" precomputed values are precalulated and
     stored. Scanning starts at the i-th bit of x. nbs is
     the number of bits processed. nzs is number of
     additional trailing zeros detected. x and x3 (which is
     3*x) are combined to produce the NAF (non-adjacent
     form). So if x=11011(27) and x3 is 1010001, the LSB is
     ignored and the value 100T0T (32-4-1=27) processed,
     where T is -1. Note x.P = (3x-x)/2.P. This value will
     return +7, with nbs=4 and nzs=1, having stopped after
     the first 4 bits. If it goes too far, it must backtrack
     Note in an NAF non-zero elements are never side by side,
     so 10T10T won't happen.
     NOTE: return value n zero or
     odd, -21 <= n <= +21 */
    int nb,j,r,biggest;
    /* get first bit */
    nb=mr_testbit(_MIPP_ x3,i)-mr_testbit(_MIPP_ x,i);
    *nbs=1;
    *nzs=0;
    if (nb==0) return 0;
    if (i==0) return nb;
    biggest=2*store-1;
    if (nb>0) r=1;
    else r=(-1);
    for (j=i-1;j>0;j--)
    {
        (*nbs)++;
        r*=2;
        nb=mr_testbit(_MIPP_ x3,j)-mr_testbit(_MIPP_ x,j);
        if (nb>0) r+=1;
        if (nb<0) r-=1;
        if (abs(r)>biggest) break;
    }
    if (r%2!=0 && j!=0)
    { /* backtrack */
        if (nb>0) r=(r-1)/2;
        if (nb<0) r=(r+1)/2;
        (*nbs)--;
    }
    while (r%2==0)
    { /* remove trailing zeros */
        r/=2;
        (*nzs)++;
        (*nbs)--;
    }
    return r;
}

/* Some general purpose elliptic curve stuff */

BOOL point_at_infinity(epoint *p)
{
    if (p==NULL) return FALSE;
    if (p->marker==MR_EPOINT_INFINITY) return TRUE;
    return FALSE;
}

#ifndef MR_STATIC
epoint* epoint_init(_MIPDO_ )
{ /* initialise epoint to general point at infinity. */
    epoint *p;
    char *ptr;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return NULL;
    MR_IN(96)
    /* Create space for whole structure in one heap access */
    p=(epoint *)mr_alloc(_MIPP_ mr_esize(mr_mip->nib-1),1);
    ptr=(char *)p+sizeof(epoint);
    p->X=mirvar_mem(_MIPP_ ptr,0);
    p->Y=mirvar_mem(_MIPP_ ptr,1);
#ifndef MR_AFFINE_ONLY
    p->Z=mirvar_mem(_MIPP_ ptr,2);
#endif
    p->marker=MR_EPOINT_INFINITY;
    MR_OUT
    return p;
}
#endif

epoint* epoint_init_mem_variable(_MIPD_ char *mem,int index,int sz)
{ /* initialise the index-th epoint inside caller-supplied memory, aligning
     the structure to an MR_SL boundary first.
     NOTE(review): the (unsigned long) cast of the pointer may truncate on
     platforms where pointers are wider than long (e.g. Win64) - confirm */
    epoint *p;
    char *ptr;
    int offset,r;
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    offset=0;
    r=(unsigned long)mem%MR_SL;
    if (r>0) offset=MR_SL-r;
#ifndef MR_AFFINE_ONLY
    if (mr_mip->coord==MR_AFFINE) p=(epoint *)&mem[offset+index*mr_esize_a(sz)];
    else
#endif
    p=(epoint *)&mem[offset+index*mr_esize(sz)];
    ptr=(char *)p+sizeof(epoint);
    p->X=mirvar_mem_variable(ptr,0,sz);
    p->Y=mirvar_mem_variable(ptr,1,sz);
#ifndef MR_AFFINE_ONLY
    if (mr_mip->coord!=MR_AFFINE) p->Z=mirvar_mem_variable(ptr,2,sz);
#endif
    p->marker=MR_EPOINT_INFINITY;
    return p;
}

epoint* epoint_init_mem(_MIPD_ char *mem,int index)
{
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mr_mip->ERNUM) return NULL;
    return epoint_init_mem_variable(_MIPP_ mem,index,mr_mip->nib-1);
}

#ifndef MR_STATIC
/* allocate space for a number of epoints from the heap */
void *ecp_memalloc(_MIPD_ int num)
{
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
#ifndef MR_AFFINE_ONLY
    if (mr_mip->coord==MR_AFFINE)
        return mr_alloc(_MIPP_ mr_ecp_reserve_a(num,mr_mip->nib-1),1);
    else
#endif
    return mr_alloc(_MIPP_ mr_ecp_reserve(num,mr_mip->nib-1),1);
}
#endif

void ecp_memkill(_MIPD_ char *mem,int num)
{ /* zero out (and, for heap builds, free) a block of epoints */
#ifdef MR_OS_THREADS
    miracl *mr_mip=get_mip();
#endif
    if (mem==NULL) return;
#ifndef MR_AFFINE_ONLY
    if (mr_mip->coord==MR_AFFINE) memset(mem,0,mr_ecp_reserve_a(num,mr_mip->nib-1));
    else
#endif
    memset(mem,0,mr_ecp_reserve(num,mr_mip->nib-1));
#ifndef MR_STATIC
    mr_free(mem);
#endif
}

#ifndef MR_STATIC
void epoint_free(epoint *p)
{ /* clean up point */
    if (p==NULL) return;
    zero(p->X);
    zero(p->Y);
#ifndef MR_AFFINE_ONLY
    if (p->marker==MR_EPOINT_GENERAL) zero(p->Z);
#endif
    mr_free(p);
}
#endif
GB_binop__lxor_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__lxor_int32
// A.*B function (eWiseMult):       GB_AemultB__lxor_int32
// A*D function (colscale):         GB_AxD__lxor_int32
// D*A function (rowscale):         GB_DxB__lxor_int32
// C+=B function (dense accum):     GB_Cdense_accumB__lxor_int32
// C+=b function (dense accum):     GB_Cdense_accumb__lxor_int32
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__lxor_int32
// C=scalar+B                       GB_bind1st__lxor_int32
// C=scalar+B'                      GB_bind1st_tran__lxor_int32
// C=A+scalar                       GB_bind2nd__lxor_int32
// C=A'+scalar                      GB_bind2nd_tran__lxor_int32

// C type:   int32_t
// A type:   int32_t
// B,b type: int32_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))

// The GB_* macros below form the protocol consumed by the #include'd
// template files: each template expands in terms of these definitions.

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator (logical XOR over "is nonzero" of each operand)
#define GB_BINOP(z, x, y) \
    z = ((x != 0) != (y != 0)) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// ("(none)" here: no CBLAS gateway exists for LXOR/int32, and the macro is
// never expanded in that case)
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LXOR || GxB_NO_INT32 || GxB_NO_LXOR_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (LXOR does not qualify, hence this variant is compiled out.)
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__lxor_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__lxor_int32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__lxor_int32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable - the template branch above already returns
    // (kept as generated)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__lxor_int32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__lxor_int32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__lxor_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__lxor_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__lxor_int32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int32_t bij = Bx [p] ;
        Cx [p] = ((x != 0) != (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__lxor_int32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int32_t aij = Ax [p] ;
        Cx [p] = ((aij != 0) != (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = Ax [pA] ;                     \
    Cx [pC] = ((x != 0) != (aij != 0)) ;        \
}

GrB_Info GB_bind1st_tran__lxor_int32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later template expansion
    #undef GB_ATYPE
    #define GB_ATYPE \
        int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = Ax [pA] ;                     \
    Cx [pC] = ((aij != 0) != (y != 0)) ;        \
}

GrB_Info GB_bind2nd_tran__lxor_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
sequence2batch.h
#ifndef ANAKIN_SABER_FUNC_IMPL_X86_MATH_SEQUENCE_BATCH_H #define ANAKIN_SABER_FUNC_IMPL_X86_MATH_SEQUENCE_BATCH_H #include <algorithm> #include <vector> #include "saber/core/tensor.h" #include "saber/funcs/impl/x86/x86_utils.h" namespace anakin { namespace saber { namespace math { template <DataType Dtype, typename LayOutType> class CopyMatrixRowsFunctor { public: typedef Tensor<X86> ioTensor; typedef typename DataTrait<X86, Dtype>::Dtype dtype; // If is_src_index is true, // copy the indexed rows of input src to the output dst. // If is_src_index is false, // copy the input src to the indexed rows of output dst. // The indexed rows are based on the input index. void operator()(ioTensor* src, std::vector<int> index_lod, ioTensor* dst, bool is_src_index, int fragment_num, int offset = 0, int width = 0); }; template <DataType Dtype, typename LayOutType> class Seq2BatchFunctor { // Calculate the length of each sequence and // sort sequence index by the length. // example: sequences = {s0, s1, s2} // s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2 // seq_info[3] = {(4, 5, 1), (0, 4, 0), (9, 3, 2)} // struct SeqInfo { SeqInfo(int start, int length, int seq_idx) : start(start), length(length), seq_idx(seq_idx) {} int start; int length; int seq_idx; }; public: typedef Tensor<X86> ioTensor; void operator()(ioTensor* seq, ioTensor* batch, std::vector<std::vector<int>>& seq_to_batch_meta, bool is_cal_batch_lod, bool is_reverse = false, int fragment_num = 1) const { if (!is_cal_batch_lod) { if (seq_to_batch_meta.size() < 2) { LOG(ERROR) << "The size of seq_to_batch_meta should inlcude at least 2-level sequence information."; exit(-1); } if (seq_to_batch_meta[1].size() != static_cast<int>(seq->num())) { LOG(ERROR) << "The seq_to_batch information should be consistent with the dims."; exit(-1); } CopyMatrixRowsFunctor<Dtype, LayOutType> to_batch; to_batch(seq, seq_to_batch_meta[1], batch, true, fragment_num); return; } if (seq_to_batch_meta.size() != 1) { LOG(ERROR) << "Only support one 
level sequence now."; exit(-1); } auto seq_meta = seq_to_batch_meta[0]; std::vector<SeqInfo> seq_info; for (int seq_id = 0; seq_id < seq_meta.size() - 1; ++seq_id) { int length = seq_meta[seq_id + 1] - seq_meta[seq_id]; seq_info.emplace_back(seq_meta[seq_id], length, seq_id); //LOG(INFO) << "seq_meta[seq_id]:" << seq_meta[seq_id] << " length:" << length << " seq_id:" <<seq_id; } std::sort(seq_info.begin(), seq_info.end(), [](SeqInfo a, SeqInfo b) { return a.length > b.length; }); // Calculate the start position of each batch. // example: sequences = {s0, s1, s2} // s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2 // num_batch = 5, // batchIndex = {b0, b1, b2, b3, b4} // b0: 1 0 2, b1: 1 0 2, b2: 1 0 2, b3: 1 0, b4: 1 // batch_start_positions[6] = {0, 3, 6, 9, 11, 12} // batch_start_positions[0] = len(b0) // batch_start_positions[1] = len(b0) + len(b1) // batch_start_positions[2] = len(b0) + len(b1) + len(b2) // ... // seq2batch_idx[12] = {4, 0, 9, // 5, 1, 10, // 6, 2, 11, // 7, 3, // 8} // seq_order = {1, 0, 2}, the sort order. // where 1 is the second sequence, // 0 is the first sequence, // 2 is the third sequence. // The num_batch represents batch size after rearranging the // input LodTensor. It is also the maximum length of input sequence. std::vector<std::vector<int>> batch_seq_meta; batch_seq_meta.emplace_back(std::vector<int> {0}); batch_seq_meta.emplace_back(std::vector<int> {0}); batch_seq_meta.emplace_back(std::vector<int> {0}); // batch_seq_meta[0] is the start positions for batch LoDTensor int num_batch = seq_info[0].length; batch_seq_meta[0].resize(static_cast<int>(num_batch + 1)); // batch_seq_meta[1] is the raw index in the input LoDTensor batch_seq_meta[1].resize(static_cast<int>(seq->num())); // batch_seq_meta[2] is the sort order for the input LoDTensor. 
batch_seq_meta[2].resize(seq_info.size()); int* batch_starts = batch_seq_meta[0].data(); int* seq2batch_idx = batch_seq_meta[1].data(); batch_starts[0] = 0; for (int n = 0; n < num_batch; n++) { auto batch_id = static_cast<int>(batch_starts[n]); for (int i = 0; i < seq_info.size(); ++i) { int seq_len = seq_info[i].length; int start = seq_info[i].start; if (n < seq_len) { seq2batch_idx[batch_id] = is_reverse ? start + seq_len - 1 - n : start + n; batch_id++; } else { break; } } batch_starts[n + 1] = static_cast<int>(batch_id); } int* seq_order = batch_seq_meta[2].data(); for (int i = 0; i < seq_info.size(); ++i) { seq_order[i] = seq_info[i].seq_idx; } seq_to_batch_meta = batch_seq_meta; CopyMatrixRowsFunctor<Dtype, LayOutType> to_batch; to_batch(seq, batch_seq_meta[1], batch, true, fragment_num); } }; template <DataType Dtype, typename LayOutType> class Batch2SeqFunctor { public: typedef Tensor<X86> ioTensor; void operator()(ioTensor* batch, ioTensor* seq, std::vector<std::vector<int>>& seq_to_batch_meta, int fragment_num = 1, int offset = 0, int width = 0) const { if (seq_to_batch_meta.size() < 2) { LOG(ERROR) << "The size of seq_to_batch_meta should inlcude at least 2-level sequence information."; exit(-1); } if (seq_to_batch_meta[1].size() != static_cast<int>(seq->num())) { LOG(ERROR) << "The seq_to_batch information should be consistent with the dims."; exit(-1); } CopyMatrixRowsFunctor<Dtype, LayOutType> to_seq; to_seq(batch, seq_to_batch_meta[1], seq, false, fragment_num, offset, width); } }; template <DataType Dtype, typename LayOutType> class ReorderInitState { public: typedef Tensor<X86> ioTensor; void operator()(ioTensor* src, std::vector<int> ind_lod, ioTensor* dst, bool indexed_src, int fragment_num = 1) { math::CopyMatrixRowsFunctor<Dtype, LayOutType> row_shuffle; row_shuffle(src, ind_lod, dst, indexed_src, fragment_num); } }; /* * This class can used to modify the matrix structure of sequence matrix into * batch structure. * sequence matrix: [C1_s ... 
Cn_s | ...... | C1_t ... Cn_t] * batch matrix: [C1_s ... C1_t | ...... | Cn_s ... Cn_t] * Cn_s is the state for sequence s at time n. * * Exampel: sequence matrix = {{0, 0, 0, 0}, {1, 1, 1, 1, 1}, {2, 2, 2}} * s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2 * batch matrix = {{1, 0, 2}, {1, 0, 2}, {1, 0, 2}, {1, 0}, {1}} * b0: 1 0 2, b1: 1 0 2, b2: 1 0 2, b3: 1 0, b4: 1 * * Use: * Input: seqMatrix, seqStarts(Sequence Start Positions) * Output: batchMatrix * 1. SequenceToBatch seq2batch; * 2. seq2batch.resizeOrCreateBatch(seqStarts); // calculate seq2BatchIdx * 3. seq2batch.copy(seqMatrix, batchMatrix, true); // copy seq to batch matrix * */ class SequenceToBatch { public: SequenceToBatch() {}; template <typename Dtype> void seq_2_bat(const Dtype* input, Dtype* output, int word_size) { int word_sum = seq2BatchIdx_.size(); #pragma omp parallel for if(thread_num > 1) for (int old_id = 0; old_id < word_sum; ++old_id) { int word_start = old_id * word_size; int maped_id = seq2BatchIdx_[old_id]; int maped_start = maped_id * word_size; for (int word_offset = 0; word_offset < word_size; ++word_offset) { output[word_start + word_offset] = input[maped_start + word_offset]; } } } template <typename Dtype> void hidden_2_bat(const Dtype* input, Dtype* output, int hidden_size) { int batch_size = seqStartAndLength_.size(); for (int old_id = 0; old_id < batch_size; ++old_id) { int word_start = old_id * hidden_size; int maped_id = seqStartAndLength_[old_id].seqIdx_; int maped_start = maped_id * hidden_size; for (int word_offset = 0; word_offset < hidden_size; ++word_offset) { output[word_start + word_offset] = input[maped_start + word_offset]; } } } template <typename Dtype> void bat_2_seq(const Dtype* input, Dtype* output, int hidden_size) { int word_sum = seq2BatchIdx_.size(); #pragma omp parallel for if(thread_num > 1) for (int old_id = 0; old_id < word_sum; old_id++) { int word_start = old_id * hidden_size; int maped_id = seq2BatchIdx_[old_id]; int maped_start = maped_id * hidden_size; for 
(int word_offset = 0; word_offset < hidden_size; word_offset++) { output[maped_start + word_offset] = input[word_start + word_offset]; } } } template <typename Dtype> void bat_2_seq(const Dtype* input, Dtype* output, int hidden_size, int aligned_hidden_size) { int word_sum = seq2BatchIdx_.size(); #pragma omp parallel for if(thread_num > 1) for (int old_id = 0; old_id < word_sum; old_id++) { int word_start = old_id * aligned_hidden_size; int maped_id = seq2BatchIdx_[old_id]; int maped_start = maped_id * hidden_size; for (int word_offset = 0; word_offset < hidden_size; word_offset++) { output[maped_start + word_offset] = input[word_start + word_offset]; } } } void get_batch_offset(std::vector<int>& bat_offset) { for (size_t i = 0; i < batchStartPositions_.size(); i++) { bat_offset[i] = batchStartPositions_[i]; } } size_t get_batch_num() const { return numBatch_; } void create_batch(int batchSize, size_t numSequences, std::vector<int>& seqStarts, bool reversed) { CHECK_EQ(seqStarts[numSequences], batchSize); seq2BatchIdx_.resize(batchSize); /* * calculate the length of each sequence & sort sequence index by the length * Exampel: Sequences = {s0, s1, s2} * s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2 * seqStartAndLength_[3] = {(4, 5, 1), (0, 4, 0), (9, 3, 2)} */ for (size_t seqId = 0; seqId < numSequences; ++seqId) { int length = seqStarts[seqId + 1] - seqStarts[seqId]; seqStartAndLength_.emplace_back(seqStarts[seqId], length, seqId); } std::sort(seqStartAndLength_.begin(), seqStartAndLength_.end(), [](SeqStartAndLength a, SeqStartAndLength b) { return a.length_ > b.length_; }); /* * calculate the start position of each batch * (numBatch equal the maxLength of sequences) * Exampel: Sequences = {s0, s1, s2} * s0: 0 0 0 0, s1: 1 1 1 1 1, s2: 2 2 2 * numBatch = 5, * batchIndex = {b0, b1, b2, b3, b4} * b0: 1 0 2, b1: 1 0 2, b2: 1 0 2, b3: 1 0, b4: 1 * batchStartPositions[6] = {0, 3, 6, 9, 11, 12} */ numBatch_ = (size_t)seqStartAndLength_[0].length_; 
batchStartPositions_.resize(numBatch_ + 1); batchStartPositions_[0] = 0; for (size_t n = 0; n < numBatch_; n++) { int batchId = batchStartPositions_[n]; for (size_t i = 0; i < seqStartAndLength_.size(); ++i) { size_t seqLength = seqStartAndLength_[i].length_; int start = seqStartAndLength_[i].start_; if (n < seqLength) { if (!reversed) { seq2BatchIdx_[batchId] = start + n; } else { seq2BatchIdx_[batchId] = start + seqLength - 1 - n; } batchId++; } else { break; } } batchStartPositions_[n + 1] = batchId; } } protected: struct SeqStartAndLength { int start_; int length_; int seqIdx_; SeqStartAndLength(int start, int length, int seqIdx) : start_(start), length_(length), seqIdx_(seqIdx) {} }; std::vector<SeqStartAndLength> seqStartAndLength_; std::vector<int> batchStartPositions_; std::vector<int> seq2BatchIdx_; size_t numBatch_; int thread_num = omp_get_max_threads(); }; } // namespace math } // namespace saber } // namespace anakin #endif
clause-2.c
/* PR c/34506 */ /* { dg-do compile } */ #define p parallel void foo (int x) { #pragma omp p num_threads (4) if (1) private (x) ; #pragma omp p num_threads(4)if(1)private(x) ; #pragma omp p num_threads (4), if (1) , private (x) ; #pragma omp p num_threads(4),if(1),private(x) ; #pragma omp p, num_threads (4), if (1), private (x) /* { dg-error "clause before" } */ ; #pragma omp p num_threads (4), if (1), private (x), /* { dg-error "clause before" } */ ; #pragma omp p num_threads (4), , if (1), private (x) /* { dg-error "clause before" } */ ; }
target_data.c
// RUN: %libomptarget-compile-generic -fopenmp-version=51 // RUN: %libomptarget-run-fail-generic 2>&1 \ // RUN: | %fcheck-generic #include <stdio.h> int main() { int i; // CHECK: addr=0x[[#%x,HOST_ADDR:]], size=[[#%u,SIZE:]] fprintf(stderr, "addr=%p, size=%ld\n", &i, sizeof i); // CHECK-NOT: Libomptarget #pragma omp target data map(alloc: i) #pragma omp target data map(present, alloc: i) ; // CHECK: i is present fprintf(stderr, "i is present\n"); // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#HOST_ADDR]] ([[#SIZE]] bytes) // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory #pragma omp target data map(present, alloc: i) ; // CHECK-NOT: i is present fprintf(stderr, "i is present\n"); return 0; }
lecuyer.h
/*
 * random number generator interface
 * L'Ecuyer, Mathematics of Computation, 65, pp 203-213 (96)
 */
/* Copyright (c) 2005, The Regents of the University of California.
 * All rights reserved.
 * This file is part of yorick (http://yorick.sourceforge.net).
 * Read the accompanying LICENSE file for details.
 */

/* Include guard added: the header previously had none, so a double include
 * would redeclare every symbol (harmless in C, but poor hygiene). */
#ifndef LECUYER_H
#define LECUYER_H

/* __cplusplus is for version 2.0, c_plusplus for version 1.2 */
#ifdef __cplusplus
extern "C" {
#endif

/* get random numbers between 0. and 1. one or n at a time */
extern double le_random(unsigned long *generator);
extern void le_nrandom(unsigned long *generator, long n, double *r);

/* get underlying random integer between 1 and 2^32-1 (4,294,967,295) */
extern unsigned long le_next(unsigned long *generator);

/* seed the sequence with either double or long
 * -- 0 seed means reinitialize to default sequence
 * -- note that the complete state of the generator requires
 *    three numbers, not one, so "seeding" can't reproduce
 *    an arbitrary state -- copy the generator to do that
 */
extern void le_rseed(unsigned long *generator, double seed);
extern void le_iseed(unsigned long *generator, unsigned long seed);

/* above can all take generator==0, in which case, they use this one
 * -- if you create your own generator, none of the three values
 *    can be 0; best to call one of the seed routines to initialize
 */
extern unsigned long le_generator[3];

#ifdef _OPENMP
/* device-side (OpenMP 4.5 target) variants of the generator API */
#pragma omp declare target

/* get random numbers between 0. and 1. one or n at a time */
extern double le_random_omp45(unsigned long *generator);
extern void le_nrandom_omp45(unsigned long *generator, long n, double *r);

/* get underlying random integer between 1 and 2^32-1 (4,294,967,295) */
extern unsigned long le_next_omp45(unsigned long *generator);

/* seed the sequence with either double or long
 * -- 0 seed means reinitialize to default sequence
 * -- note that the complete state of the generator requires
 *    three numbers, not one, so "seeding" can't reproduce
 *    an arbitrary state -- copy the generator to do that
 */
extern void le_rseed_omp45(unsigned long *generator, double seed);
extern void le_iseed_omp45(unsigned long *generator, unsigned long seed);

/* above can all take generator==0, in which case, they use this one
 * -- if you create your own generator, none of the three values
 *    can be 0; best to call one of the seed routines to initialize
 */
extern unsigned long le_generator_omp45[3];

#pragma omp end declare target
#endif

#ifdef __cplusplus
}
#endif

#endif /* LECUYER_H */
pzgetrf.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <plasma_core_blas.h>

#define A(m, n) (plasma_complex64_t*)plasma_tile_addr(A, m, n)

/******************************************************************************/
// Tiled, task-parallel LU factorization with partial pivoting (A = P*L*U).
// For each diagonal tile k: factor the panel (column block k), then apply the
// row swaps, triangular solve and trailing-matrix GEMM updates to columns to
// the right; finally apply the accumulated pivots to the columns left of each
// panel.  Correctness of the OpenMP DAG hinges on the exact depend clauses.
void plasma_pzgetrf(plasma_desc_t A, int *ipiv,
                    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Return if failed sequence.
    if (sequence->status != PlasmaSuccess)
        return;

    // Read parameters from the context.
    plasma_context_t *plasma = plasma_context_self();

    // Set tiling parameters.
    int ib = plasma->ib;

    int minmtnt = imin(A.mt, A.nt);
    for (int k = 0; k < minmtnt; k++) {
        // a00 and a20 stand in for the whole panel in depend clauses below.
        plasma_complex64_t *a00, *a20;
        a00 = A(k, k);
        a20 = A(A.mt-1, k);

        // Create fake dependencies of the whole panel on its individual tiles.
        // These tasks are inserted to generate a correct DAG rather than
        // doing any useful work.
        for (int m = k+1; m < A.mt-1; m++) {
            plasma_complex64_t *amk = A(m, k);
            #pragma omp task depend (in:amk[0]) \
                             depend (inout:a00[0]) \
                             priority(1)
            {
                // Do some funny work here. It appears so that the compiler
                // might not insert the task if it is completely empty.
                int l = 1;
                l++;
            }
        }

        int ma00k = (A.mt-k-1)*A.mb;
        int na00k = plasma_tile_nmain(A, k);
        int lda20 = plasma_tile_mmain(A, A.mt-1);

        int nvak = plasma_tile_nview(A, k);
        int mvak = plasma_tile_mview(A, k);
        int ldak = plasma_tile_mmain(A, k);

        int num_panel_threads = imin(plasma->max_panel_threads,
                                     minmtnt-k);

        // panel: multi-threaded factorization of column block k with
        // partial pivoting; writes pivot indices into ipiv[k*A.mb ..].
        #pragma omp task depend(inout:a00[0:ma00k*na00k]) \
                         depend(inout:a20[0:lda20*nvak]) \
                         depend(out:ipiv[k*A.mb:mvak]) \
                         priority(1)
        {
            // Scratch shared by the panel threads for the pivot search.
            volatile int *max_idx = (int*)malloc(num_panel_threads*sizeof(int));
            if (max_idx == NULL)
                plasma_request_fail(sequence, request, PlasmaErrorOutOfMemory);

            volatile plasma_complex64_t *max_val =
                (plasma_complex64_t*)malloc(num_panel_threads*sizeof(
                    plasma_complex64_t));
            if (max_val == NULL)
                plasma_request_fail(sequence, request, PlasmaErrorOutOfMemory);

            volatile int info = 0;

            plasma_barrier_t barrier;
            plasma_barrier_init(&barrier);

            if (sequence->status == PlasmaSuccess) {
                // If nesting would not be expensive on architectures such as
                // KNL, this would resolve the issue with deadlocks caused by
                // tasks expected to run are in fact not launched.
                //#pragma omp parallel for shared(barrier)
                //                         schedule(dynamic,1)
                //                         num_threads(num_panel_threads)
                #pragma omp taskloop untied shared(barrier) \
                                     num_tasks(num_panel_threads) \
                                     priority(2)
                for (int rank = 0; rank < num_panel_threads; rank++) {
                    {
                        plasma_desc_t view =
                            plasma_desc_view(A, k*A.mb, k*A.nb,
                                             A.m-k*A.mb, nvak);

                        plasma_core_zgetrf(view, &ipiv[k*A.mb], ib,
                                           rank, num_panel_threads,
                                           max_idx, max_val, &info,
                                           &barrier);
                        if (info != 0)
                            plasma_request_fail(sequence, request,
                                                k*A.mb+info);
                    }
                }
            }
            #pragma omp taskwait

            free((void*)max_idx);
            free((void*)max_val);

            // Convert panel-local pivot indices to global row indices.
            for (int i = k*A.mb+1; i <= imin(A.m, k*A.mb+nvak); i++)
                ipiv[i-1] += k*A.mb;
        }

        // update: for each column block to the right, apply row swaps,
        // TRSM against the panel's L11, then GEMM the trailing tiles.
        for (int n = k+1; n < A.nt; n++) {
            plasma_complex64_t *a01, *a11, *a21;
            a01 = A(k, n);
            a11 = A(k+1, n);
            a21 = A(A.mt-1, n);

            int ma11k = (A.mt-k-2)*A.mb;
            int na11n = plasma_tile_nmain(A, n);
            int lda21 = plasma_tile_mmain(A, A.mt-1);

            int nvan = plasma_tile_nview(A, n);

            // priority(n == k+1): the next panel's column is on the
            // critical path, so its update runs first.
            #pragma omp task depend(in:a00[0:ma00k*na00k]) \
                             depend(in:a20[0:lda20*nvak]) \
                             depend(in:ipiv[k*A.mb:mvak]) \
                             depend(inout:a01[0:ldak*nvan]) \
                             depend(inout:a11[0:ma11k*na11n]) \
                             depend(inout:a21[0:lda21*nvan]) \
                             priority(n == k+1)
            {
                if (sequence->status == PlasmaSuccess) {
                    // geswp
                    int k1 = k*A.mb+1;
                    int k2 = imin(k*A.mb+A.mb, A.m);
                    plasma_desc_t view =
                        plasma_desc_view(A, 0, n*A.nb, A.m, nvan);
                    plasma_core_zgeswp(PlasmaRowwise, view, k1, k2, ipiv, 1);

                    // trsm
                    plasma_core_ztrsm(PlasmaLeft, PlasmaLower,
                                      PlasmaNoTrans, PlasmaUnit,
                                      mvak, nvan,
                                      1.0, A(k, k), ldak,
                                           A(k, n), ldak);

                    // gemm
                    for (int m = k+1; m < A.mt; m++) {
                        int mvam = plasma_tile_mview(A, m);
                        int ldam = plasma_tile_mmain(A, m);

                        #pragma omp task priority(n == k+1)
                        {
                            plasma_core_zgemm(
                                PlasmaNoTrans, PlasmaNoTrans,
                                mvam, nvan, A.nb,
                                -1.0, A(m, k), ldam,
                                      A(k, n), ldak,
                                1.0,  A(m, n), ldam);
                        }
                    }
                }
                #pragma omp taskwait
            }
        }
    }

    // Multidependency of the whole ipiv on the individual chunks
    // corresponding to tiles.
    for (int m = 0; m < minmtnt; m++) {
        // insert dummy task
        #pragma omp task depend (in:ipiv[m*A.mb]) \
                         depend (inout:ipiv[0])
        {
            int l = 1;
            l++;
        }
    }

    // pivoting to the left: apply all later pivots to the columns below
    // each already-factored panel so the final L is consistently permuted.
    for (int k = 0; k < minmtnt-1; k++) {
        plasma_complex64_t *a10, *a20;
        a10 = A(k+1, k);
        a20 = A(A.mt-1, k);

        int ma10k = (A.mt-k-2)*A.mb;
        int na00k = plasma_tile_nmain(A, k);
        int lda20 = plasma_tile_mmain(A, A.mt-1);

        int nvak = plasma_tile_nview(A, k);

        #pragma omp task depend(in:ipiv[0:imin(A.m,A.n)]) \
                         depend(inout:a10[0:ma10k*na00k]) \
                         depend(inout:a20[0:lda20*nvak])
        {
            if (sequence->status == PlasmaSuccess) {
                plasma_desc_t view =
                    plasma_desc_view(A, 0, k*A.nb, A.m, A.nb);
                int k1 = (k+1)*A.mb+1;
                int k2 = imin(A.m, A.n);
                plasma_core_zgeswp(PlasmaRowwise, view, k1, k2, ipiv, 1);
            }
        }

        // Multidependency of individual tiles on the whole panel.
        for (int m = k+2; m < A.mt-1; m++) {
            plasma_complex64_t *amk = A(m, k);
            #pragma omp task depend (in:a10[0]) \
                             depend (inout:amk[0])
            {
                // Do some funny work here. It appears so that the compiler
                // might not insert the task if it is completely empty.
                int l = 1;
                l++;
            }
        }
    }
}
DenseVector.h
//================================================================================================= /*! // \file blaze/math/smp/openmp/DenseVector.h // \brief Header file for the OpenMP-based dense vector SMP implementation // // Copyright (C) 2013 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_ #define _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_ //************************************************************************************************* // Includes //************************************************************************************************* #include <omp.h> #include <blaze/math/Aliases.h> #include <blaze/math/constraints/SMPAssignable.h> #include <blaze/math/expressions/DenseVector.h> #include <blaze/math/expressions/SparseVector.h> #include <blaze/math/Functions.h> #include <blaze/math/simd/SIMDTrait.h> #include <blaze/math/smp/ParallelSection.h> #include <blaze/math/smp/SerialSection.h> #include <blaze/math/traits/SubvectorExprTrait.h> #include <blaze/math/typetraits/IsDenseVector.h> #include <blaze/math/typetraits/IsSMPAssignable.h> #include <blaze/math/views/Subvector.h> #include <blaze/system/SMP.h> #include <blaze/util/Assert.h> #include <blaze/util/EnableIf.h> #include <blaze/util/logging/FunctionTrace.h> #include <blaze/util/mpl/And.h> #include <blaze/util/mpl/Not.h> #include <blaze/util/mpl/Or.h> #include <blaze/util/StaticAssert.h> #include <blaze/util/Types.h> #include <blaze/util/typetraits/IsSame.h> namespace blaze { //================================================================================================= // // PLAIN ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP assignment of a dense vector to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector to be assigned. 
// \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a dense // vector to a dense vector.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side dense vector , bool TF2 > // Transpose flag of the right-hand side dense vector void smpAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef ElementType_<VT1> ET1; typedef ElementType_<VT2> ET2; typedef SubvectorExprTrait_<VT1,aligned> AlignedTarget; typedef SubvectorExprTrait_<VT1,unaligned> UnalignedTarget; enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<VT1> >::size }; const bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSame_<ET1,ET2> ); const bool lhsAligned ( (~lhs).isAligned() ); const bool rhsAligned ( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 
1UL : 0UL ); const size_t equalShare ( (~lhs).size() / threads + addon ); const size_t rest ( equalShare & ( SIMDSIZE - 1UL ) ); const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (~lhs).size() ) continue; const size_t size( min( sizePerThread, (~lhs).size() - index ) ); if( simdEnabled && lhsAligned && rhsAligned ) { AlignedTarget target( subvector<aligned>( ~lhs, index, size ) ); assign( target, subvector<aligned>( ~rhs, index, size ) ); } else if( simdEnabled && lhsAligned ) { AlignedTarget target( subvector<aligned>( ~lhs, index, size ) ); assign( target, subvector<unaligned>( ~rhs, index, size ) ); } else if( simdEnabled && rhsAligned ) { UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) ); assign( target, subvector<aligned>( ~rhs, index, size ) ); } else { UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) ); assign( target, subvector<unaligned>( ~rhs, index, size ) ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP assignment of a sparse vector to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be assigned. // \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a sparse // vector to a dense vector.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. 
Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side sparse vector , bool TF2 > // Transpose flag of the right-hand side sparse vector void smpAssign_backend( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef SubvectorExprTrait_<VT1,unaligned> UnalignedTarget; const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL ); const size_t sizePerThread( (~lhs).size() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (~lhs).size() ) continue; const size_t size( min( sizePerThread, (~lhs).size() - index ) ); UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) ); assign( target, subvector<unaligned>( ~rhs, index, size ) ); } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be assigned. // \return void // // This function implements the default OpenMP-based SMP assignment to a dense vector. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! 
It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_< And< IsDenseVector<VT1> , Or< Not< IsSMPAssignable<VT1> > , Not< IsSMPAssignable<VT2> > > > > smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); assign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be assigned. // \return void // // This function performs the OpenMP-based SMP assignment to a dense vector. Due to the // explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > > smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { assign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) smpAssign_backend( ~lhs, ~rhs ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // ADDITION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP addition assignment of a dense vector to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector to be added. // \return void // // This function is the backend implementation the OpenMP-based SMP addition assignment of a // dense vector to a dense vector.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side dense vector , bool TF2 > // Transpose flag of the right-hand side dense vector void smpAddAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef ElementType_<VT1> ET1; typedef ElementType_<VT2> ET2; typedef SubvectorExprTrait_<VT1,aligned> AlignedTarget; typedef SubvectorExprTrait_<VT1,unaligned> UnalignedTarget; enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<VT1> >::size }; const bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSame_<ET1,ET2> ); const bool lhsAligned ( (~lhs).isAligned() ); const bool rhsAligned ( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL ); const size_t equalShare ( (~lhs).size() / threads + addon ); const size_t rest ( equalShare & ( SIMDSIZE - 1UL ) ); const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (~lhs).size() ) continue; const size_t size( min( sizePerThread, (~lhs).size() - index ) ); if( simdEnabled && lhsAligned && rhsAligned ) { AlignedTarget target( subvector<aligned>( ~lhs, index, size ) ); addAssign( target, subvector<aligned>( ~rhs, index, size ) ); } else if( simdEnabled && lhsAligned ) { AlignedTarget target( subvector<aligned>( ~lhs, index, size ) ); addAssign( target, subvector<unaligned>( ~rhs, index, size ) ); } else if( simdEnabled && rhsAligned ) { UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) ); addAssign( target, subvector<aligned>( ~rhs, index, size ) ); } else { UnalignedTarget target( 
subvector<unaligned>( ~lhs, index, size ) ); addAssign( target, subvector<unaligned>( ~rhs, index, size ) ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP addition assignment of a sparse vector to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be added. // \return void // // This function is the backend implementation the OpenMP-based SMP addition assignment of a // sparse vector to a dense vector.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side sparse vector , bool TF2 > // Transpose flag of the right-hand side sparse vector void smpAddAssign_backend( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef SubvectorExprTrait_<VT1,unaligned> UnalignedTarget; const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 
1UL : 0UL ); const size_t sizePerThread( (~lhs).size() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (~lhs).size() ) continue; const size_t size( min( sizePerThread, (~lhs).size() - index ) ); UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) ); addAssign( target, subvector<unaligned>( ~rhs, index, size ) ); } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be added. // \return void // // This function implements the default OpenMP-based SMP addition assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
                     , Or< Not< IsSMPAssignable<VT1> >
                         , Not< IsSMPAssignable<VT2> > > > >
   smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // At least one operand is not SMP-assignable; forward to the serial addition kernel.
   addAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be added.
// \return void
//
// This function implements the OpenMP-based SMP addition assignment to a dense vector. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > > smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { addAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) smpAddAssign_backend( ~lhs, ~rhs ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // SUBTRACTION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP subtraction assignment of a dense vector to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector to be subtracted. // \return void // // This function is the backend implementation the OpenMP-based SMP subtraction assignment of a // dense vector to a dense vector.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. 
Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side dense vector , bool TF2 > // Transpose flag of the right-hand side dense vector void smpSubAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef ElementType_<VT1> ET1; typedef ElementType_<VT2> ET2; typedef SubvectorExprTrait_<VT1,aligned> AlignedTarget; typedef SubvectorExprTrait_<VT1,unaligned> UnalignedTarget; enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<VT1> >::size }; const bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSame_<ET1,ET2> ); const bool lhsAligned ( (~lhs).isAligned() ); const bool rhsAligned ( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 
1UL : 0UL ); const size_t equalShare ( (~lhs).size() / threads + addon ); const size_t rest ( equalShare & ( SIMDSIZE - 1UL ) ); const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (~lhs).size() ) continue; const size_t size( min( sizePerThread, (~lhs).size() - index ) ); if( simdEnabled && lhsAligned && rhsAligned ) { AlignedTarget target( subvector<aligned>( ~lhs, index, size ) ); subAssign( target, subvector<aligned>( ~rhs, index, size ) ); } else if( simdEnabled && lhsAligned ) { AlignedTarget target( subvector<aligned>( ~lhs, index, size ) ); subAssign( target, subvector<unaligned>( ~rhs, index, size ) ); } else if( simdEnabled && rhsAligned ) { UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) ); subAssign( target, subvector<aligned>( ~rhs, index, size ) ); } else { UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) ); subAssign( target, subvector<unaligned>( ~rhs, index, size ) ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP subtraction assignment of a sparse vector to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be subtracted. // \return void // // This function is the backend implementation of the OpenMP-based SMP subtraction assignment of // a sparse vector to a dense vector.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. 
Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side sparse vector , bool TF2 > // Transpose flag of the right-hand side sparse vector void smpSubAssign_backend( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef SubvectorExprTrait_<VT1,unaligned> UnalignedTarget; const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL ); const size_t sizePerThread( (~lhs).size() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (~lhs).size() ) continue; const size_t size( min( sizePerThread, (~lhs).size() - index ) ); UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) ); subAssign( target, subvector<unaligned>( ~rhs, index, size ) ); } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be subtracted. // \return void // // This function implements the default OpenMP-based SMP subtraction assignment of a vector to // a dense vector. 
Due to the explicit application of the SFINAE principle, this function can
// only be selected by the compiler in case at least one of the two operands is not
// SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
                     , Or< Not< IsSMPAssignable<VT1> >
                         , Not< IsSMPAssignable<VT2> > > > >
   smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // At least one operand is not SMP-assignable; forward to the serial subtraction kernel.
   subAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be subtracted.
// \return void
//
// This function implements the OpenMP-based SMP subtraction assignment to a dense vector. Due
// to the explicit application of the SFINAE principle, this function can only be selected by
// the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly!
It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > > smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { subAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) smpSubAssign_backend( ~lhs, ~rhs ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // MULTIPLICATION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP multiplication assignment of a dense vector to a // dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector to be multiplied. 
// \return void // // This function is the backend implementation of the OpenMP-based SMP multiplication assignment // of a dense vector to a dense vector.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side dense vector , bool TF2 > // Transpose flag of the right-hand side dense vector void smpMultAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef ElementType_<VT1> ET1; typedef ElementType_<VT2> ET2; typedef SubvectorExprTrait_<VT1,aligned> AlignedTarget; typedef SubvectorExprTrait_<VT1,unaligned> UnalignedTarget; enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<VT1> >::size }; const bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSame_<ET1,ET2> ); const bool lhsAligned ( (~lhs).isAligned() ); const bool rhsAligned ( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 
1UL : 0UL ); const size_t equalShare ( (~lhs).size() / threads + addon ); const size_t rest ( equalShare & ( SIMDSIZE - 1UL ) ); const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (~lhs).size() ) continue; const size_t size( min( sizePerThread, (~lhs).size() - index ) ); if( simdEnabled && lhsAligned && rhsAligned ) { AlignedTarget target( subvector<aligned>( ~lhs, index, size ) ); multAssign( target, subvector<aligned>( ~rhs, index, size ) ); } else if( simdEnabled && lhsAligned ) { AlignedTarget target( subvector<aligned>( ~lhs, index, size ) ); multAssign( target, subvector<unaligned>( ~rhs, index, size ) ); } else if( simdEnabled && rhsAligned ) { UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) ); multAssign( target, subvector<aligned>( ~rhs, index, size ) ); } else { UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) ); multAssign( target, subvector<unaligned>( ~rhs, index, size ) ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP multiplication assignment of a sparse vector to a // dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be multiplied. // \return void // // This function is the backend implementation of the OpenMP-based SMP multiplication assignment // of a sparse vector to a dense vector.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. 
Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side sparse vector , bool TF2 > // Transpose flag of the right-hand side sparse vector void smpMultAssign_backend( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef SubvectorExprTrait_<VT1,unaligned> UnalignedTarget; const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL ); const size_t sizePerThread( (~lhs).size() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (~lhs).size() ) continue; const size_t size( min( sizePerThread, (~lhs).size() - index ) ); UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) ); multAssign( target, subvector<unaligned>( ~rhs, index, size ) ); } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be multiplied. // \return void // // This function implements the default OpenMP-based SMP multiplication assignment to a dense // vector. 
Due to the explicit application of the SFINAE principle, this function can only be
// selected by the compiler in case at least one of the two operands is not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
                     , Or< Not< IsSMPAssignable<VT1> >
                         , Not< IsSMPAssignable<VT2> > > > >
   smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // At least one operand is not SMP-assignable; forward to the serial multiplication kernel.
   multAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP multiplication assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be multiplied.
// \return void
//
// This function implements the OpenMP-based SMP multiplication assignment to a dense vector.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both
// operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly!
It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > > smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { multAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) smpMultAssign_backend( ~lhs, ~rhs ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // DIVISION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP division assignment of a dense vector to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector divisor. // \return void // // This function is the backend implementation of the OpenMP-based SMP division assignment of // a dense vector to a dense vector.\n // This function must \b NOT be called explicitly! 
It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side dense vector , bool TF2 > // Transpose flag of the right-hand side dense vector void smpDivAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); typedef ElementType_<VT1> ET1; typedef ElementType_<VT2> ET2; typedef SubvectorExprTrait_<VT1,aligned> AlignedTarget; typedef SubvectorExprTrait_<VT1,unaligned> UnalignedTarget; enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<VT1> >::size }; const bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSame_<ET1,ET2> ); const bool lhsAligned ( (~lhs).isAligned() ); const bool rhsAligned ( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 
1UL : 0UL ); const size_t equalShare ( (~lhs).size() / threads + addon ); const size_t rest ( equalShare & ( SIMDSIZE - 1UL ) ); const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (~lhs).size() ) continue; const size_t size( min( sizePerThread, (~lhs).size() - index ) ); if( simdEnabled && lhsAligned && rhsAligned ) { AlignedTarget target( subvector<aligned>( ~lhs, index, size ) ); divAssign( target, subvector<aligned>( ~rhs, index, size ) ); } else if( simdEnabled && lhsAligned ) { AlignedTarget target( subvector<aligned>( ~lhs, index, size ) ); divAssign( target, subvector<unaligned>( ~rhs, index, size ) ); } else if( simdEnabled && rhsAligned ) { UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) ); divAssign( target, subvector<aligned>( ~rhs, index, size ) ); } else { UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) ); divAssign( target, subvector<unaligned>( ~rhs, index, size ) ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP division assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector divisor. // \return void // // This function implements the default OpenMP-based SMP division assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both // operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! 
It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
                     , Or< Not< IsSMPAssignable<VT1> >
                         , Not< IsSMPAssignable<VT2> > > > >
   smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // At least one operand is not SMP-assignable; forward to the serial division kernel.
   divAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP division assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector divisor.
// \return void
//
// This function implements the OpenMP-based SMP division assignment to a dense vector. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > > smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { divAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) smpDivAssign_backend( ~lhs, ~rhs ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // COMPILE TIME CONSTRAINTS // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ namespace { BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE ); } /*! \endcond */ //************************************************************************************************* } // namespace blaze #endif
single.c
#include <omp.h>

#define N 10

/* Minimal OpenMP worksharing demo.
 *
 * Zero-initializes three N-element arrays, then computes the element-wise
 * sum a[i] = b[i] + c[i] inside an "omp parallel" + "omp for" pair.  The
 * iterations are independent, so no synchronization beyond the loop's
 * implicit barrier is required.
 *
 * Returns 0 on success.
 */
int main (int argc, char * argv[]){
  double a[N], b[N], c[N];
  int i;

  /* One pass suffices to zero all three arrays (the original version walked
   * the same index range three times in separate loops). */
  for(i=0; i<N; i++){
    a[i] = 0;
    b[i] = 0;
    c[i] = 0;
  }

#pragma omp parallel
#pragma omp for
  for(int i = 0; i < N ; i ++)
    a[i] = b[i] + c[i];

  /* Explicit exit status; the original fell off the end of main (legal in
   * C99, but an explicit return is clearer). */
  return 0;
}
ast-dump-openmp-parallel-for-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp parallel for simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp parallel for simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp parallel for simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-parallel-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPParallelForSimdDirective {{.*}} <line:4:1, col:30> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 
'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPParallelForSimdDirective {{.*}} <line:10:1, col:30> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' 
<LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPParallelForSimdDirective {{.*}} <line:17:1, col:42> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:31, col:41> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:40> 'int' // CHECK-NEXT: | | |-value: Int 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:40> 'int' 1 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' 
<LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPParallelForSimdDirective {{.*}} <line:24:1, col:42> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:31, col:41> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:40> 'int' // CHECK-NEXT: | | |-value: Int 2 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:40> 'int' 2 // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' 
<LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPParallelForSimdDirective {{.*}} <line:31:1, col:42> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:31, col:41> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:40> 'int' // CHECK-NEXT: | |-value: Int 2 // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:40> 'int' 2 // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | 
`-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' 
lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
pr53580.c
/* PR middle-end/53580 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* Regression test: an "omp for" worksharing construct nested directly inside
   the region of another "omp for" must be rejected with a diagnostic.  The
   dg-error directive must remain on the same line as the inner pragma.  */

int
main ()
{
  int x, y, v = 0;
#pragma omp parallel
#pragma omp for
  for (x = 0; x < 10; x++)
    {
#pragma omp for reduction(+: v) /* { dg-error "work-sharing region may not be closely nested inside of work-sharing" } */
      for (y = 0; y < 10; y++)
	v++;
    }
  /* Would yield 0 (10*10 increments) only if the invalid nesting were
     accepted and executed; the test is compile-only anyway.  */
  return v - 100;
}
task1.h
#pragma once #include <iostream> #include "util.h" inline int minmaxP(int* M, const size_t& h, const size_t& w) { int max = 0; #pragma omp parallel { #pragma omp for for (auto y = 0; y < h; ++y) { int strMin = INT32_MAX; for (auto x = 0; x < w; ++x) { const int val = M[w * y + x]; if (val < strMin) { strMin = val; } } #pragma omp critical { if (strMin > max) { max = strMin; } } } } return max; } inline int minmax(int* M, const size_t& h, const size_t& w) { int max = 0; for (auto y = 0; y < h; ++y) { int strMin = INT32_MAX; for (auto x = 0; x < w; ++x) { const int val = M[w * y + x]; if (val < strMin) { strMin = val; } } { if (strMin > max) { max = strMin; } } } return max; } int task1() { const int h = 5000; const int w = 3000; auto M = new int[h * w]; fill(M, h * w, 40); std::cout << minmax(M, h, w) << "|" << minmaxP(M, h, w) << std::endl; delete[] M; }
diffusion_grid.h
// -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------

#ifndef CORE_DIFFUSION_GRID_H_
#define CORE_DIFFUSION_GRID_H_

#include <assert.h>

#include <algorithm>
#include <array>
#include <cmath>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

#include "core/util/root.h"

#include "core/container/math_array.h"
#include "core/container/parallel_resize_vector.h"
#include "core/param/param.h"
#include "core/util/log.h"
#include "core/util/math.h"

namespace bdm {

/// A class that computes the diffusion of extracellular substances
/// It maintains the concentration and gradient of a single substance
class DiffusionGrid {
 public:
  /// ROOT I/O constructor; leaves all members at their in-class defaults.
  explicit DiffusionGrid(TRootIOCtor* p) {}
  /// \param substance_id    numeric id of the substance this grid models
  /// \param substance_name  human-readable name (used in error messages)
  /// \param dc              diffusion coefficient; stored as a 7-entry stencil
  ///                        {1-dc, dc/6 x 6} (center + 6 face neighbors)
  /// \param mu              decay constant applied as a (1 - mu_) factor
  /// \param resolution      number of boxes along each axis (cubic grid)
  DiffusionGrid(int substance_id, std::string substance_name, double dc,
                double mu, int resolution = 11)
      : substance_(substance_id),
        substance_name_(substance_name),
        dc_({{1 - dc, dc / 6, dc / 6, dc / 6, dc / 6, dc / 6, dc / 6}}),
        mu_(mu),
        resolution_(resolution) {}

  virtual ~DiffusionGrid() {}

  /// @brief Initializes the grid by calculating the grid dimensions
  ///        and number of boxes along the axis from the input arguments
  ///
  /// @param[in]  grid_dimensions  The grid dimensions
  /// @param[in]  box_length       The box length
  ///
  void Initialize(const std::array<int32_t, 6>& grid_dimensions) {
    // Get grid properties from neighbor grid
    grid_dimensions_ = grid_dimensions;

    assert(resolution_ > 0 && "The resolution cannot be zero!");
    // The grid is cubic: the same resolution is used for all three axes.
    num_boxes_axis_[0] = resolution_;
    num_boxes_axis_[1] = resolution_;
    num_boxes_axis_[2] = resolution_;

    // Example: diffusion grid dimensions from 0-40 and resolution
    // of 4. Resolution must be adjusted otherwise one data pointer will be
    // missing.
    // Without adjustment:
    //   box_length_: 10
    //   data points {0, 10, 20, 30} - 40 will be misssing!
    // With adjustment
    //   box_length_: 13.3
    //   data points: {0, 13.3, 26.6, 39.9}
    box_length_ = (grid_dimensions_[1] - grid_dimensions_[0]) /
                  static_cast<double>(resolution_ - 1);

    // Aborts (Log::Fatal) if the dc/dt/box_length combination is unstable.
    ParametersCheck();

    box_volume_ = box_length_ * box_length_ * box_length_;

    assert(box_length_ > 0 &&
           "Box length of diffusion grid must be greater than zero!");

    // Set the parity of the number of boxes along the dimensions (since all
    // dimensions are the same, we just take the x-axis here)
    parity_ = num_boxes_axis_[0] % 2;

    total_num_boxes_ =
        num_boxes_axis_[0] * num_boxes_axis_[1] * num_boxes_axis_[2];

    // Allocate memory for the concentration and gradient arrays
    // (c1_/c2_ form a double buffer; gradients_ stores 3 components per box).
    c1_.resize(total_num_boxes_);
    c2_.resize(total_num_boxes_);
    gradients_.resize(3 * total_num_boxes_);

    initialized_ = true;
  }

  /// Stability check for the explicit diffusion scheme: terminates the
  /// simulation with Log::Fatal if (diffusion coefficient * dt) / box_length^2
  /// reaches 1/6, which would produce unphysical results.
  void ParametersCheck() {
    // The 1.0 is to impose floating point operations
    if ((1.0 * (1 - dc_[0]) * dt_) / (1.0 * box_length_ * box_length_) >=
        (1.0 / 6)) {
      Log::Fatal(
          "DiffusionGrid",
          "The specified parameters of the diffusion grid with substance [",
          substance_name_,
          "] will result in unphysical behavior (diffusion coefficient = ",
          (1 - dc_[0]), ", resolution = ", resolution_,
          "). Please refer to the user guide for more information.");
    }
  }

  /// Applies every registered initializer function once to each box (passing
  /// the box's real-world coordinates) and then discards the initializer list.
  /// No-op when no initializers were registered.
  void RunInitializers() {
    assert(num_boxes_axis_[0] > 0 &&
           "The number of boxes along an axis was found to be zero!");
    if (initializers_.empty()) {
      return;
    }

    auto nx = num_boxes_axis_[0];
    auto ny = num_boxes_axis_[1];
    auto nz = num_boxes_axis_[2];

    // Apply all functions that initialize this diffusion grid
    for (size_t f = 0; f < initializers_.size(); f++) {
      for (size_t x = 0; x < nx; x++) {
        double real_x = grid_dimensions_[0] + x * box_length_;
        for (size_t y = 0; y < ny; y++) {
          double real_y = grid_dimensions_[2] + y * box_length_;
          for (size_t z = 0; z < nz; z++) {
            double real_z = grid_dimensions_[4] + z * box_length_;
            std::array<uint32_t, 3> box_coord;
            box_coord[0] = x;
            box_coord[1] = y;
            box_coord[2] = z;
            size_t idx = GetBoxIndex(box_coord);
            // Initializer results accumulate additively per box.
            IncreaseConcentrationBy(idx,
                                    initializers_[f](real_x, real_y, real_z));
          }
        }
      }
    }

    // Clear the initializer to free up space
    initializers_.clear();
    initializers_.shrink_to_fit();
  }

  /// @brief Updates the grid dimensions, based on the given threshold
  /// values.
  /// The diffusion grid dimensions need always be larger
  /// than the neighbor grid dimensions, so that each simulation
  /// object can obtain its local concentration / gradient
  ///
  /// @param[in]  threshold_dimensions  The threshold values
  ///
  void Update(const std::array<int32_t, 2>& threshold_dimensions) {
    // Update the grid dimensions such that each dimension ranges from
    // {treshold_dimensions[0] - treshold_dimensions[1]}
    auto min_gd = threshold_dimensions[0];
    auto max_gd = threshold_dimensions[1];
    grid_dimensions_ = {min_gd, max_gd, min_gd, max_gd, min_gd, max_gd};

    // If the grid is not perfectly divisible along each dimension by the
    // box length, extend the grid so that it is
    int dimension_length = max_gd - min_gd;
    for (int i = 0; i < 3; i++) {
      int r = fmod(dimension_length, box_length_);
      if (r > 1e-9) {
        // std::abs for the case that box_length_ > dimension_length
        grid_dimensions_[2 * i + 1] += (box_length_ - r);
      }
    }

    // Calculate by how many boxes each dimension has grown
    int new_dimension_length = grid_dimensions_[1] - grid_dimensions_[0];
    int new_num_boxes = std::ceil(new_dimension_length / box_length_);
    int growth = new_num_boxes - num_boxes_axis_[0];

    if (growth > 0) {
      // Store the old number of boxes along each axis for comparison
      std::array<size_t, 3> tmp_num_boxes_axis = num_boxes_axis_;

      // Increase number of boxes along axis accordingly
      num_boxes_axis_[0] += growth;
      num_boxes_axis_[1] += growth;
      num_boxes_axis_[2] += growth;

      // We need to maintain the parity of the number of boxes along each
      // dimension, otherwise copying of the substances to the increases grid
      // will not be symmetrically done; resulting in shifting of boxes
      // We add a box in the negative direction, because the only way the
      // parity could have changed is because of adding a box in the positive
      // direction (due to the grid not being perfectly divisible; see above)
      if (num_boxes_axis_[0] % 2 != parity_) {
        for (int i = 0; i < 3; i++) {
          grid_dimensions_[2 * i] -= box_length_;
          num_boxes_axis_[i]++;
        }
      }

      // Temporarily save previous grid data
      auto tmp_c1 = c1_;
      auto tmp_gradients = gradients_;

      c1_.clear();
      c2_.clear();
      gradients_.clear();

      total_num_boxes_ =
          num_boxes_axis_[0] * num_boxes_axis_[1] * num_boxes_axis_[2];

      // Re-center the old concentrations/gradients inside the enlarged grid.
      CopyOldData(tmp_c1, tmp_gradients, tmp_num_boxes_axis);

      assert(total_num_boxes_ >= tmp_num_boxes_axis[0] *
                                     tmp_num_boxes_axis[1] *
                                     tmp_num_boxes_axis[2] &&
             "The diffusion grid tried to shrink! It can only become larger");
    }
  }

  /// Copies the concentration and gradients values to the new
  /// (larger) grid. In the 2D case it looks like the following:
  ///
  ///            [0  0  0  0]
  /// [v1 v2] -> [0 v1 v2  0]
  /// [v3 v4] -> [0 v3 v4  0]
  ///            [0  0  0  0]
  ///
  /// The dimensions are doubled in this case from 2x2 to 4x4
  /// If the dimensions would be increased from 2x2 to 3x3, it will still
  /// be increased to 4x4 in order for GetBoxIndex to function correctly
  ///
  void CopyOldData(const ParallelResizeVector<double>& old_c1,
                   const ParallelResizeVector<double>& old_gradients,
                   const std::array<size_t, 3>& old_num_boxes_axis) {
    // Allocate more memory for the grid data arrays
    c1_.resize(total_num_boxes_);
    c2_.resize(total_num_boxes_);
    gradients_.resize(3 * total_num_boxes_);

    // Growth per axis; split evenly so the old block ends up centered.
    auto incr_dim_x = num_boxes_axis_[0] - old_num_boxes_axis[0];
    auto incr_dim_y = num_boxes_axis_[1] - old_num_boxes_axis[1];
    auto incr_dim_z = num_boxes_axis_[2] - old_num_boxes_axis[2];

    int off_x = incr_dim_x / 2;
    int off_y = incr_dim_y / 2;
    int off_z = incr_dim_z / 2;

    int num_box_xy = num_boxes_axis_[0] * num_boxes_axis_[1];
    int old_box_xy = old_num_boxes_axis[0] * old_num_boxes_axis[1];
    // Flat index of the old grid's (0,0,0) box within the new grid.
    int new_origin = off_z * (num_boxes_axis_[0] * num_boxes_axis_[1]) +
                     off_y * num_boxes_axis_[0] + off_x;

    for (size_t k = 0; k < old_num_boxes_axis[2]; k++) {
      int offset = new_origin + k * num_box_xy;
      for (size_t j = 0; j < old_num_boxes_axis[1]; j++) {
        if (j != 0) {
          offset += num_boxes_axis_[0];
        }
        for (size_t i = 0; i < old_num_boxes_axis[0]; i++) {
          auto idx = k * old_box_xy + j * old_num_boxes_axis[0] + i;
          // Copy one x-row of concentrations plus its 3-component gradients.
          c1_[offset + i] = old_c1[idx];
          gradients_[3 * (offset + i)] = old_gradients[3 * idx];
          gradients_[3 * (offset + i) + 1] = old_gradients[3 * idx + 1];
          gradients_[3 * (offset + i) + 2] = old_gradients[3 * idx + 2];
        }
      }
    }
  }

  /// Solves a 5-point stencil diffusion equation, with leaking-edge
  /// boundary conditions. Substances are allowed to leave the simulation
  /// space. This prevents building up concentration at the edges
  ///
  void DiffuseWithLeakingEdge() {
    int nx = num_boxes_axis_[0];
    int ny = num_boxes_axis_[1];
    int nz = num_boxes_axis_[2];

// Blocking factor along y: each parallel task handles a YBF-row tile to
// improve cache reuse of the c1_ buffer.
#define YBF 16
#pragma omp parallel for collapse(2)
    for (int yy = 0; yy < ny; yy += YBF) {
      for (int z = 0; z < nz; z++) {
        // To let the edges bleed we set some diffusion coefficients
        // to zero. This prevents substance building up at the edges
        auto dc_2_ = dc_;
        int ymax = yy + YBF;
        if (ymax >= ny) {
          ymax = ny;
        }
        for (int y = yy; y < ymax; y++) {
          // Reset the per-row stencil copy; boundary entries may be zeroed
          // again below for this particular row.
          dc_2_ = dc_;
          int x;
          // c = center, n/s = y-neighbors, b/t = z-neighbors (flat indices).
          int c, n, s, b, t;
          x = 0;
          c = x + y * nx + z * nx * ny;
          if (y == 0) {
            n = c;
            dc_2_[4] = 0;
          } else {
            n = c - nx;
          }
          if (y == (ny - 1)) {
            s = c;
            dc_2_[3] = 0;
          } else {
            s = c + nx;
          }
          if (z == 0) {
            b = c;
            dc_2_[5] = 0;
          } else {
            b = c - nx * ny;
          }
          if (z == (nz - 1)) {
            t = c;
            dc_2_[6] = 0;
          } else {
            t = c + nx * ny;
          }
          // x = 0; we leak out substances past this edge (so multiply by 0)
          c2_[c] = (dc_2_[0] * c1_[c] + 0 * c1_[c] + dc_2_[2] * c1_[c + 1] +
                    dc_2_[3] * c1_[s] + dc_2_[4] * c1_[n] + dc_2_[5] * c1_[b] +
                    dc_2_[6] * c1_[t]) *
                   (1 - mu_);
#pragma omp simd
          for (x = 1; x < nx - 1; x++) {
            ++c;
            ++n;
            ++s;
            ++b;
            ++t;
            c2_[c] = (dc_2_[0] * c1_[c] + dc_2_[1] * c1_[c - 1] +
                      dc_2_[2] * c1_[c + 1] + dc_2_[3] * c1_[s] +
                      dc_2_[4] * c1_[n] + dc_2_[5] * c1_[b] +
                      dc_2_[6] * c1_[t]) *
                     (1 - mu_);
          }
          ++c;
          ++n;
          ++s;
          ++b;
          ++t;
          // x = nx-1; we leak out substances past this edge (so multiply by 0)
          c2_[c] = (dc_2_[0] * c1_[c] + dc_2_[1] * c1_[c - 1] + 0 * c1_[c] +
                    dc_2_[3] * c1_[s] + dc_2_[4] * c1_[n] + dc_2_[5] * c1_[b] +
                    dc_2_[6] * c1_[t]) *
                   (1 - mu_);
        }  // tile ny
      }    // tile nz
    }      // block ny
    // Publish the new time step by swapping the double buffer.
    c1_.swap(c2_);
  }

  /// Solves a 5-point stencil diffusion equation, with closed-edge
  /// boundary conditions. Substances are not allowed to leave the simulation
  /// space. Keep in mind that the concentration can build up at the edges
  ///
  void DiffuseWithClosedEdge() {
    auto nx = num_boxes_axis_[0];
    auto ny = num_boxes_axis_[1];
    auto nz = num_boxes_axis_[2];

#define YBF 16
#pragma omp parallel for collapse(2)
    for (size_t yy = 0; yy < ny; yy += YBF) {
      for (size_t z = 0; z < nz; z++) {
        size_t ymax = yy + YBF;
        if (ymax >= ny) {
          ymax = ny;
        }
        for (size_t y = yy; y < ymax; y++) {
          size_t x;
          int c, n, s, b, t;
          x = 0;
          c = x + y * nx + z * nx * ny;
          // Closed boundaries: out-of-range neighbors are clamped to the
          // center box itself, so no mass crosses the edge.
          n = (y == 0) ? c : c - nx;
          s = (y == ny - 1) ? c : c + nx;
          b = (z == 0) ? c : c - nx * ny;
          t = (z == nz - 1) ? c : c + nx * ny;
          // x = 0: the "west" neighbor (dc_[1]) is clamped to the center box.
          c2_[c] = (dc_[0] * c1_[c] + dc_[1] * c1_[c] + dc_[2] * c1_[c + 1] +
                    dc_[3] * c1_[s] + dc_[4] * c1_[n] + dc_[5] * c1_[b] +
                    dc_[6] * c1_[t]) *
                   (1 - mu_);
#pragma omp simd
          for (x = 1; x < nx - 1; x++) {
            ++c;
            ++n;
            ++s;
            ++b;
            ++t;
            c2_[c] = (dc_[0] * c1_[c] + dc_[1] * c1_[c - 1] +
                      dc_[2] * c1_[c + 1] + dc_[3] * c1_[s] + dc_[4] * c1_[n] +
                      dc_[5] * c1_[b] + dc_[6] * c1_[t]) *
                     (1 - mu_);
          }
          ++c;
          ++n;
          ++s;
          ++b;
          ++t;
          // x = nx-1: the "east" neighbor (dc_[2]) is clamped to the center.
          c2_[c] = (dc_[0] * c1_[c] + dc_[1] * c1_[c - 1] + dc_[2] * c1_[c] +
                    dc_[3] * c1_[s] + dc_[4] * c1_[n] + dc_[5] * c1_[b] +
                    dc_[6] * c1_[t]) *
                   (1 - mu_);
        }  // tile ny
      }    // tile nz
    }      // block ny
    c1_.swap(c2_);
  }

  /// Explicit (forward) Euler update of the diffusion equation on interior
  /// boxes only; boundary boxes are skipped entirely (their c2_ entries are
  /// not written in this method).
  void DiffuseEuler() {
    // check if diffusion coefficient and decay constant are 0
    // i.e. if we don't need to calculate diffusion update
    if (IsFixedSubstance()) {
      return;
    }

    const auto nx = num_boxes_axis_[0];
    const auto ny = num_boxes_axis_[1];
    const auto nz = num_boxes_axis_[2];

    const double ibl2 = 1 / (box_length_ * box_length_);
    const double d = 1 - dc_[0];

#define YBF 16
#pragma omp parallel for collapse(2)
    for (size_t yy = 0; yy < ny; yy += YBF) {
      for (size_t z = 0; z < nz; z++) {
        size_t ymax = yy + YBF;
        if (ymax >= ny) {
          ymax = ny;
        }
        for (size_t y = yy; y < ymax; y++) {
          size_t x = 0;
          int c, n, s, b, t;
          c = x + y * nx + z * nx * ny;
#pragma omp simd
          for (x = 1; x < nx - 1; x++) {
            // NOTE(review): n/s/b/t are incremented here before their first
            // assignment below (indeterminate values read on the first
            // iterations); harmless in practice since they are overwritten
            // before use, but technically undefined behavior -- verify.
            ++c;
            ++n;
            ++s;
            ++b;
            ++t;
            if (y == 0 || y == (ny - 1) || z == 0 || z == (nz - 1)) {
              continue;
            }
            n = c - nx;
            s = c + nx;
            b = c - nx * ny;
            t = c + nx * ny;
            c2_[c] = (c1_[c] +
                      d * dt_ * (c1_[c - 1] - 2 * c1_[c] + c1_[c + 1]) * ibl2 +
                      d * dt_ * (c1_[s] - 2 * c1_[c] + c1_[n]) * ibl2 +
                      d * dt_ * (c1_[b] - 2 * c1_[c] + c1_[t]) * ibl2) *
                     (1 - mu_);
          }
          ++c;
          ++n;
          ++s;
          ++b;
          ++t;
        }  // tile ny
      }    // tile nz
    }      // block ny
    c1_.swap(c2_);
  }

  /// Explicit Euler update with leaking edges: boundary neighbors are clamped
  /// and their contribution is masked via the l[] factors, so substance can
  /// leave the simulation space.
  void DiffuseEulerLeakingEdge() {
    // check if diffusion coefficient and decay constant are 0
    // i.e. if we don't need to calculate diffusion update
    if (IsFixedSubstance()) {
      return;
    }

    const auto nx = num_boxes_axis_[0];
    const auto ny = num_boxes_axis_[1];
    const auto nz = num_boxes_axis_[2];

    const double ibl2 = 1 / (box_length_ * box_length_);
    const double d = 1 - dc_[0];
    // Per-row boundary mask: l[0]/l[1] gate the y-neighbors, l[2]/l[3] the
    // z-neighbors (0 = at boundary, contribution leaks out).
    // NOTE(review): l is declared outside the omp parallel for and is
    // therefore shared between threads while being written per row -- this
    // looks like a data race; confirm whether it should be declared inside
    // the loop (or marked private).
    std::array<int, 4> l;

#define YBF 16
#pragma omp parallel for collapse(2)
    for (size_t yy = 0; yy < ny; yy += YBF) {
      for (size_t z = 0; z < nz; z++) {
        size_t ymax = yy + YBF;
        if (ymax >= ny) {
          ymax = ny;
        }
        for (size_t y = yy; y < ymax; y++) {
          size_t x = 0;
          int c, n, s, b, t;
          c = x + y * nx + z * nx * ny;

          l.fill(1);

          if (y == 0) {
            n = c;
            l[0] = 0;
          } else {
            n = c - nx;
          }
          if (y == ny - 1) {
            s = c;
            l[1] = 0;
          } else {
            s = c + nx;
          }
          if (z == 0) {
            b = c;
            l[2] = 0;
          } else {
            b = c - nx * ny;
          }
          if (z == nz - 1) {
            t = c;
            l[3] = 0;
          } else {
            t = c + nx * ny;
          }

          // x = 0: the west neighbor contributes 0 (substance leaks out).
          c2_[c] = (c1_[c] + d * dt_ * (0 - 2 * c1_[c] + c1_[c + 1]) * ibl2 +
                    d * dt_ * (c1_[s] - 2 * c1_[c] + c1_[n]) * ibl2 +
                    d * dt_ * (c1_[b] - 2 * c1_[c] + c1_[t]) * ibl2) *
                   (1 - mu_);
#pragma omp simd
          for (x = 1; x < nx - 1; x++) {
            ++c;
            ++n;
            ++s;
            ++b;
            ++t;
            c2_[c] =
                (c1_[c] +
                 d * dt_ * (c1_[c - 1] - 2 * c1_[c] + c1_[c + 1]) * ibl2 +
                 d * dt_ * (l[0] * c1_[s] - 2 * c1_[c] + l[1] * c1_[n]) * ibl2 +
                 d * dt_ * (l[2] * c1_[b] - 2 * c1_[c] + l[3] * c1_[t]) *
                     ibl2) *
                (1 - mu_);
          }
          ++c;
          ++n;
          ++s;
          ++b;
          ++t;
          // x = nx-1: the east neighbor contributes 0 (substance leaks out).
          c2_[c] = (c1_[c] + d * dt_ * (c1_[c - 1] - 2 * c1_[c] + 0) * ibl2 +
                    d * dt_ * (c1_[s] - 2 * c1_[c] + c1_[n]) * ibl2 +
                    d * dt_ * (c1_[b] - 2 * c1_[c] + c1_[t]) * ibl2) *
                   (1 - mu_);
        }  // tile ny
      }    // tile nz
    }      // block ny
    c1_.swap(c2_);
  }

  /// Calculates the gradient for each box in the diffusion grid.
/// The gradient is calculated in each direction (x, y, z) as following: /// /// c(x + box_length_) - c(x - box_length) / (2 * box_length_), /// /// where c(x) implies the concentration at position x /// /// At the edges the gradient is the same as the box next to it void CalculateGradient() { // check if gradient has been calculated once // and if diffusion coefficient and decay constant are 0 // i.e. if we don't need to calculate gradient update if (init_gradient_ && IsFixedSubstance()) { return; } double gd = 1 / (box_length_ * 2); auto nx = num_boxes_axis_[0]; auto ny = num_boxes_axis_[1]; auto nz = num_boxes_axis_[2]; #pragma omp parallel for collapse(2) for (size_t z = 0; z < nz; z++) { for (size_t y = 0; y < ny; y++) { for (size_t x = 0; x < nx; x++) { int c, e, w, n, s, b, t; c = x + y * nx + z * nx * ny; if (x == 0) { e = c; w = c + 2; } else if (x == nx - 1) { e = c - 2; w = c; } else { e = c - 1; w = c + 1; } if (y == 0) { n = c + 2 * nx; s = c; } else if (y == ny - 1) { n = c; s = c - 2 * nx; } else { n = c + nx; s = c - nx; } if (z == 0) { t = c + 2 * nx * ny; b = c; } else if (z == nz - 1) { t = c; b = c - 2 * nx * ny; } else { t = c + nx * ny; b = c - nx * ny; } // Let the gradient point from low to high concentration gradients_[3 * c + 0] = (c1_[w] - c1_[e]) * gd; gradients_[3 * c + 1] = (c1_[n] - c1_[s]) * gd; gradients_[3 * c + 2] = (c1_[t] - c1_[b]) * gd; } } } if (!init_gradient_) { init_gradient_ = true; } } /// Increase the concentration at specified position with specified amount void IncreaseConcentrationBy(const Double3& position, double amount) { auto idx = GetBoxIndex(position); IncreaseConcentrationBy(idx, amount); } /// Increase the concentration at specified box with specified amount void IncreaseConcentrationBy(size_t idx, double amount) { assert(idx < total_num_boxes_ && "Cell position is out of diffusion grid bounds"); c1_[idx] += amount; if (c1_[idx] > concentration_threshold_) { c1_[idx] = concentration_threshold_; } } /// Get the 
concentration at specified position double GetConcentration(const Double3& position) const { return c1_[GetBoxIndex(position)]; } /// Get the (normalized) gradient at specified position void GetGradient(const Double3& position, Double3* gradient) const { auto idx = GetBoxIndex(position); assert(idx < total_num_boxes_ && "Cell position is out of diffusion grid bounds"); (*gradient)[0] = gradients_[3 * idx]; (*gradient)[1] = gradients_[3 * idx + 1]; (*gradient)[2] = gradients_[3 * idx + 2]; auto norm = std::sqrt((*gradient)[0] * (*gradient)[0] + (*gradient)[1] * (*gradient)[1] + (*gradient)[2] * (*gradient)[2]); if (norm > 1e-10) { (*gradient)[0] /= norm; (*gradient)[1] /= norm; (*gradient)[2] /= norm; } } std::array<uint32_t, 3> GetBoxCoordinates(const Double3& position) const { std::array<uint32_t, 3> box_coord; box_coord[0] = (floor(position[0]) - grid_dimensions_[0]) / box_length_; box_coord[1] = (floor(position[1]) - grid_dimensions_[2]) / box_length_; box_coord[2] = (floor(position[2]) - grid_dimensions_[4]) / box_length_; return box_coord; } size_t GetBoxIndex(const std::array<uint32_t, 3>& box_coord) const { size_t ret = box_coord[2] * num_boxes_axis_[0] * num_boxes_axis_[1] + box_coord[1] * num_boxes_axis_[0] + box_coord[0]; return ret; } /// Calculates the box index of the substance at specified position size_t GetBoxIndex(const Double3& position) const { auto box_coord = GetBoxCoordinates(position); return GetBoxIndex(box_coord); } void SetDecayConstant(double mu) { mu_ = mu; } void SetConcentrationThreshold(double t) { concentration_threshold_ = t; } double GetConcentrationThreshold() const { return concentration_threshold_; } const double* GetAllConcentrations() const { return c1_.data(); } const double* GetAllGradients() const { return gradients_.data(); } const std::array<size_t, 3>& GetNumBoxesArray() const { return num_boxes_axis_; } size_t GetNumBoxes() const { return total_num_boxes_; } double GetBoxLength() const { return box_length_; } int 
GetSubstanceId() const { return substance_; } const std::string& GetSubstanceName() const { return substance_name_; } double GetDecayConstant() const { return mu_; } const int32_t* GetDimensionsPtr() const { return grid_dimensions_.data(); } const std::array<int32_t, 6>& GetDimensions() const { return grid_dimensions_; } const std::array<double, 7>& GetDiffusionCoefficients() const { return dc_; } bool IsInitialized() const { return initialized_; } int GetResolution() const { return resolution_; } double GetBoxVolume() const { return box_volume_; } template <typename F> void AddInitializer(F function) { initializers_.push_back(function); } // retrun true if substance concentration and gradient don't evolve over time bool IsFixedSubstance() { return (mu_ == 0 && dc_[1] == 0 && dc_[2] == 0 && dc_[3] == 0 && dc_[4] == 0 && dc_[5] == 0 && dc_[6] == 0); } private: /// The id of the substance of this grid int substance_ = 0; /// The name of the substance of this grid std::string substance_name_ = ""; /// The side length of each box double box_length_ = 0; /// the volume of each box double box_volume_ = 0; /// The array of concentration values ParallelResizeVector<double> c1_ = {}; /// An extra concentration data buffer for faster value updating ParallelResizeVector<double> c2_ = {}; /// The array of gradients (x, y, z) ParallelResizeVector<double> gradients_ = {}; /// The maximum concentration value that a box can have double concentration_threshold_ = 1e15; /// The diffusion coefficients [cc, cw, ce, cs, cn, cb, ct] std::array<double, 7> dc_ = {{0}}; /// The timestep resolution fhe diffusion grid // TODO(ahmad): this probably needs to scale with Param::simulation_timestep double dt_ = 1; /// The decay constant double mu_ = 0; /// The grid dimensions of the diffusion grid std::array<int32_t, 6> grid_dimensions_ = {{0}}; /// The number of boxes at each axis [x, y, z] std::array<size_t, 3> num_boxes_axis_ = {{0}}; /// The total number of boxes in the diffusion grid size_t 
total_num_boxes_ = 0; /// Flag to determine if this grid has been initialized bool initialized_ = false; /// The resolution of the diffusion grid int resolution_ = 0; /// If false, grid dimensions are even; if true, they are odd bool parity_ = false; /// A list of functions that initialize this diffusion grid std::vector<std::function<double(double, double, double)>> initializers_ = {}; // turn to true after gradient initialization bool init_gradient_ = false; BDM_CLASS_DEF_NV(DiffusionGrid, 1); }; } // namespace bdm #endif // CORE_DIFFUSION_GRID_H_
SpatialConvolutionLocal.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialConvolutionLocal.c"
#else

/* Validate tensor shapes for a locally-connected spatial convolution
 * ("local" = per-output-position weights, no weight sharing).
 * input: 3D (C,H,W) or 4D (N,C,H,W); weight is the 3D view produced by
 * view_weight_local below: oH*oW x nOutputPlane x nInputPlane*kH*kW.
 * gradOutput / bias are only checked when non-NULL. */
static inline void THNN_(SpatialConvolutionLocal_shapeCheck)(
    THTensor *input, THTensor *gradOutput,
    THTensor *weight, THTensor *bias,
    int kH, int kW, int dH, int dW, int padH, int padW,
    int64_t inputHeight, int64_t inputWidth,
    int64_t outputHeight, int64_t outputWidth) {

  THArgCheck(kW > 0 && kH > 0, 9,
             "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
  THArgCheck(dW > 0 && dH > 0, 11,
             "stride should be greater than zero, but got dH: %d dW: %d", dH, dW);

  int ndim = input->dim();
  int dimf = 0;  /* feature (plane) dimension */
  int dimh = 1;  /* height dimension */
  int dimw = 2;  /* width dimension */

  if (ndim == 4) {
    /* batched input: shift all dimensions past the batch axis */
    dimf++;
    dimh++;
    dimw++;
  }

  THNN_ARGCHECK(!input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
                "non-empty 3D or 4D input tensor expected but got: %s");

  /* weight->size(2) packs nInputPlane*kH*kW; recover the plane count */
  int64_t nInputPlane = weight->size(2) / (kH * kW);
  int64_t nOutputPlane = weight->size(1);

  if (bias != NULL) {
    /* bias is per output position: nOutputPlane x oH x oW */
    THNN_CHECK_DIM_SIZE(bias, 3, 0, nOutputPlane);
    THNN_CHECK_DIM_SIZE(bias, 3, 1, outputHeight);
    THNN_CHECK_DIM_SIZE(bias, 3, 2, outputWidth);
  }

  THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);

  if (gradOutput != NULL) {
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
  }
}

/* Return a contiguous 3D view of the weight tensor:
 * a 6D weight (oH x oW x nOut x nIn x kH x kW) is collapsed to
 * oH*oW x nOut x nIn*kH*kW; a 3D weight is returned as-is (contiguous).
 * Caller owns the returned tensor and must free it. */
static THTensor* THNN_(view_weight_local)(THTensor *_weight)
{
  THTensor *weight = THTensor_(newContiguous)(_weight);
  AT_CHECK(!weight->is_empty() && (weight->dim() == 3 || weight->dim() == 6),
           "weight tensor should be (non-empty) 3D or 6D - got size: ",
           weight->sizes());
  if (weight->dim() == 6) {
    int64_t s1 = weight->size(0) * weight->size(1);
    int64_t s2 = weight->size(2);
    int64_t s3 = weight->size(3) * weight->size(4) * weight->size(5);
    THTensor *old_weight = weight;
    /* -1 stride => infer contiguous strides */
    weight = THTensor_(newWithStorage3d)(THTensor_getStoragePtr(weight),
                                         weight->storage_offset(),
                                         s1, -1, s2, -1, s3, -1);
    THTensor_(free)(old_weight);
  }
  return weight;
}

/* Forward pass for ONE sample: unfold input into finput (im2col), then do a
 * batched matmul over output positions on top of the broadcast bias. */
static void THNN_(SpatialConvolutionLocal_updateOutput_frame)
     (
      THTensor *input, THTensor *output,
      THTensor *weight, THTensor *bias, THTensor *finput,
      int kW, int kH, int dW, int dH, int padW, int padH,
      int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
      int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight)
{
  THTensor *output3d, *finput3d;

  THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH,
                       nInputPlane, inputWidth, inputHeight,
                       outputWidth, outputHeight);

  /* output starts as the bias; baddbmm accumulates on top of it */
  THTensor_(copy)(output, bias);

  output3d = THTensor_(newWithStorage3d)
    (THTensor_getStoragePtr(output), output->storage_offset(),
     outputHeight * outputWidth, 1,
     nOutputPlane, outputHeight * outputWidth,
     1, nOutputPlane * outputHeight * outputWidth);

  finput3d = THTensor_(newWithStorage3d)
    (THTensor_getStoragePtr(finput), finput->storage_offset(),
     outputHeight * outputWidth, 1,
     kW * kH * nInputPlane, outputHeight * outputWidth,
     1, kW * kH * nInputPlane * outputHeight * outputWidth);

  // weight:    oH*oW x nOutputPlane x nInputPlane*kH*kW
  // finput3d:  oH*oW x nInputPlane*kH*kW x 1
  THTensor_(baddbmm)(output3d, 1.0, output3d, 1.0, weight, finput3d);
  // output3d:  oH*oW x nOutputPlane x 1

  THTensor_(free)(output3d);
  THTensor_(free)(finput3d);
}

/* Public forward entry point; dispatches per-sample frames, in parallel over
 * the batch for 4D input. */
void THNN_(SpatialConvolutionLocal_updateOutput)(
    THNNState *state,
    THTensor *input,
    THTensor *output,
    THTensor *weight,
    THTensor *bias,
    THTensor *finput,
    THTensor *fgradInput,
    int kW, int kH,
    int dW, int dH,
    int padW, int padH,
    int64_t inputWidth, int64_t inputHeight,
    int64_t outputWidth, int64_t outputHeight)
{
  weight = THNN_(view_weight_local)(weight);

  THNN_(SpatialConvolutionLocal_shapeCheck)
    (input, NULL, weight, bias, kH, kW, dH, dW, padH, padW,
     inputHeight, inputWidth, outputHeight, outputWidth);

  input = THTensor_(newContiguous)(input);

  int64_t nInputPlane = THTensor_(size)(weight, 2) / (kW * kH);
  int64_t nOutputPlane = THTensor_(size)(weight, 1);

  if (input->dim() == 3)
  {
    THTensor_(resize2d)(finput, kW * kH * nInputPlane, outputHeight * outputWidth);
    THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth);

    THNN_(SpatialConvolutionLocal_updateOutput_frame)
      (input, output, weight, bias, finput,
       kW, kH, dW, dH, padW, padH,
       nInputPlane, inputWidth, inputHeight,
       nOutputPlane, outputWidth, outputHeight);
  }
  else
  {
    int64_t T = input->size(0);  /* batch size */
    int64_t t;

    THTensor_(resize3d)(finput, T, kW * kH * nInputPlane, outputHeight * outputWidth);
    THTensor_(resize4d)(output, T, nOutputPlane, outputHeight, outputWidth);

#pragma omp parallel for private(t)
    for (t = 0; t < T; t++)
    {
      THTensor *input_t = THTensor_(newSelect)(input, 0, t);
      THTensor *output_t = THTensor_(newSelect)(output, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      THNN_(SpatialConvolutionLocal_updateOutput_frame)
        (input_t, output_t, weight, bias, finput_t,
         kW, kH, dW, dH, padW, padH,
         nInputPlane, inputWidth, inputHeight,
         nOutputPlane, outputWidth, outputHeight);

      THTensor_(free)(input_t);
      THTensor_(free)(output_t);
      THTensor_(free)(finput_t);
    }
  }

  THTensor_(free)(input);
  THTensor_(free)(weight);
}

/* Backward (w.r.t. input) for ONE sample: batched matmul with the transposed
 * weight into fgradInput, then fold columns back into gradInput (col2im). */
static void THNN_(SpatialConvolutionLocal_updateGradInput_frame)
     (THTensor *gradInput, THTensor *gradOutput,
      THTensor *weight, THTensor *fgradInput,
      int kW, int kH, int dW, int dH, int padW, int padH,
      int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
      int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight)
{
  THTensor *gradOutput3d, *fgradInput3d;

  gradOutput3d = THTensor_(newWithStorage3d)(THTensor_getStoragePtr(gradOutput),
                                             gradOutput->storage_offset(),
                                             outputHeight * outputWidth, 1,
                                             nOutputPlane, outputHeight * outputWidth,
                                             1, nOutputPlane * outputHeight * outputWidth);
  fgradInput3d = THTensor_(newWithStorage3d)(THTensor_getStoragePtr(fgradInput),
                                             fgradInput->storage_offset(),
                                             outputHeight * outputWidth, 1,
                                             kW * kH * nInputPlane, outputHeight * outputWidth,
                                             1, kW * kH * nInputPlane * outputHeight * outputWidth);

  // weight:        oH*oW x nInputPlane*kH*kW x nOutputPlane
  // gradOutput3d:  oH*oW x nOutputPlane x 1
  /* beta = 0.0: overwrite fgradInput3d rather than accumulate */
  THTensor_(baddbmm)(fgradInput3d, 0.0, fgradInput3d, 1.0, weight, gradOutput3d);
  // fgradInput3d:  oH*oW x nInputPlane*kH*kW x 1

  THTensor_(free)(gradOutput3d);
  THTensor_(free)(fgradInput3d);

  THTensor_(zero)(gradInput);

  THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH, padW, padH,
                      nInputPlane, inputWidth, inputHeight,
                      outputWidth, outputHeight);
}

/* Public backward (w.r.t. input) entry point. */
void THNN_(SpatialConvolutionLocal_updateGradInput)(
    THNNState *state,
    THTensor *input,
    THTensor *gradOutput,
    THTensor *gradInput,
    THTensor *weight,
    THTensor *finput,
    THTensor *fgradInput,
    int kW, int kH,
    int dW, int dH,
    int padW, int padH,
    int64_t inputWidth, int64_t inputHeight,
    int64_t outputWidth, int64_t outputHeight)
{
  weight = THNN_(view_weight_local)(weight);

  THNN_(SpatialConvolutionLocal_shapeCheck)
    (input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW,
     inputHeight, inputWidth, outputHeight, outputWidth);

  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  int64_t nInputPlane = THTensor_(size)(weight, 2) / (kW * kH);
  int64_t nOutputPlane = THTensor_(size)(weight, 1);

  THTensor_(resizeAs)(gradInput, input);
  THTensor_(resizeAs)(fgradInput, finput);

  /* transpose the weight's last two dims so the matmul maps
   * gradOutput back into the unfolded-input space */
  THTensor *tweight = THTensor_(new)();
  THTensor_(transpose)(tweight, weight, 1, 2);

  if (input->dim() == 3)
  {
    THNN_(SpatialConvolutionLocal_updateGradInput_frame)
      (gradInput, gradOutput, tweight, fgradInput,
       kW, kH, dW, dH, padW, padH,
       nInputPlane, inputWidth, inputHeight,
       nOutputPlane, outputWidth, outputHeight);
  }
  else
  {
    int64_t T = input->size(0);
    int64_t t;

#pragma omp parallel for private(t)
    for (t = 0; t < T; t++)
    {
      THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);

      THNN_(SpatialConvolutionLocal_updateGradInput_frame)
        (gradInput_t, gradOutput_t, tweight, fgradInput_t,
         kW, kH, dW, dH, padW, padH,
         nInputPlane, inputWidth, inputHeight,
         nOutputPlane, outputWidth, outputHeight);

      THTensor_(free)(gradInput_t);
      THTensor_(free)(gradOutput_t);
      THTensor_(free)(fgradInput_t);
    }
  }

  THTensor_(free)(tweight);
  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  THTensor_(free)(weight);
}

/* Gradient accumulation (w.r.t. weight and bias) for ONE sample. */
static void THNN_(SpatialConvolutionLocal_accGradParameters_frame)
     (THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
      THTensor *finput, real scale,
      int kW, int kH, int dW, int dH, int padW, int padH,
      int64_t nInputPlane, int64_t inputWidth, int64_t inputHeight,
      int64_t nOutputPlane, int64_t outputWidth, int64_t outputHeight)
{
  THTensor *gradOutput3d, *finput3d;

  gradOutput3d = THTensor_(newWithStorage3d)(THTensor_getStoragePtr(gradOutput),
                                             gradOutput->storage_offset(),
                                             outputHeight * outputWidth, 1,
                                             nOutputPlane, outputHeight * outputWidth,
                                             1, nOutputPlane * outputHeight * outputWidth);
  /* note the transposed stride layout: makes finput3d a row per position */
  finput3d = THTensor_(newWithStorage3d)(THTensor_getStoragePtr(finput),
                                         finput->storage_offset(),
                                         outputHeight * outputWidth, 1,
                                         1, kW * kH * nInputPlane * outputHeight * outputWidth,
                                         kW * kH * nInputPlane, outputHeight * outputWidth);

  // gradOutput3d:  oH*oW x nOutputPlane x 1
  // finput3d:      oH*oW x 1 x kW*kH*nInputPlane
  THTensor_(baddbmm)(gradWeight, 1.0, gradWeight, scale, gradOutput3d, finput3d);
  // gradWeight:    oH*oW x nOutputPlane x kW*kH*nInputPlane

  /* bias gradient is just the (scaled) output gradient, per position */
  THTensor_(cadd)(gradBias, gradBias, scale, gradOutput);

  THTensor_(free)(gradOutput3d);
  THTensor_(free)(finput3d);
}

/* Public gradient-accumulation entry point.
 * NOTE(review): unlike the forward/updateGradInput paths, the batched loop
 * here is serial — gradWeight/gradBias are shared accumulators, so it cannot
 * be parallelized with a plain omp-for. */
void THNN_(SpatialConvolutionLocal_accGradParameters)(
    THNNState *state,
    THTensor *input,
    THTensor *gradOutput,
    THTensor *gradWeight,
    THTensor *gradBias,
    THTensor *finput,
    THTensor *fgradInput,
    int kW, int kH,
    int dW, int dH,
    int padW, int padH,
    int64_t inputWidth, int64_t inputHeight,
    int64_t outputWidth, int64_t outputHeight,
    accreal scale_)
{
  THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
  THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  gradWeight = THNN_(view_weight_local)(gradWeight);

  THNN_(SpatialConvolutionLocal_shapeCheck)
    (input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW,
     inputHeight, inputWidth, outputHeight, outputWidth);

  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  int64_t nInputPlane = THTensor_(size)(gradWeight, 2) / (kW * kH);
  int64_t nOutputPlane = THTensor_(size)(gradWeight, 1);

  if (input->dim() == 3)
  {
    THNN_(SpatialConvolutionLocal_accGradParameters_frame)
      (gradOutput, gradWeight, gradBias, finput, scale,
       kW, kH, dW, dH, padW, padH,
       nInputPlane, inputWidth, inputHeight,
       nOutputPlane, outputWidth, outputHeight);
  }
  else
  {
    int64_t T = input->size(0);
    int64_t t;

    for (t = 0; t < T; t++)
    {
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      THNN_(SpatialConvolutionLocal_accGradParameters_frame)
        (gradOutput_t, gradWeight, gradBias, finput_t, scale,
         kW, kH, dW, dH, padW, padH,
         nInputPlane, inputWidth, inputHeight,
         nOutputPlane, outputWidth, outputHeight);

      THTensor_(free)(gradOutput_t);
      THTensor_(free)(finput_t);
    }
  }

  THTensor_(free)(input);
  THTensor_(free)(gradOutput);
  THTensor_(free)(gradWeight);
}

#endif
blake2bp-ref.c
/*
   BLAKE2 reference source code package - reference C implementations

   Copyright 2012, Samuel Neves <sneves@dei.uc.pt>.  You may use this under the
   terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
   your option.  The terms of these licenses can be found at:

   - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
   - OpenSSL license   : https://www.openssl.org/source/license.html
   - Apache 2.0        : http://www.apache.org/licenses/LICENSE-2.0

   More information about the BLAKE2 hash function can be found at
   https://blake2.net.
*/

/* blake2bp = parallel BLAKE2b: the input is striped block-wise across
 * PARALLELISM_DEGREE leaf instances (fanout 4, depth 2), whose digests are
 * then hashed by a single root instance. */

#include "cryptoTools/Common/config.h"
#ifndef ENABLE_BLAKE2_SSE

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#if defined(_OPENMP)
#include <omp.h>
#endif

#include "blake2.h"
#include "blake2-impl.h"

#define PARALLELISM_DEGREE 4

/*
  blake2b_init_param defaults to setting the expecting output length from the
  digest_length parameter block field.

  In some cases, however, we do not want this, as the output length of these
  instances is given by inner_length instead.
*/
static int blake2bp_init_leaf_param( blake2b_state *S, const blake2b_param *P )
{
  int err = blake2b_init_param(S, P);
  /* leaves emit inner_length bytes, not digest_length */
  S->outlen = P->inner_length;
  return err;
}

/* Initialize leaf number `offset` of the blake2bp tree. */
static int blake2bp_init_leaf( blake2b_state *S, size_t outlen, size_t keylen, uint64_t offset )
{
  blake2b_param P[1];
  P->digest_length = (uint8_t)outlen;
  P->key_length = (uint8_t)keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  store32( &P->leaf_length, 0 );
  store32( &P->node_offset, (uint32_t)offset );
  store32( &P->xof_length, 0 );
  P->node_depth = 0;                    /* leaves live at depth 0 */
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  return blake2bp_init_leaf_param( S, P );
}

/* Initialize the root node that hashes the four leaf digests. */
static int blake2bp_init_root( blake2b_state *S, size_t outlen, size_t keylen )
{
  blake2b_param P[1];
  P->digest_length = (uint8_t)outlen;
  P->key_length = (uint8_t)keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  store32( &P->leaf_length, 0 );
  store32( &P->node_offset, 0 );
  store32( &P->xof_length, 0 );
  P->node_depth = 1;                    /* root lives at depth 1 */
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  return blake2b_init_param( S, P );
}

/* Initialize an unkeyed blake2bp state producing `outlen` bytes.
 * Returns 0 on success, -1 on invalid outlen or sub-init failure. */
int blake2bp_init( blake2bp_state *S, size_t outlen )
{
  size_t i;

  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;
  S->outlen = outlen;

  if( blake2bp_init_root( S->R, outlen, 0 ) < 0 )
    return -1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;

  /* root and last leaf carry the last_node flag per the BLAKE2 tree mode */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  return 0;
}

/* Initialize a keyed blake2bp state; the key is fed to every leaf as a
 * zero-padded first block and then wiped from the stack. */
int blake2bp_init_key( blake2bp_state *S, size_t outlen, const void *key, size_t keylen )
{
  size_t i;

  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;
  S->outlen = outlen;

  if( blake2bp_init_root( S->R, outlen, keylen ) < 0 )
    return -1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;

  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }
  return 0;
}

/* Absorb `inlen` bytes: drain the carry buffer first, then stripe whole
 * 4-block groups across the leaves (one OpenMP thread per leaf when
 * available, a plain loop otherwise), and buffer the remainder. */
int blake2bp_update( blake2bp_state *S, const void *pin, size_t inlen )
{
  const unsigned char * in = (const unsigned char *)pin;
  size_t left = S->buflen;
  size_t fill = sizeof( S->buf ) - left;
  size_t i;

  if( left && inlen >= fill )
  {
    memcpy( S->buf + left, in, fill );

    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES );

    in += fill;
    inlen -= fill;
    left = 0;
  }

#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
  {
#if defined(_OPENMP)
    /* with OpenMP the thread id selects the leaf */
    size_t i = omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const unsigned char *in__ = ( const unsigned char * )in;

    in__ += i * BLAKE2B_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S->S[i], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }
  }

  /* keep the tail (less than one 4-block group) in the carry buffer */
  in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES );
  inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;

  if( inlen > 0 )
    memcpy( S->buf + left, in, inlen );

  S->buflen = left + inlen;
  return 0;
}

/* Flush each leaf's share of the carry buffer, finalize the leaves, hash
 * their digests with the root, and emit the final digest. */
int blake2bp_final( blake2bp_state *S, void *out, size_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  size_t i;

  if(out == NULL || outlen < S->outlen) {
    return -1;
  }

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    if( S->buflen > i * BLAKE2B_BLOCKBYTES )
    {
      size_t left = S->buflen - i * BLAKE2B_BLOCKBYTES;

      if( left > BLAKE2B_BLOCKBYTES ) left = BLAKE2B_BLOCKBYTES;

      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, left );
    }

    blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES );
  }

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( S->R, out, S->outlen );
}

/* One-shot convenience API: hash `inlen` bytes of `in` (optionally keyed)
 * into `out`.  Returns 0 on success, -1 on invalid parameters. */
int blake2bp( void *out, size_t outlen,
              const void *in, size_t inlen,
              const void *key, size_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  blake2b_state S[PARALLELISM_DEGREE][1];
  blake2b_state FS[1];
  size_t i;

  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;

  if ( NULL == out ) return -1;

  if( NULL == key && keylen > 0 ) return -1;

  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  if( keylen > BLAKE2B_KEYBYTES ) return -1;

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;

  S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */

  if( keylen > 0 )
  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }

#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
  {
#if defined(_OPENMP)
    size_t i = omp_get_thread_num();
#endif
    size_t inlen__ = inlen;
    const unsigned char *in__ = ( const unsigned char * )in;

    in__ += i * BLAKE2B_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S[i], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }

    /* absorb this leaf's share of the final partial group, if any */
    if( inlen__ > i * BLAKE2B_BLOCKBYTES )
    {
      const size_t left = inlen__ - i * BLAKE2B_BLOCKBYTES;
      const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES;
      blake2b_update( S[i], in__, len );
    }

    blake2b_final( S[i], hash[i], BLAKE2B_OUTBYTES );
  }

  if( blake2bp_init_root( FS, outlen, keylen ) < 0 )
    return -1;

  FS->last_node = 1; /* Mark as last node */

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES );

  /* NOTE(review): stray second semicolon below — harmless empty statement */
  return blake2b_final( FS, out, outlen );;
}

#if defined(BLAKE2BP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Known-answer self-test: exercises the one-shot and streaming APIs against
 * the keyed KAT vectors. */
int main( void )
{
  uint8_t key[BLAKE2B_KEYBYTES];
  uint8_t buf[BLAKE2_KAT_LENGTH];
  size_t i, step;

  for( i = 0; i < BLAKE2B_KEYBYTES; ++i )
    key[i] = ( uint8_t )i;

  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
    buf[i] = ( uint8_t )i;

  /* Test simple API */
  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
  {
    uint8_t hash[BLAKE2B_OUTBYTES];
    blake2bp( hash, BLAKE2B_OUTBYTES, buf, i, key, BLAKE2B_KEYBYTES );

    if( 0 != memcmp( hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES ) )
    {
      goto fail;
    }
  }

  /* Test streaming API */
  for(step = 1; step < BLAKE2B_BLOCKBYTES; ++step) {
    for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) {
      uint8_t hash[BLAKE2B_OUTBYTES];
      blake2bp_state S;
      uint8_t * p = buf;
      size_t mlen = i;
      int err = 0;

      if( (err = blake2bp_init_key(&S, BLAKE2B_OUTBYTES, key, BLAKE2B_KEYBYTES)) < 0 ) {
        goto fail;
      }

      while (mlen >= step) {
        if ( (err = blake2bp_update(&S, p, step)) < 0 ) {
          goto fail;
        }
        mlen -= step;
        p += step;
      }
      if ( (err = blake2bp_update(&S, p, mlen)) < 0) {
        goto fail;
      }
      if ( (err = blake2bp_final(&S, hash, BLAKE2B_OUTBYTES)) < 0) {
        goto fail;
      }

      if (0 != memcmp(hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES)) {
        goto fail;
      }
    }
  }

  puts( "ok" );
  return 0;
fail:
  puts("error");
  return -1;
}
#endif
#endif
integrate.c
/*
 * integrate.c: Example of numerical integration in OpenMP.
 *
 * (C) 2015 Mikhail Kurnosov <mkurnosov@gmail.com>
 */

#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>

const double PI = 3.14159265358979323846;
const double a = -4.0;
const double b = 4.0;
const int nsteps = 40000000;

/* wtime: wall-clock time in seconds (gettimeofday resolution). */
double wtime()
{
    struct timeval t;
    gettimeofday(&t, NULL);
    return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}

/* func: the integrand, exp(-x^2); its integral over R is sqrt(pi). */
double func(double x)
{
    return exp(-x * x);
}

/* integrate: Integrates by rectangle method (midpoint rule) over [a, b]
 * with n subintervals. */
double integrate(double (*func)(double), double a, double b, int n)
{
    double h = (b - a) / n;
    double sum = 0.0;
    for (int i = 0; i < n; i++)
        sum += func(a + h * (i + 0.5));
    sum *= h;
    return sum;
}

/* run_serial: time the sequential integration; returns elapsed seconds. */
double run_serial()
{
    double t = wtime();
    double res = integrate(func, a, b, nsteps);
    t = wtime() - t;
    printf("Result (serial): %.12f; error %.12f\n", res, fabs(res - sqrt(PI)));
    return t;
}

/* integrate_omp: parallel midpoint rule with an explicit block
 * decomposition of [0, n) over the team.
 *
 * FIX: the original entered an "omp critical" section on EVERY loop
 * iteration, which serializes the whole reduction and adds enormous
 * locking overhead.  Each thread now accumulates a private partial sum
 * and merges it into the shared total exactly once (one atomic update
 * per thread). */
double integrate_omp(double (*func)(double), double a, double b, int n)
{
    double h = (b - a) / n;
    double sum = 0.0;
#pragma omp parallel
    {
        int nthreads = omp_get_num_threads();
        int threadid = omp_get_thread_num();
        int items_per_thread = n / nthreads;
        int lb = threadid * items_per_thread;
        /* last thread also covers the remainder of n / nthreads */
        int ub = (threadid == nthreads - 1) ? (n - 1) : (lb + items_per_thread - 1);

        double local_sum = 0.0;
        for (int i = lb; i <= ub; i++)
            local_sum += func(a + h * (i + 0.5));

        /* one synchronized update per thread, not per iteration */
#pragma omp atomic
        sum += local_sum;
    }
    sum *= h;
    return sum;
}

/* run_parallel: time the OpenMP integration; returns elapsed seconds. */
double run_parallel()
{
    double t = wtime();
    double res = integrate_omp(func, a, b, nsteps);
    t = wtime() - t;
    printf("Result (parallel): %.12f; error %.12f\n", res, fabs(res - sqrt(PI)));
    return t;
}

int main(int argc, char **argv)
{
    printf("Integration f(x) on [%.12f, %.12f], nsteps = %d\n", a, b, nsteps);
    double tserial = run_serial();
    double tparallel = run_parallel();
    printf("Execution time (serial): %.6f\n", tserial);
    printf("Execution time (parallel): %.6f\n", tparallel);
    printf("Speedup: %.2f\n", tserial / tparallel);
    return 0;
}
pi_omp_atomic_7.c
/*
   This program will numerically compute the integral of

                     4/(1+x*x)

   from 0 to 1.  The value of this integral is pi -- which
   is great since it gives us an easy way to check the answer.

   It uses the timer from the OpenMP runtime library.

   History: Written by Tim Mattson, 11/99.
*/
#include <stdio.h>
#include <omp.h>

static long num_steps = 1024 * 1024 * 1024;  /* number of rectangles */
double step;                                 /* width of one rectangle */

int main ()
{
   const int MAX_T = 16;        /* largest thread count to benchmark */
   int t;                       /* current thread count: 1, 2, 4, 8, 16 */
   double pi;
   double start_time, run_time;

   step = 1.0/(double) num_steps;

   /* Re-run the computation with a doubling number of threads and time
      each run, so the scaling behavior can be compared. */
   for(t = 1; t <= MAX_T; t*=2) {
      start_time = omp_get_wtime();
      omp_set_num_threads(t);
      pi = 0.0;

#pragma omp parallel
      {
         int i, nt;
         double x, sum = 0;

         /* Cyclic (round-robin) distribution: thread id is the first
            index, then stride by the team size. */
         i = omp_get_thread_num();
         nt = omp_get_num_threads();
         for (; i < num_steps; i += nt){
            x = (i + 0.5) * step;       /* midpoint of rectangle i */
            sum += 4.0/(1.0+x*x);
         }

         /* Private partial sums are merged with ONE atomic update per
            thread, avoiding per-iteration synchronization. */
#pragma omp atomic
         pi += sum;
      }

      pi = pi * step;
      run_time = omp_get_wtime() - start_time;
      printf("pi with %d threads: %.16lf in %lf seconds\n",t , pi,run_time);
   }
   return 0;
}
GB_dense_subassign_25_template.c
//------------------------------------------------------------------------------
// GB_dense_subassign_25_template: C<M> = A where C is empty and A is dense
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// C<M> = A where C starts as empty, M is structural, and A is dense.  The
// pattern of C is an exact copy of M.

// NOTE: this is an #include'd template fragment; GB_CTYPE, GB_COPY_A_TO_C,
// GB_PRAGMA_VECTORIZE, and the task-slicing arrays (ntasks, nthreads,
// kfirst_slice, klast_slice, pstart_slice) are supplied by the including
// translation unit.

{

    //--------------------------------------------------------------------------
    // get C, M, and A
    //--------------------------------------------------------------------------

    GB_CTYPE *GB_RESTRICT Cx = C->x ;

    const int64_t *GB_RESTRICT Mp = M->p ;
    const int64_t *GB_RESTRICT Mh = M->h ;
    const int64_t *GB_RESTRICT Mi = M->i ;

    const GB_CTYPE *GB_RESTRICT Ax = A->x ;
    const int64_t avlen = A->vlen ;

    //--------------------------------------------------------------------------
    // C<M> = A
    //--------------------------------------------------------------------------

    int taskid ;
    // one parallel task per pre-computed slice of M's vectors
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        // if kfirst > klast then taskid does no work at all
        int64_t kfirst = kfirst_slice [taskid] ;
        int64_t klast  = klast_slice  [taskid] ;

        //----------------------------------------------------------------------
        // C<M(:,kfirst:klast)> = A(:,kfirst:klast)
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // find the part of M(:,k) to be operated on by this task
            //------------------------------------------------------------------

            // Mh == NULL means M is stored in non-hypersparse form, so the
            // vector index j is k itself; otherwise look it up in Mh.
            int64_t j = (Mh == NULL) ? k : Mh [k] ;
            int64_t pM_start, pM_end ;
            GB_get_pA_and_pC (&pM_start, &pM_end, NULL,
                taskid, k, kfirst, klast, pstart_slice, NULL, NULL, Mp) ;

            // pA points to the start of A(:,j) since A is dense
            int64_t pA = j * avlen ;

            //------------------------------------------------------------------
            // C<M(:,j)> = A(:,j)
            //------------------------------------------------------------------

            GB_PRAGMA_VECTORIZE
            for (int64_t pM = pM_start ; pM < pM_end ; pM++)
            {
                // Mi [pM] is the row index of this structural-mask entry
                int64_t p = pA + Mi [pM] ;
                GB_COPY_A_TO_C (Cx, pM, Ax, p) ;    // Cx [pM] = Ax [p]
            }
        }
    }
}
core.c
/* Generated by Cython 0.29.27 */ /* BEGIN: Cython Metadata { "distutils": { "name": "monotonic_align.core", "sources": [ "core.pyx" ] }, "module_name": "monotonic_align.core" } END: Cython Metadata */ #ifndef PY_SSIZE_T_CLEAN #define PY_SSIZE_T_CLEAN #endif /* PY_SSIZE_T_CLEAN */ #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_29_27" #define CYTHON_HEX_VERSION 0x001D1BF0 #define CYTHON_FUTURE_DIVISION 0 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define 
CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #undef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS 0 #undef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define 
CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL (PY_VERSION_HEX < 0x030B00A1) #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #ifndef CYTHON_USE_DICT_VERSIONS #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) #endif #ifndef CYTHON_USE_EXC_INFO_STACK #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if 
CYTHON_USE_PYLONG_INTERNALS #if PY_MAJOR_VERSION < 3 #include "longintrepr.h" #endif #undef SHIFT #undef BASE #undef MASK #ifdef SIZEOF_VOID_P enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; #endif #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif 
__has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_DefaultClassType PyType_Type #if PY_VERSION_HEX >= 0x030B00A1 static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int k, int l, int s, int f, PyObject *code, PyObject *c, PyObject* n, PyObject *v, PyObject *fv, PyObject *cell, PyObject* fn, PyObject *name, int fline, PyObject *lnos) { PyObject *kwds=NULL, *argcount=NULL, *posonlyargcount=NULL, *kwonlyargcount=NULL; PyObject *nlocals=NULL, *stacksize=NULL, *flags=NULL, *replace=NULL, *call_result=NULL, *empty=NULL; const char *fn_cstr=NULL; const char *name_cstr=NULL; PyCodeObject* co=NULL; PyObject *type, *value, *traceback; PyErr_Fetch(&type, &value, &traceback); if (!(kwds=PyDict_New())) goto end; if 
(!(argcount=PyLong_FromLong(a))) goto end; if (PyDict_SetItemString(kwds, "co_argcount", argcount) != 0) goto end; if (!(posonlyargcount=PyLong_FromLong(0))) goto end; if (PyDict_SetItemString(kwds, "co_posonlyargcount", posonlyargcount) != 0) goto end; if (!(kwonlyargcount=PyLong_FromLong(k))) goto end; if (PyDict_SetItemString(kwds, "co_kwonlyargcount", kwonlyargcount) != 0) goto end; if (!(nlocals=PyLong_FromLong(l))) goto end; if (PyDict_SetItemString(kwds, "co_nlocals", nlocals) != 0) goto end; if (!(stacksize=PyLong_FromLong(s))) goto end; if (PyDict_SetItemString(kwds, "co_stacksize", stacksize) != 0) goto end; if (!(flags=PyLong_FromLong(f))) goto end; if (PyDict_SetItemString(kwds, "co_flags", flags) != 0) goto end; if (PyDict_SetItemString(kwds, "co_code", code) != 0) goto end; if (PyDict_SetItemString(kwds, "co_consts", c) != 0) goto end; if (PyDict_SetItemString(kwds, "co_names", n) != 0) goto end; if (PyDict_SetItemString(kwds, "co_varnames", v) != 0) goto end; if (PyDict_SetItemString(kwds, "co_freevars", fv) != 0) goto end; if (PyDict_SetItemString(kwds, "co_cellvars", cell) != 0) goto end; if (PyDict_SetItemString(kwds, "co_linetable", lnos) != 0) goto end; if (!(fn_cstr=PyUnicode_AsUTF8AndSize(fn, NULL))) goto end; if (!(name_cstr=PyUnicode_AsUTF8AndSize(name, NULL))) goto end; if (!(co = PyCode_NewEmpty(fn_cstr, name_cstr, fline))) goto end; if (!(replace = PyObject_GetAttrString((PyObject*)co, "replace"))) goto cleanup_code_too; if (!(empty = PyTuple_New(0))) goto cleanup_code_too; // unfortunately __pyx_empty_tuple isn't available here if (!(call_result = PyObject_Call(replace, empty, kwds))) goto cleanup_code_too; Py_XDECREF((PyObject*)co); co = (PyCodeObject*)call_result; call_result = NULL; if (0) { cleanup_code_too: Py_XDECREF((PyObject*)co); co = NULL; } end: Py_XDECREF(kwds); Py_XDECREF(argcount); Py_XDECREF(posonlyargcount); Py_XDECREF(kwonlyargcount); Py_XDECREF(nlocals); Py_XDECREF(stacksize); Py_XDECREF(replace); 
Py_XDECREF(call_result); Py_XDECREF(empty); if (type) { PyErr_Restore(type, value, traceback); } return co; } #else #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #endif #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #ifndef METH_STACKLESS #define METH_STACKLESS 0 #endif #if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 #define PyMem_RawMalloc(n) PyMem_Malloc(n) #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) #define PyMem_RawFree(p) PyMem_Free(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) 
PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) #include "pythread.h" #define Py_tss_NEEDS_INIT 0 typedef int Py_tss_t; static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { *key = PyThread_create_key(); return 0; } static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); *key = Py_tss_NEEDS_INIT; return key; } static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { PyObject_Free(key); } static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { return *key != Py_tss_NEEDS_INIT; } static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { PyThread_delete_key(*key); *key = Py_tss_NEEDS_INIT; } static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { return PyThread_set_key_value(*key, value); } static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { return PyThread_get_key_value(*key); } #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS #define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) #else #define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #if defined(PyUnicode_IS_READY) #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #else #define __Pyx_PyUnicode_READY(op) (0) #endif #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? 
PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #endif #else #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) #endif #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #ifndef PyObject_Unicode #define PyObject_Unicode PyObject_Str #endif #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #if PY_VERSION_HEX >= 0x030900A4 #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) #else #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) #endif #if CYTHON_ASSUME_SAFE_MACROS #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) #else #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) #endif #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t 
PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsHash_t #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif #define __PYX_MARK_ERR_POS(f_index, lineno) \ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } #define 
__PYX_ERR(f_index, lineno, Ln_error) \ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__monotonic_align__core #define __PYX_HAVE_API__monotonic_align__core /* Early includes */ #include "pythread.h" #include <string.h> #include <stdlib.h> #include <stdio.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { return (size_t) i < (size_t) limit; } #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define 
__Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const 
signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, 
__PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime = NULL; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; static const char *__pyx_f[] = { "core.pyx", "stringsource", }; /* NoFastGil.proto */ #define __Pyx_PyGILState_Ensure PyGILState_Ensure #define __Pyx_PyGILState_Release PyGILState_Release #define __Pyx_FastGIL_Remember() #define __Pyx_FastGIL_Forget() #define __Pyx_FastGilFuncInit() /* MemviewSliceStruct.proto */ struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj 
*memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; #define __Pyx_MemoryView_Len(m) (m.shape[0]) /* Atomics.proto */ #include <pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 #include <Windows.h> #undef __pyx_atomic_int_type #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #pragma message ("Using MSVC atomics") #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef __PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview)\ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else #define __pyx_add_acquisition_count(memview)\ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ 
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /* ForceInitThreads.proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each; /* "monotonic_align/core.pyx":7 * @cython.boundscheck(False) * @cython.wraparound(False) * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< * cdef int x * cdef int y */ struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each { int __pyx_n; float max_neg_val; }; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":279 * * 
@cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":105 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":330 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject 
*(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":965 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) 
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int 
memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) 
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #define __Pyx_BUILD_ASSERT_EXPR(cond)\ (sizeof(char [1 - 2*!(cond)]) - 1) #ifndef Py_MEMBER_SIZE #define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) #endif #if CYTHON_FAST_PYCALL static size_t __pyx_pyframe_localsplus_offset = 0; #include "frameobject.h" #define __Pxy_PyFrame_Initialize_Offsets()\ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) #define __Pyx_PyFrame_GetLocalsplus(frame)\ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) #endif // CYTHON_FAST_PYCALL #endif /* PyObjectCall2Args.proto */ static CYTHON_UNUSED PyObject* 
__Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* DivInt[Py_ssize_t].proto */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* ObjectGetItem.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); #else #define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) #endif /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); 
} /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* PyDictVersioning.proto */ #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS #define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) #define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ (version_var) = __PYX_GET_DICT_VERSION(dict);\ (cache_var) = (value); #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ (VAR) = __pyx_dict_cached_value;\ } else {\ (VAR) = __pyx_dict_cached_value = (LOOKUP);\ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ }\ } static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); #else #define __PYX_GET_DICT_VERSION(dict) (0) #define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) #define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); #endif /* GetModuleGlobalName.proto */ #if CYTHON_USE_DICT_VERSIONS #define 
__Pyx_GetModuleGlobalName(var, name) {\ static PY_UINT64_T __pyx_dict_version = 0;\ static PyObject *__pyx_dict_cached_value = NULL;\ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } #define __Pyx_GetModuleGlobalNameUncached(var, name) {\ PY_UINT64_T __pyx_dict_version;\ PyObject *__pyx_dict_cached_value;\ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ } static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); #else #define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) #define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* GetTopmostException.proto */ #if CYTHON_USE_EXC_INFO_STACK static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); #endif /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void 
__Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject 
*__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); __Pyx_SET_SIZE(list, len + 1); return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); __Pyx_SET_SIZE(list, len + 1); return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* DivInt[long].proto */ static CYTHON_INLINE long __Pyx_div_long(long, long); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* PyObject_GenericGetAttrNoDict.proto */ #if CYTHON_USE_TYPE_SLOTS && 
CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr #endif /* PyObject_GenericGetAttr.proto */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); #else #define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr #endif /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* PyObjectGetAttrStrNoError.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { 
Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *, int writable_flag); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag); /* GCCDiagnostics.proto */ #if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) #define __Pyx_HAS_GCC_DIAGNOSTIC #endif /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice 
*from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject 
/* NOTE(review): Cython-GENERATED C for module "monotonic_align.core" (see
 * __Pyx_MODULE_NAME below).  Do not edit by hand -- regenerate from
 * monotonic_align/core.pyx.  This span holds module-level forward
 * declarations: View.MemoryView runtime helper prototypes (slicing, copying,
 * refcounting of __Pyx_memviewslice), the two core function prototypes
 * (maximum_path_each / maximum_path_c), interned C string constants
 * (__pyx_k_*) and the PyObject* slots they are interned into at module init.
 * NOTE(review): line breaks in this chunk look mangled (elsewhere several
 * preprocessor directives share one physical line, which is not valid C) --
 * confirm formatting against the original generated file. */
*__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'cython.view' */ /* Module declarations from 'cython' */ /* Module declarations from 'monotonic_align.core' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice, __Pyx_memviewslice, int, int, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args); /*proto*/ static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int 
__pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void 
__pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 'U' : 'I', IS_UNSIGNED(int), 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "monotonic_align.core" extern int __pyx_module_is_main_monotonic_align__core; int __pyx_module_is_main_monotonic_align__core = 0; /* Implementation of 'monotonic_align.core' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_O[] = "O"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_t_xs[] = "t_xs"; static const char __pyx_k_t_ys[] = "t_ys"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = 
"ASCII"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_paths[] = "paths"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_values[] = "values"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char 
__pyx_k_View_MemoryView[] = "View.MemoryView"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char 
__pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject *__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static 
PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_paths; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; 
static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_t_xs; static PyObject *__pyx_n_s_t_ys; static PyObject *__pyx_n_s_test; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_values; static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject 
*__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject 
*__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct 
__pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject *__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static float __pyx_k_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject 
/* maximum_path_each (generated from monotonic_align/core.pyx:7-33, visible in
 * the embedded source-mapping comments below): monotonic-alignment dynamic
 * programming over ONE (t_y x t_x) pair of 2-D memoryview slices, run with the
 * GIL released (no Python API calls in the body).
 *   Forward pass:  value[y, x] += max(value[y-1, x], value[y-1, x-1]), using
 *   max_neg_val (default taken from the module constant __pyx_k_; the .pyx
 *   comment shows -1e9) at the x == y boundary and v_prev = 0 at the origin.
 *   Backward pass: walk y from t_y-1 down to 0, set path[y, index] = 1, and
 *   decrement index when the upper-left predecessor scores lower at [y-1,
 *   index] than [y-1, index-1].
 * boundscheck/wraparound are disabled by decorator, so the raw stride
 * arithmetic below performs no bounds checks.  Auto-generated by Cython --
 * do not edit by hand; change core.pyx and regenerate instead. */
*__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__16; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__14; static PyObject *__pyx_tuple__15; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__20; static PyObject *__pyx_tuple__21; static PyObject *__pyx_tuple__22; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__25; static PyObject *__pyx_codeobj__26; /* Late includes */ /* "monotonic_align/core.pyx":7 * @cython.boundscheck(False) * @cython.wraparound(False) * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< * cdef int x * cdef int y */ static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice __pyx_v_path, __Pyx_memviewslice __pyx_v_value, int __pyx_v_t_y, int __pyx_v_t_x, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args) { float __pyx_v_max_neg_val = __pyx_k_; int __pyx_v_x; int __pyx_v_y; float __pyx_v_v_prev; float __pyx_v_v_cur; int __pyx_v_index; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; long __pyx_t_4; int __pyx_t_5; long __pyx_t_6; long __pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; float __pyx_t_11; float __pyx_t_12; float __pyx_t_13; int __pyx_t_14; Py_ssize_t __pyx_t_15; Py_ssize_t __pyx_t_16; if (__pyx_optional_args) { if (__pyx_optional_args->__pyx_n > 0) { __pyx_v_max_neg_val = __pyx_optional_args->max_neg_val; } } /* "monotonic_align/core.pyx":13 * cdef float v_cur * cdef float tmp * cdef int index = t_x - 1 # <<<<<<<<<<<<<< * * for y in range(t_y): */ __pyx_v_index = (__pyx_v_t_x - 1); /* 
"monotonic_align/core.pyx":15 * cdef int index = t_x - 1 * * for y in range(t_y): # <<<<<<<<<<<<<< * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): * if x == y: */ __pyx_t_1 = __pyx_v_t_y; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_y = __pyx_t_3; /* "monotonic_align/core.pyx":16 * * for y in range(t_y): * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): # <<<<<<<<<<<<<< * if x == y: * v_cur = max_neg_val */ __pyx_t_4 = (__pyx_v_y + 1); __pyx_t_5 = __pyx_v_t_x; if (((__pyx_t_4 < __pyx_t_5) != 0)) { __pyx_t_6 = __pyx_t_4; } else { __pyx_t_6 = __pyx_t_5; } __pyx_t_4 = __pyx_t_6; __pyx_t_5 = ((__pyx_v_t_x + __pyx_v_y) - __pyx_v_t_y); __pyx_t_6 = 0; if (((__pyx_t_5 > __pyx_t_6) != 0)) { __pyx_t_7 = __pyx_t_5; } else { __pyx_t_7 = __pyx_t_6; } __pyx_t_6 = __pyx_t_4; for (__pyx_t_5 = __pyx_t_7; __pyx_t_5 < __pyx_t_6; __pyx_t_5+=1) { __pyx_v_x = __pyx_t_5; /* "monotonic_align/core.pyx":17 * for y in range(t_y): * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): * if x == y: # <<<<<<<<<<<<<< * v_cur = max_neg_val * else: */ __pyx_t_8 = ((__pyx_v_x == __pyx_v_y) != 0); if (__pyx_t_8) { /* "monotonic_align/core.pyx":18 * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): * if x == y: * v_cur = max_neg_val # <<<<<<<<<<<<<< * else: * v_cur = value[y-1, x] */ __pyx_v_v_cur = __pyx_v_max_neg_val; /* "monotonic_align/core.pyx":17 * for y in range(t_y): * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): * if x == y: # <<<<<<<<<<<<<< * v_cur = max_neg_val * else: */ goto __pyx_L7; } /* "monotonic_align/core.pyx":20 * v_cur = max_neg_val * else: * v_cur = value[y-1, x] # <<<<<<<<<<<<<< * if x == 0: * if y == 0: */ /*else*/ { __pyx_t_9 = (__pyx_v_y - 1); __pyx_t_10 = __pyx_v_x; __pyx_v_v_cur = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))); } __pyx_L7:; /* "monotonic_align/core.pyx":21 * else: * v_cur = 
value[y-1, x] * if x == 0: # <<<<<<<<<<<<<< * if y == 0: * v_prev = 0. */ __pyx_t_8 = ((__pyx_v_x == 0) != 0); if (__pyx_t_8) { /* "monotonic_align/core.pyx":22 * v_cur = value[y-1, x] * if x == 0: * if y == 0: # <<<<<<<<<<<<<< * v_prev = 0. * else: */ __pyx_t_8 = ((__pyx_v_y == 0) != 0); if (__pyx_t_8) { /* "monotonic_align/core.pyx":23 * if x == 0: * if y == 0: * v_prev = 0. # <<<<<<<<<<<<<< * else: * v_prev = max_neg_val */ __pyx_v_v_prev = 0.; /* "monotonic_align/core.pyx":22 * v_cur = value[y-1, x] * if x == 0: * if y == 0: # <<<<<<<<<<<<<< * v_prev = 0. * else: */ goto __pyx_L9; } /* "monotonic_align/core.pyx":25 * v_prev = 0. * else: * v_prev = max_neg_val # <<<<<<<<<<<<<< * else: * v_prev = value[y-1, x-1] */ /*else*/ { __pyx_v_v_prev = __pyx_v_max_neg_val; } __pyx_L9:; /* "monotonic_align/core.pyx":21 * else: * v_cur = value[y-1, x] * if x == 0: # <<<<<<<<<<<<<< * if y == 0: * v_prev = 0. */ goto __pyx_L8; } /* "monotonic_align/core.pyx":27 * v_prev = max_neg_val * else: * v_prev = value[y-1, x-1] # <<<<<<<<<<<<<< * value[y, x] += max(v_prev, v_cur) * */ /*else*/ { __pyx_t_10 = (__pyx_v_y - 1); __pyx_t_9 = (__pyx_v_x - 1); __pyx_v_v_prev = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_10 * __pyx_v_value.strides[0]) )) + __pyx_t_9)) ))); } __pyx_L8:; /* "monotonic_align/core.pyx":28 * else: * v_prev = value[y-1, x-1] * value[y, x] += max(v_prev, v_cur) # <<<<<<<<<<<<<< * * for y in range(t_y - 1, -1, -1): */ __pyx_t_11 = __pyx_v_v_cur; __pyx_t_12 = __pyx_v_v_prev; if (((__pyx_t_11 > __pyx_t_12) != 0)) { __pyx_t_13 = __pyx_t_11; } else { __pyx_t_13 = __pyx_t_12; } __pyx_t_9 = __pyx_v_y; __pyx_t_10 = __pyx_v_x; *((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) )) += __pyx_t_13; } } /* "monotonic_align/core.pyx":30 * value[y, x] += max(v_prev, v_cur) * * for y in range(t_y - 1, -1, -1): # <<<<<<<<<<<<<< * path[y, index] = 1 * 
if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): */ for (__pyx_t_1 = (__pyx_v_t_y - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_y = __pyx_t_1; /* "monotonic_align/core.pyx":31 * * for y in range(t_y - 1, -1, -1): * path[y, index] = 1 # <<<<<<<<<<<<<< * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): * index = index - 1 */ __pyx_t_10 = __pyx_v_y; __pyx_t_9 = __pyx_v_index; *((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_path.data + __pyx_t_10 * __pyx_v_path.strides[0]) )) + __pyx_t_9)) )) = 1; /* "monotonic_align/core.pyx":32 * for y in range(t_y - 1, -1, -1): * path[y, index] = 1 * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< * index = index - 1 * */ __pyx_t_14 = ((__pyx_v_index != 0) != 0); if (__pyx_t_14) { } else { __pyx_t_8 = __pyx_t_14; goto __pyx_L13_bool_binop_done; } __pyx_t_14 = ((__pyx_v_index == __pyx_v_y) != 0); if (!__pyx_t_14) { } else { __pyx_t_8 = __pyx_t_14; goto __pyx_L13_bool_binop_done; } __pyx_t_9 = (__pyx_v_y - 1); __pyx_t_10 = __pyx_v_index; __pyx_t_15 = (__pyx_v_y - 1); __pyx_t_16 = (__pyx_v_index - 1); __pyx_t_14 = (((*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))) < (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_15 * __pyx_v_value.strides[0]) )) + __pyx_t_16)) )))) != 0); __pyx_t_8 = __pyx_t_14; __pyx_L13_bool_binop_done:; if (__pyx_t_8) { /* "monotonic_align/core.pyx":33 * path[y, index] = 1 * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): * index = index - 1 # <<<<<<<<<<<<<< * * */ __pyx_v_index = (__pyx_v_index - 1); /* "monotonic_align/core.pyx":32 * for y in range(t_y - 1, -1, -1): * path[y, index] = 1 * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< * index = index - 1 * */ } } /* 
"monotonic_align/core.pyx":7 * @cython.boundscheck(False) * @cython.wraparound(False) * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< * cdef int x * cdef int y */ /* function exit code */ } /* "monotonic_align/core.pyx":38 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< * cdef int b = paths.shape[0] * cdef int i */ static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs, CYTHON_UNUSED int __pyx_skip_dispatch) { CYTHON_UNUSED int __pyx_v_b; int __pyx_v_i; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; Py_ssize_t __pyx_t_6; Py_ssize_t __pyx_t_7; /* "monotonic_align/core.pyx":39 * @cython.wraparound(False) * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: * cdef int b = paths.shape[0] # <<<<<<<<<<<<<< * cdef int i * for i in prange(b, nogil=True): */ __pyx_v_b = (__pyx_v_paths.shape[0]); /* "monotonic_align/core.pyx":41 * cdef int b = paths.shape[0] * cdef int i * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_1 = __pyx_v_b; if ((1 == 0)) abort(); { #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) 
(x) #define unlikely(x) (x) #endif __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_3 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_6, __pyx_t_7) firstprivate(__pyx_t_4, __pyx_t_5) #endif /* _OPENMP */ { #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) #endif /* _OPENMP */ for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ { __pyx_v_i = (int)(0 + 1 * __pyx_t_2); /* "monotonic_align/core.pyx":42 * cdef int i * for i in prange(b, nogil=True): * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) # <<<<<<<<<<<<<< */ __pyx_t_4.data = __pyx_v_paths.data; __pyx_t_4.memview = __pyx_v_paths.memview; __PYX_INC_MEMVIEW(&__pyx_t_4, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_i; Py_ssize_t __pyx_tmp_stride = __pyx_v_paths.strides[0]; __pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_4.shape[0] = __pyx_v_paths.shape[1]; __pyx_t_4.strides[0] = __pyx_v_paths.strides[1]; __pyx_t_4.suboffsets[0] = -1; __pyx_t_4.shape[1] = __pyx_v_paths.shape[2]; __pyx_t_4.strides[1] = __pyx_v_paths.strides[2]; __pyx_t_4.suboffsets[1] = -1; __pyx_t_5.data = __pyx_v_values.data; __pyx_t_5.memview = __pyx_v_values.memview; __PYX_INC_MEMVIEW(&__pyx_t_5, 0); { Py_ssize_t __pyx_tmp_idx = __pyx_v_i; Py_ssize_t __pyx_tmp_stride = __pyx_v_values.strides[0]; __pyx_t_5.data += __pyx_tmp_idx * __pyx_tmp_stride; } __pyx_t_5.shape[0] = __pyx_v_values.shape[1]; __pyx_t_5.strides[0] = __pyx_v_values.strides[1]; __pyx_t_5.suboffsets[0] = -1; __pyx_t_5.shape[1] = __pyx_v_values.shape[2]; __pyx_t_5.strides[1] = __pyx_v_values.strides[2]; __pyx_t_5.suboffsets[1] = -1; __pyx_t_6 = __pyx_v_i; __pyx_t_7 = __pyx_v_i; __pyx_f_15monotonic_align_4core_maximum_path_each(__pyx_t_4, __pyx_t_5, (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_ys.data) + __pyx_t_6)) ))), (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_xs.data) + __pyx_t_7)) ))), NULL); __PYX_XDEC_MEMVIEW(&__pyx_t_4, 0); __pyx_t_4.memview = NULL; __pyx_t_4.data = NULL; 
__PYX_XDEC_MEMVIEW(&__pyx_t_5, 0); __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; } } } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "monotonic_align/core.pyx":41 * cdef int b = paths.shape[0] * cdef int i * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L5:; } } /* "monotonic_align/core.pyx":38 * @cython.boundscheck(False) * @cython.wraparound(False) * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< * cdef int b = paths.shape[0] * cdef int i */ /* function exit code */ } /* NOTE(review): maximum_path_c is the cpdef entry point (core.pyx:38-42): it releases the GIL (Py_UNBLOCK_THREADS above) and dispatches maximum_path_each over the batch dimension via a Cython prange lowered to "#pragma omp parallel"/"omp for"; each iteration builds 2-D sub-slices of the 3-D paths/values views by hand (data += i * stride[0]) with per-thread memview refcounting.  The wrapper below is the Python-level entry: it parses positional/keyword args into writable typed memoryview slices (PyBUF_WRITABLE) and raises TypeError via __Pyx_RaiseArgtupleInvalid on arity mismatch.  Auto-generated by Cython -- do not edit by hand. */ /* Python wrapper */ static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { __Pyx_memviewslice __pyx_v_paths = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_values = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_t_ys = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_memviewslice __pyx_v_t_xs = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("maximum_path_c (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_paths,&__pyx_n_s_values,&__pyx_n_s_t_ys,&__pyx_n_s_t_xs,0}; PyObject* values[4] = {0,0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 4: 
values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_paths)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_values)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 1); __PYX_ERR(0, 38, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_ys)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 2); __PYX_ERR(0, 38, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_xs)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 3); __PYX_ERR(0, 38, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "maximum_path_c") < 0)) __PYX_ERR(0, 38, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[3] = PyTuple_GET_ITEM(__pyx_args, 3); } __pyx_v_paths = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_paths.memview)) __PYX_ERR(0, 38, __pyx_L3_error) __pyx_v_values = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_values.memview)) __PYX_ERR(0, 38, __pyx_L3_error) __pyx_v_t_ys = 
__Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_ys.memview)) __PYX_ERR(0, 38, __pyx_L3_error) __pyx_v_t_xs = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_xs.memview)) __PYX_ERR(0, 38, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 38, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15monotonic_align_4core_maximum_path_c(__pyx_self, __pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("maximum_path_c", 0); __Pyx_XDECREF(__pyx_r); if (unlikely(!__pyx_v_paths.memview)) { __Pyx_RaiseUnboundLocalError("paths"); __PYX_ERR(0, 38, __pyx_L1_error) } if (unlikely(!__pyx_v_values.memview)) { __Pyx_RaiseUnboundLocalError("values"); __PYX_ERR(0, 38, __pyx_L1_error) } if (unlikely(!__pyx_v_t_ys.memview)) { __Pyx_RaiseUnboundLocalError("t_ys"); __PYX_ERR(0, 38, __pyx_L1_error) } if (unlikely(!__pyx_v_t_xs.memview)) { __Pyx_RaiseUnboundLocalError("t_xs"); __PYX_ERR(0, 38, __pyx_L1_error) } __pyx_t_1 = __Pyx_void_to_None(__pyx_f_15monotonic_align_4core_maximum_path_c(__pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs, 0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 38, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_paths, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_values, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_t_ys, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_t_xs, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; 
default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) 
__PYX_ERR(1, 123, __pyx_L3_error) } else { /* "View.MemoryView":123 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; char 
*__pyx_t_7; int __pyx_t_8; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; Py_ssize_t __pyx_t_11; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":129 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 129, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":130 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 133, __pyx_L1_error) /* "View.MemoryView":132 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize 
<= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 136, __pyx_L1_error) /* "View.MemoryView":135 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":139 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); } } __pyx_t_3 = (__pyx_t_6) ? 
__Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":138 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":140 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) __pyx_t_3 = __pyx_v_format; __Pyx_INCREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_3); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":141 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(1, 141, __pyx_L1_error) } __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) __pyx_v_self->format = __pyx_t_7; /* "View.MemoryView":144 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * 
__pyx_v_self->ndim) * 2))); /* "View.MemoryView":145 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 148, __pyx_L1_error) /* "View.MemoryView":147 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ __pyx_t_8 = 0; __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_9; __pyx_v_idx = __pyx_t_8; __pyx_t_8 = (__pyx_t_8 + 1); /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":153 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 153, __pyx_L1_error) /* "View.MemoryView":152 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":154 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":151 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":158 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":159 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":157 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) if (likely(__pyx_t_4)) { /* "View.MemoryView":161 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":162 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":160 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":164 * self.mode = u'c' * else: * raise 
ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 164, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":166 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":169 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":170 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); 
if (__pyx_t_4) { /* "View.MemoryView":174 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_Raise(__pyx_t_10, 0, 0, 0); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __PYX_ERR(1, 176, __pyx_L1_error) /* "View.MemoryView":175 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":179 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":180 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 180, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && 
unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 180, __pyx_L1_error) } __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); __pyx_t_9 = __pyx_t_1; for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { __pyx_v_i = __pyx_t_11; /* "View.MemoryView":181 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":182 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":178 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":171 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":122 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ 
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":186 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":188 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == 
u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":189 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * 
info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 192, __pyx_L1_error) /* "View.MemoryView":191 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":193 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":194 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":195 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":196 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":197 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":198 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = 
self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":199 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":200 * info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":203 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":202 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":205 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":207 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":185 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { 
__Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":212 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto __pyx_L3; } /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * 
if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":216 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":215 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":218 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":214 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":219 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":211 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return 
self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":223 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":222 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations 
PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":227 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":228 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":226 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
__Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":231 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * def __getattr__(self, attr): */ __pyx_r = (__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":230 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = 
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":234 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":233 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; 
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":237 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":236 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int 
__pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":240 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":239 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static 
PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject 
*__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":244 * * 
@cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":249 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, 
__pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":248 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":252 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if 
(PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":253 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":255 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":244 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int 
__pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* 
"View.MemoryView":282 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":281 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":284 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":283 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef 
object _dict */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_v_state = 0; PyObject *__pyx_v__dict = 0; int __pyx_v_use_setstate; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":5 * cdef object _dict * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":6 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', 
None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":8 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":9 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":7 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":11 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":13 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ __Pyx_XDECREF(__pyx_r); 
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":12 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject 
*)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef tuple state * cdef object _dict */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":17 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":16 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":300 * cdef void 
*align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":304 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":307 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":306 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":309 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":298 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_lineno = 0; const char 
*__pyx_filename = NULL; int __pyx_clineno = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == 
(int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":346 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":347 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, 
&self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":349 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":351 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":352 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":350 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":348 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * 
__Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":356 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":357 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":355 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * 
raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":361 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) /* "View.MemoryView":360 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":358 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":364 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":363 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":366 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] 
== b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":368 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":370 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":345 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyThread_type_lock __pyx_t_6; PyThread_type_lock __pyx_t_7; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":374 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * elif (<__pyx_buffer *> &self.view).obj == Py_None: * */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":373 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: */ goto __pyx_L3; } /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ __pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0); if (__pyx_t_2) { /* "View.MemoryView":377 * elif (<__pyx_buffer *> &self.view).obj == Py_None: * * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< * Py_DECREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; /* "View.MemoryView":378 * * (<__pyx_buffer *> &self.view).obj = NULL * Py_DECREF(Py_None) # <<<<<<<<<<<<<< * * cdef int i */ Py_DECREF(Py_None); /* "View.MemoryView":375 * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< * * (<__pyx_buffer *> &self.view).obj = NULL */ } __pyx_L3:; /* "View.MemoryView":382 * cdef int i * 
global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":383 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; __pyx_t_4 = __pyx_t_3; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":385 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":388 * if i != 
__pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":387 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; /* "View.MemoryView":386 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":389 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":384 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":391 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * 
cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":382 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":372 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":395 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else 
{ __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 397, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":398 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if 
(unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":397 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":400 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":393 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":403 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; 
__Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":405 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":404 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":407 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 407, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = __pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":411 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":410 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":413 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":414 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":403 * * * 
def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setitem__", 0); __Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * 
if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ __pyx_t_1 = (__pyx_v_self->view.readonly != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 418, __pyx_L1_error) /* "View.MemoryView":417 * * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: # <<<<<<<<<<<<<< * raise TypeError("Cannot assign to read-only memoryview") * */ } /* "View.MemoryView":420 * raise TypeError("Cannot assign to read-only memoryview") * * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (likely(__pyx_t_2 != Py_None)) { PyObject* sequence = __pyx_t_2; Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 420, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_3; __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":423 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_v_obj = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error) if (__pyx_t_1) { /* "View.MemoryView":425 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":424 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * 
self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L5; } /* "View.MemoryView":427 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error) __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L5:; /* "View.MemoryView":422 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L4; } /* "View.MemoryView":429 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L4:; /* "View.MemoryView":416 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); 
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | 
PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":435 * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":434 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; /* "View.MemoryView":436 * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ 
__pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":437 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":433 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":432 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":439 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":431 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); 
__Pyx_XDECREF(__pyx_t_8);
  __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_obj);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":441
 *     cdef setitem_slice_assignment(self, dst, src):
 *
 * Cython-generated implementation of memoryview.setitem_slice_assignment:
 * copies the contents of the `src` memoryview into the `dst` memoryview
 * element-wise via __pyx_memoryview_copy_contents. Both arguments are
 * type-checked as memoryview objects before their C-level slice structs
 * are extracted. NOTE: generated code — edit the .pyx source, not this file.
 */
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
  __Pyx_memviewslice __pyx_v_dst_slice;
  __Pyx_memviewslice __pyx_v_src_slice;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_memviewslice *__pyx_t_1;
  __Pyx_memviewslice *__pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  int __pyx_t_5;
  int __pyx_t_6;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("setitem_slice_assignment", 0);

  /* "View.MemoryView":445: obtain the C slice behind `src`
   * (must be a memoryview; a failed TypeTest raises). */
  if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error)
  __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error)

  /* "View.MemoryView":446: obtain the C slice behind `dst`. */
  if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error)
  __pyx_t_2 =
__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error)

  /* "View.MemoryView":447: fetch src.ndim and dst.ndim as C ints
   * (attribute lookup goes through Python since dst/src are untyped). */
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;

  /* "View.MemoryView":445: element-wise copy; -1 signals a raised error. */
  __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error)

  /* function exit code: a cdef function declared to return `object`
   * implicitly returns None on success. */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno,
__pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":449
 *     cdef setitem_slice_assign_scalar(self, memoryview dst, value):
 *
 * Cython-generated implementation of memoryview.setitem_slice_assign_scalar:
 * packs `value` into one temporary item buffer (a 128-int stack array, or a
 * PyMem_Malloc'ed buffer when itemsize exceeds it) and broadcasts that item
 * over every element of the destination slice.
 * NOTE: generated code — edit the .pyx source, not this file.
 */
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
  int __pyx_v_array[0x80];
  void *__pyx_v_tmp;
  void *__pyx_v_item;
  __Pyx_memviewslice *__pyx_v_dst_slice;
  __Pyx_memviewslice __pyx_v_tmp_slice;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_memviewslice *__pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_t_4;
  int __pyx_t_5;
  char const *__pyx_t_6;
  PyObject *__pyx_t_7 = NULL;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  PyObject *__pyx_t_10 = NULL;
  PyObject *__pyx_t_11 = NULL;
  PyObject *__pyx_t_12 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);

  /* "View.MemoryView":451: cdef void *tmp = NULL */
  __pyx_v_tmp = NULL;

  /* "View.MemoryView":456: dst_slice = get_slice_from_memview(dst, &tmp_slice) */
  __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error)
  __pyx_v_dst_slice = __pyx_t_1;

  /* "View.MemoryView":458: if <size_t>self.view.itemsize > sizeof(array):
   * (expression continues on the following original line) */
  __pyx_t_2 =
((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_2) { /* "View.MemoryView":459 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":461 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error) /* "View.MemoryView":460 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":462 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":458 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":464 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":466 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_2) { /* "View.MemoryView":468 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * 
else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":467 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":470 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L8:; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":475 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":474 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":476 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ 
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":479 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __Pyx_XGOTREF(__pyx_t_12); __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_XGIVEREF(__pyx_t_12); __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); } __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_XGIVEREF(__pyx_t_9); __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":449 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); 
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":481
 *     cdef setitem_indexed(self, index, value):
 *
 * Cython-generated implementation of memoryview.setitem_indexed: resolves a
 * fully-indexed (non-slice) index to an element pointer, then packs `value`
 * into that element via the vtable method assign_item_from_object.
 * NOTE: generated code — edit the .pyx source, not this file.
 */
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
  char *__pyx_v_itemp;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  char *__pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("setitem_indexed", 0);

  /* "View.MemoryView":482: cdef char *itemp = self.get_item_pointer(index)
   * get_item_pointer is a cdef (vtable) method; NULL means an exception was set. */
  __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error)
  __pyx_v_itemp = __pyx_t_1;

  /* "View.MemoryView":483: self.assign_item_from_object(itemp, value)
   * return value (always an object for cdef `object` methods) is discarded. */
  __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /* function exit code: implicit None return. */
  __pyx_r = Py_None;
__Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

/* "View.MemoryView":485
 *     cdef convert_item_to_object(self, char *itemp):
 *
 * Cython-generated implementation of memoryview.convert_item_to_object:
 * fallback conversion of one raw element into a Python object using the
 * stdlib `struct` module ("Only used if instantiated manually by the user,
 * or if Cython doesn't know how to convert the type").
 * NOTE: generated code — edit the .pyx source, not this file.
 */
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
  PyObject *__pyx_v_struct = NULL;
  PyObject *__pyx_v_bytesitem = 0;
  PyObject *__pyx_v_result = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  PyObject *__pyx_t_7 = NULL;
  int __pyx_t_8;
  PyObject *__pyx_t_9 = NULL;
  size_t __pyx_t_10;
  int __pyx_t_11;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("convert_item_to_object", 0);

  /* "View.MemoryView":488: import struct
   * (runtime import each call; result bound to the local `struct`). */
  __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v_struct = __pyx_t_1;
  __pyx_t_1 = 0;

  /* "View.MemoryView":491: bytesitem = itemp[:self.view.itemsize]
   * copies itemsize raw bytes of the element into a fresh bytes object. */
  __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":493 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); 
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":498 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":497 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":499 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ 
__Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; /* "View.MemoryView":494 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_1); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 495, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":492 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ 
__Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":485 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; 
__Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":504 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":510 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if 
(!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":509 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":512 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; 
__Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 514, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":515 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":514 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":515 * * 
for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":501 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, 
int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; char *__pyx_t_5; void *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; if (__pyx_v_info == NULL) { PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); return -1; } __Pyx_RefNannySetupContext("__getbuffer__", 0); __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L4_bool_binop_done; } __pyx_t_2 = (__pyx_v_self->view.readonly != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 520, __pyx_L1_error) /* "View.MemoryView":519 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< * raise ValueError("Cannot create writable memory view from read-only memoryview") * */ } /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & 
PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); if (__pyx_t_1) { /* "View.MemoryView":523 * * if flags & PyBUF_ND: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_4 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_4; /* "View.MemoryView":522 * raise ValueError("Cannot create writable memory view from read-only memoryview") * * if flags & PyBUF_ND: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L6; } /* "View.MemoryView":525 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L6:; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":528 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_4 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_4; /* "View.MemoryView":527 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L7; } /* "View.MemoryView":530 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L7:; /* "View.MemoryView":532 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":533 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_4 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_4; /* "View.MemoryView":532 * info.strides = NULL * * 
if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L8; } /* "View.MemoryView":535 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L8:; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":538 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_5 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_5; /* "View.MemoryView":537 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L9; } /* "View.MemoryView":540 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L9:; /* "View.MemoryView":542 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_6 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_6; /* "View.MemoryView":543 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_7 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_7; /* "View.MemoryView":544 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = self.view.readonly */ __pyx_t_8 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_8; /* "View.MemoryView":545 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = self.view.readonly * 
info.obj = self */ __pyx_t_8 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_8; /* "View.MemoryView":546 * info.itemsize = self.view.itemsize * info.len = self.view.len * info.readonly = self.view.readonly # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_t_1 = __pyx_v_self->view.readonly; __pyx_v_info->readonly = __pyx_t_1; /* "View.MemoryView":547 * info.len = self.view.len * info.readonly = self.view.readonly * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":518 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info->obj == Py_None) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; 
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":554 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":555 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error) /* "View.MemoryView":556 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":553 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit 
code */
/* NOTE(review): this file is C code auto-generated by Cython (the bundled
 * "View.MemoryView" utility module). Do not hand-edit the logic — regenerate
 * from the .pyx source instead. Comments below are navigation aids only. */
/* ---- error/exit tail of the generated memoryview.T.__get__ getter ---- */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */
/* ---- memoryview.base property ----
 * Python-visible wrapper: casts self to the concrete struct and forwards to
 * the implementation function below. */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation: returns self.obj (the Python object backing this
 * memoryview) with a new reference; cannot fail, hence no error label. */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":560 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj); /* new reference handed to the caller */
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":559 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */
/* ---- memoryview.shape property (wrapper begins here; its body continues on
 * the following original line of this generated file) ---- */
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":564 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":563 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); 
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (unlikely(__pyx_t_1)) { /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 570, __pyx_L1_error) /* "View.MemoryView":568 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":572 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":567 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ 
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__13, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":576 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":579 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in 
self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":575 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ 
__Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":583 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":582 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int 
__pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":587 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":586 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":590 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":591 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = 
__Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":590 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t 
*__pyx_t_5; PyObject *__pyx_t_6 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":596 * def size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":598 * result = 1 * * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":599 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":601 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":595 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":603 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); 
__pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":594 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":607 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":606 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":609 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* 
"View.MemoryView":605 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* 
"View.MemoryView":613 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); /* "View.MemoryView":612 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":611 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":616 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":615 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); 
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_c_contig", 0); /* "View.MemoryView":622 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":623 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = 
__Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":619 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice *__pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":628 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * 
return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":629 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":625 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject 
*__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":633 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":635 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":636 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":641 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":631 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* 
function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":645 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":647 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, 
(&__pyx_v_src)); /* "View.MemoryView":648 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":653 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":643 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct 
__pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): Cython-generated implementation of memoryview.__reduce_cython__ (from a
 * "(tree fragment)" in the View.MemoryView utility code). It unconditionally raises
 * TypeError("no default __reduce__ due to non-trivial __cinit__"), i.e. memoryview objects
 * are deliberately not picklable. Do not hand-edit this generated .c file; change the
 * Cython source and regenerate. */
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): generated Python wrapper (tp_methods entry) for __setstate_cython__ begins here. */
/* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 
0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): Cython-generated memoryview.__setstate_cython__ — like __reduce_cython__
 * above, it unconditionally raises TypeError (tuple __pyx_tuple__15 holds the message),
 * blocking unpickling of memoryview objects. Generated code: do not hand-edit. */
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo 
*/
/* NOTE(review): Cython-generated C wrapper (@cname '__pyx_memoryview_new') that constructs
 * a View.MemoryView `memoryview` object: it boxes `flags` and `dtype_is_object` into Python
 * objects, calls the memoryview type with (o, flags, dtype_is_object), then stores the raw
 * `typeinfo` pointer on the result. Returns a new reference, or 0 on error with an exception
 * set. Generated code: do not hand-edit; regenerate from the Cython source. */
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":658 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":659 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* 
"View.MemoryView":660 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */
/* NOTE(review): success path of memoryview_cwrapper — returns the new memoryview. */
__Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":657 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; }
/* NOTE(review): Cython-generated inline type check (@cname '__pyx_memoryview_check') —
 * C-level equivalent of `isinstance(o, memoryview)` against the module's own
 * __pyx_memoryview_type; returns nonzero iff `o` is such a memoryview. Generated
 * code: do not hand-edit. */
/* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":664 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":663 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * 
Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":671 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":672 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":671 * full slices. 
* """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":674 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":676 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":677 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":678 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if 
(unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 679, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == 
((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__16); __Pyx_GIVEREF(__pyx_slice__16); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__16); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":683 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":681 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":685 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__16); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":686 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":680 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":688 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ 
/*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (unlikely(__pyx_t_1)) { /* "View.MemoryView":689 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_Raise(__pyx_t_11, 0, 0, 0); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __PYX_ERR(1, 689, __pyx_L1_error) /* "View.MemoryView":688 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":691 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if (!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":692 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) 
__PYX_ERR(1, 692, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":679 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":694 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":696 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__16); __Pyx_GIVEREF(__pyx_slice__16); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__16); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":695 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":698 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_11); __pyx_t_11 = 0; goto __pyx_L0; /* "View.MemoryView":666 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ 
/* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":701 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":702 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (unlikely(__pyx_t_4)) { /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = 
__Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 703, __pyx_L1_error) /* "View.MemoryView":702 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":700 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = 
NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":711 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":718 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); /* "View.MemoryView":722 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 722, __pyx_L1_error) } } #endif /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":725 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct 
__pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":726 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":724 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":728 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":729 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":735 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":736 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":741 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = &suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":742 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; 
__Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 746, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":751 * p_dst, p_src.shape[dim], 
p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error) /* "View.MemoryView":748 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error) /* "View.MemoryView":747 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":755 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":756 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":757 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ 
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":758 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":754 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":760 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":761 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; 
__pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":762 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":764 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":765 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":766 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if 
(unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":768 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error) /* "View.MemoryView":774 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":746 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":778 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * 
memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) } /* "View.MemoryView":779 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) } /* "View.MemoryView":777 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":776 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":782 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":783 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":782 * memview.dtype_is_object) * 
else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":710 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # 
<<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":830 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":829 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":832 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error) /* "View.MemoryView":831 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":827 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":835 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, 
"Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":838 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error) /* "View.MemoryView":837 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":843 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":845 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":844 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":842 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if 
negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":848 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":847 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":850 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":846 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":841 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":853 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":852 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":855 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if 
(__pyx_t_2) { /* "View.MemoryView":859 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":861 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":860 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":858 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":863 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":862 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":857 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":866 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":865 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":868 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":870 * stop = shape * * if not 
have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":871 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":870 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":875 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":878 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":877 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":881 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":880 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":884 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":885 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":886 * dst.strides[new_ndim] = stride * step 
* dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":890 * * if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":889 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":892 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":897 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + 
__pyx_v_suboffset); /* "View.MemoryView":896 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":899 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":900 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":895 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":902 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":894 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":904 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":807 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef 
WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":910 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":912 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":913 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":917 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 917, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && 
unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 917, __pyx_L1_error) } __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); /* "View.MemoryView":918 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":916 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":920 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":921 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":923 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":922 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":926 * * if index < 0: * index += view.shape[dim] # 
<<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":928 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 928, __pyx_L1_error) /* "View.MemoryView":927 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":925 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (unlikely(__pyx_t_2)) { /* "View.MemoryView":931 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * 
resultp = bufp + index * stride */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 931, __pyx_L1_error) /* "View.MemoryView":930 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":933 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":935 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":934 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":937 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":910 * * 
@cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; long __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":944 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":946 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":947 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":951 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * 
strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); __pyx_t_4 = __pyx_t_3; for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":952 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":953 * for i in range(ndim / 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; /* "View.MemoryView":954 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_8) { } else { __pyx_t_7 = __pyx_t_8; goto __pyx_L6_bool_binop_done; } __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_7 = __pyx_t_8; __pyx_L6_bool_binop_done:; if (__pyx_t_7) { /* "View.MemoryView":957 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview 
with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error) /* "View.MemoryView":956 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":959 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":943 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":977 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # 
<<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":976 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":981 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":980 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":983 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object 
value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":979 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":987 * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = 
__pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error) /* "View.MemoryView":986 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":989 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":985 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); 
return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":993 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":992 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to 
non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; 
__Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; 
Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":1008 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; /* "View.MemoryView":1007 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1013 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1015 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1016 * * 
result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1018 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); __Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1019 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1021 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1022 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1023 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1024 * result.view.buf = <void *> 
memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1025 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: */ Py_INCREF(Py_None); /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); if (__pyx_t_1) { /* "View.MemoryView":1028 * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * else: * result.flags = PyBUF_RECORDS_RO */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1027 * Py_INCREF(Py_None) * * if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< * result.flags = PyBUF_RECORDS * else: */ goto __pyx_L4; } /* "View.MemoryView":1030 * result.flags = PyBUF_RECORDS * else: * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ /*else*/ { __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; } __pyx_L4:; /* "View.MemoryView":1032 * result.flags = PyBUF_RECORDS_RO * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1033 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* 
"View.MemoryView":1036 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1037 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1039 * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1040 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L6_break; /* "View.MemoryView":1038 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L6_break:; /* "View.MemoryView":1042 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* 
"View.MemoryView":1043 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1044 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1046 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1047 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1049 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject 
*)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":999 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1056 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) __pyx_t_3 = 
((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1057 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1055 * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1059 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1060 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1052 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice) except NULL: * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int 
__pyx_t_3; int __pyx_t_4; Py_ssize_t __pyx_t_5; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1067 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = __pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1068 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1069 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1071 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1072 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1074 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_dim = __pyx_t_4; /* "View.MemoryView":1075 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1076 * for dim in 
range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1077 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_5 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; } /* "View.MemoryView":1063 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1083 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1084 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') */ 
__Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1080 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1095 * * if 
isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1096 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1094 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1098 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1099 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1101 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1103 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1087 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1111 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1110 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1113 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1109 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1121 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1122 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1124 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1126 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1127 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1125 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1129 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_1; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * 
break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1131 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1132 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1130 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1135 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1134 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1137 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1116 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; Py_ssize_t __pyx_t_6; /* "View.MemoryView":1147 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1148 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1149 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1150 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # 
<<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1154 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1155 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); /* "View.MemoryView":1153 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1157 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for 
i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1158 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); /* "View.MemoryView":1159 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1160 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1152 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1162 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; __pyx_t_5 = __pyx_t_4; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1163 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1167 * src_shape + 1, 
dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1168 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1140 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1173 * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1170 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ static Py_ssize_t 
__pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; /* "View.MemoryView":1179 * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for shape in src.shape[:ndim]: */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1181 * cdef Py_ssize_t shape, size = src.memview.view.itemsize * * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< * size *= shape * */ __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_shape = (__pyx_t_2[0]); /* "View.MemoryView":1182 * * for shape in src.shape[:ndim]: * size *= shape # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * __pyx_v_shape); } /* "View.MemoryView":1184 * size *= shape * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1177 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef Py_ssize_t shape, size = src.memview.view.itemsize */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int 
__pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1197 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ __pyx_t_2 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_idx = __pyx_t_4; /* "View.MemoryView":1198 * if order == 'F': * for idx in range(ndim): * strides[idx] = stride # <<<<<<<<<<<<<< * stride *= shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1199 * for idx in range(ndim): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1196 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1201 * stride *= shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride *= shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1202 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride *= shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1203 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride *= shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1205 * stride *= shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ 
__pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1187 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1219 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1220 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1222 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1224 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = 
__pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error) /* "View.MemoryView":1223 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* "View.MemoryView":1227 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1228 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = __pyx_t_4; /* "View.MemoryView":1229 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1230 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1231 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1233 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); /* "View.MemoryView":1237 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 
= __pyx_v_ndim; __pyx_t_5 = __pyx_t_3; for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { __pyx_v_i = __pyx_t_6; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1239 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1238 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1242 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); /* "View.MemoryView":1241 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1244 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1246 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":1208 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * 
char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1254 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); 
__Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1253 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 1253, __pyx_L1_error) /* "View.MemoryView":1251 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_lineno = 0; 
const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1258 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } __pyx_t_1 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 1258, __pyx_L1_error) /* "View.MemoryView":1257 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (unlikely(__pyx_t_1)) { /* 
"View.MemoryView":1263 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } __pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1263, __pyx_L1_error) /* "View.MemoryView":1262 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1265 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1265, __pyx_L1_error) } /* "View.MemoryView":1261 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView._err", 
__pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; int __pyx_t_6; void *__pyx_t_7; int __pyx_t_8; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; /* "View.MemoryView":1276 * Check for overlapping memory and verify the shapes. 
* """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1277 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1279 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1280 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1281 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1285 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1284 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * 
elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1287 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1286 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1289 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1291 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; __pyx_t_3 = __pyx_t_5; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); if (__pyx_t_2) { /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1294 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ 
__pyx_v_broadcasting = 1; /* "View.MemoryView":1295 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1293 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1297 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1292 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error) /* "View.MemoryView":1299 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = 
(__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1305 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1304 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1307 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1307, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_7; /* "View.MemoryView":1308 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1302 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); 
if (__pyx_t_2) { /* "View.MemoryView":1314 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1313 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1316 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1315 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1320 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1321 * * refcount_copying(&dst, dtype_is_object, ndim, False) * 
memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); /* "View.MemoryView":1322 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1323 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1324 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1318 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1310 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_8 = (__pyx_t_2 != 0); if (__pyx_t_8) { /* "View.MemoryView":1329 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error) /* "View.MemoryView":1330 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = 
__pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error) /* "View.MemoryView":1326 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1332 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1333 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1334 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1336 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1337 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1268 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD 
__Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1344 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1346 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1347 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1348 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1349 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } 
/* "View.MemoryView":1351 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1352 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1353 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1354 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1340 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1367 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, 
__pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1366 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1362 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1374 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1371 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ 
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1381 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); __pyx_t_2 = __pyx_t_1; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_4) { /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_4 = (__pyx_v_inc != 0); if (__pyx_t_4) { /* "View.MemoryView":1384 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1383 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1386 * Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1382 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1388 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* 
"View.MemoryView":1389 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1391 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1377 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1400 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1401 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1403 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * 
refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1397 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; Py_ssize_t __pyx_t_4; /* "View.MemoryView":1411 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1412 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1415 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1416 * if ndim == 1: * for i in range(extent): * memcpy(data, 
item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); /* "View.MemoryView":1417 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1414 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1419 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; __pyx_t_3 = __pyx_t_2; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":1420 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1422 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1407 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef 
__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) 
__PYX_ERR(1, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = 0; PyObject *__pyx_v___pyx_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; int __pyx_t_6; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ __pyx_t_1 = 
((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":5 * cdef object __pyx_result * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = 
PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 6, __pyx_L1_error) /* "(tree fragment)":4 * cdef object __pyx_PickleError * cdef object __pyx_result * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":7 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_6 = (__pyx_t_1 != 0); if (__pyx_t_6) { /* "(tree fragment)":9 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":8 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":10 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef 
__pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); /* "(tree fragment)":12 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { 
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 13, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":14 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { 
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 14, __pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } __pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":13 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct 
__pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_array___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject 
*__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "monotonic_align.core.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ 
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 0, /*tp_pypy_flags*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif 
PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "monotonic_align.core.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ #if PY_VERSION_HEX < 0x030800b4 0, /*tp_print*/ #endif #if PY_VERSION_HEX >= 0x030800b4 0, /*tp_vectorcall_offset*/ #endif 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, 
/*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 0, /*tp_pypy_flags*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); __pyx_memoryview___dealloc__(o); __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int 
__pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } 
/* Remaining property getter shims for the memoryview type
 * (suboffsets/ndim/itemsize/nbytes/size), each forwarding to the
 * Cython-generated __get__ wrapper. */
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
}

static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
}

static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o);
}

static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o);
}

static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o);
}

/* Python-visible method table for memoryview. */
static PyMethodDef __pyx_methods_memoryview[] = {
  {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
  {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
  {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
  {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
  {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0},
  {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0},
  {0, 0, 0, 0}
};

/* Read-only property table; entries wire property names to the shims above. */
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
  {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0},
  {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0},
  {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0},
  {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0},
  {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0},
  {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0},
  {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0},
  {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0},
  {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0},
  {0, 0, 0, 0, 0}
};

/* Sequence protocol: only len() and integer indexing (via the sq_item shim,
 * which delegates to the mapping subscript). */
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
  __pyx_memoryview___len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_memoryview, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};

/* Mapping protocol: general __getitem__/__setitem__ (slices and tuples). */
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
  __pyx_memoryview___len__, /*mp_length*/
  __pyx_memoryview___getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};

/* Buffer protocol: only the new-style (PEP 3118) getbuffer is provided; the
 * Python-2-only legacy slots are zeroed under version guards. */
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getreadbuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getwritebuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getsegcount*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getcharbuffer*/
  #endif
  __pyx_memoryview_getbuffer, /*bf_getbuffer*/
  0, /*bf_releasebuffer*/
};

/* Type object for monotonic_align.core.memoryview (continues past this chunk
 * boundary; the tail slots follow). */
static PyTypeObject __pyx_type___pyx_memoryview = {
  PyVarObject_HEAD_INIT(0, 0)
  "monotonic_align.core.memoryview", /*tp_name*/
  sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_memoryview, /*tp_dealloc*/
  #if PY_VERSION_HEX < 0x030800b4
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b4
  0, /*tp_vectorcall_offset*/
  #endif
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #endif
  #if PY_MAJOR_VERSION >= 3
  0, /*tp_as_async*/
  #endif
  __pyx_memoryview___repr__, /*tp_repr*/
  0, /*tp_as_number*/
  &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  __pyx_memoryview___str__, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  0, /*tp_doc*/
/* Tail of the memoryview type object: wires in the GC hooks, method/getset
 * tables, and tp_new defined earlier in this file. */
  __pyx_tp_traverse_memoryview, /*tp_traverse*/
  __pyx_tp_clear_memoryview, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_memoryview, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_memoryview, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_memoryview, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  0, /*tp_version_tag*/
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
  0, /*tp_vectorcall*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
  0, /*tp_print*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
  0, /*tp_pypy_flags*/
  #endif
};
/* vtable for the _memoryviewslice subclass; populated during module init. */
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;

/* tp_new for _memoryviewslice: delegates allocation to the base memoryview
 * tp_new, then swaps in the subclass vtable and initialises its own fields. */
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_memoryviewslice_obj *p;
  PyObject *o = __pyx_tp_new_memoryview(t, a, k);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_memoryviewslice_obj *)o);
  p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
  p->from_object = Py_None; Py_INCREF(Py_None);
  p->from_slice.memview = NULL;
  return o;
}

/* tp_dealloc for _memoryviewslice: same resurrect-around-__dealloc__ pattern
 * as the base type, then chains to the base tp_dealloc. */
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  #if CYTHON_USE_TP_FINALIZE
  if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
    if (PyObject_CallFinalizerFromDealloc(o)) return;
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
    __pyx_memoryviewslice___dealloc__(o);
    __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->from_object);
  /* Re-track before delegating: the base dealloc untracks again itself. */
  PyObject_GC_Track(o);
  __pyx_tp_dealloc_memoryview(o);
}

/* GC traverse: base-class fields first, then the extra from_object ref. */
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
  int e;
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e;
  if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; }
  return 0;
}

/* GC clear: base-class fields first; swap-before-decref on from_object, then
 * release the owned C-level slice reference. */
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
  PyObject* tmp;
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  __pyx_tp_clear_memoryview(o);
  tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp);
  __PYX_XDEC_MEMVIEW(&p->from_slice, 1);
  return 0;
}

/* Getter shim for the overriding `base` property of _memoryviewslice. */
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o);
}

static PyMethodDef __pyx_methods__memoryviewslice[] = {
  {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0},
  {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0},
  {0, 0, 0, 0}
};

static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
  {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0},
  {0, 0, 0, 0, 0}
};

/* Type object for the internal _memoryviewslice subclass (tp_base is set to
 * the memoryview type during module init; struct continues past this chunk
 * boundary). Most slots are 0 because they are inherited from the base type;
 * repr/str are only set under PyPy, which does not inherit them the same way. */
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
  PyVarObject_HEAD_INIT(0, 0)
  "monotonic_align.core._memoryviewslice", /*tp_name*/
  sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
  #if PY_VERSION_HEX < 0x030800b4
  0, /*tp_print*/
  #endif
  #if PY_VERSION_HEX >= 0x030800b4
  0, /*tp_vectorcall_offset*/
  #endif
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #endif
  #if PY_MAJOR_VERSION >= 3
  0, /*tp_as_async*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY
  __pyx_memoryview___repr__, /*tp_repr*/
  #else
  0, /*tp_repr*/
#endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) 0, /*tp_vectorcall*/ #endif #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 0, /*tp_print*/ #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 0, /*tp_pypy_flags*/ #endif }; static PyMethodDef __pyx_methods[] = { {"maximum_path_c", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15monotonic_align_4core_1maximum_path_c, METH_VARARGS|METH_KEYWORDS, 0}, {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_core(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_core}, {0, NULL} }; 
#endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "core", 0, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif #ifndef CYTHON_SMALL_CODE #if defined(__clang__) #define CYTHON_SMALL_CODE #elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define CYTHON_SMALL_CODE __attribute__((cold)) #else #define CYTHON_SMALL_CODE #endif #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, 
{&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, 
__pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, 
{&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_paths, __pyx_k_paths, sizeof(__pyx_k_paths), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, 
{&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_t_xs, __pyx_k_t_xs, sizeof(__pyx_k_t_xs), 0, 0, 1, 1}, {&__pyx_n_s_t_ys, __pyx_k_t_ys, sizeof(__pyx_k_t_ys), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_values, __pyx_k_values, sizeof(__pyx_k_values), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 15, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) 
__pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "View.MemoryView":133 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 133, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "View.MemoryView":136 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 136, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "View.MemoryView":148 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 148, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "View.MemoryView":176 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 176, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); 
__Pyx_GIVEREF(__pyx_tuple__5); /* "View.MemoryView":192 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 192, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":418 * def __setitem__(memoryview self, object index, object value): * if self.view.readonly: * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< * * have_slices, index = _unellipsify(index, self.view.ndim) */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":495 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to 
object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 495, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":520 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_WRITABLE and self.view.readonly: * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< * * if flags & PyBUF_ND: */ __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 520, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); /* "View.MemoryView":570 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 570, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); /* "View.MemoryView":577 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__13 = PyTuple_New(1); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 577, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__13, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__13); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__14 = PyTuple_Pack(1, 
__pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__14); __Pyx_GIVEREF(__pyx_tuple__14); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__15); __Pyx_GIVEREF(__pyx_tuple__15); /* "View.MemoryView":682 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) __PYX_ERR(1, 682, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__16); __Pyx_GIVEREF(__pyx_slice__16); /* "View.MemoryView":703 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 703, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default 
__reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__20); __Pyx_GIVEREF(__pyx_tuple__20); /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__21); __Pyx_GIVEREF(__pyx_tuple__21); /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = 
Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_tuple__25 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { /* InitThreads.init */ #if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ static 
CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ static int __Pyx_modinit_global_init_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_variable_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); /*--- Variable export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_function_export_code(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); /*--- Function export code ---*/ __Pyx_RefNannyFinishContext(); return 0; } static int __Pyx_modinit_type_init_code(void) { __Pyx_RefNannyDeclarations int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_array.tp_print = 0; #endif if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, 
__pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_MemviewEnum.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 
__pyx_type___pyx_memoryview.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) __pyx_memoryview_type = &__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) #if PY_VERSION_HEX < 0x030800B1 __pyx_type___pyx_memoryviewslice.tp_print = 0; #endif if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; } if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static 
int __Pyx_modinit_type_import_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
  /*--- Type import code ---*/
  /* no C-level types are imported from other modules: generated stub */
  __Pyx_RefNannyFinishContext();
  return 0;
}

/* No C variables imported from other modules: generated stub, returns 0. */
static int __Pyx_modinit_variable_import_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
  /*--- Variable import code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}

/* No C functions imported from other modules: generated stub, returns 0. */
static int __Pyx_modinit_function_import_code(void) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
  /*--- Function import code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}

/* Select the correct module-init entry-point signature: PyMODINIT_FUNC when
 * exported, otherwise void (Py2) / PyObject * (Py3), with extern "C" under
 * a C++ compiler. */
#ifndef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#elif PY_MAJOR_VERSION < 3
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
#define __Pyx_PyMODINIT_FUNC void
#endif
#else
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
#endif
/* Module entry point: initcore (Py2) / PyInit_core (Py3).  Under PEP 489
 * multi-phase init, PyInit_core only hands the module definition to the
 * interpreter; the real work happens in __pyx_pymod_exec_core below. */
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC initcore(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC initcore(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_core(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_core(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
  return PyModuleDef_Init(&__pyx_moduledef);
}
/* Rejects loading this module into more than one (sub)interpreter per
 * process.  Returns 0 if this is the first/same interpreter, -1 (with
 * ImportError set) otherwise.
 * (Function body continues past this chunk boundary.) */
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
  static PY_INT64_T main_interpreter_id = -1;
  PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
  if (main_interpreter_id == -1) {
    main_interpreter_id = current_id;
    return (unlikely(current_id == -1)) ?
-1 : 0; } else if (unlikely(main_interpreter_id != current_id)) #else static PyInterpreterState *main_interpreter = NULL; PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; if (!main_interpreter) { main_interpreter = current_interpreter; } else if (unlikely(main_interpreter != current_interpreter)) #endif { PyErr_SetString( PyExc_ImportError, "Interpreter change detected - this module can only be loaded into one interpreter per process."); return -1; } return 0; } static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { if (allow_none || value != Py_None) { result = PyDict_SetItemString(moddict, to_name, value); } Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__Pyx_check_single_interpreter()) return NULL; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static CYTHON_SMALL_CODE int __pyx_pymod_exec_core(PyObject *__pyx_pyinit_module) 
#endif #endif { PyObject *__pyx_t_1 = NULL; static PyThread_type_lock __pyx_t_2[8]; int __pyx_lineno = 0; const char *__pyx_filename = NULL; int __pyx_clineno = 0; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m) { if (__pyx_m == __pyx_pyinit_module) return 0; PyErr_SetString(PyExc_RuntimeError, "Module 'core' has already been imported. Re-initialisation is not supported."); return -1; } #elif PY_MAJOR_VERSION >= 3 if (__pyx_m) return __Pyx_NewRef(__pyx_m); #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_core(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pxy_PyFrame_Initialize_Offsets __Pxy_PyFrame_Initialize_Offsets(); #endif __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ 
/*--- Threads initialization code ---*/
#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
  /* PyEval_InitThreads is a no-op / deprecated on newer CPython (guarded) */
  PyEval_InitThreads();
#endif
  /*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
  /* PEP 489 multi-phase init: the interpreter already created the module
   * object and passed it in as __pyx_pyinit_module */
  __pyx_m = __pyx_pyinit_module;
  Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
  __pyx_m = Py_InitModule4("core", __pyx_methods, 0, 0, PYTHON_API_VERSION);
  Py_XINCREF(__pyx_m);
#else
  __pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
  if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
  /* cache the module dict, builtins module, and cython_runtime module for
   * later lookups during module exec */
  __pyx_d = PyModule_GetDict(__pyx_m);
  if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
  Py_INCREF(__pyx_d);
  __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME);
  if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
  Py_INCREF(__pyx_b);
  __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime");
  if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
  Py_INCREF(__pyx_cython_runtime);
  if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
  /*--- Initialize various global constants etc.
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_monotonic_align__core) { if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "monotonic_align.core")) { if (unlikely(PyDict_SetItemString(modules, "monotonic_align.core", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global type/function init code ---*/ (void)__Pyx_modinit_global_init_code(); (void)__Pyx_modinit_variable_export_code(); (void)__Pyx_modinit_function_export_code(); if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) (void)__Pyx_modinit_type_import_code(); (void)__Pyx_modinit_variable_import_code(); (void)__Pyx_modinit_function_import_code(); /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "monotonic_align/core.pyx":7 * @cython.boundscheck(False) * @cython.wraparound(False) * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< * cdef int x * cdef int y */ __pyx_k_ = (-1e9); /* "monotonic_align/core.pyx":1 * cimport cython # <<<<<<<<<<<<<< * from cython.parallel import prange * */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if 
(PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":209 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":286 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":287 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":288 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject 
*)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":291 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":292 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":316 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":317 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_2[0] = PyThread_allocate_lock(); __pyx_t_2[1] = PyThread_allocate_lock(); __pyx_t_2[2] = PyThread_allocate_lock(); __pyx_t_2[3] = PyThread_allocate_lock(); __pyx_t_2[4] = PyThread_allocate_lock(); __pyx_t_2[5] = PyThread_allocate_lock(); __pyx_t_2[6] = 
PyThread_allocate_lock(); __pyx_t_2[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":549 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":995 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * cdef object __pyx_PickleError * cdef object __pyx_result */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * 
cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init monotonic_align.core", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_CLEAR(__pyx_m); } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init monotonic_align.core"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule(modname); if (!m) goto end; p = PyObject_GetAttrString(m, "RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* PyObjectGetAttrStr */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, 
__Pyx_memviewslice *memviewslice, int memview_is_new_reference) {
  /* Fill `memviewslice` from the Py_buffer held by `memview`:
   *  - copy strides when the exporter supplied them, otherwise derive
   *    contiguous strides from itemsize, innermost dimension first;
   *  - copy shape; copy suboffsets, or set -1 per dimension when the
   *    exporter provided none (negative suboffset = no indirection);
   *  - point `data` at the buffer and, on the first acquisition, take a
   *    reference on `memview` unless the caller already holds a new one.
   * Returns 0 on success, -1 (ValueError set) if already initialized. */
  __Pyx_RefNannyDeclarations
  int i, retval=-1;
  Py_buffer *buf = &memview->view;
  __Pyx_RefNannySetupContext("init_memviewslice", 0);
  if (unlikely(memviewslice->memview || memviewslice->data)) {
    PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!");
    goto fail;
  }
  if (buf->strides) {
    for (i = 0; i < ndim; i++) {
      memviewslice->strides[i] = buf->strides[i];
    }
  } else {
    /* exporter gave no strides: compute them from itemsize outward */
    Py_ssize_t stride = buf->itemsize;
    for (i = ndim - 1; i >= 0; i--) {
      memviewslice->strides[i] = stride;
      stride *= buf->shape[i];
    }
  }
  for (i = 0; i < ndim; i++) {
    memviewslice->shape[i] = buf->shape[i];
    if (buf->suboffsets) {
      memviewslice->suboffsets[i] = buf->suboffsets[i];
    } else {
      memviewslice->suboffsets[i] = -1;
    }
  }
  memviewslice->memview = memview;
  memviewslice->data = (char *)buf->buf;
  if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
    Py_INCREF(memview);
  }
  retval = 0;
  goto no_fail;
fail:
  /* leave the slice zeroed so callers can detect the uninitialized state */
  memviewslice->memview = 0;
  memviewslice->data = 0;
  retval = -1;
no_fail:
  __Pyx_RefNannyFinishContext();
  return retval;
}
#ifndef Py_NO_RETURN
#define Py_NO_RETURN
#endif
/* Formats a message into a fixed buffer and aborts via Py_FatalError.
 * (Function body continues past this chunk boundary.) */
static void __pyx_fatalerror(const char *fmt, ...)
Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) return; if (unlikely(__pyx_get_slice_count(memview) < 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (unlikely(first_time)) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (unlikely(!memview || (PyObject *) memview == Py_None)) { memslice->memview = NULL; return; } if (unlikely(__pyx_get_slice_count(memview) <= 0)) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (unlikely(last_time)) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); 
PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? "" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && 
_PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && 
likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = Py_TYPE(func)->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { 
PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if 
(!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its 
exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = __Pyx_PyFrame_GetLocalsplus(f); for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, (int)nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall2Args */ static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { PyObject *args, *result = NULL; #if CYTHON_FAST_PYCALL if (PyFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyFunction_FastCall(function, args, 2); } #endif #if CYTHON_FAST_PYCCALL if 
(__Pyx_PyFastCFunction_Check(function)) { PyObject *args[2] = {arg1, arg2}; return __Pyx_PyCFunction_FastCall(function, args, 2); } #endif args = PyTuple_New(2); if (unlikely(!args)) goto done; Py_INCREF(arg1); PyTuple_SET_ITEM(args, 0, arg1); Py_INCREF(arg2); PyTuple_SET_ITEM(args, 1, arg2); Py_INCREF(function); result = __Pyx_PyObject_Call(function, args, NULL); Py_DECREF(args); Py_DECREF(function); done: return result; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (__Pyx_PyFastCFunction_Check(func)) { return __Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = 
PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto 
return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? (result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* DivInt[Py_ssize_t] */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { Py_ssize_t q = a / b; Py_ssize_t r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || 
likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* ObjectGetItem */ #if CYTHON_USE_TYPE_SLOTS static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { PyObject *runerr; Py_ssize_t key_value; PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; if (unlikely(!(m && m->sq_item))) { PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); return NULL; } key_value = __Pyx_PyIndex_AsSsize_t(index); if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); } if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { PyErr_Clear(); PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); } return NULL; } static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; if (likely(m && m->mp_subscript)) { return m->mp_subscript(obj, key); } return __Pyx_PyObject_GetIndex(obj, key); } #endif /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) 
/* Tail of __Pyx_decode_c_string (declaration begins on an earlier line):
   clamps negative start/stop offsets against the C string length and decodes
   the selected slice into a Python unicode object. */
PY_SSIZE_T_MAX)) {
    PyErr_SetString(PyExc_OverflowError,
                    "c-string too long to convert to Python");
    return NULL;
}
length = (Py_ssize_t) slen;
/* Negative offsets count from the end of the string, as in Python slicing. */
if (start < 0) {
    start += length;
    if (start < 0) start = 0;
}
if (stop < 0) stop += length;
}
/* An empty (or inverted) slice decodes to the shared empty unicode object. */
if (unlikely(stop <= start))
    return __Pyx_NewRef(__pyx_empty_unicode);
length = stop - start;
cstring += start;
if (decode_func) {
    /* Caller supplied a codec-specific decoder fast path. */
    return decode_func(cstring, length, errors);
} else {
    return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
/* Returns 1 if exc_type matches any entry of `tuple`.  The first loop is an
   identity fast path; the second does full subclass matching. */
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
    Py_ssize_t i, n;
    n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
    for (i=0; i<n; i++) {
        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
    }
#endif
    for (i=0; i<n; i++) {
        if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
    }
    return 0;
}
/* PyErr_ExceptionMatches() equivalent that reads the pending exception type
   directly from the supplied thread state (no thread-state lookup). */
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
    PyObject *exc_type = tstate->curexc_type;
    if (exc_type == err) return 1;       /* fast path: same type object */
    if (unlikely(!exc_type)) return 0;   /* no exception currently set */
    if (unlikely(PyTuple_Check(err)))
        return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
    return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetAttr3 */
/* Slow path for 3-arg getattr(o, n, d): swallow only AttributeError and
   return a new reference to the default; other errors propagate as NULL. */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
        return NULL;
    __Pyx_PyErr_Clear();
    Py_INCREF(d);
    return d;
}
/* Implements getattr(o, n, d); returns a new reference. */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
    PyObject *r = __Pyx_GetAttr(o, n);
    return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
}
/* PyDictVersioning */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
/* Version tag of a type's tp_dict, or 0 when the dict is absent
   (body continues on the next packed line). */
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
    PyObject *dict = Py_TYPE(obj)->tp_dict;
    return likely(dict) ?
__PYX_GET_DICT_VERSION(dict) : 0; } static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { PyObject **dictptr = NULL; Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; if (offset) { #if CYTHON_COMPILING_IN_CPYTHON dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); #else dictptr = _PyObject_GetDictPtr(obj); #endif } return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; } static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { PyObject *dict = Py_TYPE(obj)->tp_dict; if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) return 0; return obj_dict_version == __Pyx_get_object_dict_version(obj); } #endif /* GetModuleGlobalName */ #if CYTHON_USE_DICT_VERSIONS static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) #else static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) #endif { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } else if (unlikely(PyErr_Occurred())) { return NULL; } #else result = PyDict_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } #endif #else result = PyObject_GetItem(__pyx_d, name); __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) if (likely(result)) { return __Pyx_NewRef(result); } PyErr_Clear(); #endif return __Pyx_GetBuiltinName(name); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { 
/* Tail of __Pyx_RaiseTooManyValuesError (declaration begins on the previous
   packed line): raises ValueError for over-long unpacking. */
PyErr_Format(PyExc_ValueError,
             "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
/* Raises ValueError when unpacking received too few values; pluralizes
   "value"/"values" via the %.1s precision trick. */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
    PyErr_Format(PyExc_ValueError,
                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
                 index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
/* Raises the standard TypeError used when None is iterated/unpacked. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* ExtTypeTest */
/* Returns 1 if obj is an instance of `type`; otherwise sets TypeError and
   returns 0.  A NULL type is reported as a SystemError. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (likely(__Pyx_TypeCheck(obj, type)))
        return 1;
    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                 Py_TYPE(obj)->tp_name, type->tp_name);
    return 0;
}
/* GetTopmostException */
#if CYTHON_USE_EXC_INFO_STACK
/* Walks the thread state's exc_info chain past cleared entries (exc_type
   NULL or None) and returns the innermost frame still holding a live
   exception. */
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
    _PyErr_StackItem *exc_info = tstate->exc_info;
    while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
           exc_info->previous_item != NULL)
    {
        exc_info = exc_info->previous_item;
    }
    return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
/* Copies the thread state's current exc-info into *type/*value/*tb and turns
   the borrowed pointers into owned references (Py_XINCREF). */
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
    *type = exc_info->exc_type;
    *value = exc_info->exc_value;
    *tb = exc_info->exc_traceback;
#else
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
#endif
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
}
/* Installs a previously saved exception back into the thread state, releasing
   whatever was stored there (body continues on the next packed line). */
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
    _PyErr_StackItem *exc_info = tstate->exc_info;
    tmp_type =
exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = type; exc_info->exc_value = value; exc_info->exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) #endif { PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if CYTHON_USE_EXC_INFO_STACK { _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = local_type; exc_info->exc_value = local_value; exc_info->exc_traceback = local_tb; } #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; 
#endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if CYTHON_USE_EXC_INFO_STACK _PyErr_StackItem *exc_info = tstate->exc_info; tmp_type = exc_info->exc_type; tmp_value = exc_info->exc_value; tmp_tb = exc_info->exc_traceback; exc_info->exc_type = *type; exc_info->exc_value = *value; exc_info->exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { module = 
PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; assert(PyExceptionClass_Check(exc_type)); n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { PyObject *t = PyTuple_GET_ITEM(tuple, i); #if PY_MAJOR_VERSION < 3 if (likely(exc_type == t)) return 1; #endif if (likely(PyExceptionClass_Check(t))) { if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; } else { } } return 0; } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { if (likely(PyExceptionClass_Check(exc_type))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } else if (likely(PyTuple_Check(exc_type))) { return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); } else { } } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { assert(PyExceptionClass_Check(exc_type1)); assert(PyExceptionClass_Check(exc_type2)); if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { (void)inplace; (void)zerodivision_check; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = 
intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << 
PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } CYTHON_FALLTHROUGH; default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) 
/* Tail of __Pyx_PyInt_AddObjC (begins on an earlier packed line): returns the
   float fast-path result, then falls back to the generic PyNumber protocol. */
return PyFloat_FromDouble(result);
}
return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
/* DivInt[long] */
/* Floor division for C longs: C's `/` truncates toward zero, so the quotient
   is decremented when the remainder is non-zero and the signs of remainder
   and divisor differ ((r ^ b) < 0). */
static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
    long q = a / b;
    long r = a - q*b;
    q -= ((r != 0) & ((r ^ b) < 0));
    return q;
}
/* ImportFrom */
/* Implements `from module import name`: a failed attribute lookup that raised
   AttributeError is re-reported as the ImportError Python code expects. */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
    PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
    if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
        PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
            "cannot import name %.230s", PyString_AS_STRING(name));
#else
            "cannot import name %S", name);
#endif
    }
    return value;
}
/* HasAttr */
/* hasattr(o, n): -1 with TypeError for a non-string name; otherwise 1/0.
   Any lookup error is cleared and treated as "attribute absent". */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
    PyObject *r;
    if (unlikely(!__Pyx_PyBaseString_Check(n))) {
        PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string");
        return -1;
    }
    r = __Pyx_GetAttr(o, n);
    if (unlikely(!r)) {
        PyErr_Clear();
        return 0;
    } else {
        Py_DECREF(r);
        return 1;
    }
}
/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
/* Formats the standard AttributeError message for a failed lookup. */
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
    PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
        "'%.50s' object has no attribute '%U'", tp->tp_name, attr_name);
#else
        "'%.50s' object has no attribute '%.400s'", tp->tp_name, PyString_AS_STRING(attr_name));
#endif
    return NULL;
}
/* Generic getattr for types asserted to have no instance dict
   (tp_dictoffset == 0): resolves the name on the type via _PyType_Lookup and
   honours the descriptor protocol (body continues on the next packed line). */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
    PyObject *descr;
    PyTypeObject *tp = Py_TYPE(obj);
    if (unlikely(!PyString_Check(attr_name))) {
        return PyObject_GenericGetAttr(obj, attr_name);
    }
    assert(!tp->tp_dictoffset);
    descr = _PyType_Lookup(tp, attr_name);
    if (unlikely(!descr)) {
        return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
    }
    Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
    if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
    {
        descrgetfunc f =
Py_TYPE(descr)->tp_descr_get; if (unlikely(f)) { PyObject *res = f(descr, obj, (PyObject *)tp); Py_DECREF(descr); return res; } } return descr; } #endif /* PyObject_GenericGetAttr */ #if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { return PyObject_GenericGetAttr(obj, attr_name); } return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); } #endif /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* PyObjectGetAttrStrNoError */ static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) __Pyx_PyErr_Clear(); } static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); } #endif result = __Pyx_PyObject_GetAttrStr(obj, attr_name); if (unlikely(!result)) { __Pyx_PyObject_GetAttrStr_ClearAttributeError(); } return result; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); 
return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); if (likely(reduce_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (reduce == object_reduce || PyErr_Occurred()) { goto __PYX_BAD; } setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, 
__pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); if (likely(setstate_cython)) { ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; } else if (!setstate || PyErr_Occurred()) { goto __PYX_BAD; } } PyType_Modified((PyTypeObject*)type_obj); } } goto __PYX_GOOD; __PYX_BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; __PYX_GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif if (unlikely(!__pyx_cython_runtime)) { return c_line; } __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { __PYX_PY_DICT_LOOKUP_IF_MODIFIED( use_cline, *cython_runtime_dict, __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; (void) PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = 
__pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = NULL; PyObject *py_funcname = NULL; #if PY_MAJOR_VERSION < 3 PyObject *py_srcfile = NULL; py_srcfile = PyString_FromString(filename); if (!py_srcfile) goto bad; #endif if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); if (!py_funcname) goto bad; #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); if (!py_funcname) goto bad; funcname = PyUnicode_AsUTF8(py_funcname); if (!funcname) goto bad; #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); if (!py_funcname) goto bad; #endif } #if PY_MAJOR_VERSION < 3 py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject 
*freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); #else py_code = PyCode_NewEmpty(filename, funcname, py_line); #endif Py_XDECREF(py_funcname); // XDECREF since it's only set on Py3 if cline return py_code; bad: Py_XDECREF(py_funcname); #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_srcfile); #endif return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} view->obj = NULL; Py_DECREF(obj); } #endif /* 
MemviewSliceIsContig */
/* Check whether memoryview slice `mvs` is contiguous in `order`
 * ('F' = Fortran/column-major; anything else is treated as C/row-major).
 * Walks the dimensions starting from the fastest-varying one and verifies
 * that each stride equals the running element-block size and that the
 * dimension is not indirect (suboffset >= 0 marks indirection).
 * Returns 1 when contiguous, 0 otherwise. */
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs,
                             char order, int ndim)
{
    int i, index, step, start;
    Py_ssize_t itemsize = mvs.memview->view.itemsize;
    if (order == 'F') {
        /* Fortran order: dimension 0 varies fastest. */
        step = 1;
        start = 0;
    } else {
        /* C order: the last dimension varies fastest. */
        step = -1;
        start = ndim - 1;
    }
    for (i = 0; i < ndim; i++) {
        index = start + step * i;
        if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
            return 0;
        /* Grow the expected stride by this dimension's extent. */
        itemsize *= mvs.shape[index];
    }
    return 1;
}

/* OverlappingSlices */
/* Compute the [*out_start, *out_end) byte range spanned by `slice`.
 * A negative stride moves `start` downwards instead of `end` upwards;
 * a zero extent in any dimension yields an empty range. */
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
                               void **out_start, void **out_end,
                               int ndim, size_t itemsize)
{
    char *start, *end;
    int i;
    start = end = slice->data;
    for (i = 0; i < ndim; i++) {
        Py_ssize_t stride = slice->strides[i];
        Py_ssize_t extent = slice->shape[i];
        if (extent == 0) {
            *out_start = *out_end = start;
            return;
        } else {
            if (stride > 0)
                end += stride * (extent - 1);
            else
                start += stride * (extent - 1);
        }
    }
    *out_start = start;
    /* Make the range half-open: one item past the last addressed byte. */
    *out_end = end + itemsize;
}

/* Return nonzero when the byte ranges of the two slices intersect
 * (standard half-open interval overlap test). */
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
                     __Pyx_memviewslice *slice2,
                     int ndim, size_t itemsize)
{
    void *start1, *end1, *start2, *end2;
    __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
    __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
    return (start1 < end2) && (start2 < end1);
}

/* Capsule */
/* Wrap the raw pointer `p` in a PyCapsule (PyCObject before Python 2.7).
 * `sig` becomes the capsule name; it is unused in the legacy PyCObject
 * branch. Returns a new reference, or NULL on failure. */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
    PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
    cobj = PyCapsule_New(p, sig, NULL);
#else
    cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
    return cobj;
}

/* IsLittleEndian */
/* Runtime endianness probe: on a little-endian machine the lowest-addressed
 * byte of 0x01020304 is 0x04, so u8[0] == 4 exactly there. */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
    union {
        uint32_t u32;
        uint8_t u8[4];
    } S;
    S.u32 = 0x01020304;
    return S.u8[0] == 4;
}

/* BufferFormatCheck */
/* Reset a buffer-format parsing context: `stack` holds the traversal
 * state and `type` is the dtype the incoming format string must match.
 * While the root type has typegroup 'S' (handled elsewhere in this file
 * as a nested sub-struct) we immediately descend into its first field. */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
                              __Pyx_BufFmt_StackElem* stack,
                              __Pyx_TypeInfo* type)
{
    stack[0].field = &ctx->root;
    stack[0].parent_offset = 0;
    ctx->root.type = type;
    ctx->root.name = "buffer dtype";
    ctx->root.offset = 0;
    ctx->head = stack;
    ctx->head->field = &ctx->root;
    ctx->fmt_offset = 0;
    ctx->head->parent_offset = 0;
    ctx->new_packmode = '@';  /* '@' = native mode, as in the struct module */
    ctx->enc_packmode = '@';
    ctx->new_count = 1;
    ctx->enc_count = 0;
    ctx->enc_type = 0;
    ctx->is_complex = 0;
    ctx->is_valid_array = 0;
    ctx->struct_alignment = 0;
    while (type->typegroup == 'S') {
        ++ctx->head;
        ctx->head->field = type->fields;
        ctx->head->parent_offset = 0;
        type = type->fields->type;
    }
}

/* Parse a decimal repeat count at *ts, advancing *ts past the digits.
 * Returns -1 (with *ts unchanged) when no digit is present. */
static int __Pyx_BufFmt_ParseNumber(const char** ts)
{
    int count;
    const char* t = *ts;
    if (*t < '0' || *t > '9') {
        return -1;
    } else {
        count = *t++ - '0';
        while (*t >= '0' && *t <= '9') {
            count *= 10;
            count += *t++ - '0';
        }
    }
    *ts = t;
    return count;
}

/* Like __Pyx_BufFmt_ParseNumber, but sets a ValueError on failure. */
static int __Pyx_BufFmt_ExpectNumber(const char **ts)
{
    int number = __Pyx_BufFmt_ParseNumber(ts);
    if (number == -1)
        PyErr_Format(PyExc_ValueError,\
            "Does not understand character buffer dtype format string ('%c')", **ts);
    return number;
}

/* Raise ValueError for a format character the parser cannot handle. */
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch)
{
    PyErr_Format(PyExc_ValueError,
                 "Unexpected format string character: '%c'", ch);
}

/* Human-readable name for a struct-module format character, used in
 * "Buffer dtype mismatch" error messages. */
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex)
{
    switch (ch) {
    case '?': return "'bool'";
    case 'c': return "'char'";
    case 'b': return "'signed char'";
    case 'B': return "'unsigned char'";
    case 'h': return "'short'";
    case 'H': return "'unsigned short'";
    case 'i': return "'int'";
    case 'I': return "'unsigned int'";
    case 'l': return "'long'";
    case 'L': return "'unsigned long'";
    case 'q': return "'long long'";
    case 'Q': return "'unsigned long long'";
    case 'f': return (is_complex ? "'complex float'" : "'float'");
    case 'd': return (is_complex ? "'complex double'" : "'double'");
    case 'g': return (is_complex ? "'complex long double'" : "'long double'");
    case 'T': return "a struct";
    case 'O': return "Python object";
    case 'P': return "a pointer";
    case 's': case 'p': return "a string";
    case 0: return "end";
    default: return "unparseable format string";
    }
}

/* Size of a format character in standard (fixed-size) packing modes,
 * matching the struct module's standard sizes.  Returns 0 and sets an
 * exception for characters without a standard size. */
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex)
{
    switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return 2;
    case 'i': case 'I': case 'l': case 'L': return 4;
    case 'q': case 'Q': return 8;
    case 'f': return (is_complex ? 8 : 4);
    case 'd': return (is_complex ? 16 : 8);
    case 'g': {
        PyErr_SetString(PyExc_ValueError,
            "Python does not define a standard format string size for long double ('g')..");
        return 0;
    }
    case 'O': case 'P': return sizeof(void*);
    default:
        __Pyx_BufFmt_RaiseUnexpectedChar(ch);
        return 0;
    }
}

/* Size of a format character in native ('@'/'^') mode: the compiler's
 * sizeof, with complex variants twice the base size. */
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex)
{
    switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(short);
    case 'i': case 'I': return sizeof(int);
    case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(float) * (is_complex ? 2 : 1);
    case 'd': return sizeof(double) * (is_complex ? 2 : 1);
    case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
    case 'O': case 'P': return sizeof(void*);
    default: {
        __Pyx_BufFmt_RaiseUnexpectedChar(ch);
        return 0;
    }
    }
}

/* Alignment probes: in `struct { char c; T x; }` the offset of `x` is the
 * natural alignment of T, so sizeof(struct) - sizeof(T) recovers alignof(T)
 * without requiring C11 _Alignof. */
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif

/* Alignment requirement of a format character in native mode, computed
 * with the char-padding trick above.  Returns 0 and sets an exception
 * for unsupported characters. */
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex)
{
    switch (ch) {
    case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
    case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
    case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
    case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
    case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
    case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
    case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
    case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
    case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
    default:
        __Pyx_BufFmt_RaiseUnexpectedChar(ch);
        return 0;
    }
}
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct.  This will probably be the same as
   above, but we don't have any guarantees.
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * 
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number, ndim; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ndim = ctx->head->field->type->ndim; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } 
ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } CYTHON_FALLTHROUGH; case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } CYTHON_FALLTHROUGH; case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': 
++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (unlikely(buf->strides[dim] != sizeof(void *))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (unlikely(buf->strides[dim] != buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (unlikely(stride < buf->itemsize)) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same 
dimension."); goto fail; } } } else { if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (unlikely(buf->suboffsets)) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & __Pyx_MEMVIEW_PTR) { if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct 
__pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (unlikely(buf->ndim != ndim)) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; } if (unlikely((unsigned) buf->itemsize != dtype->size)) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } if (buf->len > 0) { for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) goto fail; if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) goto fail; } if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) goto fail; } if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | 
PyBUF_FORMAT) | writable_flag, 3, &__Pyx_TypeInfo_float, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, &__Pyx_TypeInfo_int, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } 
}; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (unlikely(from_mvs->suboffsets[i] >= 0)) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const int neg_one = (int) -1, const_zero = (int) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return 
PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const int neg_one = (int) -1, const_zero = (int) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * 
PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return 
(int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { 
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const long neg_one = (long) -1, const_zero = (long) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma 
GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const long neg_one = (long) -1, const_zero = (long) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) 
(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { 
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | 
(long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntFromPy */ static CYTHON_INLINE char 
__Pyx_PyInt_As_char(PyObject *x) { #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wconversion" #endif const char neg_one = (char) -1, const_zero = (char) 0; #ifdef __Pyx_HAS_GCC_DIAGNOSTIC #pragma GCC diagnostic pop #endif const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * 
PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << 
PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, 
PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = 
PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && 
#endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { int retval; if (unlikely(!x)) return -1; retval = __Pyx_PyObject_IsTrue(x); Py_DECREF(x); return retval; } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). 
" "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) { if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) { return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o); #if PY_MAJOR_VERSION < 3 } else if (likely(PyInt_CheckExact(o))) { return PyInt_AS_LONG(o); #endif } else { Py_ssize_t ival; PyObject *x; x = PyNumber_Index(o); if (!x) return -1; ival = PyInt_AsLong(x); Py_DECREF(x); return ival; } } static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
GB_binop__plus_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__plus_fp64) // A.*B function (eWiseMult): GB (_AemultB_01__plus_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__plus_fp64) // A.*B function (eWiseMult): GB (_AemultB_03__plus_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_fp64) // A*D function (colscale): GB (_AxD__plus_fp64) // D*A function (rowscale): GB (_DxB__plus_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__plus_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__plus_fp64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fp64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_fp64) // C=scalar+B GB (_bind1st__plus_fp64) // C=scalar+B' GB (_bind1st_tran__plus_fp64) // C=A+scalar GB (_bind2nd__plus_fp64) // C=A'+scalar GB (_bind2nd_tran__plus_fp64) // C type: double // A type: double // B,b type: double // BinaryOp: cij = (aij + bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x + y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PLUS || GxB_NO_FP64 || GxB_NO_PLUS_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__plus_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__plus_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__plus_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__plus_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__plus_fp64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__plus_fp64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__plus_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__plus_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__plus_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__plus_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__plus_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__plus_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p 
= 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = (x + bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__plus_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = (aij + y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x + aij) ; \ } GrB_Info GB (_bind1st_tran__plus_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij + y) ; \ } GrB_Info GB (_bind2nd_tran__plus_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
conv3x3s1_winograd64_transform_kernel_neon_GgG.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "option.h" #include "mat.h" namespace ncnn{ static void conv3x3s1_winograd64_transform_kernel_neon_GgG(const Mat& kernel, Mat& kernel_tm, int inch, int outch) { kernel_tm.create(8*8, inch, outch); const float ktm[8][3] = { { 1.0f, 0.0f, 0.0f}, {-2.0f/9, -2.0f/9, -2.0f/9}, {-2.0f/9, 2.0f/9, -2.0f/9}, {1.0f/90, 1.0f/45, 2.0f/45}, {1.0f/90, -1.0f/45, 2.0f/45}, {1.0f/45, 1.0f/90, 1.0f/180}, {1.0f/45, -1.0f/90, 1.0f/180}, { 0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p<outch; p++) { for (int q = 0; q<inch; q++) { const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i=0; i<8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j=0; j<8; j++) { float* tmpp = &tmp[j][0]; for (int i=0; i<8; i++) { kernel_tm0[j*8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } } }
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 24; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-4,6),ceild(8*t2-Nz-11,24));t3<=min(floord(4*Nt+Ny-9,24),floord(4*t1+Ny-1,24));t3++) { for (t4=max(max(ceild(t1-510,512),ceild(8*t2-Nz-2035,2048)),ceild(24*t3-Ny-2035,2048));t4<=min(min(floord(4*Nt+Nx-9,2048),floord(4*t1+Nx-1,2048)),floord(24*t3+Nx+11,2048));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(2048*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),6*t3+4),512*t4+510);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) { lbv=max(2048*t4,4*t5+4); ubv=min(2048*t4+2047,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ 
(-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 4; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/memory_.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resample.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImage() returns the second image composited onto the first % at the specified offset, using the specified composite method. 
%
%  The format of the CompositeImage method is:
%
%      MagickBooleanType CompositeImage(Image *image,
%        const Image *source_image,const CompositeOperator compose,
%        const MagickBooleanType clip_to_self,const ssize_t x_offset,
%        const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the canvas image, modified by the composition
%
%    o source_image: the source image.
%
%    o compose: This operator affects how the composite is applied to
%      the image.  The operators and how they are utilized are listed here
%      http://www.w3.org/TR/SVG12/#compositing.
%
%    o clip_to_self: set to MagickTrue to limit composition to area composed.
%
%    o x_offset: the column offset of the composited image.
%
%    o y_offset: the row offset of the composited image.
%
%  Extra Controls from Image meta-data in 'image' (artifacts)
%
%    o "compose:args"
%        A string containing extra numerical arguments for specific compose
%        methods, generally expressed as a 'geometry' or a comma separated
%        list of numbers.
%
%        Compose methods needing such arguments include "BlendCompositeOp"
%        and "DisplaceCompositeOp".
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
   Composition based on the SVG specification:

   A Composition is defined by...
      Color Function :  f(Sc,Dc)  where Sc and Dc are the normalized colors
      Blending areas :  X = 1     for area of overlap, ie: f(Sc,Dc)
                        Y = 1     for source preserved
                        Z = 1     for canvas preserved

   Conversion to transparency (then optimized)
      Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa)
      Da'  = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa)

   Where...
      Sca = Sc*Sa     normalized Source color multiplied by Source alpha
      Dca = Dc*Da     normalized Dest color multiplied by Dest alpha
      Dc' = Dca'/Da'  the desired color value for this channel.

   Da' appears in the following formula as 'gamma': the resulting alpha
   value.

   Most functions use a blending mode of over (X=1,Y=1,Z=1); this results in
   the following optimizations...
gamma = Sa+Da-Sa*Da;
      gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
      opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma

   The above SVG definitions also dictate that Mathematical Composition
   methods should use a 'Over' blending mode for the Alpha Channel.  It
   however was not applied for composition modes of 'Plus', 'Minus', the
   modulus versions of 'Add' and 'Subtract'.

   Mathematical operator changes to be applied from IM v6.7...

    1) Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
       'ModulusAdd' and 'ModulusSubtract' for clarity.

    2) All mathematical compositions work as per the SVG specification with
       regard to blending.  This now includes 'ModulusAdd' and
       'ModulusSubtract'.

    3) When the special channel flag 'sync' (synchronize channel updates) is
       turned off (enabled by default) then mathematical compositions are
       only performed on the channels specified, and are applied
       independently of each other.  In other words the mathematics is
       performed as 'pure' mathematical operations, rather than as image
       operations.
*/

/*
  HCLComposite() converts one color from the cylindrical HCL colorspace
  (hue, chroma, luma -- each normalized) to quantum-scaled RGB, storing the
  result in *red, *green and *blue.  Inverse of CompositeHCL() below.
*/
static void HCLComposite(const MagickRealType hue,const MagickRealType chroma,
  const MagickRealType luma,MagickRealType *red,MagickRealType *green,
  MagickRealType *blue)
{
  MagickRealType
    b,
    c,
    g,
    h,
    m,
    r,
    x;

  /*
    Convert HCL to RGB colorspace.
  */
  assert(red != (MagickRealType *) NULL);
  assert(green != (MagickRealType *) NULL);
  assert(blue != (MagickRealType *) NULL);
  h=6.0*hue;   /* hue sector in [0,6) */
  c=chroma;
  x=c*(1.0-fabs(fmod(h,2.0)-1.0));   /* second-largest RGB component */
  r=0.0;
  g=0.0;
  b=0.0;
  /* Pick the (r,g,b) pattern for the hue sector. */
  if ((0.0 <= h) && (h < 1.0))
    {
      r=c;
      g=x;
    }
  else
    if ((1.0 <= h) && (h < 2.0))
      {
        r=x;
        g=c;
      }
    else
      if ((2.0 <= h) && (h < 3.0))
        {
          g=c;
          b=x;
        }
      else
        if ((3.0 <= h) && (h < 4.0))
          {
            g=x;
            b=c;
          }
        else
          if ((4.0 <= h) && (h < 5.0))
            {
              r=x;
              b=c;
            }
          else
            if ((5.0 <= h) && (h < 6.0))
              {
                r=c;
                b=x;
              }
  /* Add the luma residue using Rec.601-style weights. */
  m=luma-(0.298839*r+0.586811*g+0.114350*b);
  *red=QuantumRange*(r+m);
  *green=QuantumRange*(g+m);
  *blue=QuantumRange*(b+m);
}

/*
  CompositeHCL() converts quantum-scaled RGB to the cylindrical HCL
  colorspace, returning normalized hue, chroma and luma.
*/
static void CompositeHCL(const MagickRealType red,const MagickRealType green,
  const MagickRealType blue,MagickRealType *hue,MagickRealType *chroma,
  MagickRealType *luma)
{
  MagickRealType
    b,
    c,
    g,
    h,
    max,
    r;

  /*
    Convert RGB to HCL colorspace.
  */
  assert(hue != (MagickRealType *) NULL);
  assert(chroma != (MagickRealType *) NULL);
  assert(luma != (MagickRealType *) NULL);
  r=red;
  g=green;
  b=blue;
  max=MagickMax(r,MagickMax(g,b));
  c=max-(MagickRealType) MagickMin(r,MagickMin(g,b));   /* chroma */
  h=0.0;
  if (c == 0)
    h=0.0;   /* achromatic: hue is undefined, use 0 */
  else
    if (red == max)
      h=fmod((g-b)/c+6.0,6.0);
    else
      if (green == max)
        h=((b-r)/c)+2.0;
      else
        if (blue == max)
          h=((r-g)/c)+4.0;
  *hue=(h/6.0);
  *chroma=QuantumScale*c;
  *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b);
}

/*
  CompositeOverImage() is the fast path for the common Porter-Duff 'over'
  operator: source_image is blended onto 'image' at (x_offset,y_offset),
  optionally clipped to the overlay area (clip_to_self).
*/
static MagickBooleanType CompositeOverImage(Image *image,
  const Image *source_image,const MagickBooleanType clip_to_self,
  const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *image_view,
    *source_view;

  const char
    *value;

  MagickBooleanType
    clamp,
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Composite image.
*/
  status=MagickTrue;
  progress=0;
  clamp=MagickTrue;
  /* The "compose:clamp" artifact selects ClampPixel() vs ClampToQuantum()
     for the final per-channel store. */
  value=GetImageArtifact(image,"compose:clamp");
  if (value != (const char *) NULL)
    clamp=IsStringTrue(value);
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(source_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *pixels;

    PixelInfo
      canvas_pixel,
      source_pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    if (clip_to_self != MagickFalse)
      {
        /* Skip canvas rows with no overlay coverage. */
        if (y < y_offset)
          continue;
        if ((y-y_offset) >= (ssize_t) source_image->rows)
          continue;
      }
    /*
      If pixels is NULL, y is outside overlay region.
    */
    pixels=(Quantum *) NULL;
    p=(Quantum *) NULL;
    if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
      {
        p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
          source_image->columns,1,exception);
        if (p == (const Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixels=p;
        /* Negative x_offset: advance p so source column 0 lines up with
           canvas column 0. */
        if (x_offset < 0)
          p-=x_offset*GetPixelChannels(source_image);
      }
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&canvas_pixel);
    GetPixelInfo(source_image,&source_pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      MagickRealType
        alpha,
        Da,
        Dc,
        Dca,
        Sa,
        Sc,
        Sca;

      register ssize_t
        i;

      size_t
        channels;

      if (clip_to_self != MagickFalse)
        {
          if (x < x_offset)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          if ((x-x_offset) >= (ssize_t) source_image->columns)
            break;
        }
      if ((pixels == (Quantum *) NULL) || (x < x_offset) ||
          ((x-x_offset) >= (ssize_t) source_image->columns))
        {
          Quantum
            source[MaxPixelChannels];

          /*
            Virtual composite:
              Sc: source color.
              Dc: canvas color.
          */
          (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source,
            exception);
          if (GetPixelWriteMask(image,q) == 0)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            MagickRealType
              pixel;

            PixelChannel channel=GetPixelChannelChannel(image,i);
            PixelTrait traits=GetPixelChannelTraits(image,channel);
            PixelTrait source_traits=GetPixelChannelTraits(source_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (source_traits == UndefinedPixelTrait))
              continue;
            /* Outside the overlay the canvas keeps its color; alpha becomes
               transparent. */
            if (channel == AlphaPixelChannel)
              pixel=(MagickRealType) TransparentAlpha;
            else
              pixel=(MagickRealType) q[i];
            q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
              ClampToQuantum(pixel);
          }
          q+=GetPixelChannels(image);
          continue;
        }
      /*
        Authentic composite:
          Sa: normalized source alpha.
          Da: normalized canvas alpha.
      */
      Sa=QuantumScale*GetPixelAlpha(source_image,p);
      Da=QuantumScale*GetPixelAlpha(image,q);
      alpha=Sa+Da-Sa*Da;   /* Porter-Duff 'over' composite alpha */
      if (GetPixelWriteMask(image,q) == 0)
        {
          p+=GetPixelChannels(source_image);
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        MagickRealType
          pixel;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((source_traits == UndefinedPixelTrait) &&
            (channel != AlphaPixelChannel))
          continue;
        if (channel == AlphaPixelChannel)
          {
            /*
              Set alpha channel.
            */
            pixel=QuantumRange*alpha;
            q[i]=clamp != MagickFalse ? ClampPixel(pixel) :
              ClampToQuantum(pixel);
            continue;
          }
        /*
          Sc: source color.
          Dc: canvas color.
        */
        Sc=(MagickRealType) GetPixelChannel(source_image,channel,p);
        Dc=(MagickRealType) q[i];
        if ((traits & CopyPixelTrait) != 0)
          {
            /*
              Copy channel.
            */
            q[i]=Sc;
            continue;
          }
        /*
          Porter-Duff compositions:
            Sca: source normalized color multiplied by alpha.
            Dca: normalized canvas color multiplied by alpha.
        */
        Sca=QuantumScale*Sa*Sc;
        Dca=QuantumScale*Da*Dc;
        gamma=PerceptibleReciprocal(alpha);   /* safe 1/alpha */
        pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa));
        q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(source_image);
      channels=GetPixelChannels(source_image);
      /* p walks the source row; wrap to the row start once past its end. */
      if (p >= (pixels+channels*source_image->columns))
        p=pixels;
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CompositeImage)
#endif
        proceed=SetImageProgress(image,CompositeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
  CompositeImage() composites 'composite' onto 'image' at
  (x_offset,y_offset) using operator 'compose'; see the documentation block
  earlier in this file for parameter details.
*/
MagickExport MagickBooleanType CompositeImage(Image *image,
  const Image *composite,const CompositeOperator compose,
  const MagickBooleanType clip_to_self,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *source_view,
    *image_view;

  const char
    *value;

  GeometryInfo
    geometry_info;

  Image
    *canvas_image,
    *source_image;

  MagickBooleanType
    clamp,
    status;

  MagickOffsetType
    progress;

  MagickRealType
    amount,
    canvas_dissolve,
    midpoint,
    percent_luma,
    percent_chroma,
    source_dissolve,
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(composite != (Image *) NULL);
  assert(composite->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* Work on a private clone of the source so it can be recolored/replaced
     without touching the caller's image. */
  source_image=CloneImage(composite,0,0,MagickTrue,exception);
  if (source_image == (const Image *) NULL)
    return(MagickFalse);
  if
(IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); (void) SetImageColorspace(source_image,image->colorspace,exception); if ((compose == OverCompositeOp) || (compose == SrcOverCompositeOp)) { status=CompositeOverImage(image,source_image,clip_to_self,x_offset, y_offset,exception); source_image=DestroyImage(source_image); return(status); } amount=0.5; canvas_image=(Image *) NULL; canvas_dissolve=1.0; clamp=MagickTrue; value=GetImageArtifact(image,"compose:clamp"); if (value != (const char *) NULL) clamp=IsStringTrue(value); SetGeometryInfo(&geometry_info); percent_luma=100.0; percent_chroma=100.0; source_dissolve=1.0; threshold=0.05f; switch (compose) { case CopyCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *p; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) source_image->columns; x++) { register ssize_t i; if (GetPixelReadMask(source_image,p) == 0) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel 
channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if (traits == UndefinedPixelTrait) continue; if (source_traits != UndefinedPixelTrait) SetPixelChannel(image,channel,p[i],q); else if (channel == AlphaPixelChannel) SetPixelChannel(image,channel,OpaqueAlpha,q); } p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag, (MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case IntensityCompositeOp: { if ((x_offset < 0) || (y_offset < 0)) break; if ((x_offset+(ssize_t) source_image->columns) > (ssize_t) image->columns) break; if ((y_offset+(ssize_t) source_image->rows) > (ssize_t) image->rows) break; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(source_image,image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *p; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset, source_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } 
for (x=0; x < (ssize_t) source_image->columns; x++) { if (GetPixelReadMask(source_image,p) == 0) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } SetPixelAlpha(image,clamp != MagickFalse ? ClampPixel(GetPixelIntensity(source_image,p)) : ClampToQuantum(GetPixelIntensity(source_image,p)),q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag, (MagickOffsetType) y,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); return(status); } case CopyAlphaCompositeOp: case ChangeMaskCompositeOp: { /* Modify canvas outside the overlaid region and require an alpha channel to exist, to add transparency. */ if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case BlurCompositeOp: { CacheView *canvas_view; MagickRealType angle_range, angle_start, height, width; PixelInfo pixel; ResampleFilter *resample_filter; SegmentInfo blur; /* Blur Image by resampling. Blur Image dictated by an overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,angle]]. */ canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } /* Gather the maximum blur sigma values from user. 
*/ flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (const char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & WidthValue) == 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "InvalidSetting","'%s' '%s'","compose:args",value); source_image=DestroyImage(source_image); canvas_image=DestroyImage(canvas_image); return(MagickFalse); } /* Users input sigma now needs to be converted to the EWA ellipse size. The filter defaults to a sigma of 0.5 so to make this match the users input the ellipse size needs to be doubled. */ width=height=geometry_info.rho*2.0; if ((flags & HeightValue) != 0 ) height=geometry_info.sigma*2.0; /* Default the unrotated ellipse width and height axis vectors. */ blur.x1=width; blur.x2=0.0; blur.y1=0.0; blur.y2=height; /* rotate vectors if a rotation angle is given */ if ((flags & XValue) != 0 ) { MagickRealType angle; angle=DegreesToRadians(geometry_info.xi); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } /* Otherwise lets set a angle range and calculate in the loop */ angle_start=0.0; angle_range=0.0; if ((flags & YValue) != 0 ) { angle_start=DegreesToRadians(geometry_info.xi); angle_range=DegreesToRadians(geometry_info.psi)-angle_start; } /* Set up a gaussian cylindrical filter for EWA Bluring. As the minimum ellipse radius of support*1.0 the EWA algorithm can only produce a minimum blur of 0.5 for Gaussian (support=2.0) This means that even 'No Blur' will be still a little blurry! The solution (as well as the problem of preventing any user expert filter settings, is to set our own user settings, then restore them afterwards. 
*/ resample_filter=AcquireResampleFilter(image,exception); SetResampleFilter(resample_filter,GaussianFilter); /* do the variable blurring of each pixel in image */ GetPixelInfo(image,&pixel); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } if (fabs((double) angle_range) > MagickEpsilon) { MagickRealType angle; angle=angle_start+angle_range*QuantumScale* GetPixelBlue(source_image,p); blur.x1=width*cos(angle); blur.x2=width*sin(angle); blur.y1=(-height*sin(angle)); blur.y2=height*cos(angle); } #if 0 if ( x == 10 && y == 60 ) { (void) fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n",blur.x1, blur.x2,blur.y1, blur.y2); (void) fprintf(stderr, "scaled by=%lf,%lf\n",QuantumScale* GetPixelRed(p),QuantumScale*GetPixelGreen(p)); #endif ScaleResampleFilter(resample_filter, blur.x1*QuantumScale*GetPixelRed(source_image,p), blur.y1*QuantumScale*GetPixelGreen(source_image,p), blur.x2*QuantumScale*GetPixelRed(source_image,p), blur.y2*QuantumScale*GetPixelGreen(source_image,p) ); (void) ResamplePixelColor(resample_filter,(double) x_offset+x, (double) y_offset+y,&pixel,exception); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if 
(sync == MagickFalse) break; } resample_filter=DestroyResampleFilter(resample_filter); source_view=DestroyCacheView(source_view); canvas_view=DestroyCacheView(canvas_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DisplaceCompositeOp: case DistortCompositeOp: { CacheView *canvas_view; MagickRealType horizontal_scale, vertical_scale; PixelInfo pixel; PointInfo center, offset; /* Displace/Distort based on overlay gradient map: X = red_channel; Y = green_channel; compose:args = x_scale[,y_scale[,center.x,center.y]] */ canvas_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (canvas_image == (Image *) NULL) { source_image=DestroyImage(source_image); return(MagickFalse); } SetGeometryInfo(&geometry_info); flags=NoValue; value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) flags=ParseGeometry(value,&geometry_info); if ((flags & (WidthValue | HeightValue)) == 0 ) { if ((flags & AspectValue) == 0) { horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0; vertical_scale=(MagickRealType) (source_image->rows-1)/2.0; } else { horizontal_scale=(MagickRealType) (image->columns-1)/2.0; vertical_scale=(MagickRealType) (image->rows-1)/2.0; } } else { horizontal_scale=geometry_info.rho; vertical_scale=geometry_info.sigma; if ((flags & PercentValue) != 0) { if ((flags & AspectValue) == 0) { horizontal_scale*=(source_image->columns-1)/200.0; vertical_scale*=(source_image->rows-1)/200.0; } else { horizontal_scale*=(image->columns-1)/200.0; vertical_scale*=(image->rows-1)/200.0; } } if ((flags & HeightValue) == 0) vertical_scale=horizontal_scale; } /* Determine fixed center point for absolute distortion map Absolute distort == Displace offset relative to a fixed absolute point Select that point according to +X+Y user inputs. default = center of overlay image arg flag '!' 
= locations/percentage relative to background image */ center.x=(MagickRealType) x_offset; center.y=(MagickRealType) y_offset; if (compose == DistortCompositeOp) { if ((flags & XValue) == 0) if ((flags & AspectValue) != 0) center.x=(MagickRealType) ((image->columns-1)/2.0); else center.x=(MagickRealType) (x_offset+(source_image->columns-1)/ 2.0); else if ((flags & AspectValue) != 0) center.x=geometry_info.xi; else center.x=(MagickRealType) (x_offset+geometry_info.xi); if ((flags & YValue) == 0) if ((flags & AspectValue) != 0) center.y=(MagickRealType) ((image->rows-1)/2.0); else center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0); else if ((flags & AspectValue) != 0) center.y=geometry_info.psi; else center.y=(MagickRealType) (y_offset+geometry_info.psi); } /* Shift the pixel offset point as defined by the provided, displacement/distortion map. -- Like a lens... */ GetPixelInfo(image,&pixel); image_view=AcquireVirtualCacheView(image,exception); source_view=AcquireVirtualCacheView(source_image,exception); canvas_view=AcquireAuthenticCacheView(canvas_image,exception); for (y=0; y < (ssize_t) source_image->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows)) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) source_image->columns; x++) { if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns)) { p+=GetPixelChannels(source_image); continue; } /* Displace the offset. */ offset.x=(double) (horizontal_scale*(GetPixelRed(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ? 
x : 0); offset.y=(double) (vertical_scale*(GetPixelGreen(source_image,p)- (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType) QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ? y : 0); (void) InterpolatePixelInfo(image,image_view, UndefinedInterpolatePixel,(double) offset.x,(double) offset.y, &pixel,exception); /* Mask with the 'invalid pixel mask' in alpha channel. */ pixel.alpha=(MagickRealType) QuantumRange*(QuantumScale*pixel.alpha)* (QuantumScale*GetPixelAlpha(source_image,p)); SetPixelViaPixelInfo(canvas_image,&pixel,q); p+=GetPixelChannels(source_image); q+=GetPixelChannels(canvas_image); } sync=SyncCacheViewAuthenticPixels(canvas_view,exception); if (sync == MagickFalse) break; } canvas_view=DestroyCacheView(canvas_view); source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); source_image=canvas_image; break; } case DissolveCompositeOp: { /* Geometry arguments to dissolve factors. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0; if ((source_dissolve-MagickEpsilon) < 0.0) source_dissolve=0.0; if ((source_dissolve+MagickEpsilon) > 1.0) { canvas_dissolve=2.0-source_dissolve; source_dissolve=1.0; } if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; if ((canvas_dissolve-MagickEpsilon) < 0.0) canvas_dissolve=0.0; } break; } case BlendCompositeOp: { value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); source_dissolve=geometry_info.rho/100.0; canvas_dissolve=1.0-source_dissolve; if ((flags & SigmaValue) != 0) canvas_dissolve=geometry_info.sigma/100.0; } break; } case MathematicsCompositeOp: { /* Just collect the values from "compose:args", setting. Unused values are set to zero automagically. 
Arguments are normally a comma separated list, so this probably should be changed to some 'general comma list' parser, (with a minimum number of values) */ SetGeometryInfo(&geometry_info); value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) (void) ParseGeometry(value,&geometry_info); break; } case ModulateCompositeOp: { /* Determine the luma and chroma scale. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); percent_luma=geometry_info.rho; if ((flags & SigmaValue) != 0) percent_chroma=geometry_info.sigma; } break; } case ThresholdCompositeOp: { /* Determine the amount and threshold. */ value=GetImageArtifact(image,"compose:args"); if (value != (char *) NULL) { flags=ParseGeometry(value,&geometry_info); amount=geometry_info.rho; threshold=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold=0.05f; } threshold*=QuantumRange; break; } default: break; } /* Composite image. */ status=MagickTrue; progress=0; midpoint=((MagickRealType) QuantumRange+1.0)/2; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(source_image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *pixels; MagickRealType blue, chroma, green, hue, luma, red; PixelInfo canvas_pixel, source_pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; if (clip_to_self != MagickFalse) { if (y < y_offset) continue; if ((y-y_offset) >= (ssize_t) source_image->rows) continue; } /* If pixels is NULL, y is outside overlay region. 
*/ pixels=(Quantum *) NULL; p=(Quantum *) NULL; if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows)) { p=GetCacheViewVirtualPixels(source_view,0,y-y_offset, source_image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } pixels=p; if (x_offset < 0) p-=x_offset*GetPixelChannels(source_image); } q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } hue=0.0; chroma=0.0; luma=0.0; GetPixelInfo(image,&canvas_pixel); GetPixelInfo(source_image,&source_pixel); for (x=0; x < (ssize_t) image->columns; x++) { double gamma; MagickRealType alpha, Da, Dc, Dca, Sa, Sc, Sca; register ssize_t i; size_t channels; if (clip_to_self != MagickFalse) { if (x < x_offset) { q+=GetPixelChannels(image); continue; } if ((x-x_offset) >= (ssize_t) source_image->columns) break; } if ((pixels == (Quantum *) NULL) || (x < x_offset) || ((x-x_offset) >= (ssize_t) source_image->columns)) { Quantum source[MaxPixelChannels]; /* Virtual composite: Sc: source color. Dc: canvas color. 
*/ (void) GetOneVirtualPixel(source_image,x-x_offset,y-y_offset,source, exception); if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { MagickRealType pixel; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image, channel); if ((traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; switch (compose) { case AlphaCompositeOp: case ChangeMaskCompositeOp: case CopyAlphaCompositeOp: case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case OutCompositeOp: case SrcInCompositeOp: case SrcOutCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) q[i]; break; } case ClearCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { if (channel == AlphaPixelChannel) pixel=(MagickRealType) TransparentAlpha; else pixel=0.0; break; } case BlendCompositeOp: case DissolveCompositeOp: { if (channel == AlphaPixelChannel) pixel=canvas_dissolve*GetPixelAlpha(source_image,source); else pixel=(MagickRealType) source[channel]; break; } default: { pixel=(MagickRealType) source[channel]; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } q+=GetPixelChannels(image); continue; } /* Authentic composite: Sa: normalized source alpha. Da: normalized canvas alpha. 
*/ Sa=QuantumScale*GetPixelAlpha(source_image,p); Da=QuantumScale*GetPixelAlpha(image,q); switch (compose) { case BumpmapCompositeOp: { alpha=GetPixelIntensity(source_image,p)*Sa; break; } case ColorBurnCompositeOp: case ColorDodgeCompositeOp: case DarkenCompositeOp: case DifferenceCompositeOp: case DivideDstCompositeOp: case DivideSrcCompositeOp: case ExclusionCompositeOp: case HardLightCompositeOp: case HardMixCompositeOp: case LinearBurnCompositeOp: case LinearDodgeCompositeOp: case LinearLightCompositeOp: case LightenCompositeOp: case MathematicsCompositeOp: case MinusDstCompositeOp: case MinusSrcCompositeOp: case ModulusAddCompositeOp: case ModulusSubtractCompositeOp: case MultiplyCompositeOp: case OverlayCompositeOp: case PegtopLightCompositeOp: case PinLightCompositeOp: case ScreenCompositeOp: case SoftLightCompositeOp: case VividLightCompositeOp: { alpha=RoundToUnity(Sa+Da-Sa*Da); break; } case DstAtopCompositeOp: case DstInCompositeOp: case InCompositeOp: case SrcInCompositeOp: { alpha=Sa*Da; break; } case DissolveCompositeOp: { alpha=source_dissolve*Sa*(-canvas_dissolve*Da)+source_dissolve*Sa+ canvas_dissolve*Da; break; } case DstOverCompositeOp: case OverCompositeOp: case SrcOverCompositeOp: { alpha=Sa+Da-Sa*Da; break; } case DstOutCompositeOp: { alpha=Da*(1.0-Sa); break; } case OutCompositeOp: case SrcOutCompositeOp: { alpha=Sa*(1.0-Da); break; } case BlendCompositeOp: case PlusCompositeOp: { alpha=RoundToUnity(source_dissolve*Sa+canvas_dissolve*Da); break; } case XorCompositeOp: { alpha=Sa+Da-2.0*Sa*Da; break; } default: { alpha=1.0; break; } } if (GetPixelWriteMask(image,q) == 0) { p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); continue; } switch (compose) { case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case ModulateCompositeOp: case SaturateCompositeOp: { GetPixelInfoPixel(source_image,p,&source_pixel); GetPixelInfoPixel(image,q,&canvas_pixel); break; } default: break; } for (i=0; i < (ssize_t) 
GetPixelChannels(image); i++) { MagickRealType pixel, sans; PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image,channel); if (traits == UndefinedPixelTrait) continue; if ((source_traits == UndefinedPixelTrait) && (channel != AlphaPixelChannel)) continue; if (channel == AlphaPixelChannel) { /* Set alpha channel. */ switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case CopyBlackCompositeOp: case CopyBlueCompositeOp: case CopyCyanCompositeOp: case CopyGreenCompositeOp: case CopyMagentaCompositeOp: case CopyRedCompositeOp: case CopyYellowCompositeOp: case SrcAtopCompositeOp: case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Da; break; } case ChangeMaskCompositeOp: { MagickBooleanType equivalent; if (Da < 0.5) { pixel=(MagickRealType) TransparentAlpha; break; } equivalent=IsFuzzyEquivalencePixel(source_image,p,image,q); if (equivalent != MagickFalse) pixel=(MagickRealType) TransparentAlpha; else pixel=(MagickRealType) OpaqueAlpha; break; } case ClearCompositeOp: { pixel=(MagickRealType) TransparentAlpha; break; } case ColorizeCompositeOp: case HueCompositeOp: case LuminizeCompositeOp: case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Da; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=QuantumRange*Sa; break; } if (Sa < Da) { pixel=QuantumRange*Da; break; } pixel=QuantumRange*Sa; break; } case CopyAlphaCompositeOp: { if (source_image->alpha_trait == UndefinedPixelTrait) pixel=GetPixelIntensity(source_image,p); else pixel=QuantumRange*Sa; break; } case CopyCompositeOp: case DisplaceCompositeOp: case DistortCompositeOp: case DstAtopCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sa; break; } case DarkenIntensityCompositeOp: { 
pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case LightenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? Sa : Da; break; } case ModulateCompositeOp: { pixel=QuantumRange*Da; break; } default: { pixel=QuantumRange*alpha; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); continue; } /* Sc: source color. Dc: canvas color. */ Sc=(MagickRealType) GetPixelChannel(source_image,channel,p); Dc=(MagickRealType) q[i]; if ((traits & CopyPixelTrait) != 0) { /* Copy channel. */ q[i]=Sc; continue; } /* Porter-Duff compositions: Sca: source normalized color multiplied by alpha. Dca: normalized canvas color multiplied by alpha. */ Sca=QuantumScale*Sa*Sc; Dca=QuantumScale*Da*Dc; switch (compose) { case DarkenCompositeOp: case LightenCompositeOp: case ModulusSubtractCompositeOp: { gamma=PerceptibleReciprocal(1.0-alpha); break; } default: { gamma=PerceptibleReciprocal(alpha); break; } } pixel=Dc; switch (compose) { case AlphaCompositeOp: { pixel=QuantumRange*Sa; break; } case AtopCompositeOp: case SrcAtopCompositeOp: { pixel=QuantumRange*(Sca*Da+Dca*(1.0-Sa)); break; } case BlendCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc+canvas_dissolve*Da*Dc); break; } case BlurCompositeOp: case CopyCompositeOp: case ReplaceCompositeOp: case SrcCompositeOp: { pixel=QuantumRange*Sca; break; } case DisplaceCompositeOp: case DistortCompositeOp: { pixel=Sc; break; } case BumpmapCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } pixel=QuantumScale*GetPixelIntensity(source_image,p)*Dc; break; } case ChangeMaskCompositeOp: { pixel=Dc; break; } case ClearCompositeOp: { pixel=0.0; break; } case ColorBurnCompositeOp: { if ((Sca == 0.0) && (Dca == Da)) { pixel=QuantumRange*gamma*(Sa*Da+Dca*(1.0-Sa)); break; } if (Sca == 0.0) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)); break; } 
pixel=QuantumRange*gamma*(Sa*Da-Sa*Da*MagickMin(1.0,(1.0-Dca/Da)*Sa/ Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ColorDodgeCompositeOp: { if ((Sca*Da+Dca*Sa) >= Sa*Da) pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); else pixel=QuantumRange*gamma*(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca* (1.0-Sa)); break; } case ColorizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &sans,&sans,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case CopyAlphaCompositeOp: { pixel=Dc; break; } case CopyBlackCompositeOp: { if (channel == BlackPixelChannel) pixel=(MagickRealType) (QuantumRange- GetPixelBlack(source_image,p)); break; } case CopyBlueCompositeOp: case CopyYellowCompositeOp: { if (channel == BluePixelChannel) pixel=(MagickRealType) GetPixelBlue(source_image,p); break; } case CopyGreenCompositeOp: case CopyMagentaCompositeOp: { if (channel == GreenPixelChannel) pixel=(MagickRealType) GetPixelGreen(source_image,p); break; } case CopyRedCompositeOp: case CopyCyanCompositeOp: { if (channel == RedPixelChannel) pixel=(MagickRealType) GetPixelRed(source_image,p); break; } case DarkenCompositeOp: { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ if ((Sca*Da) < (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case DarkenIntensityCompositeOp: { pixel=Sa*GetPixelIntensity(source_image,p) < Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case DifferenceCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca-2.0*MagickMin(Sca*Da,Dca*Sa)); break; } case DissolveCompositeOp: { pixel=gamma*(source_dissolve*Sa*Sc-source_dissolve*Sa* canvas_dissolve*Da*Dc+canvas_dissolve*Da*Dc); break; } case DivideDstCompositeOp: { if ((fabs((double) Sca) < MagickEpsilon) && (fabs((double) Dca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (fabs((double) Dca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*Da*Da/Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case DivideSrcCompositeOp: { if ((fabs((double) Dca) < MagickEpsilon) && (fabs((double) Sca) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } if (fabs((double) Sca) < MagickEpsilon) { pixel=QuantumRange*gamma*(Da*Sa+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } pixel=QuantumRange*gamma*(Dca*Sa*Sa/Sca+Dca*(1.0-Sa)+Sca*(1.0-Da)); break; } case DstAtopCompositeOp: { pixel=QuantumRange*(Dca*Sa+Sca*(1.0-Da)); break; } case DstCompositeOp: case NoCompositeOp: { pixel=QuantumRange*Dca; break; } case DstInCompositeOp: { pixel=QuantumRange*(Dca*Sa); break; } case DstOutCompositeOp: { pixel=QuantumRange*(Dca*(1.0-Sa)); break; } case DstOverCompositeOp: { pixel=QuantumRange*gamma*(Dca+Sca*(1.0-Da)); break; } case ExclusionCompositeOp: { pixel=QuantumRange*gamma*(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case HardLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0- Sa)); break; } pixel=QuantumRange*gamma*(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } case HardMixCompositeOp: { pixel=gamma*(((Sca+Dca) < 1.0) ? 
0.0 : QuantumRange); break; } case HueCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &hue,&sans,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case InCompositeOp: case SrcInCompositeOp: { pixel=QuantumRange*(Sca*Da); break; } case LinearBurnCompositeOp: { /* LinearBurn: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Sc + Dc - 1 */ pixel=QuantumRange*gamma*(Sca+Dca-Sa*Da); break; } case LinearDodgeCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc); break; } case LinearLightCompositeOp: { /* LinearLight: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Dc + 2*Sc - 1 */ pixel=QuantumRange*gamma*((Sca-Sa)*Da+Sca+Dca); break; } case LightenCompositeOp: { if ((Sca*Da) > (Dca*Sa)) { pixel=QuantumRange*(Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*(Dca+Sca*(1.0-Da)); break; } case LightenIntensityCompositeOp: { /* Lighten is equivalent to a 'Maximum' method OR a greyscale version of a binary 'And' OR the 'Union' of pixel sets. */ pixel=Sa*GetPixelIntensity(source_image,p) > Da*GetPixelIntensity(image,q) ? 
Sc : Dc; break; } case LuminizeCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&sans,&luma); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case MathematicsCompositeOp: { /* 'Mathematics' a free form user control mathematical composition is defined as... f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D Where the arguments A,B,C,D are (currently) passed to composite as a command separated 'geometry' string in "compose:args" image artifact. A = a->rho, B = a->sigma, C = a->xi, D = a->psi Applying the SVG transparency formula (see above), we get... Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa) Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) + Dca*(1.0-Sa) */ pixel=QuantumRange*gamma*(geometry_info.rho*Sca*Dca+ geometry_info.sigma*Sca*Da+geometry_info.xi*Dca*Sa+ geometry_info.psi*Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case MinusDstCompositeOp: { pixel=gamma*(Sa*Sc+Da*Dc-2.0*Da*Dc*Sa); break; } case MinusSrcCompositeOp: { /* Minus source from canvas. 
f(Sc,Dc) = Sc - Dc */ pixel=gamma*(Da*Dc+Sa*Sc-2.0*Sa*Sc*Da); break; } case ModulateCompositeOp: { ssize_t offset; if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } offset=(ssize_t) (GetPixelIntensity(source_image,p)-midpoint); if (offset == 0) { pixel=Dc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); luma+=(0.01*percent_luma*offset)/midpoint; chroma*=0.01*percent_chroma; HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ModulusAddCompositeOp: { pixel=Sc+Dc; while (pixel > QuantumRange) pixel-=QuantumRange; while (pixel < 0.0) pixel+=QuantumRange; pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa)); break; } case ModulusSubtractCompositeOp: { pixel=Sc-Dc; while (pixel > QuantumRange) pixel-=QuantumRange; while (pixel < 0.0) pixel+=QuantumRange; pixel=(Sa*Da*pixel+Sa*Sc*(1.0-Da)+Da*Dc*(1.0-Sa)); break; } case MultiplyCompositeOp: { pixel=QuantumRange*gamma*(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case OutCompositeOp: case SrcOutCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)); break; } case OverCompositeOp: case SrcOverCompositeOp: { pixel=QuantumRange*gamma*(Sca+Dca*(1.0-Sa)); break; } case OverlayCompositeOp: { if ((2.0*Dca) < Da) { pixel=QuantumRange*gamma*(2.0*Dca*Sca+Dca*(1.0-Sa)+Sca*(1.0- Da)); break; } pixel=QuantumRange*gamma*(Da*Sa-2.0*(Sa-Sca)*(Da-Dca)+Dca*(1.0-Sa)+ Sca*(1.0-Da)); break; } case PegtopLightCompositeOp: { /* PegTop: A Soft-Light alternative: A continuous version of the Softlight function, producing very similar results. f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm. 
*/ if (fabs((double) Da) < MagickEpsilon) { pixel=QuantumRange*gamma*(Sca); break; } pixel=QuantumRange*gamma*(Dca*Dca*(Sa-2.0*Sca)/Da+Sca*(2.0*Dca+1.0- Da)+Dca*(1.0-Sa)); break; } case PinLightCompositeOp: { /* PinLight: A Photoshop 7 composition method http://www.simplefilter.de/en/basics/mixmods.html f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 2*Sc : Dc */ if ((Dca*Sa) < (Da*(2.0*Sca-Sa))) { pixel=QuantumRange*gamma*(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa)); break; } if ((Dca*Sa) > (2.0*Sca*Da)) { pixel=QuantumRange*gamma*(Sca*Da+Sca+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Sca*(1.0-Da)+Dca); break; } case PlusCompositeOp: { pixel=QuantumRange*(Sca+Dca); break; } case SaturateCompositeOp: { if (fabs((double) (QuantumRange*Sa-TransparentAlpha)) < MagickEpsilon) { pixel=Dc; break; } if (fabs((double) (QuantumRange*Da-TransparentAlpha)) < MagickEpsilon) { pixel=Sc; break; } CompositeHCL(canvas_pixel.red,canvas_pixel.green,canvas_pixel.blue, &hue,&chroma,&luma); CompositeHCL(source_pixel.red,source_pixel.green,source_pixel.blue, &sans,&chroma,&sans); HCLComposite(hue,chroma,luma,&red,&green,&blue); switch (channel) { case RedPixelChannel: pixel=red; break; case GreenPixelChannel: pixel=green; break; case BluePixelChannel: pixel=blue; break; default: pixel=Dc; break; } break; } case ScreenCompositeOp: { /* Screen: a negated multiply: f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc) */ pixel=QuantumRange*gamma*(Sca+Dca-Sca*Dca); break; } case SoftLightCompositeOp: { if ((2.0*Sca) < Sa) { pixel=QuantumRange*gamma*(Dca*(Sa+(2.0*Sca-Sa)*(1.0-(Dca/Da)))+ Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da)) { pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*(Dca/Da)* (4.0*(Dca/Da)+1.0)*((Dca/Da)-1.0)+7.0*(Dca/Da))+Sca*(1.0-Da)+ Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa+Da*(2.0*Sca-Sa)*(pow((Dca/Da),0.5)- (Dca/Da))+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } case ThresholdCompositeOp: { MagickRealType delta; delta=Sc-Dc; if ((MagickRealType) 
fabs((double) (2.0*delta)) < threshold) { pixel=gamma*Dc; break; } pixel=gamma*(Dc+delta*amount); break; } case VividLightCompositeOp: { /* VividLight: A Photoshop 7 composition method. See http://www.simplefilter.de/en/basics/mixmods.html. f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc)) */ if ((fabs((double) Sa) < MagickEpsilon) || (fabs((double) (Sca-Sa)) < MagickEpsilon)) { pixel=QuantumRange*gamma*(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } if ((2.0*Sca) <= Sa) { pixel=QuantumRange*gamma*(Sa*(Da+Sa*(Dca-Da)/(2.0*Sca))+Sca* (1.0-Da)+Dca*(1.0-Sa)); break; } pixel=QuantumRange*gamma*(Dca*Sa*Sa/(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca* (1.0-Sa)); break; } case XorCompositeOp: { pixel=QuantumRange*(Sca*(1.0-Da)+Dca*(1.0-Sa)); break; } default: { pixel=Sc; break; } } q[i]=clamp != MagickFalse ? ClampPixel(pixel) : ClampToQuantum(pixel); } p+=GetPixelChannels(source_image); channels=GetPixelChannels(source_image); if (p >= (pixels+channels*source_image->columns)) p=pixels; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CompositeImage) #endif proceed=SetImageProgress(image,CompositeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); if (canvas_image != (Image * ) NULL) canvas_image=DestroyImage(canvas_image); else source_image=DestroyImage(source_image); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T e x t u r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TextureImage() repeatedly tiles the texture image across and down the image % canvas. 
%
%  The format of the TextureImage method is:
%
%      MagickBooleanType TextureImage(Image *image,const Image *texture,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to tile the texture onto; modified in place.
%
%    o texture: this image is the texture to layer on the background.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture,
  ExceptionInfo *exception)
{
#define TextureImageTag  "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Sanity checks: a texture is required and the canvas must be DirectClass
    so its pixels can be written directly.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Clone the texture so the caller's image is never modified; the tile
    virtual-pixel method makes reads past the texture edge wrap around.
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace,exception);
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod,
    exception);
  status=MagickTrue;
  /*
    General path: any compose operator other than a plain opaque Copy/Over
    must go through CompositeImage() tile by tile.
  */
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) ||
       (image->alpha_trait != UndefinedPixelTrait) ||
       (texture_image->alpha_trait != UndefinedPixelTrait)))
    {
      /*
        Tile texture onto the image background.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          thread_status=CompositeImage(image,texture_image,image->compose,
            MagickTrue,x+texture_image->tile_offset.x,y+
            texture_image->tile_offset.y,exception);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized): copy raw channel
    values row by row, no compositing math needed.
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(texture_image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *p,
      *pixels;

    register ssize_t
      x;

    register Quantum
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /*
      Fetch one texture row (wrapped vertically by the tile offset) and
      replicate it across the whole canvas row.
    */
    pixels=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,
      (y+texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((pixels == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
    {
      register ssize_t
        j;

      p=pixels;
      width=texture_image->columns;
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;  /* clip the last, partial tile */
      for (j=0; j < (ssize_t) width; j++)
      {
        register ssize_t
          i;

        /* a zero write mask means this canvas pixel is protected */
        if (GetPixelWriteMask(image,q) == 0)
          {
            p+=GetPixelChannels(texture_image);
            q+=GetPixelChannels(image);
            continue;
          }
        for (i=0; i < (ssize_t) GetPixelChannels(texture_image); i++)
        {
          PixelChannel channel=GetPixelChannelChannel(texture_image,i);
          PixelTrait traits=GetPixelChannelTraits(image,channel);
          PixelTrait texture_traits=GetPixelChannelTraits(texture_image,
            channel);
          /* copy only channels that both images define */
          if ((traits == UndefinedPixelTrait) ||
              (texture_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(image,channel,p[i],q);
        }
        p+=GetPixelChannels(texture_image);
        q+=GetPixelChannels(image);
      }
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
parallel_section_firstprivate.c
/* This file contains all checks for the section construct without the checks for the reduction clauses: ordered: checks that the execution is equivalent to the serial case */ #include <stdio.h> #include "omp_testsuite.h" int check_parallel_section_firstprivate (FILE * logFile) { int sum = 7; int sum0 = 11; int known_sum; #pragma omp parallel sections firstprivate(sum0) { #pragma omp section { #pragma omp critical { sum = sum + sum0; } /*end of critical */ } #pragma omp section { #pragma omp critical { sum = sum + sum0; } /*end of critical */ } #pragma omp section { #pragma omp critical { sum = sum + sum0; } /*end of critical */ } } /*end of parallel sections */ known_sum = 11 * 3 + 7; return (known_sum == sum); } /* end of check_section_firstprivate */ int crosscheck_parallel_section_firstprivate (FILE * logFile) { int sum = 7; int sum0 = 11; int known_sum; #pragma omp parallel sections private(sum0) { #pragma omp section { #pragma omp critical { sum = sum + sum0; } /*end of critical */ } #pragma omp section { #pragma omp critical { sum = sum + sum0; } /*end of critical */ } #pragma omp section { #pragma omp critical { sum = sum + sum0; } /*end of critical */ } } /*end of parallel sections */ known_sum = 11 * 3 + 7; return (known_sum == sum); } /* end of check_section_firstprivate */
IcgThreshold2.c
// Copyright (C) 2016 Gernot Riegler // Institute for Computer Graphics and Vision (ICG) // Graz University of Technology (TU GRAZ) // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // 3. All advertising materials mentioning features or use of this software // must display the following acknowledgement: // This product includes software developed by the ICG, TU GRAZ. // 4. Neither the name of the ICG, TU GRAZ nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // THIS SOFTWARE IS PROVIDED ''AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE PROVIDER BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/IcgThreshold2.c"
#else

/*
  IcgThreshold2_updateOutput: forward pass of the IcgThreshold2 module.
  Lua stack layout: arg 1 = module table (fields "threshold", "val" and
  "output"), arg 2 = input1 (values passed through), arg 3 = input2
  (values tested against the threshold).  Computes, elementwise:

      output[i] = (input2[i] <= threshold) ? val : input1[i]

  and leaves the result in the module's "output" tensor.  Returns 1
  (one Lua result).
*/
static int icgnn_(IcgThreshold2_updateOutput)(lua_State *L) { THTensor* input1 = luaT_checkudata(L, 2, torch_Tensor); THTensor* input2 = luaT_checkudata(L, 3, torch_Tensor); real threshold = luaT_getfieldchecknumber(L, 1, "threshold"); real val = luaT_getfieldchecknumber(L, 1, "val"); THTensor* output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);
  /* newContiguous returns a new reference to a contiguous tensor; the
     matching THTensor_(free) calls below release those references. */
  input1 = THTensor_(newContiguous)(input1); input2 = THTensor_(newContiguous)(input2); real* in1 = THTensor_(data)(input1); real* in2 = THTensor_(data)(input2); THTensor_(resizeAs)(output, input1); real* out = THTensor_(data)(output); long nelem = THTensor_(nElement)(input1); luaL_argcheck(L, nelem == THTensor_(nElement)(input2), 2, "input1 should have same number of elements as input2"); long idx;
  /* elementwise select, parallelized over the flat index */
#pragma omp parallel for private(idx)
  for(idx = 0; idx < nelem; ++idx) { out[idx] = in2[idx] <= threshold ? val : in1[idx]; } THTensor_(free)(input1); THTensor_(free)(input2); return 1; }

/*
  IcgThreshold2_updateGradInput: backward pass.  Where input2 was at or
  below the threshold, the forward output was the constant `val`, so no
  gradient flows to input1; elsewhere the gradient passes through:

      grad_input1[i] = (input2[i] <= threshold) ? 0 : grad_output[i]

  Lua stack: arg 1 = module table, arg 2 = input1, arg 3 = input2,
  arg 4 = grad_input1 (written), arg 5 = grad_output.  Returns 1.

  NOTE(review): `val` and `in1` are fetched but never used here (the
  getfieldchecknumber call still validates that the field exists), and
  unlike the forward pass the tensors are not made contiguous first --
  presumably callers always pass contiguous tensors; confirm.
*/
static int icgnn_(IcgThreshold2_updateGradInput)(lua_State *L) { THTensor* input1 = luaT_checkudata(L, 2, torch_Tensor); THTensor* input2 = luaT_checkudata(L, 3, torch_Tensor); THTensor* grad_input1 = luaT_checkudata(L, 4, torch_Tensor); THTensor* grad_output = luaT_checkudata(L, 5, torch_Tensor); real threshold = luaT_getfieldchecknumber(L, 1, "threshold"); real val = luaT_getfieldchecknumber(L, 1, "val"); THTensor_(resizeAs)(grad_input1, input1); real* in1 = THTensor_(data)(input1); real* in2 = THTensor_(data)(input2); real* grad_in1 = THTensor_(data)(grad_input1); real* grad_out = THTensor_(data)(grad_output); long nelem = THTensor_(nElement)(input1); luaL_argcheck(L, nelem == THTensor_(nElement)(input2), 2, "input1 should have same number of elements as input2"); long idx;
  /* gradient mask, parallelized over the flat index */
#pragma omp parallel for private(idx)
  for(idx = 0; idx < nelem; ++idx) { grad_in1[idx] = in2[idx] <= threshold ? 0 : grad_out[idx]; } return 1; }

/* Method table exported to Lua under the "icgnn" name. */
static const struct luaL_Reg icgnn_(IcgThreshold2__) [] = { {"IcgThreshold2_updateOutput", icgnn_(IcgThreshold2_updateOutput)}, {"IcgThreshold2_updateGradInput", icgnn_(IcgThreshold2_updateGradInput)}, {NULL, NULL} };

/* Registers the method table on the torch.Tensor metatable. */
static void icgnn_(IcgThreshold2_init)(lua_State *L) { luaT_pushmetatable(L, torch_Tensor); luaT_registeratname(L, icgnn_(IcgThreshold2__), "icgnn"); lua_pop(L,1); }

#endif
9246.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp parallel for schedule(static) num_threads(2) private(j) for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). 
*/ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
mxnet_op.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2017 by Contributors
 * \file mxnet_op.h
 * \brief Shared helpers for writing operator kernels: type/request-dispatch
 *        macros, index/shape arithmetic, and the Kernel<OP, xpu>::Launch
 *        machinery that maps an elementwise OP::Map over CPU (OpenMP) or
 *        GPU (CUDA) iteration spaces.
 * \author Junyuan Xie
 */
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_

#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "./operator_tune.h"
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif  // __CUDACC__

namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;

// PI lives in CUDA constant memory when compiled for device code,
// and as an ordinary host constant otherwise.
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif

// Number of threads a kernel launch will use for N iterations;
// specialized below for cpu and (under CUDA) gpu.
template<typename xpu>
int get_num_threads(const int N);

#ifdef __CUDACC__
// Canonical grid-stride iteration over n elements inside a CUDA kernel.
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < (n); \
       i += blockDim.x * gridDim.x)

// Properties of the currently selected CUDA device.
inline cudaDeviceProp cuda_get_device_prop() {
  int device;
  CUDA_CALL(cudaGetDevice(&device));
  cudaDeviceProp deviceProp;
  CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
  return deviceProp;
}

/*!
 * \brief Get the number of blocks for cuda kernel given N
 *        (ceil(N / kBaseThreadNum), capped at kMaxGridNum).
 */
inline int cuda_get_num_blocks(const int N) {
  using namespace mshadow::cuda;
  return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}

// Total GPU threads launched for N iterations: blocks * threads-per-block.
template<>
inline int get_num_threads<gpu>(const int N) {
  using namespace mshadow::cuda;
  return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif  // __CUDACC__

// CPU thread count comes from the engine's OpenMP recommendation.
// NOTE(review): N is ignored here — the count does not scale down for tiny
// workloads; callers decide whether to parallelize at all.
template<>
inline int get_num_threads<cpu>(const int N) {
  return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}

/*! \brief operator request type switch: binds ReqType to a compile-time
 *         constant for each runtime req value (kWriteInplace is treated as
 *         kWriteTo; kNullOp does nothing). */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
  switch (req) { \
  case kNullOp: \
    break; \
  case kWriteInplace: \
  case kWriteTo: \
    { \
      const OpReqType ReqType = kWriteTo; \
      {__VA_ARGS__} \
    } \
    break; \
  case kAddTo: \
    { \
      const OpReqType ReqType = kAddTo; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    break; \
  }

/*! \brief operator request type switch: like MXNET_ASSIGN_REQ_SWITCH but
 *         also runs the body for kNullOp (with ReqType = kNullOp). */
#define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \
  switch (req) { \
  case kNullOp: \
    { \
      const OpReqType ReqType = kNullOp; \
      {__VA_ARGS__} \
    } \
    break; \
  case kWriteInplace: \
  case kWriteTo: \
    { \
      const OpReqType ReqType = kWriteTo; \
      {__VA_ARGS__} \
    } \
    break; \
  case kAddTo: \
    { \
      const OpReqType ReqType = kAddTo; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    break; \
  }

/*! \brief Bind a runtime dimensionality (1..5) to a compile-time `ndim`
 *         constant; 0 is a silent no-op, anything above 5 is fatal.
 *  NOTE(review): the fatal message reads "ndim=<N>too large " — it is
 *  missing a space between the value and the text. */
#define MXNET_NDIM_SWITCH(NDim, ndim, ...) \
  if (NDim == 0) { \
  } else if (NDim == 1) { \
    const int ndim = 1; \
    {__VA_ARGS__} \
  } else if (NDim == 2) { \
    const int ndim = 2; \
    {__VA_ARGS__} \
  } else if (NDim == 3) { \
    const int ndim = 3; \
    {__VA_ARGS__} \
  } else if (NDim == 4) { \
    const int ndim = 4; \
    {__VA_ARGS__} \
  } else if (NDim == 5) { \
    const int ndim = 5; \
    {__VA_ARGS__} \
  } else { \
    LOG(FATAL) << "ndim=" << NDim << "too large "; \
  }

/*! \brief Type switch over all dtypes except int8/uint8, which abort. */
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kUint8: \
    LOG(FATAL) << "This operation does not " \
                  "support int8 or uint8"; \
    break; \
  case mshadow::kInt8: \
    LOG(FATAL) << "This operation does not " \
                  "support int8 or uint8"; \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

/*! \brief Type switch over all dtypes except float16, which aborts. */
#define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    LOG(FATAL) << "This operation does not " \
                  "support float16"; \
    break; \
  case mshadow::kUint8: \
    { \
      typedef uint8_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt8: \
    { \
      typedef int8_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

/*! \brief Floating-point-only switch that also binds a wider accumulator
 *         type AType (float->double, half->float, double->double); all
 *         integer dtypes abort. */
#define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      typedef double AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      typedef double AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      typedef float AType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kUint8: \
    LOG(FATAL) << "This operation only support " \
                  "floating point types not uint8"; \
    break; \
  case mshadow::kInt8: \
    LOG(FATAL) << "This operation only support " \
                  "floating point types not int8"; \
    break; \
  case mshadow::kInt32: \
    LOG(FATAL) << "This operation only support " \
                  "floating point types, not int32"; \
    break; \
  case mshadow::kInt64: \
    LOG(FATAL) << "This operation only support " \
                  "floating point types, not int64"; \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }

/*!
 * \brief assign the val to out according
 * to request in Kernel::Launch
 * \param out the data to be assigned
 * \param req the assignment request
 * \param val the value to be assigned to out
 * \tparam OType output type
 * \tparam VType value type
 */
#define KERNEL_ASSIGN(out, req, val) \
  { \
    switch (req) { \
    case kNullOp: \
      break; \
    case kWriteTo: \
    case kWriteInplace: \
      (out) = (val); \
      break; \
    case kAddTo: \
      (out) += (val); \
      break; \
    default: \
      break; \
    } \
  }

/*! \brief Register every supported dtype on a dmlc parameter enum field. */
#define MXNET_ADD_ALL_TYPES \
  .add_enum("float32", mshadow::kFloat32) \
  .add_enum("float64", mshadow::kFloat64) \
  .add_enum("float16", mshadow::kFloat16) \
  .add_enum("uint8", mshadow::kUint8) \
  .add_enum("int8", mshadow::kInt8) \
  .add_enum("int32", mshadow::kInt32) \
  .add_enum("int64", mshadow::kInt64)

/* \brief Compute flattened index given coordinates and shape.
 * The (shape[i] > coord[i]) factor zeroes the contribution of any
 * coordinate that is out of range for its axis (broadcasting-style
 * treatment of size-1 axes). */
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
  }
  return ret;
}

/* Compute coordinates from flattened index given shape (inverse of ravel),
 * peeling off the fastest-varying (last) axis first. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret[i] = j - tmp*shape[i];  // j % shape[i] without a second division
    j = tmp;
  }
  return ret;
}

/* Compute dot product of two vector: sum_i coord[i] * stride[i]. */
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret += coord[i] * stride[i];
  }
  return ret;
}

/* Combining unravel and dot in one pass: map a flat index in `shape`
 * space to an offset under `stride` without materializing coordinates. */
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
  const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret += (j - tmp*shape[i])*stride[i];
    j = tmp;
  }
  return ret;
}

/* Calculate stride of each dim from shape. Axes of extent <= 1 get
 * stride 0, which makes them broadcast when indexed via dot(). */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}

/* Increment coordinates (odometer-style, last axis fastest) and keep
 * `idx` in sync with the strided offset of the new coordinate. */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];  // carry into the next-slower axis
    ++(*coord)[i-1];
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}

/* Increment coordinates and modify index — same odometer update but
 * maintaining two strided offsets simultaneously. */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}

/*!
 * \brief Simple copy data from one blob to another
 *        (with an elementwise dtype cast when the type flags differ).
 * \param to Destination blob
 * \param from Source blob
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}

/*! \brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad (chain rule: output grad times GRAD_OP of args)
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    return DType(a * GRAD_OP::Map(args...));
  }
};

/*! \brief Binary op backward gradient OP wrapper (tuned) —
 *         same Map, plus the `tunable` tag that enables LaunchTuned. */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
  using backward_grad<GRAD_OP>::Map;
};

/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */
template<typename OP, int req>
struct op_with_req {
  typedef OP Operation;

  /*! \brief input is one tensor */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*! \brief input is tensor and two scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in,
                                  const DType value_1, const DType value_2) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
  }

  /*! \brief No inputs (ie fill to constant value) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }

  /*! \brief input is single scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }

  /*! \brief inputs are two tensors and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }

  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType *input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }
};

// Primary template; specialized for cpu below and for gpu under CUDA.
template<typename OP, typename xpu>
struct Kernel;

/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // Serial fallback when OpenMP recommends a single thread.
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
   * for irregular workloads such as spmv.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
    if (omp_threads < 2) {
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int64_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   *         (decides via tuned_op<>::UseOMP whether OMP pays off for this N)
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   *        (the first of which is the destination pointer, used to infer DType)
   */
  template<typename PRIMITIVE_OP, typename DType, typename ...Args>
  static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
      N, static_cast<size_t>(omp_threads))) {
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }

  /*!
   * \brief Launch custom-tuned kernel where each thread is set to
   * operate on a contiguous partition — OP::Map here receives a
   * (start, length) range rather than a single index.
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      OP::Map(0, N, args...);
    } else {
      // Split [0, N) into omp_threads contiguous chunks; the last chunk
      // is clipped so i + length never exceeds N.
      const auto length = (N + omp_threads - 1) / omp_threads;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }

  /*!
   * \brief Launch a tunable OP with implicitly-supplied data type
   *        (enabled only when OP itself derives from `tunable`).
   * \tparam DType Data type
   * \tparam T OP type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    LaunchTuned<T, DType>(s, N, dest, args...);
    return true;
  }

  /*!
   * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
   *        (enabled when the wrapped T::Operation derives from `tunable`).
   * \tparam DType Data type
   * \tparam T Wrapper type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
    return true;
  }
};

#ifdef __CUDACC__
// Grid-stride device kernel applying OP::Map per index.
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}

// Variant for range-style OPs: each device thread handles a range of length 1.
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, 1, args...);
  }
}

template<typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
  }

  /*! \brief Launch GPU kernel in the (index, length) calling convention. */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif  // __CUDACC__

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template<int val>
struct set_to_int : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch()) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
}  // namespace mxnet_op
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_MXNET_OP_H_
J1OrbitalSoA.h
//////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by:
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-

#ifndef QMCPLUSPLUS_ONEBODYJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_ONEBODYJASTROW_OPTIMIZED_SOA_H
#include "Configuration.h"
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#include "QMCWaveFunctions/Jastrow/DiffOneBodyJastrowOrbital.h"
#include "Utilities/qmc_common.h"
#include "CPU/SIMD/aligned_allocator.hpp"
#include "CPU/SIMD/algorithm.hpp"
#include <map>
#include <numeric>

namespace qmcplusplus
{
/** @ingroup WaveFunctionComponent
 *  @brief Specialization for one-body Jastrow function using multiple functors
 *
 *  Caches per-electron values (Vat), gradients (Grad) and Laplacians (Lap)
 *  so that particle-by-particle ratio/accept updates only touch one electron.
 */
template<class FT>
struct J1OrbitalSoA : public WaveFunctionComponent
{
  ///alias FuncType
  using FuncType = FT;
  ///type of each component U, dU, d2U;
  using valT = typename FT::real_type;
  ///element position type
  using posT = TinyVector<valT, OHMMS_DIM>;
  ///use the same container
  using DistRow = DistanceTableData::DistRow;
  using DisplRow = DistanceTableData::DisplRow;
  ///table index (electron-ion distance table registered in the ctor)
  const int myTableID;
  ///number of ions
  int Nions;
  ///number of electrons
  int Nelec;
  ///number of groups; 0 signals "ungrouped" and forces per-ion evaluation
  int NumGroups;
  ///reference to the sources (ions)
  const ParticleSet& Ions;
  // Scratch for the electron currently being moved (PbyP protocol).
  valT curAt;
  valT curLap;
  posT curGrad;
  ///\f$Vat[i] = sum_(j) u_{i,j}\f$ — cached per-electron Jastrow sum
  Vector<valT> Vat;
  // Per-ion work arrays, refilled by computeU3 for one electron at a time.
  aligned_vector<valT> U, dU, d2U, d3U;
  aligned_vector<valT> DistCompressed;
  aligned_vector<int> DistIndice;
  ///cached per-electron gradient
  Vector<posT> Grad;
  ///cached per-electron Laplacian
  Vector<valT> Lap;
  ///Container for \f$F[ig*NumGroups+jg]\f$ — owned radial functors, indexed by ion group
  std::vector<FT*> F;

  /** Construct for an electron set `els` against ion source `ions`;
   *  registers the electron-ion distance table and sizes all caches. */
  J1OrbitalSoA(const std::string& obj_name, const ParticleSet& ions, ParticleSet& els)
      : WaveFunctionComponent("J1OrbitalSoA", obj_name), myTableID(els.addTable(ions)), Ions(ions)
  {
    if (myName.empty())
      throw std::runtime_error("J1OrbitalSoA object name cannot be empty!");
    initialize(els);
  }

  // Non-copyable: F holds owning raw pointers.
  J1OrbitalSoA(const J1OrbitalSoA& rhs) = delete;

  ~J1OrbitalSoA()
  {
    for (int i = 0; i < F.size(); ++i)
      if (F[i] != nullptr)
        delete F[i];
  }

  /* initialize storage */
  void initialize(const ParticleSet& els)
  {
    Nions = Ions.getTotalNum();
    NumGroups = Ions.getSpeciesSet().getTotalNum();
    // Keep at least 4 functor slots regardless of the species count.
    F.resize(std::max(NumGroups, 4), nullptr);
    // If ions of the same species are not stored contiguously, the grouped
    // fast path is invalid: fall back to per-ion evaluation (NumGroups = 0).
    if (NumGroups > 1 && !Ions.IsGrouped)
    {
      NumGroups = 0;
    }
    Nelec = els.getTotalNum();
    Vat.resize(Nelec);
    Grad.resize(Nelec);
    Lap.resize(Nelec);
    U.resize(Nions);
    dU.resize(Nions);
    d2U.resize(Nions);
    d3U.resize(Nions);
    DistCompressed.resize(Nions);
    DistIndice.resize(Nions);
  }

  /** Install the radial functor for an ion group; takes ownership of afunc
   *  and deletes any previous functor in that slot.
   *  NOTE(review): target_type is accepted but unused here. */
  void addFunc(int source_type, FT* afunc, int target_type = -1)
  {
    if (F[source_type] != nullptr)
      delete F[source_type];
    F[source_type] = afunc;
  }

  /** Rebuild Vat/Grad/Lap for every electron from scratch. */
  void recompute(ParticleSet& P)
  {
    const DistanceTableData& d_ie(P.getDistTable(myTableID));
    for (int iat = 0; iat < Nelec; ++iat)
    {
      computeU3(P, iat, d_ie.getDistRow(iat));
      Vat[iat] = simd::accumulate_n(U.data(), Nions, valT());
      Lap[iat] = accumulateGL(dU.data(), d2U.data(), d_ie.getDisplRow(iat), Grad[iat]);
    }
  }

  LogValueType evaluateLog(ParticleSet& P,
                           ParticleSet::ParticleGradient_t& G,
                           ParticleSet::ParticleLaplacian_t& L)
  {
    return evaluateGL(P, G, L, true);
  }

  /** Accumulate per-electron Hessians of log(psi) into grad_grad_psi. */
  void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi)
  {
    const DistanceTableData& d_ie(P.getDistTable(myTableID));
    valT dudr, d2udr2;
    Tensor<valT, DIM> ident;
    grad_grad_psi = 0.0;
    ident.diagonal(1.0);
    for (int iel = 0; iel < Nelec; ++iel)
    {
      const auto& dist = d_ie.getDistRow(iel);
      const auto& displ = d_ie.getDisplRow(iel);
      for (int iat = 0; iat < Nions; iat++)
      {
        int gid = Ions.GroupID[iat];
        auto* func = F[gid];
        if (func != nullptr)
        {
          RealType r = dist[iat];
          RealType rinv = 1.0 / r;
          PosType dr = displ[iat];
          func->evaluate(r, dudr, d2udr2);
          // Radial chain rule: rr^T term plus the isotropic (identity) term.
          grad_grad_psi[iel] -=
              rinv * rinv * outerProduct(dr, dr) * (d2udr2 - dudr * rinv) + ident * dudr * rinv;
        }
      }
    }
  }

  /** psi(new)/psi(old) for the proposed move of electron iat,
   *  using the temporary distances of the proposed position. */
  PsiValueType ratio(ParticleSet& P, int iat)
  {
    UpdateMode = ORB_PBYP_RATIO;
    curAt = computeU(P.getDistTable(myTableID).getTempDists());
    return std::exp(static_cast<PsiValueType>(Vat[iat] - curAt));
  }

  /** Ratios for all virtual-particle positions of VP's reference electron. */
  inline void evaluateRatios(const VirtualParticleSet& VP, std::vector<ValueType>& ratios)
  {
    for (int k = 0; k < ratios.size(); ++k)
      ratios[k] = std::exp(Vat[VP.refPtcl] - computeU(VP.getDistTable(VP.refPtcl >= 0 ? myTableID : myTableID).getDistRow(k)));
  }

  /** Sum u(r) over all ions for one electron's distance row (value only). */
  inline valT computeU(const DistRow& dist)
  {
    valT curVat(0);
    if (NumGroups > 0)
    {
      // Grouped fast path: vectorized evaluateV per contiguous species block.
      for (int jg = 0; jg < NumGroups; ++jg)
      {
        if (F[jg] != nullptr)
          curVat += F[jg]->evaluateV(-1, Ions.first(jg), Ions.last(jg), dist.data(), DistCompressed.data());
      }
    }
    else
    {
      // Ungrouped: one functor call per ion.
      for (int c = 0; c < Nions; ++c)
      {
        int gid = Ions.GroupID[c];
        if (F[gid] != nullptr)
          curVat += F[gid]->evaluate(dist[c]);
      }
    }
    return curVat;
  }

  /** Ratios of moving EVERY electron to the single temporary position. */
  void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
  {
    const auto& dist = P.getDistTable(myTableID).getTempDists();
    curAt = valT(0);
    if (NumGroups > 0)
    {
      for (int jg = 0; jg < NumGroups; ++jg)
      {
        if (F[jg] != nullptr)
          curAt += F[jg]->evaluateV(-1, Ions.first(jg), Ions.last(jg), dist.data(), DistCompressed.data());
      }
    }
    else
    {
      for (int c = 0; c < Nions; ++c)
      {
        int gid = Ions.GroupID[c];
        if (F[gid] != nullptr)
          curAt += F[gid]->evaluate(dist[c]);
      }
    }
    for (int i = 0; i < Nelec; ++i)
      ratios[i] = std::exp(Vat[i] - curAt);
  }

  /** Accumulate cached Grad/Lap into G and L; optionally recompute first.
   *  Sign convention: LogValue = -sum(Vat), L -= Lap. */
  inline LogValueType evaluateGL(ParticleSet& P,
                                 ParticleSet::ParticleGradient_t& G,
                                 ParticleSet::ParticleLaplacian_t& L,
                                 bool fromscratch = false)
  {
    if (fromscratch)
      recompute(P);
    for (size_t iat = 0; iat < Nelec; ++iat)
      G[iat] += Grad[iat];
    for (size_t iat = 0; iat < Nelec; ++iat)
      L[iat] -= Lap[iat];
    return LogValue = -simd::accumulate_n(Vat.data(), Nelec, valT());
  }

  /** compute gradient and lap
   * @return lap
   * Expects du[j] pre-divided by r (see computeU3), so lap picks up the
   * (OHMMS_DIM - 1)/r first-derivative term via lapfac.
   */
  inline valT accumulateGL(const valT* restrict du,
                           const valT* restrict d2u,
                           const DisplRow& displ,
                           posT& grad) const
  {
    valT lap(0);
    constexpr valT lapfac = OHMMS_DIM - RealType(1);
    //#pragma omp simd reduction(+:lap)
    for (int jat = 0; jat < Nions; ++jat)
      lap += d2u[jat] + lapfac * du[jat];
    for (int idim = 0; idim < OHMMS_DIM; ++idim)
    {
      const valT* restrict dX = displ.data(idim);
      valT s = valT();
      //#pragma omp simd reduction(+:s)
      for (int jat = 0; jat < Nions; ++jat)
        s += du[jat] * dX[jat];
      grad[idim] = s;
    }
    return lap;
  }

  /** compute U, dU and d2U
   * @param P quantum particleset
   * @param iat the moving particle
   * @param dist starting address of the distances of the ions wrt the iat-th particle
   * After this call dU[j] holds u'(r)/r (divided by distance) in both paths.
   */
  inline void computeU3(ParticleSet& P, int iat, const DistRow& dist)
  {
    if (NumGroups > 0)
    { //ions are grouped
      constexpr valT czero(0);
      std::fill_n(U.data(), Nions, czero);
      std::fill_n(dU.data(), Nions, czero);
      std::fill_n(d2U.data(), Nions, czero);
      for (int jg = 0; jg < NumGroups; ++jg)
      {
        if (F[jg] == nullptr)
          continue;
        F[jg]->evaluateVGL(-1, Ions.first(jg), Ions.last(jg), dist.data(), U.data(), dU.data(),
                           d2U.data(), DistCompressed.data(), DistIndice.data());
      }
    }
    else
    {
      for (int c = 0; c < Nions; ++c)
      {
        int gid = Ions.GroupID[c];
        if (F[gid] != nullptr)
        {
          U[c] = F[gid]->evaluate(dist[c], dU[c], d2U[c]);
          dU[c] /= dist[c];
        }
      }
    }
  }

  /** compute the gradient during particle-by-particle update
   * @param P quantum particleset
   * @param iat particle index
   */
  GradType evalGrad(ParticleSet& P, int iat) { return GradType(Grad[iat]); }

  /** compute the gradient during particle-by-particle update
   * @param P quantum particleset
   * @param iat particle index
   *
   * Using getTempDists(). curAt, curGrad and curLap are computed.
   */
  PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
  {
    UpdateMode = ORB_PBYP_PARTIAL;
    computeU3(P, iat, P.getDistTable(myTableID).getTempDists());
    curLap = accumulateGL(dU.data(), d2U.data(), P.getDistTable(myTableID).getTempDispls(), curGrad);
    curAt = simd::accumulate_n(U.data(), Nions, valT());
    grad_iat += curGrad;
    return std::exp(static_cast<PsiValueType>(Vat[iat] - curAt));
  }

  /** Rejected move. Nothing to do */
  inline void restore(int iat) {}

  /** Accpted move. Update Vat[iat],Grad[iat] and Lap[iat].
   *  If the ratio path skipped the gradient (ORB_PBYP_RATIO), finish the
   *  gradient/Laplacian computation here before committing. */
  void acceptMove(ParticleSet& P, int iat, bool safe_to_delay = false)
  {
    if (UpdateMode == ORB_PBYP_RATIO)
    {
      computeU3(P, iat, P.getDistTable(myTableID).getTempDists());
      curLap = accumulateGL(dU.data(), d2U.data(), P.getDistTable(myTableID).getTempDispls(), curGrad);
    }
    LogValue += Vat[iat] - curAt;
    Vat[iat] = curAt;
    Grad[iat] = curGrad;
    Lap[iat] = curLap;
  }

  /** Register Vat/Grad/Lap in the walker buffer; on first call the local
   *  storage is handed over to the buffer (and freed locally). */
  inline void registerData(ParticleSet& P, WFBufferType& buf)
  {
    if (Bytes_in_WFBuffer == 0)
    {
      Bytes_in_WFBuffer = buf.current();
      buf.add(Vat.begin(), Vat.end());
      buf.add(Grad.begin(), Grad.end());
      buf.add(Lap.begin(), Lap.end());
      Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer;
      // free local space — the data now lives in the walker buffer
      Vat.free();
      Grad.free();
      Lap.free();
    }
    else
    {
      buf.forward(Bytes_in_WFBuffer);
    }
  }

  inline LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false)
  {
    evaluateGL(P, P.G, P.L, false);
    buf.forward(Bytes_in_WFBuffer);
    return LogValue;
  }

  /** Re-attach Vat/Grad/Lap as views into the walker buffer. */
  inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
  {
    Vat.attachReference(buf.lendReference<valT>(Nelec), Nelec);
    Grad.attachReference(buf.lendReference<posT>(Nelec), Nelec);
    Lap.attachReference(buf.lendReference<valT>(Nelec), Nelec);
  }

  /** Deep clone for a new electron set: functors are copied, dPsi cloned. */
  WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const
  {
    J1OrbitalSoA<FT>* j1copy = new J1OrbitalSoA<FT>(myName, Ions, tqp);
    j1copy->Optimizable = Optimizable;
    for (size_t i = 0, n = F.size(); i < n; ++i)
    {
      if (F[i] != nullptr)
        j1copy->addFunc(i, new FT(*F[i]));
    }
    if (dPsi)
    {
      j1copy->dPsi = dPsi->makeClone(tqp);
    }
    return j1copy;
  }

  /**@{ WaveFunctionComponent virtual functions that are not essential for the development */
  void reportStatus(std::ostream& os)
  {
    for (size_t i = 0, n = F.size(); i < n; ++i)
    {
      if (F[i] != nullptr)
        F[i]->myVars.print(os);
    }
  }

  void checkInVariables(opt_variables_type& active)
  {
    myVars.clear();
    for (size_t i = 0, n = F.size(); i < n; ++i)
    {
      if (F[i] != nullptr)
      {
        F[i]->checkInVariables(active);
        F[i]->checkInVariables(myVars);
      }
    }
  }

  void checkOutVariables(const opt_variables_type& active)
  {
    myVars.getIndex(active);
    Optimizable = myVars.is_optimizable();
    for (size_t i = 0, n = F.size(); i < n; ++i)
      if (F[i] != nullptr)
        F[i]->checkOutVariables(active);
    if (dPsi)
      dPsi->checkOutVariables(active);
  }

  void resetParameters(const opt_variables_type& active)
  {
    if (!Optimizable)
      return;
    for (size_t i = 0, n = F.size(); i < n; ++i)
      if (F[i] != nullptr)
        F[i]->resetParameters(active);
    for (int i = 0; i < myVars.size(); ++i)
    {
      int ii = myVars.Index[i];
      if (ii >= 0)
        myVars[i] = active[ii];
    }
    if (dPsi)
      dPsi->resetParameters(active);
  }
  /**@} */

  /** Gradient of log(psi) with respect to moving ion isrc. */
  inline GradType evalGradSource(ParticleSet& P, ParticleSet& source, int isrc)
  {
    GradType g_return(0.0);
    const DistanceTableData& d_ie(P.getDistTable(myTableID));
    for (int iat = 0; iat < Nelec; ++iat)
    {
      const auto& dist = d_ie.getDistRow(iat);
      const auto& displ = d_ie.getDisplRow(iat);
      int gid = source.GroupID[isrc];
      RealType r = dist[isrc];
      RealType rinv = 1.0 / r;
      PosType dr = displ[isrc];
      if (F[gid] != nullptr)
      {
        U[isrc] = F[gid]->evaluate(dist[isrc], dU[isrc], d2U[isrc], d3U[isrc]);
        g_return -= dU[isrc] * rinv * dr;
      }
    }
    return g_return;
  }

  /** Ion gradient plus the electron-gradient and electron-Laplacian of that
   *  ion gradient (grad_grad, lapl_grad), needed for ion-force estimators. */
  inline GradType evalGradSource(ParticleSet& P,
                                 ParticleSet& source,
                                 int isrc,
                                 TinyVector<ParticleSet::ParticleGradient_t, OHMMS_DIM>& grad_grad,
                                 TinyVector<ParticleSet::ParticleLaplacian_t, OHMMS_DIM>& lapl_grad)
  {
    GradType g_return(0.0);
    const DistanceTableData& d_ie(P.getDistTable(myTableID));
    for (int iat = 0; iat < Nelec; ++iat)
    {
      const auto& dist = d_ie.getDistRow(iat);
      const auto& displ = d_ie.getDisplRow(iat);
      int gid = source.GroupID[isrc];
      RealType r = dist[isrc];
      RealType rinv = 1.0 / r;
      PosType dr = displ[isrc];
      if (F[gid] != nullptr)
      {
        U[isrc] = F[gid]->evaluate(dist[isrc], dU[isrc], d2U[isrc], d3U[isrc]);
      }
      else
      {
        APP_ABORT("J1OrbitalSoa::evaluateGradSource: F[gid]==nullptr")
      }
      g_return -= dU[isrc] * rinv * dr;
      //The following terms depend only on the radial component r. Thus,
      //we compute them and mix with position vectors to acquire the full
      //cartesian vector objects.
      valT grad_component = (d2U[isrc] - dU[isrc] * rinv);
      valT lapl_component = d3U[isrc] + 2 * rinv * grad_component;
      for (int idim = 0; idim < OHMMS_DIM; idim++)
      {
        grad_grad[idim][iat] += dr[idim] * dr * rinv * rinv * grad_component;
        grad_grad[idim][iat][idim] += rinv * dU[isrc];
        lapl_grad[idim][iat] -= lapl_component * rinv * dr[idim];
      }
    }
    return g_return;
  }
};

} // namespace qmcplusplus
#endif
SliceableArrays.h
/*
 * MIT License
 *
 * Copyright (c) 2017 Daniel Politte
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions :
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

// Guard moved above the includes so the whole header is protected.
#ifndef __H_SLICEABLEARRAYS
#define __H_SLICEABLEARRAYS

#include <array>
#include <cassert>
#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

namespace SliceableArrays {

/*
 * Sliceable Arrays
 *
 * Features:
 *  - 0-indexed
 *  - Column-major
 *  - Arbitrarily high dimensionality, known at compile time
 *  - Fixed-size
 *  - Dimensions need not be known until runtime
 *
 * descended from class described at:
 * http://www.cplusplus.com/forum/articles/17108/
 *
 * AN IMPORTANT DETAIL different from usual C arrays:
 * The elements are stored internally in 2nd dimension-major order. That is, a
 * 2D array with dimensions n by m has a memory layout of m series of n
 * elements, in which each series shares a common 2nd index. The first index
 * changes the fastest as memory is traversed. This also applies to the
 * higher-dimensioned arrays; the first dimension changes the fastest, and the
 * last dimension changes most slowly.
 */
template <typename T, size_t NDIMS>
class ArrayND {
private:
    // NOTE: dims_/numEls_ were previously `const`, which made the copy- and
    // move-assignment operators (which assign to them) ill-formed. They are
    // now mutable internals; the public interface is unchanged.
    std::array<size_t, NDIMS> dims_; // extent of each dimension
    size_t numEls_;                  // product of all extents
    bool dataAutoDestroy_;           // true when the destructor owns arr_
    T *arr_;                         // column-major storage (first index fastest)

    // Convert a full position vector into a flat offset into arr_.
    // Column-major: index = p0 + d0*(p1 + d1*(p2 + ...)).
    // FIX: formerly took a non-const reference (and resized the caller's
    // vector), which made the const overload of indexGeneral ill-formed.
    // We now pad a local copy instead.
    size_t getFlattenedIndex(const std::vector<size_t>& position) const {
        // make sure we have the proper number of indices
        if (position.size() != NDIMS) {
            std::cerr << "Illegally-sized index into ArrayND!" << std::endl;
        }
        std::vector<size_t> pos(position);
        pos.resize(NDIMS, 0); // tack on additional zeros as necc.
        // If we're debugging, check validity of each position item
        assert(indexValid(pos, dims_));
        size_t index = 0;
        for (size_t i = dims_.size() - 1; i > 0; --i) {
            index += pos[i];
            index *= dims_[i - 1];
        }
        index += pos[0];
        return index;
    }

    // Recursion tail: contribution of the last supplied coordinate.
    // FIX: both overloads are now const so the const-qualified variadic
    // ind(...) can call them.
    size_t buildEndOfIndex(size_t posThis) const {
        return posThis;
    }

    // Horner-style accumulation of the flat offset from a parameter pack.
    template<typename... Args>
    size_t buildEndOfIndex(size_t posThis, Args... posRest) const {
        const size_t correspondingDimensionIndex = NDIMS - (1 + sizeof...(posRest));
        // If we had the full position list as a vector, posThis would be
        // pos[correspondingDimensionIndex]
        size_t tailResult = buildEndOfIndex(posRest...);
        return posThis + (dims_[correspondingDimensionIndex] * tailResult);
    }

    // True iff every supplied coordinate is within its dimension's extent.
    static bool indexValid(const std::vector<size_t>& pos,
                           const std::array<size_t, NDIMS>& dims) {
        for (size_t i = 0; i < pos.size(); ++i) {
            if (pos[i] >= dims[i]) {
                return false;
            }
        }
        return true;
    }

    // Recursion tail for compactArgsToArray: place the final extent.
    static void compactArgsToArrayInner(std::array<size_t, NDIMS>& resultArray,
                                        size_t only) {
        resultArray[NDIMS - 1] = only;
    }

    // Copy a pack of extents into the fixed-size array, left to right.
    template<typename... Args>
    static void compactArgsToArrayInner(std::array<size_t, NDIMS>& resultArray,
                                        size_t first, Args... vals) {
        // the +1 accounts for `first` not being counted among the pack
        resultArray[NDIMS - (sizeof...(vals) + 1)] = first;
        compactArgsToArrayInner(resultArray, vals...);
    }

    // Turn exactly NDIMS size arguments into a std::array of extents.
    template<typename... Args>
    static std::array<size_t, NDIMS> compactArgsToArray(Args... vals) {
        static_assert(sizeof...(vals) == NDIMS, "Requires NDIMS arguments exactly");
        std::array<size_t, NDIMS> resultArray;
        compactArgsToArrayInner(resultArray, vals...);
        return resultArray;
    }

    // allocates space on heap for this matrix
    void initializeData() {
        if (numEls_ > 0) {
            arr_ = new T[numEls_];
        }
    }

    // Deallocate space we used, if applicable
    void destroyDataIfNecessary() {
        if (dataAutoDestroy_) {
            delete[] arr_;
        }
    }

public:
    /*
     * Constructor & Destructor: the ArrayND class is responsible for the
     * management of the space allocated/adopted for the matrix's data.
     */

    // Constructor in which user doesn't provide a data pointer; space is
    // automatically allocated and marked for destruction.
    // FIX: std::accumulate is now seeded with size_t(1) — the old literal 1
    // made the product accumulate in `int`, overflowing for large arrays.
    template<typename... Args>
    ArrayND(size_t dim1, Args... remainingDims)
        : dims_(compactArgsToArray(dim1, remainingDims...)),
          numEls_(std::accumulate(dims_.begin(), dims_.end(),
                                  static_cast<size_t>(1),
                                  [](size_t a, size_t b) -> size_t { return a * b; })),
          dataAutoDestroy_(true),
          arr_(nullptr) {
        static_assert(NDIMS == sizeof...(remainingDims) + 1,
                      "ArrayND constructor requires exactly as many size arguments as the array has dimensions");
        initializeData();
    }

    // Constructor in which user provides a data pointer; that data is
    // borrowed and NOT marked for destruction.
    template<typename... Args>
    ArrayND(T* dataPtr, size_t dim1, Args... remainingDims)
        : dims_(compactArgsToArray(dim1, remainingDims...)),
          numEls_(std::accumulate(dims_.begin(), dims_.end(),
                                  static_cast<size_t>(1),
                                  [](size_t a, size_t b) -> size_t { return a * b; })),
          dataAutoDestroy_(false),
          arr_(dataPtr) {
        static_assert(NDIMS == sizeof...(remainingDims) + 1,
                      "ArrayND constructor requires exactly as many size arguments as the array has dimensions");
    }

    // copy constructor: deep copy of data
    // FIX: takes const&, and initializers follow declaration order.
    ArrayND(const ArrayND& other)
        : dims_(other.dims_),
          numEls_(other.numEls_),
          dataAutoDestroy_(true), // since we always allocate space here
          arr_(nullptr) {
        initializeData(); // allocating space automatically
        for (size_t i = 0; i < numEls_; ++i) {
            arr_[i] = other.arr_[i];
        }
    }

    // copy assignment operator: deep copy of data
    // FIX: releases the previously owned buffer (old code leaked it) and
    // guards against self-assignment.
    ArrayND& operator=(const ArrayND& other) {
        if (this != &other) {
            destroyDataIfNecessary();
            dims_ = other.dims_;
            numEls_ = other.numEls_;
            dataAutoDestroy_ = true; // since we always allocate space here
            arr_ = nullptr;
            initializeData(); // allocating space automatically
            for (size_t i = 0; i < numEls_; ++i) {
                arr_[i] = other.arr_[i];
            }
        }
        return *this;
    }

    // move constructor: steal the buffer; leave the source destructible.
    ArrayND(ArrayND&& other) noexcept
        : dims_(other.dims_),
          numEls_(other.numEls_),
          dataAutoDestroy_(other.dataAutoDestroy_),
          arr_(other.arr_) {
        // get old one to a state where running its destructor is safe
        other.arr_ = nullptr;
    }

    // move assignment operator
    // FIX: the old body compared against an undefined name `rhs`; the
    // parameter is `other`. Also now noexcept, as a move should be.
    ArrayND& operator=(ArrayND&& other) noexcept {
        if (this != &other) {
            destroyDataIfNecessary(); // flush data at the destination, if any
            arr_ = other.arr_;
            dataAutoDestroy_ = other.dataAutoDestroy_;
            numEls_ = other.numEls_;
            dims_ = other.dims_;
            // get old one to a state where running its destructor is safe
            other.arr_ = nullptr;
        }
        return *this;
    }

    // destructor
    ~ArrayND() {
        destroyDataIfNecessary();
    }

    // Total number of elements.
    size_t numEls() const { return numEls_; }

    // Number of elements along the nth dimension (counting from 0).
    size_t getDim(size_t n) const {
        assert(n < dims_.size());
        return dims_[n];
    }

    // 1-D (flat) indexing, common to all dimensionalities.
    // FIX: dropped the tautological `i >= 0` check on an unsigned value.
    T ind(size_t i) const {
        assert(i < numEls_);
        return arr_[i];
    }
    T& ind(size_t i) {
        assert(i < numEls_);
        return arr_[i];
    }
    T operator[](size_t i) const { return ind(i); }
    T& operator[](size_t i) { return ind(i); }

    // Non-generic multi-D indexing, only available to matching NDIMS.
    T ind(size_t p0, size_t p1) const {
        static_assert(NDIMS == 2, "Only 2D arrays can be indexed with 2 dimensions");
        return ind(getOffsetAtIndex(p0, p1));
    }
    T& ind(size_t p0, size_t p1) {
        static_assert(NDIMS == 2, "Only 2D arrays can be indexed with 2 dimensions");
        return ind(getOffsetAtIndex(p0, p1));
    }
    size_t getOffsetAtIndex(size_t p0, size_t p1) const {
        static_assert(NDIMS == 2, "Only 2D arrays can be indexed with 2 dimensions");
        size_t m = getDim(0);
        size_t n = getDim(1);
        assert(p0 < m && p1 < n);
        return p1 * m + p0;
    }

    T ind(size_t p0, size_t p1, size_t p2) const {
        static_assert(NDIMS == 3, "Only 3D arrays can be indexed with 3 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2));
    }
    T& ind(size_t p0, size_t p1, size_t p2) {
        static_assert(NDIMS == 3, "Only 3D arrays can be indexed with 3 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2));
    }
    size_t getOffsetAtIndex(size_t p0, size_t p1, size_t p2) const {
        static_assert(NDIMS == 3, "Only 3D arrays can be indexed with 3 dimensions");
        size_t m = getDim(0);
        size_t n = getDim(1);
        size_t p = getDim(2);
        assert(p0 < m && p1 < n && p2 < p);
        return (p2 * n + p1) * m + p0;
    }

    T ind(size_t p0, size_t p1, size_t p2, size_t p3) const {
        static_assert(NDIMS == 4, "Only 4D arrays can be indexed with 4 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2, p3));
    }
    T& ind(size_t p0, size_t p1, size_t p2, size_t p3) {
        static_assert(NDIMS == 4, "Only 4D arrays can be indexed with 4 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2, p3));
    }
    size_t getOffsetAtIndex(size_t p0, size_t p1, size_t p2, size_t p3) const {
        static_assert(NDIMS == 4, "Only 4D arrays can be indexed with 4 dimensions");
        size_t m = getDim(0);
        size_t n = getDim(1);
        size_t p = getDim(2);
        size_t q = getDim(3);
        assert(p0 < m && p1 < n && p2 < p && p3 < q);
        return ((p3 * p + p2) * n + p1) * m + p0;
    }

    T ind(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4) const {
        static_assert(NDIMS == 5, "Only 5D arrays can be indexed with 5 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2, p3, p4));
    }
    T& ind(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4) {
        static_assert(NDIMS == 5, "Only 5D arrays can be indexed with 5 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2, p3, p4));
    }
    size_t getOffsetAtIndex(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4) const {
        static_assert(NDIMS == 5, "Only 5D arrays can be indexed with 5 dimensions");
        size_t m = getDim(0);
        size_t n = getDim(1);
        size_t p = getDim(2);
        size_t q = getDim(3);
        size_t r = getDim(4);
        assert(p0 < m && p1 < n && p2 < p && p3 < q && p4 < r);
        return (((p4 * q + p3) * p + p2) * n + p1) * m + p0;
    }

    T ind(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4, size_t p5) const {
        static_assert(NDIMS == 6, "Only 6D arrays can be indexed with 6 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2, p3, p4, p5));
    }
    T& ind(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4, size_t p5) {
        static_assert(NDIMS == 6, "Only 6D arrays can be indexed with 6 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2, p3, p4, p5));
    }
    size_t getOffsetAtIndex(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4, size_t p5) const {
        static_assert(NDIMS == 6, "Only 6D arrays can be indexed with 6 dimensions");
        size_t m = getDim(0);
        size_t n = getDim(1);
        size_t p = getDim(2);
        size_t q = getDim(3);
        size_t r = getDim(4);
        size_t s = getDim(5);
        assert(p0 < m && p1 < n && p2 < p && p3 < q && p4 < r && p5 < s);
        return ((((p5 * r + p4) * q + p3) * p + p2) * n + p1) * m + p0;
    }

    T ind(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4, size_t p5, size_t p6) const {
        static_assert(NDIMS == 7, "Only 7D arrays can be indexed with 7 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2, p3, p4, p5, p6));
    }
    T& ind(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4, size_t p5, size_t p6) {
        static_assert(NDIMS == 7, "Only 7D arrays can be indexed with 7 dimensions");
        return ind(getOffsetAtIndex(p0, p1, p2, p3, p4, p5, p6));
    }
    size_t getOffsetAtIndex(size_t p0, size_t p1, size_t p2, size_t p3, size_t p4, size_t p5, size_t p6) const {
        static_assert(NDIMS == 7, "Only 7D arrays can be indexed with 7 dimensions");
        size_t m = getDim(0);
        size_t n = getDim(1);
        size_t p = getDim(2);
        size_t q = getDim(3);
        size_t r = getDim(4);
        size_t s = getDim(5);
        size_t t = getDim(6);
        assert(p0 < m && p1 < n && p2 < p && p3 < q && p4 < r && p5 < s && p6 < t);
        return (((((p6 * s + p5) * r + p4) * q + p3) * p + p2) * n + p1) * m + p0;
    }

    // TODO: this should be destroyed if the general version of 'ind' is good
    // A slow but general indexing function, so larger dimensionalities can
    // stand on their own. Now usable on const objects (see getFlattenedIndex).
    T indexGeneral(const std::vector<size_t>& position) const {
        size_t index = getFlattenedIndex(position);
        return ind(index);
    }
    T& indexGeneral(const std::vector<size_t>& position) {
        size_t index = getFlattenedIndex(position);
        return ind(index);
    }

    // Generic offset computation for arbitrarily high dimensions.
    // This offset building is equivalent to using getFlattenedIndex if we
    // had all the position elements in a container.
    template<typename... Args>
    T ind(size_t posFirst, Args... posRest) const {
        size_t offset = buildEndOfIndex(posFirst, posRest...);
        return ind(offset);
    }
    template<typename... Args>
    T& ind(size_t posFirst, Args... posRest) {
        size_t offset = buildEndOfIndex(posFirst, posRest...);
        return ind(offset);
    }

    // reset entire matrix to a value
    void fill(T val) {
#pragma omp parallel for
        for (int i_tmp = 0; i_tmp < (int)numEls_; ++i_tmp) {
            size_t i = i_tmp; // To provide OpenMP 2.0 compatibility
            arr_[i] = val;
        }
    }

    // Retrieves the location of internal data.
    T* getData() const { return arr_; }

    // Modifies the location of internal data. The previously owned buffer (if
    // any) is released first.
    // FIX: the old code set dataAutoDestroy_ = true, contradicting its own
    // documentation and risking delete[] of caller-owned memory; the adopted
    // pointer is now NOT auto-destroyed, matching the pointer constructor.
    void setData(T *data) {
        destroyDataIfNecessary();
        arr_ = data;
        dataAutoDestroy_ = false;
    }

    // choose whether the internal data of this array will be deleted when it
    // is destructed. The default value when this has not been called depends
    // on how the data was obtained (owned allocations: true; adopted
    // pointers: false).
    void setDataAutoDestroy(bool isDataForfeit) {
        dataAutoDestroy_ = isDataForfeit;
    }
    bool getDataAutoDestroy() const {
        return dataAutoDestroy_;
    }
};

template <typename T> using Array1D = ArrayND<T, 1>;
template <typename T> using Array2D = ArrayND<T, 2>;
template <typename T> using Array3D = ArrayND<T, 3>;
template <typename T> using Array4D = ArrayND<T, 4>;
template <typename T> using Array5D = ArrayND<T, 5>;
template <typename T> using Array6D = ArrayND<T, 6>;
template <typename T> using Array7D = ArrayND<T, 7>;

} // namespace SliceableArrays

#endif /* __H_SLICEABLEARRAYS */