source
stringlengths
3
92
c
stringlengths
26
2.25M
StreamTriad_par5.c
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <cuda_runtime.h> #include "timer.h" int main(int argc, char *argv[]){ int nsize = 20000000, ntimes=16; double *a, *b, *c; cudaMalloc((void *)&a,nsize*sizeof(double)); cudaMalloc((void *)&b,nsize*sizeof(double)); cudaMalloc((void *)&c,nsize*sizeof(double)); #pragma target enter data use_device_ptr(a, b, c) struct timespec tstart; // initializing data and arrays double scalar = 3.0, time_sum = 0.0; #pragma omp target teams distribute parallel for simd is_device_ptr(a, b) for (int i=0; i<nsize; i++) { a[i] = 1.0; b[i] = 2.0; } for (int k=0; k<ntimes; k++){ cpu_timer_start(&tstart); // stream triad loop #pragma omp target teams distribute parallel for simd is_device_ptr(a, b, c) for (int i=0; i<nsize; i++){ c[i] = a[i] + scalar*b[i]; } time_sum += cpu_timer_stop(tstart); } printf("Average runtime for stream triad loop is %lf msecs\n", time_sum/ntimes); #pragma target exit data use_device_ptr(a, b, c) cudaFree(a); cudaFree(b); cudaFree(c); return(0); }
skinny.c
/* * Date: 11 December 2015 * Contact: Thomas Peyrin - thomas.peyrin@gmail.com */ /* * Simmulation of boomerang analysis for Skinny * Date: March 21, 2020 * Author: Hosein Hadipour * Contact: hsn.hadipour@gmail.com */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <math.h> #include <omp.h> #include <stdbool.h> // #define DEBUG 1 #define Nthreads 12 // Table that encodes the parameters of the various Skinny versions: // (block size, key size, number of rounds) //Skinny-64-64: 32 rounds //Skinny-64-128: 36 rounds //Skinny-64-192: 40 rounds //Skinny-128-128: 40 rounds //Skinny-128-256: 48 rounds //Skinny-128-384: 56 rounds int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40}, {128, 128, 40}, {128, 256, 48}, {128, 384, 56}}; // Packing of data is done as follows (state[i][j] stands for row i and column j): // 0 1 2 3 // 4 5 6 7 // 8 9 10 11 //12 13 14 15 // 4-bit Sbox const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15}; const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15}; // 8-bit Sbox const unsigned char sbox_8[256] = {0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b, 0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b, 0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9, 0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9, 0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d, 0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d, 0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd, 0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd, 0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 
0x93, 0x20, 0x9b, 0x29, 0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79, 0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb, 0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb, 0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f, 0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f, 0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf, 0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff}; const unsigned char sbox_8_inv[256] = {0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee, 0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4, 0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf, 0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7, 0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4, 0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde, 0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7, 0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf, 0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4, 0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce, 0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7, 0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 0xeb, 0xef, 0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4, 0x2c, 0x78, 
0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe, 0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7, 0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff}; // ShiftAndSwitchRows permutation const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12}; const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14}; // Tweakey permutation const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7}; const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1}; // round constants const unsigned char RC[62] = { 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A, 0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04, 0x09, 0x13, 0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28, 0x10, 0x20}; FILE *fic; void init_prng(int offset) { //int initial_seed = 0x5EC7F2B0; //int initial_seed = 0x30051991; My birthday! unsigned int initial_seed = 10*time(NULL) + 11*offset; srand(initial_seed); // Initialization, should only be called once. 
int r = rand(); printf("[+] PRNG initialized to 0x%08X\n", initial_seed); } void display_matrix(unsigned char state[4][4], int ver) { int i; unsigned char input[16]; if (versions[ver][0] == 64) { for (i = 0; i < 8; i++) input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF); for (i = 0; i < 8; i++) fprintf(fic, "%02x", input[i]); } else if (versions[ver][0] == 128) { for (i = 0; i < 16; i++) input[i] = state[i >> 2][i & 0x3] & 0xFF; for (i = 0; i < 16; i++) fprintf(fic, "%02x", input[i]); } } void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver) { int k; fprintf(fic, "S = "); display_matrix(state, ver); for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { fprintf(fic, " - TK%i = ", k + 1); display_matrix(keyCells[k], ver); } } // Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver) { int i, j, k; unsigned char pos; unsigned char keyCells_tmp[3][4][4]; // apply the subtweakey to the internal state for (i = 0; i <= 1; i++) { for (j = 0; j < 4; j++) { state[i][j] ^= keyCells[0][i][j]; if (2 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j]; else if (3 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j]; } } // update the subtweakey states with the permutation for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { //application of the TWEAKEY permutation pos = TWEAKEY_P[j + 4 * i]; keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3]; } } } // update the subtweakey states with the LFSRs for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i <= 1; i++) { for (j = 0; j < 4; j++) { //application of LFSRs for TK updates if (k == 1) { if 
(versions[ver][0] == 64) keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1); else keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01); } else if (k == 2) { if (versions[ver][0] == 64) keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8); else keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80); } } } } for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { keyCells[k][i][j] = keyCells_tmp[k][i][j]; } } } } // Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state (inverse function} void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver) { int i, j, k; unsigned char pos; unsigned char keyCells_tmp[3][4][4]; // update the subtweakey states with the permutation for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { //application of the inverse TWEAKEY permutation pos = TWEAKEY_P_inv[j + 4 * i]; keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3]; } } } // update the subtweakey states with the LFSRs for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 2; i <= 3; i++) { for (j = 0; j < 4; j++) { //application of inverse LFSRs for TK updates if (k == 1) { if (versions[ver][0] == 64) keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8); else keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80); } else if (k == 
2) { if (versions[ver][0] == 64) keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1); else keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01); } } } } for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { keyCells[k][i][j] = keyCells_tmp[k][i][j]; } } } // apply the subtweakey to the internal state for (i = 0; i <= 1; i++) { for (j = 0; j < 4; j++) { state[i][j] ^= keyCells[0][i][j]; if (2 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j]; else if (3 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j]; } } } // Apply the constants: using a LFSR counter on 6 bits, we XOR the 6 bits to the first 6 bits of the internal state void AddConstants(unsigned char state[4][4], int r) { state[0][0] ^= (RC[r] & 0xf); state[1][0] ^= ((RC[r] >> 4) & 0x3); state[2][0] ^= 0x2; } // apply the 4-bit Sbox void SubCell4(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_4[state[i][j]]; } // apply the 4-bit inverse Sbox void SubCell4_inv(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_4_inv[state[i][j]]; } // apply the 8-bit Sbox void SubCell8(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_8[state[i][j]]; } // apply the 8-bit inverse Sbox void SubCell8_inv(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_8_inv[state[i][j]]; } // Apply the ShiftRows function void ShiftRows(unsigned char state[4][4]) { int i, j, pos; unsigned char state_tmp[4][4]; for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { //application of the ShiftRows permutation pos = P[j + 4 * i]; 
state_tmp[i][j] = state[pos >> 2][pos & 0x3]; } } for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { state[i][j] = state_tmp[i][j]; } } } // Apply the inverse ShiftRows function void ShiftRows_inv(unsigned char state[4][4]) { int i, j, pos; unsigned char state_tmp[4][4]; for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { //application of the inverse ShiftRows permutation pos = P_inv[j + 4 * i]; state_tmp[i][j] = state[pos >> 2][pos & 0x3]; } } for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { state[i][j] = state_tmp[i][j]; } } } // Apply the linear diffusion matrix //M = //1 0 1 1 //1 0 0 0 //0 1 1 0 //1 0 1 0 void MixColumn(unsigned char state[4][4]) { int j; unsigned char temp; for (j = 0; j < 4; j++) { state[1][j] ^= state[2][j]; state[2][j] ^= state[0][j]; state[3][j] ^= state[2][j]; temp = state[3][j]; state[3][j] = state[2][j]; state[2][j] = state[1][j]; state[1][j] = state[0][j]; state[0][j] = temp; } } // Apply the inverse linear diffusion matrix void MixColumn_inv(unsigned char state[4][4]) { int j; unsigned char temp; for (j = 0; j < 4; j++) { temp = state[3][j]; state[3][j] = state[0][j]; state[0][j] = state[1][j]; state[1][j] = state[2][j]; state[2][j] = temp; state[3][j] ^= state[2][j]; state[2][j] ^= state[0][j]; state[1][j] ^= state[2][j]; } } // decryption function of Skinny void dec(unsigned char *input, const unsigned char *userkey, int ver, int r) { unsigned char state[4][4]; unsigned char dummy[4][4] = {{0}}; unsigned char keyCells[3][4][4]; int i; memset(keyCells, 0, 48); for (i = 0; i < 16; i++) { if (versions[ver][0] == 64) { if (i & 1) { state[i >> 2][i & 0x3] = input[i >> 1] & 0xF; keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF; if (versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF; } else { state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF; keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF; if 
(versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF; } } else if (versions[ver][0] == 128) { state[i >> 2][i & 0x3] = input[i] & 0xFF; keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF; if (versions[ver][1] >= 256) keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF; if (versions[ver][1] >= 384) keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF; } } for (i = r - 1; i >= 0; i--) { AddKey(dummy, keyCells, ver); } #ifdef DEBUG fprintf(fic, "DEC - initial state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif for (i = r - 1; i >= 0; i--) { MixColumn_inv(state); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif ShiftRows_inv(state); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddKey_inv(state, keyCells, ver); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddConstants(state, i); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif if (versions[ver][0] == 64) SubCell4_inv(state); else SubCell8_inv(state); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif } #ifdef DEBUG fprintf(fic, "DEC - final state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif if (versions[ver][0] == 64) { for (i = 0; i < 8; i++) input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF); } else if (versions[ver][0] == 128) { for (i = 0; i < 16; i++) input[i] = state[i >> 2][i & 
0x3] & 0xFF; } } // encryption function of Skinny void enc(unsigned char *input, const unsigned char *userkey, int ver, int r) { unsigned char state[4][4]; unsigned char keyCells[3][4][4]; int i; memset(keyCells, 0, 48); for (i = 0; i < 16; i++) { if (versions[ver][0] == 64) { if (i & 1) { state[i >> 2][i & 0x3] = input[i >> 1] & 0xF; keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF; if (versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF; } else { state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF; keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF; if (versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF; } } else if (versions[ver][0] == 128) { state[i >> 2][i & 0x3] = input[i] & 0xFF; keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF; if (versions[ver][1] >= 256) keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF; if (versions[ver][1] >= 384) keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF; } } #ifdef DEBUG fprintf(fic, "ENC - initial state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif for (i = 0; i < r; i++) { if (versions[ver][0] == 64) SubCell4(state); else SubCell8(state); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after SubCell: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddConstants(state, i); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after AddConstants: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddKey(state, keyCells, ver); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after AddKey: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif ShiftRows(state); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i); display_cipher_state(state, 
keyCells, ver); fprintf(fic, "\n"); #endif MixColumn(state); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after MixColumn: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif } //The last subtweakey should not be added #ifdef DEBUG fprintf(fic, "ENC - final state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif if (versions[ver][0] == 64) { for (i = 0; i < 8; i++) input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF); } else if (versions[ver][0] == 128) { for (i = 0; i < 16; i++) input[i] = state[i >> 2][i & 0x3] & 0xFF; } } // generate test vectors for all the versions of Skinny void TestVectors(int ver) { unsigned char p[16]; unsigned char c[16]; unsigned char k[48]; int n; for (n = 1; n < 10; n++) { int i; for (i = 0; i < (versions[ver][0] >> 3); i++) c[i] = p[i] = rand() & 0xff; for (i = 0; i < (versions[ver][0] >> 3); i++) printf("%02x", p[i]); printf("\n"); for (i = 0; i < (versions[ver][1] >> 3); i++) k[i] = rand() & 0xff; fprintf(fic, "TK = "); for (i = 0; i < (versions[ver][1] >> 3); i++) fprintf(fic, "%02x", k[i]); fprintf(fic, "\n"); fprintf(fic, "P = "); for (i = 0; i < (versions[ver][0] >> 3); i++) fprintf(fic, "%02x", p[i]); fprintf(fic, "\n"); enc(c, k, ver, 10); fprintf(fic, "C = "); for (i = 0; i < (versions[ver][0] >> 3); i++) fprintf(fic, "%02x", c[i]); fprintf(fic, "\n"); dec(c, k, ver, 10); fprintf(fic, "P' = "); for (i = 0; i < (versions[ver][0] >> 3); i++) fprintf(fic, "%02x", c[i]); fprintf(fic, "\n\n"); } } int difference(int r, int ver, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1) { int i; unsigned char p1[16], p2[16]; unsigned char k1[48], k2[48]; // randomly choose k1 for (i = 0; i < (versions[ver][1] >> 3); i++) k1[i] = rand() & 0xff; // derive k2 for (i = 0; i < (versions[ver][1] >> 3); i++) k2[i] = k1[i] ^ dk1[i]; int num = 0; for (int t = 0; t < N3; t++) { // randomly choose p1 for (i = 0; i < 
(versions[ver][0] >> 3); i++) p1[i] = rand() & 0xff; // derive p2 for (i = 0; i < (versions[ver][0] >> 3); i++) p2[i] = p1[i] ^ dp[i]; enc(p1, k1, ver, r); enc(p2, k2, ver, r); // check output difference bool flag = 1; for (i = 0; i < (versions[ver][0] >> 3); i++) if ((p1[i] ^ p2[i]) != dc[i]) flag = 0; if (flag) { num++; } } return num; } double send_difference(int R, int ver, int N1, int N2, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1) { // Parallel execution int NUM[N1]; int counter; printf("#Rounds: %d rounds\n", R); printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %d * %d = 2^(%f)\n", N1, N2, N3, log(N1 * N2 * N3) / log(2)); clock_t clock_timer; double wall_timer; clock_timer = clock(); wall_timer = omp_get_wtime(); omp_set_num_threads(N1); #pragma omp parallel for for (counter = 0; counter < N1; counter++) { int num = 0; int ID = omp_get_thread_num(); init_prng(ID); for (int j = 0; j < N2; j++) { num += difference(R, ver, N3, dp, dc, dk1); } NUM[ID] = num; } printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC); printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer); double sum = 0; double sum_temp = 1; for (int i = 0; i < N1; i++) sum += NUM[i]; printf("sum = %f\n", sum); sum_temp = (double)(N1 * N2 * N3) / sum; printf("2^(-%f)\n\n", log(sum_temp) / log(2)); printf("##########################\n"); return sum; } void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16]) { for (int i = 0; i < (versions[ver][0] >> 3); i++) { char hex[2]; hex[0] = hex_str[2 * i]; hex[1] = hex_str[2 * i + 1]; dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff); } } void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48]) { for (int i = 0; i < (versions[ver][1] >> 3); i++) { char hex[2]; hex[0] = hex_str[2 * i]; hex[1] = hex_str[2 * i + 1]; dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff); } } int main() { // 
srand((unsigned)time(NULL)); // Initialization, should only be called once. int r = rand(); // init_prng(1); // //test all versions of Skinny // for (i = 0; i < (sizeof(versions) / sizeof(*versions)); i++) // { // sprintf(name, "test_vectors_%i_%i.txt", versions[i][0], versions[i][1]); // fic = fopen(name, "w"); // fprintf(fic, "\n\nSkinny-%i/%i: \n", versions[i][0], versions[i][1]); // TestVectors(i); // fclose(fic); // printf("Generating test vectors for Skinny-%i/%i - saved in file test_vectors_%i_%i.txt \n", versions[i][0], versions[i][1], versions[i][0], versions[i][1]); // } unsigned char dp[16]; unsigned char dc[16]; unsigned char dk1[48]; // ####################################################################################################### // ####################################################################################################### // ############################## User must change only the following lines ############################## int n = 5; // Number of independent experiments int R = 6; // Number of rounds int ver = 4; // Determine the version: // [0 = Skinny-64-64] // [1 = Skinny-64-128] // [2 = Skinny-64-192] // [3 = Skinny-128-128] // [4 = Skinny-128-256] // [5 = Skinny-128-384] char dp_str[] = "00000000000000000000000000000000"; char dc_str[] = "40400040004000000000184000400040"; char dk1_str[] = "00000000000000000000f8000000000000000000000000000000fc0000000000"; // ####################################################################################################### // ####################################################################################################### convert_hexstr_to_statearray(ver, dp_str, dp); convert_hexstr_to_statearray(ver, dc_str, dc); convert_hexstr_to_tweakarray(ver, dk1_str, dk1); //########################## Number of queries ######################### int N1 = Nthreads; // Number of parallel threads : N1 int deg = 8; int N2 = 1 << deg; // Number of bunches per threads : N2 = 2^(deg) int N3 = 1 
<< 8; // Number of queries per bunches : N3 //################### Number of total queries : N1*N2*N3 ############### double sum = 0; for (int i = 0; i < n; i++) { sum += send_difference(R, ver, N1, N2, N3, dp, dc, dk1); } printf("\nAverage = 2^(-%0.4f)\n", (log(n) + log(N1) + log(N2) + log(N3) - log(sum))/log(2)); // sum = (double)(n * N1 * N2 * N3) / sum; // printf("\nAverage = 2^(-%0.2f)\n", log(sum) / log(2)); return 0; }
sp_vector.h
#ifndef SP_VECTOR_H
#define SP_VECTOR_H

#include <fstream>
#ifdef WINDOWS
#include <string>
#else
#include <cstring>
#endif

#include "../declare_structures.h"

/// Sparse vector class: stores _L nonzero values in _v with their positions
/// in _r, with capacity for _nzmax entries.  When _externAlloc is true the
/// buffers are owned by the caller and are never freed here.
template <typename floating_type, typename I> class SpVector {
   friend class Matrix<floating_type>;
   friend class SpMatrix<floating_type,I>;
   friend class Vector<floating_type>;
public:
   typedef floating_type value_type;
   /// Constructor, of the sparse vector of size L.  Wraps caller-owned
   /// storage (external allocation: no free on destruction).
   SpVector(floating_type* v, I* r, I L, I nzmax);
   /// Constructor, allocates nzmax slots
   SpVector(I nzmax);
   /// Empty constructor
   SpVector();
   /// Destructor
   ~SpVector();

   /// Accessors
   /// returns the capacity (maximum number of nonzeros)
   /// NOTE(review): return type is floating_type although _nzmax has
   /// integral type I -- presumably historical; callers rely on the
   /// implicit conversion.  TODO confirm before changing.
   inline floating_type nzmax() const { return _nzmax; };
   /// returns the number of stored nonzeros (same caveat on return type)
   inline floating_type length() const { return _L; };
   /// computes the sum of the magnitude of the elements
   inline floating_type asum() const;
   /// computes the l2 norm ^2 of the vector
   inline floating_type nrm2sq() const;
   /// computes the l2 norm of the vector
   inline floating_type nrm2() const;
   /// computes the linf norm of the vector
   inline floating_type fmaxval() const;
   /// print the vector to std::cerr
   inline void print(const std::string& name) const;
   /// creates a reference on the index table _r
   inline void refIndices(Vector<I>& indices) const;
   /// creates a reference on the vector val
   inline void refVal(Vector<floating_type>& val) const;
   /// access table r (index of the i-th stored nonzero)
   inline I r(const I i) const { return _r[i]; };
   /// access table v (value of the i-th stored nonzero)
   inline floating_type v(const I i) const { return _v[i]; };
   /// raw pointer to the values
   inline floating_type* rawX() const { return _v; };
   /// raw pointer to the indices
   inline I* rawR() const { return _r; };
   /// number of stored nonzeros
   inline I L() const { return _L; };
   /// sets the number of stored nonzeros (no reallocation)
   inline void setL(const I L) { _L=L; };
   /// a <- a.^2
   inline void sqr();
   /// dot product
   inline floating_type dot(const SpVector<floating_type,I>& vec) const;
   /// dot product
   inline floating_type dot(const Vector<floating_type>& vec) const;
   /// scales the stored values by a
   inline void scal(const floating_type a);

   /// Modifiers
   /// clears the vector
   inline void clear();
   /// resizes the vector
   inline void resize(const I nzmax);
   /// interprets the vector as a column-major m x n sparse matrix
   void inline toSpMatrix(SpMatrix<floating_type,I>& out, const I m, const I n) const;
   /// scatters the nonzeros into a dense vector
   void inline toFull(Vector<floating_type>& out) const;
   /// copies the index table into ind (as int)
   inline void getIndices(Vector<int>& ind) const;

private:
   /// forbids lazy copies
   explicit SpVector(const SpVector<floating_type,I>& vector);
   SpVector<floating_type,I>& operator=(const SpVector<floating_type,I>& vector);

   /// external allocation (true => buffers not owned, never freed here)
   bool _externAlloc;
   /// data
   floating_type* _v;
   /// indices
   I* _r;
   /// length
   I _L;
   /// maximum number of nonzeros elements
   I _nzmax;
};

/* ***************************
 * Implementation of SpVector
 * ***************************/

/// Constructor, of the sparse vector of size L.  External allocation: the
/// caller keeps ownership of v and r.
template <typename floating_type, typename I>
SpVector<floating_type,I>::SpVector(floating_type* v, I* r, I L, I nzmax) :
   _externAlloc(true), _v(v), _r(r), _L(L), _nzmax(nzmax) { };

/// Constructor, allocates nzmax slots.
/// NOTE(review): allocation is wrapped in omp critical -- presumably the
/// surrounding project allocates from parallel regions; confirm before
/// removing.
template <typename floating_type, typename I>
SpVector<floating_type,I>::SpVector(I nzmax) :
   _externAlloc(false), _L(0), _nzmax(nzmax) {
#pragma omp critical
   {
      _v = new floating_type[nzmax];
      _r = new I[nzmax];
   }
};

/// Empty constructor: no storage, marked external so clear() frees nothing.
template <typename floating_type, typename I>
SpVector<floating_type,I>::SpVector() :
   _externAlloc(true), _v(NULL), _r(NULL), _L(0), _nzmax(0) { };

/// Destructor: releases owned storage via clear().
template <typename floating_type, typename I>
SpVector<floating_type,I>::~SpVector() {
   clear();
};

/// computes the sum of the magnitude of the elements
template <typename floating_type, typename I>
inline floating_type SpVector<floating_type,I>::asum() const {
   return cblas_asum<floating_type>(_L,_v,1);
};

/// computes the l2 norm ^2 of the vector (dot of the values with themselves)
template <typename floating_type, typename I>
inline floating_type SpVector<floating_type,I>::nrm2sq() const {
   return cblas_dot<floating_type>(_L,_v,1,_v,1);
};

/// computes the l2 norm of the vector
template <typename floating_type, typename I>
inline floating_type SpVector<floating_type,I>::nrm2() const {
   return cblas_nrm2<floating_type>(_L,_v,1);
};

/// computes the linf norm of the vector (delegates to a dense view of _v)
template <typename floating_type, typename I>
inline floating_type SpVector<floating_type,I>::fmaxval() const {
   Vector<floating_type> tmp(_v,_L);
   return tmp.fmaxval();
};

/// print the vector (name, capacity, then (index, value) pairs) via logging
template <typename floating_type, typename I>
inline void SpVector<floating_type,I>::print(const std::string& name) const {
   logging(logERROR) << name;
   logging(logERROR) << _nzmax;
   for (I i = 0; i<_L; ++i)
      logging(logERROR) << "(" <<_r[i] << ", " << _v[i] << ")";
};

/// create a reference on the vector r (no copy)
template <typename floating_type, typename I>
inline void SpVector<floating_type,I>::refIndices(
      Vector<I>& indices) const {
   indices.setPointer(_r,_L);
};

/// copies the index table into indices (element-by-element, as int)
template <typename floating_type, typename I>
inline void SpVector<floating_type,I>::getIndices(Vector<int>& indices) const {
   // indices.resize(_L);
   indices.setn(_L);
   for (int ii=0; ii<_L; ++ii)
      indices[ii]=_r[ii];
};

/// creates a reference on the vector val (no copy)
template <typename floating_type, typename I>
inline void SpVector<floating_type,I>::refVal(
      Vector<floating_type>& val) const {
   val.setPointer(_v,_L);
};

/// a <- a.^2 (element-wise square of the stored values)
template <typename floating_type, typename I>
inline void SpVector<floating_type,I>::sqr() {
   vSqr<floating_type>(_L,_v,_v);
};

/// scales the stored values by a
template <typename floating_type, typename I>
inline void SpVector<floating_type,I>::scal(const floating_type a) {
   cblas_scal<floating_type>(_L,a,_v,1);
};

/// sparse-sparse dot product: two-pointer merge over the index tables;
/// assumes both index tables are sorted in increasing order -- TODO confirm
/// this invariant is maintained by all producers.
template <typename floating_type, typename I>
inline floating_type SpVector<floating_type,I>::dot(const SpVector<floating_type,I>& vec) const {
   floating_type sum=floating_type();
   I countI = 0;
   I countJ = 0;
   while (countI < _L && countJ < vec._L) {
      const I rI = _r[countI];
      const I rJ = vec._r[countJ];
      if (rI > rJ) {
         ++countJ;
      } else if (rJ > rI) {
         ++countI;
      } else {
         // matching index: accumulate the product and advance both cursors
         sum+=_v[countI]*vec._v[countJ];
         ++countI;
         ++countJ;
      }
   }
   return sum;
};

/// sparse-dense dot product: gather from the dense vector at each stored index
template <typename floating_type, typename I>
inline floating_type SpVector<floating_type,I>::dot(const Vector<floating_type>& vec) const {
   //return cblas_doti(_L,_v,_r,vec.rawX());
   floating_type sum=floating_type();
   for (int countI=0; countI < _L; ++countI)
      sum+=_v[countI]*vec[_r[countI]];
   return sum;
};

/// clears the vector: frees owned storage only, then resets to the empty,
/// externally-allocated state
template <typename floating_type, typename I>
inline void SpVector<floating_type,I>::clear() {
   if (!_externAlloc) {
      delete[](_v);
      delete[](_r);
   }
   _v=NULL;
   _r=NULL;
   _L=0;
   _nzmax=0;
   _externAlloc=true;
};

/// resizes the vector: reallocates only when the capacity changes; contents
/// are discarded (_L reset to 0)
template <typename floating_type, typename I>
inline void SpVector<floating_type,I>::resize(const I nzmax) {
   if (_nzmax != nzmax) {
      clear();
      _nzmax=nzmax;
      _L=0;
      _externAlloc=false;
#pragma omp critical
      {
         _v=new floating_type[nzmax];
         _r=new I[nzmax];
      }
   }
};

/// interprets the flat indices as a column-major m x n matrix and builds the
/// CSC structure of out; assumes _r is sorted so columns appear in order.
template <typename floating_type, typename I>
void inline SpVector<floating_type,I>::toSpMatrix(
      SpMatrix<floating_type,I>& out, const I m, const I n) const {
   out.resize(m,n,_L);
   cblas_copy<floating_type>(_L,_v,1,out._v,1);
   I current_col=0;
   I* out_r=out._r;
   I* out_pB=out._pB;
   out_pB[0]=current_col;
   for (I i = 0; i<_L; ++i) {
      I col=_r[i]/m;
      if (col > current_col) {
         // close the current column, then re-examine the same entry
         // (i-- undoes the loop increment) until its column is reached
         out_pB[current_col+1]=i;
         current_col++;
         i--;
      } else {
         out_r[i]=_r[i]-col*m;  // row index within the column
      }
   }
   // any remaining (empty) columns all end at _L
   for (current_col++ ; current_col < n+1; ++current_col)
      out_pB[current_col]=_L;
};

/// scatters the stored nonzeros into a zeroed dense vector; assumes out is
/// already sized to cover every index in _r -- TODO confirm at call sites
template <typename floating_type, typename I>
void inline SpVector<floating_type,I>::toFull(Vector<floating_type>& out) const {
   out.setZeros();
   floating_type* X = out.rawX();
   for (I i = 0; i<_L; ++i)
      X[_r[i]]=_v[i];
};

#endif
GB_binop__plus_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__plus_int8) // A.*B function (eWiseMult): GB (_AemultB_08__plus_int8) // A.*B function (eWiseMult): GB (_AemultB_02__plus_int8) // A.*B function (eWiseMult): GB (_AemultB_04__plus_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_int8) // A*D function (colscale): GB (_AxD__plus_int8) // D*A function (rowscale): GB (_DxB__plus_int8) // C+=B function (dense accum): GB (_Cdense_accumB__plus_int8) // C+=b function (dense accum): GB (_Cdense_accumb__plus_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_int8) // C=scalar+B GB (_bind1st__plus_int8) // C=scalar+B' GB (_bind1st_tran__plus_int8) // C=A+scalar GB (_bind2nd__plus_int8) // C=A'+scalar GB (_bind2nd_tran__plus_int8) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = (aij + bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x + y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PLUS || GxB_NO_INT8 || GxB_NO_PLUS_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__plus_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__plus_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__plus_int8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__plus_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__plus_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const 
GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__plus_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__plus_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__plus_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__plus_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p 
= 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = (x + bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__plus_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij + y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x + aij) ; \ } GrB_Info GB (_bind1st_tran__plus_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij + y) ; \ } GrB_Info GB (_bind2nd_tran__plus_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
explicit_solver_strategy.h
// // Authors: // Miguel Angel Celigueta maceli@cimne.upc.edu // Miquel Santasusana msantasusana@cimne.upc.edu // #if !defined(KRATOS_EXPLICIT_SOLVER_STRATEGY) #define KRATOS_EXPLICIT_SOLVER_STRATEGY // Project includes #include "utilities/timer.h" #include "custom_elements/Particle_Contact_Element.h" #include "includes/variables.h" #include "includes/deprecated_variables.h" /* System includes */ #include <limits> #include <iostream> #include <iomanip> #include <time.h> /* External includes */ #ifdef _OPENMP #include <omp.h> #endif #define CUSTOMTIMER 0 // ACTIVATES AND DISABLES ::TIMER::::: #include "includes/define.h" #include "utilities/openmp_utils.h" #include "includes/model_part.h" #include "solving_strategies/strategies/implicit_solving_strategy.h" #include "solving_strategies/schemes/scheme.h" #include "custom_strategies/schemes/dem_integration_scheme.h" #include "custom_utilities/create_and_destroy.h" #include "custom_utilities/dem_fem_utilities.h" #include "custom_utilities/GeometryFunctions.h" #include "custom_utilities/inlet.h" #include "custom_elements/cluster3D.h" #include "custom_elements/rigid_body_element.h" ////Cfeng #include "custom_utilities/dem_fem_search.h" #include "custom_utilities/discrete_particle_configure.h" #include "custom_utilities/rigid_face_geometrical_object_configure.h" #ifdef USING_CGAL #include <CGAL/spatial_sort.h> #endif /* Timer defines */ #ifdef CUSTOMTIMER #define KRATOS_TIMER_START(t) Timer::Start(t); #define KRATOS_TIMER_STOP(t) Timer::Stop(t); #else #define KRATOS_TIMER_START(t) #define KRATOS_TIMER_STOP(t) #endif namespace Kratos { class ExplicitSolverSettings { public: KRATOS_CLASS_POINTER_DEFINITION(ExplicitSolverSettings); ExplicitSolverSettings() { } ~ExplicitSolverSettings() { } ModelPart* r_model_part; ModelPart* contact_model_part; ModelPart* fem_model_part; ModelPart* cluster_model_part; ModelPart* inlet_model_part; }; class KRATOS_API(DEM_APPLICATION) ExplicitSolverStrategy { public: typedef 
ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ElementsContainerType ElementsArrayType; typedef ElementsArrayType::iterator ElementsIterator; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef ModelPart::NodesContainerType::ContainerType NodesContainerType; typedef ModelPart::ElementsContainerType::ContainerType ElementsContainerType; typedef ModelPart::ConditionsContainerType::ContainerType ConditionsContainerType; typedef SpatialSearch::ResultElementsContainerType ResultElementsContainerType; typedef SpatialSearch::VectorResultElementsContainerType VectorResultElementsContainerType; typedef SpatialSearch::RadiusArrayType RadiusArrayType; typedef SpatialSearch::DistanceType DistanceType; typedef SpatialSearch::VectorDistanceType VectorDistanceType; typedef SpatialSearch::ResultConditionsContainerType ResultConditionsContainerType; typedef SpatialSearch::VectorResultConditionsContainerType VectorResultConditionsContainerType; typedef PointerVectorSet<Properties, IndexedObject> PropertiesContainerType; typedef PropertiesContainerType::iterator PropertiesIterator; typedef DiscreteParticleConfigure<3> ElementConfigureType; typedef RigidFaceGeometricalObjectConfigure<3> RigidFaceGeometricalConfigureType; typedef Variable<double> ComponentOf3ComponentsVariableType; /// Pointer definition of ExplicitSolverStrategy KRATOS_CLASS_POINTER_DEFINITION(ExplicitSolverStrategy); ExplicitSolverStrategy() { } ExplicitSolverStrategy(ExplicitSolverSettings& settings, const double max_delta_time, const int n_step_search, const double safety_factor, const int delta_option, ParticleCreatorDestructor::Pointer p_creator_destructor, DEM_FEM_Search::Pointer p_dem_fem_search, SpatialSearch::Pointer pSpSearch, Parameters strategy_parameters) { mParameters = strategy_parameters; mDeltaOption = delta_option; mpParticleCreatorDestructor = p_creator_destructor; mpDemFemSearch = p_dem_fem_search; mpSpSearch = pSpSearch; //Also checks old flag name for 
backward compatibility issues. if(mParameters["do_search_dem_neighbours"].GetBool()) { mDoSearchNeighbourElements = true; } else mDoSearchNeighbourElements = false; p_creator_destructor->SetDoSearchNeighbourElements(mDoSearchNeighbourElements); if(mParameters["do_search_fem_neighbours"].GetBool()) mDoSearchNeighbourFEMElements = true; else mDoSearchNeighbourFEMElements = false; mMaxTimeStep = max_delta_time; mNStepSearch = n_step_search; mSafetyFactor = safety_factor; mpDem_model_part = &(*(settings.r_model_part)); KRATOS_ERROR_IF(mpDem_model_part == NULL) << "Undefined settings.r_model_part in ExplicitSolverStrategy constructor" << std::endl; mpContact_model_part = &(*(settings.contact_model_part)); KRATOS_ERROR_IF(mpContact_model_part == NULL) << "Undefined settings.contact_model_part in ExplicitSolverStrategy constructor" << std::endl; mpFem_model_part = &(*(settings.fem_model_part)); KRATOS_ERROR_IF(mpFem_model_part == NULL) << "Undefined settings.fem_model_part in ExplicitSolverStrategy constructor" << std::endl; mpCluster_model_part = &(*(settings.cluster_model_part)); KRATOS_ERROR_IF(mpCluster_model_part == NULL) << "Undefined settings.cluster_model_part in ExplicitSolverStrategy constructor" << std::endl; mpInlet_model_part = &(*(settings.inlet_model_part)); KRATOS_ERROR_IF(mpInlet_model_part == NULL) << "Undefined settings.inlet_model_part in ExplicitSolverStrategy constructor" << std::endl; if(mParameters["RemoveBallsInitiallyTouchingWalls"].GetBool()) mRemoveBallsInitiallyTouchingWallsOption = true; else mRemoveBallsInitiallyTouchingWallsOption = false; } /// Destructor. 
virtual ~ExplicitSolverStrategy() { //Timer::SetOuputFile("TimesPartialRelease"); //Timer::PrintTimingInformation(); } struct LessX { bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[0] < q->GetGeometry()[0].Coordinates()[0];} }; struct LessY { bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[1] < q->GetGeometry()[0].Coordinates()[1];} }; struct LessZ { bool operator()(const SphericParticle* p, const SphericParticle* q) const {return p->GetGeometry()[0].Coordinates()[2] < q->GetGeometry()[0].Coordinates()[2];} }; struct SpatialSortingTraits { typedef SphericParticle* Point_2; typedef LessX Less_x_2; typedef LessY Less_y_2; typedef LessZ Less_z_2; Less_x_2 less_x_2_object() const {return Less_x_2();} Less_y_2 less_y_2_object() const {return Less_y_2();} Less_z_2 less_z_2_object() const { return Less_z_2();} }; #ifdef USING_CGAL void ReorderParticles() { SpatialSortingTraits sst; CGAL::spatial_sort(mListOfSphericParticles.begin(), mListOfSphericParticles.end(), sst); } #endif template <class T> void RebuildListOfSphericParticles(ElementsArrayType& pElements, std::vector<T*>& rCustomListOfParticles){ KRATOS_TRY rCustomListOfParticles.resize(pElements.size()); #pragma omp parallel for for (int k = 0; k < (int)pElements.size(); k++){ ElementsArrayType::iterator particle_pointer_it = pElements.ptr_begin() + k; T* spheric_particle = dynamic_cast<T*>(&(*particle_pointer_it)); rCustomListOfParticles[k] = spheric_particle; } return; KRATOS_CATCH("") } void RebuildListOfDiscontinuumSphericParticles() { RebuildListOfSphericParticles<SphericParticle>(GetModelPart().GetCommunicator().LocalMesh().Elements(), mListOfSphericParticles); } void RebuildPropertiesProxyPointers(std::vector<SphericParticle*>& rCustomListOfSphericParticles); void SendProcessInfoToClustersModelPart(); void UpdateMaxIdOfCreatorDestructor(); void 
RepairPointersToNormalProperties(std::vector<SphericParticle*>& rCustomListOfSphericParticles); virtual void Initialize(); virtual void AttachSpheresToStickyWalls(); virtual void DisplayThreadInfo(); virtual void CalculateMaxTimeStep(); double CalculateMaxInletTimeStep(); virtual void InitializeClusters(); virtual void GetClustersForce(); virtual void GetRigidBodyElementsForce(); virtual double SolveSolutionStep(); void SearchDEMOperations(ModelPart& r_model_part, bool has_mpi = true); void SearchFEMOperations(ModelPart& r_model_part, bool has_mpi = true) ; virtual void ForceOperations(ModelPart& r_model_part); void InitialTimeStepCalculation(); //TODO: remove this one void GetForce(); void FastGetForce(); virtual void PerformTimeIntegrationOfMotion(int StepFlag = 0); void InitializeSolutionStep(); virtual void BoundingBoxUtility(bool is_time_to_mark_and_remove = true); virtual void FinalizeSolutionStep(); void InitializeElements(); void InitializeDEMElements(); void InitializeFEMElements(); //void InitializeRigidBodyElements(); void InitializeFEMWallsAsRigidBodyElements(ModelPart::SubModelPartsContainerType::iterator& sub_model_part); void MarkToDeleteAllSpheresInitiallyIndentedWithFEM(ModelPart& rSpheresModelPart); void ComputeNodalArea(); void ComputeNormalPressureVectorField(); virtual void CalculateConditionsRHSAndAdd(); void ClearFEMForces(); void CalculateNodalPressuresAndStressesOnWalls(); void SetFlagAndVariableToNodes(const Kratos::Flags& r_flag_name, ComponentOf3ComponentsVariableType& r_variable_to_set, const double value, NodesArrayType& r_nodes_array); void SetVariableToNodes(ComponentOf3ComponentsVariableType& r_variable_to_set, const double value, NodesArrayType& r_nodes_array); void ResetPrescribedMotionFlagsRespectingImposedDofs(); void ApplyPrescribedBoundaryConditions(); void ApplyInitialConditions(); void SetSearchRadiiOnAllParticles(ModelPart& r_model_part, const double added_search_distance = 0.0, const double amplification = 1.0); void 
SetNormalRadiiOnAllParticles(ModelPart& r_model_part); void SetSearchRadiiWithFemOnAllParticles(ModelPart& r_model_part, const double added_search_distance = 0.0, const double amplification = 1.0); virtual void SearchNeighbours(); virtual void ComputeNewNeighboursHistoricalData(); virtual void CreateContactElements(); void InitializeContactElements(); // void ContactInitializeSolutionStep(); void PrepareContactElementsForPrinting(); virtual void ComputeNewRigidFaceNeighboursHistoricalData(); virtual void SearchRigidFaceNeighbours(); void CheckHierarchyWithCurrentNeighbours(); /* This should work only with one iteration, but it with mpi does not */ void CalculateInitialMaxIndentations(const ProcessInfo& r_process_info); void PrepareContactModelPart(ModelPart& r_model_part, ModelPart& mcontacts_model_part); void PrepareElementsForPrinting(); void SynchronizeHistoricalVariables(ModelPart& r_model_part); void SynchronizeRHS(ModelPart& r_model_part); void CleanEnergies(); void Check_MPI(bool& has_mpi); ModelPart& GetModelPart() { return (*mpDem_model_part);} ModelPart& GetFemModelPart() { return (*mpFem_model_part);} ModelPart& GetContactModelPart() { return (*mpContact_model_part);} ModelPart& GetClusterModelPart() { return (*mpCluster_model_part);} ModelPart& GetInletModelPart() { return (*mpInlet_model_part);} ModelPart& GetRigidBodyModelPart() { return (*mpRigidBody_model_part);} VectorResultElementsContainerType& GetResults() { return (mResults);} VectorDistanceType& GetResultsDistances() { return (mResultsDistances);} RadiusArrayType& GetArrayOfAmplifiedRadii() { return (mArrayOfAmplifiedRadii);} int& GetNStepSearch() { return (mNStepSearch);} int& GetSearchControl() { return mSearchControl;} int& GetNumberOfThreads() { return (mNumberOfThreads);} double& GetMaxTimeStep() { return (mMaxTimeStep);} double& GetSafetyFactor() { return (mSafetyFactor);} int& GetDeltaOption() { return (mDeltaOption);} ParticleCreatorDestructor::Pointer& GetParticleCreatorDestructor() { 
return (mpParticleCreatorDestructor);} SpatialSearch::Pointer& GetSpSearch() { return (mpSpSearch);} VectorResultConditionsContainerType& GetRigidFaceResults() { return (mRigidFaceResults);} VectorDistanceType& GetRigidFaceResultsDistances() { return (mRigidFaceResultsDistances);} DEM_FEM_Search::Pointer& GetDemFemSearch() { return (mpDemFemSearch);} virtual ElementsArrayType& GetElements(ModelPart& r_model_part) { return r_model_part.GetCommunicator().LocalMesh().Elements();} virtual ElementsArrayType& GetAllElements(ModelPart& r_model_part) { return r_model_part.Elements(); } protected: Parameters mParameters; bool mRemoveBallsInitiallyTouchingWallsOption; VectorResultElementsContainerType mResults; VectorDistanceType mResultsDistances; RadiusArrayType mArrayOfAmplifiedRadii; int mNStepSearch; int mSearchControl; int mNumberOfThreads; double mMaxTimeStep; double mSafetyFactor; int mDeltaOption; ParticleCreatorDestructor::Pointer mpParticleCreatorDestructor; DEM_FEM_Search::Pointer mpDemFemSearch; SpatialSearch::Pointer mpSpSearch; bool mDoSearchNeighbourElements; bool mDoSearchNeighbourFEMElements; VectorResultConditionsContainerType mRigidFaceResults; VectorDistanceType mRigidFaceResultsDistances; ModelPart *mpFem_model_part; ModelPart *mpDem_model_part; ModelPart *mpInlet_model_part; ModelPart *mpContact_model_part; ModelPart *mpCluster_model_part; ModelPart *mpRigidBody_model_part; std::vector<SphericParticle*> mListOfSphericParticles; std::vector<SphericParticle*> mListOfGhostSphericParticles; }; // Class ExplicitSolverStrategy } // namespace Kratos. #endif // KRATOS_EXPLICIT_SOLVER_STRATEGY defined
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Photoshop spec @ https://www.adobe.com/devnet-apps/photoshop/fileformatashtml % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/channel.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/constitute.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/module.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel.h" #include "magick/pixel-accessor.h" #include "magick/policy.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/registry.h" #include "magick/quantum-private.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declaractions. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declaractions. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declaractions. 
*/ typedef struct _ChannelInfo { short type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[257], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations. */ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD()() returns MagickTrue if the image format type, identified by the % magick string, is PSD. % % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. 
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Map an ImageMagick composite operator to the 4-character PSD blend-mode
  key.  On little-endian images the key is returned byte-reversed so that
  the writer emits the bytes in file order.
*/
static const char *CompositeOperatorToPSDBlendMode(Image *image)
{
  switch (image->compose)
  {
    case ColorBurnCompositeOp:
      return(image->endian == LSBEndian ? "vidi" : "idiv");
    case ColorDodgeCompositeOp:
      return(image->endian == LSBEndian ? " vid" : "div ");
    case ColorizeCompositeOp:
      return(image->endian == LSBEndian ? "rloc" : "colr");
    case DarkenCompositeOp:
      return(image->endian == LSBEndian ? "krad" : "dark");
    case DifferenceCompositeOp:
      return(image->endian == LSBEndian ? "ffid" : "diff");
    case DissolveCompositeOp:
      return(image->endian == LSBEndian ? "ssid" : "diss");
    case ExclusionCompositeOp:
      return(image->endian == LSBEndian ? "dums" : "smud");
    case HardLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLh" : "hLit");
    case HardMixCompositeOp:
      return(image->endian == LSBEndian ? "xiMh" : "hMix");
    case HueCompositeOp:
      return(image->endian == LSBEndian ? " euh" : "hue ");
    case LightenCompositeOp:
      return(image->endian == LSBEndian ? "etil" : "lite");
    case LinearBurnCompositeOp:
      return(image->endian == LSBEndian ? "nrbl" : "lbrn");
    case LinearDodgeCompositeOp:
      return(image->endian == LSBEndian ? "gddl" : "lddg");
    case LinearLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLl" : "lLit");
    case LuminizeCompositeOp:
      return(image->endian == LSBEndian ? " mul" : "lum ");
    case MultiplyCompositeOp:
      return(image->endian == LSBEndian ? " lum" : "mul ");
    case OverlayCompositeOp:
      return(image->endian == LSBEndian ? "revo" : "over");
    case PinLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLp" : "pLit");
    case SaturateCompositeOp:
      return(image->endian == LSBEndian ? " tas" : "sat ");
    case ScreenCompositeOp:
      return(image->endian == LSBEndian ? "nrcs" : "scrn");
    case SoftLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLs" : "sLit");
    case VividLightCompositeOp:
      return(image->endian == LSBEndian ? "tiLv" : "vLit");
    case OverCompositeOp:
    default:
      return(image->endian == LSBEndian ? "mron" : "norm");
  }
}

/*
  For some reason Photoshop seems to blend semi-transparent pixels with
  white.  This method reverts the blending.  This can be disabled by setting
  the option 'psd:alpha-unblend' to off.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image,ExceptionInfo* exception)
{
  const char
    *option;

  MagickBooleanType
    status;

  ssize_t
    y;

  /* Only sRGB images with an alpha channel need un-blending. */
  if ((image->matte == MagickFalse) || (image->colorspace != sRGBColorspace))
    return(MagickTrue);
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringNotFalse(option) == MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;

      gamma=QuantumScale*GetPixelAlpha(q);
      /* Invert "blend with white": c' = (c - (1-a)*white) / a. */
      if (gamma != 0.0 && gamma != 1.0)
        {
          SetPixelRed(q,(GetPixelRed(q)-((1.0-gamma)*QuantumRange))/gamma);
          SetPixelGreen(q,(GetPixelGreen(q)-((1.0-gamma)*QuantumRange))/gamma);
          SetPixelBlue(q,(GetPixelBlue(q)-((1.0-gamma)*QuantumRange))/gamma);
        }
      q++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}

/*
  Map a PSD compression marker onto ImageMagick's CompressionType.
*/
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  switch (compression)
  {
    case RLE:
      return RLECompression;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
      return ZipCompression;
    default:
      return NoCompression;
  }
}

/*
  Scale the alpha channel of the layer image by the layer opacity (or, when
  revert is MagickTrue, divide it back out).  A fully opaque layer opacity
  is a no-op.
*/
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  if (opacity == QuantumRange)
    return(MagickTrue);
  if (image->matte != MagickTrue)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        SetPixelAlpha(q,(Quantum) (QuantumScale*(GetPixelAlpha(q)*opacity)));
      else if (opacity > 0)
        SetPixelAlpha(q,(Quantum) (QuantumRange*(GetPixelAlpha(q)/
          (MagickRealType) opacity)));
      q++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}

/*
  Multiply the image's alpha channel by the intensity of the layer's opacity
  mask (or divide it back out when revert is MagickTrue).  The mask is first
  composited over a canvas-sized clone filled with the mask background value
  so it covers the whole layer.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;

  MagickBooleanType
    status;

  MagickPixelPacket
    color;

  ssize_t
    y;

  if (image->matte == MagickFalse)
    return(MagickTrue);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  complete_mask=CloneImage(image,0,0,MagickTrue,exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->matte=MagickTrue;
  GetMagickPixelPacket(complete_mask,&color);
  color.red=(MagickRealType) background;
  (void) SetImageColor(complete_mask,&color);
  status=CompositeImage(complete_mask,OverCompositeOp,mask,
    mask->page.x-image->page.x,mask->page.y-image->page.y);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register PixelPacket *p; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (PixelPacket *) NULL) || (p == (PixelPacket *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=(MagickRealType) GetPixelAlpha(q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(q,ClampToQuantum(intensity*(QuantumScale*alpha))); else if (intensity > 0) SetPixelAlpha(q,ClampToQuantum((alpha/intensity)*QuantumRange)); q++; p++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=(char) layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define 
CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(const Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); } if (image->depth > 16) return(4); if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static StringInfo 
*ParseImageResourceBlocks(Image *image,
  const unsigned char *blocks,size_t length,
  MagickBooleanType *has_merged_image)
{
  /*
    Walk the 8BIM image-resource blocks, capturing the whole section as an
    "8bim" profile.  Resource 0x03ed supplies the x/y resolution; resource
    0x0421 (version info) can reveal that no merged image is stored.
    Every other resource is skipped.  Returns NULL if the section is too
    short to contain a resource block.
  */
  const unsigned char
    *p;

  ssize_t
    offset;

  StringInfo
    *profile;

  unsigned char
    name_length;

  unsigned int
    count;

  unsigned short
    id,
    short_sans;

  if (length < 16)
    return((StringInfo *) NULL);
  profile=BlobToStringInfo((const void *) NULL,length);
  SetStringInfoDatum(profile,blocks);
  SetStringInfoName(profile,"8bim");
  for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushCharPixel(p,&name_length);
    /* Pascal name is padded so length byte + name occupy an even count. */
    if ((name_length % 2) == 0)
      name_length++;
    p+=name_length;
    if (p > (blocks+length-4))
      break;
    p=PushLongPixel(MSBEndian,p,&count);
    offset=(ssize_t) count;
    if (((p+offset) < blocks) || ((p+offset) > (blocks+length)))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MaxTextExtent];

        unsigned short
          resolution;

        /*
          Resolution info.
        */
        if (offset < 16)
          break;
        /* Only the integer part of the 16.16 fixed-point value is used. */
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->x_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->x_resolution);
        (void) SetImageProperty(image,"tiff:XResolution",value);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->y_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->y_resolution);
        (void) SetImageProperty(image,"tiff:YResolution",value);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        if ((offset > 4) && (*(p+4) == 0))
          *has_merged_image=MagickFalse;
        p+=offset;
        break;
      }
      default:
      {
        p+=offset;
        break;
      }
    }
    /* Resource data is padded to an even byte count. */
    if ((offset & 0x01) != 0)
      p++;
  }
  return(profile);
}

/*
  Map a 4-character PSD blend-mode key onto the corresponding ImageMagick
  composite operator; unknown keys fall back to OverCompositeOp.
*/
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
  if (mode == (const char *) NULL)
    return(OverCompositeOp);
  if (LocaleNCompare(mode,"norm",4) == 0)
    return(OverCompositeOp);
  if (LocaleNCompare(mode,"mul ",4) == 0)
    return(MultiplyCompositeOp);
  if (LocaleNCompare(mode,"diss",4) == 0)
    return(DissolveCompositeOp);
  if (LocaleNCompare(mode,"diff",4) == 0)
    return(DifferenceCompositeOp);
  if (LocaleNCompare(mode,"dark",4) == 0)
    return(DarkenCompositeOp);
  if (LocaleNCompare(mode,"lite",4) == 0)
    return(LightenCompositeOp);
  if (LocaleNCompare(mode,"hue ",4) == 0)
    return(HueCompositeOp);
  if (LocaleNCompare(mode,"sat ",4) == 0)
    return(SaturateCompositeOp);
  if (LocaleNCompare(mode,"colr",4) == 0)
    return(ColorizeCompositeOp);
  if (LocaleNCompare(mode,"lum ",4) == 0)
    return(LuminizeCompositeOp);
  if (LocaleNCompare(mode,"scrn",4) == 0)
    return(ScreenCompositeOp);
  if (LocaleNCompare(mode,"over",4) == 0)
    return(OverlayCompositeOp);
  if (LocaleNCompare(mode,"hLit",4) == 0)
    return(HardLightCompositeOp);
  if (LocaleNCompare(mode,"sLit",4) == 0)
    return(SoftLightCompositeOp);
  if (LocaleNCompare(mode,"smud",4) == 0)
    return(ExclusionCompositeOp);
  if (LocaleNCompare(mode,"div ",4) == 0)
    return(ColorDodgeCompositeOp);
  if (LocaleNCompare(mode,"idiv",4) == 0)
    return(ColorBurnCompositeOp);
  if (LocaleNCompare(mode,"lbrn",4) == 0)
    return(LinearBurnCompositeOp);
  if (LocaleNCompare(mode,"lddg",4) == 0)
    return(LinearDodgeCompositeOp);
  if (LocaleNCompare(mode,"lLit",4) == 0)
    return(LinearLightCompositeOp);
  if (LocaleNCompare(mode,"vLit",4) == 0)
    return(VividLightCompositeOp);
  if (LocaleNCompare(mode,"pLit",4) == 0)
    return(PinLightCompositeOp);
  if (LocaleNCompare(mode,"hMix",4) == 0)
    return(HardMixCompositeOp);
  return(OverCompositeOp);
}

/*
  Reverse a byte string in place on little-endian images (XOR swap); used
  to normalize 4-character keys read from the blob.
*/
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
  char
    *q;

  if (image->endian == MSBEndian)
    return;
  q=p+length;
  for(--q; p < q; ++p, --q)
  {
    *p = *p ^ *q,
    *q = *p ^ *q,
    *p = *p ^ *q;
  }
}

/*
  Store one decoded sample into the pixel/index arrays.  Negative channel
  type ids denote alpha/mask channels; 0..3 are color component indices.
*/
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum
pixel,PixelPacket *q,
  IndexPacket *indexes,ssize_t x)
{
  if (image->storage_class == PseudoClass)
    {
      PixelPacket
        *color;

      IndexPacket
        index;

      index=(IndexPacket) pixel;
      if (packet_size == 1)
        index=(IndexPacket) ScaleQuantumToChar(index);
      index=ConstrainColormapIndex(image,(ssize_t) index);
      if (type == 0)
        SetPixelIndex(indexes+x,index);
      if ((type == 0) && (channels > 1))
        return;
      color=image->colormap+(ssize_t) GetPixelIndex(indexes+x);
      if (type != 0)
        SetPixelAlpha(color,pixel);
      SetPixelRGBO(q,color);
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(q,pixel);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(q,pixel);
      /* Grayscale or mask channel: replicate red into green and blue. */
      if ((channels < 3) || (type == -2))
        {
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
        }
      break;
    }
    case -3:
    case 1:
    {
      SetPixelGreen(q,pixel);
      break;
    }
    case -4:
    case 2:
    {
      SetPixelBlue(q,pixel);
      break;
    }
    case 3:
    {
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(indexes+x,pixel);
      else
        if (image->matte != MagickFalse)
          SetPixelAlpha(q,pixel);
      break;
    }
    case 4:
    {
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->matte != MagickFalse)
        SetPixelAlpha(q,pixel);
      break;
    }
  }
}

/*
  Decode one row of channel data (already decompressed) into the image's
  pixel cache.  Samples are read MSB-first at 1, 2 or 4 bytes per packet;
  1-bit images are bit-expanded here (set bit -> 0, clear bit ->
  QuantumRange).
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,
  const size_t channels,const ssize_t row,const ssize_t type,
  const unsigned char *pixels,ExceptionInfo *exception)
{
  Quantum
    pixel;

  register const unsigned char
    *p;

  register IndexPacket
    *indexes;

  register PixelPacket
    *q;

  register ssize_t
    x;

  size_t
    packet_size;

  unsigned short
    nibble;

  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (PixelPacket *) NULL)
    return MagickFalse;
  indexes=GetAuthenticIndexQueue(image);
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      if (packet_size == 2)
        {
          p=PushShortPixel(MSBEndian,p,&nibble);
          pixel=ScaleShortToQuantum(nibble);
        }
      else
        {
          /* 32-bit channels are stored as big-endian floats in [0,1]. */
          MagickFloatType
            nibble;

          p=PushFloatPixel(MSBEndian,p,&nibble);
          pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble);
        }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q,indexes,x);
        q++;
      }
    else
      {
        ssize_t
          bit,
          number_bits;

        /* Expand up to 8 packed bits; the last byte of a row may be short. */
        number_bits=(ssize_t) image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit=0; bit < number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q++,indexes,x++);
        }
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}

/*
  Read an uncompressed (RAW) channel: one stored row per image row, decoded
  through ReadPSDChannelPixels.
*/
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(pixels,0,row_size*sizeof(*pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,row_size,pixels);
    if (count != (ssize_t) row_size)
      {
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception);
    if (status == MagickFalse)
      break;
  }
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}

/*
  Read the per-row compressed-size table that precedes RLE channel data:
  16-bit entries for PSD (version 1), 32-bit for PSB.  Returns NULL on
  allocation failure.
*/
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  ssize_t
    y;

  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if(sizes != (MagickOffsetType *) NULL)
    {
      for (y=0; y < (ssize_t) size; y++)
      {
        if (psd_info->version == 1)
          sizes[y]=(MagickOffsetType) ReadBlobShort(image);
        else
          sizes[y]=(MagickOffsetType) ReadBlobLong(image);
      }
    }
  return sizes;
}

static MagickBooleanType ReadPSDChannelRLE(Image
*image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  /*
    Read an RLE (PackBits) compressed channel: for every image row, read
    sizes[y] compressed bytes, decode them with DecodePSDPixels, then store
    the row via ReadPSDChannelPixels.
  */
  MagickBooleanType
    status;

  size_t
    length,
    row_size;

  ssize_t
    count,
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Size the compressed buffer by the largest row; reject absurd sizes. */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  if (length > (row_size+2048))
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename);
    }
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) memset(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /*
      For 1-bit images pass a sentinel depth (123456) so DecodePSDPixels
      copies the packed bytes verbatim; ReadPSDChannelPixels expands the
      bits afterwards.
    */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}

#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Read a ZIP-compressed channel: inflate compact_size bytes into a whole
  channel's worth of samples, undo the horizontal delta prediction when
  compression is ZipWithPrediction, then store the rows.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register unsigned char
    *p;

  size_t
    count,
    length,
    packet_size,
    row_size;

  ssize_t
    y;

  unsigned char
    *compact_pixels,
    *pixels;

  z_stream
    stream;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  if ((MagickSizeType) compact_size > GetBlobSize(image))
    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
      image->filename);
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
    }
  memset(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) == Z_OK)
    {
      int
        ret;

      while (stream.avail_out > 0)
      {
        ret=inflate(&stream,Z_SYNC_FLUSH);
        if ((ret != Z_OK) && (ret != Z_STREAM_END))
          {
            (void) inflateEnd(&stream);
            compact_pixels=(unsigned char *) RelinquishMagickMemory(
              compact_pixels);
            pixels=(unsigned char *) RelinquishMagickMemory(pixels);
            return(MagickFalse);
          }
        if (ret == Z_STREAM_END)
          break;
      }
      (void) inflateEnd(&stream);
    }
  if (compression == ZipWithPrediction)
    {
      /* Undo per-row left-neighbor delta prediction. */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              /* 16-bit samples: add with carry from the low byte. */
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif

/*
  Read one channel of a layer, dispatching on the channel's compression
  type.  Channel type ids < -1 are layer-mask channels and are decoded
  into a separate mask image (or skipped entirely).
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;

  MagickOffsetType
    offset;

  MagickBooleanType
    status;

  channel_image=image;
  mask=(Image *) NULL;
  if ((layer_info->channel_info[channel].type < -1) &&
      (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0))
    {
      const char
        *option;

      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
*/
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* Skip the channel data (size includes the 2-byte marker). */
          (void) SeekBlob(image,(MagickOffsetType)
            layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask != (Image *) NULL)
        {
          (void) ResetImagePixels(mask,exception);
          mask->matte=MagickFalse;
          channel_image=mask;
        }
    }
  offset=TellBlob(image);
  status=MagickFalse;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;

      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
      status=ReadPSDChannelRLE(channel_image,psd_info,
        (ssize_t) layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        (ssize_t) layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* Re-seek to the channel's end regardless of how much was consumed. */
  (void) SeekBlob(image,offset+layer_info->channel_info[channel].size-2,
    SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        (void) DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  if (mask != (Image *) NULL)
    {
      if (layer_info->mask.image != (Image *) NULL)
        layer_info->mask.image=DestroyImage(layer_info->mask.image);
      layer_info->mask.image=mask;
    }
  return(status);
}

/*
  Decode one layer: configure its image (blend mode, colorspace, hidden
  artifacts), read every channel, then apply the layer opacity and the
  optional opacity mask.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MaxTextExtent];

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  ssize_t
    j;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    {
      layer_info->image->compose=NoCompositeOp;
      (void) SetImageArtifact(layer_info->image,"psd:layer.invisible","true");
    }
  if (psd_info->mode == CMYKMode)
    (void) SetImageColorspace(layer_info->image,CMYKColorspace);
  else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
           (psd_info->mode == GrayscaleMode))
    (void) SetImageColorspace(layer_info->image,GRAYColorspace);
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *)
    layer_info->name);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    if ((compression == ZipWithPrediction) && (image->depth == 32))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),
          TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)");
        return(MagickFalse);
      }
    layer_info->image->compression=ConvertPSDCompression(compression);
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->matte=MagickTrue;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,
      (size_t) j,compression,exception);
    InheritException(exception,&layer_info->image->exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateImage(layer_info->image,MagickFalse);
  if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL))
    {
      const char
        *option;

      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /*
        Do not composite the mask when it is disabled.
      */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}

/*
  Sanity-check a layer's channel table against the color mode: every color
  channel required by the mode (and the optional alpha channel) must be
  present exactly once, and indexed images must have channel 0 first.
*/
static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info,
  LayerInfo *layer_info)
{
  int
    channel_type;

  register ssize_t
    i;

  if (layer_info->channels < psd_info->min_channels)
    return(MagickFalse);
  /* Build the set of required channels, then clear each one as found. */
  channel_type=RedChannel;
  if (psd_info->min_channels >= 3)
    channel_type|=(GreenChannel | BlueChannel);
  if (psd_info->min_channels >= 4)
    channel_type|=BlackChannel;
  for (i=0; i < (ssize_t) layer_info->channels; i++)
  {
    short
      type;

    type=layer_info->channel_info[i].type;
    if ((i == 0) && (psd_info->mode == IndexedMode) && (type != 0))
      return(MagickFalse);
    if (type == -1)
      {
        channel_type|=AlphaChannel;
        continue;
      }
    if (type < -1)
      continue;
    if (type == 0)
      channel_type&=~RedChannel;
    else if (type == 1)
      channel_type&=~GreenChannel;
    else if (type == 2)
      channel_type&=~BlueChannel;
    else if (type == 3)
      channel_type&=~BlackChannel;
  }
  if (channel_type == 0)
    return(MagickTrue);
  /* All color channels found plus an alpha channel is also acceptable. */
  if ((channel_type == AlphaChannel) &&
      (layer_info->channels >= psd_info->min_channels + 1))
    return(MagickTrue);
  return(MagickFalse);
}

/*
  Parse the layer-and-mask information section: read the layer records
  (geometry, channel tables, blend keys, masks), then decode each layer's
  pixel data.  When skip_layers is set only the alpha-presence information
  is extracted.
*/
static MagickBooleanType ReadPSDLayersInternal(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    j,
    number_layers;

  size=GetPSDSize(psd_info,image);
  if (size == 0)
    {
      /*
        Skip layers & masks.
*/ (void) ReadBlobLong(image); count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,(size_t) count); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(MagickTrue); else { count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,4); if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) || (LocaleNCompare(type,"Lr32",4) == 0))) size=GetPSDSize(psd_info,image); else return(MagickTrue); } } status=MagickTrue; if (size != 0) { layer_info=(LayerInfo *) NULL; number_layers=(ssize_t) ReadBlobSignedShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. */ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->matte=MagickTrue; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(layer_info,0,(size_t) number_layers*sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t top, left, bottom, right; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); top=(ssize_t) ReadBlobSignedLong(image); left=(ssize_t) ReadBlobSignedLong(image); bottom=(ssize_t) 
ReadBlobSignedLong(image); right=(ssize_t) ReadBlobSignedLong(image); if ((right < left) || (bottom < top)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } layer_info[i].page.y=top; layer_info[i].page.x=left; layer_info[i].page.width=(size_t) (right-left); layer_info[i].page.height=(size_t) (bottom-top); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); if ((layer_info[i].channel_info[j].type < -4) || (layer_info[i].channel_info[j].type > 4)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"NoSuchImageChannel", image->filename); } layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) 
LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey); if (count != 4) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } ReversePSDString(image,layer_info[i].blendkey,4); layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. 
*/ layer_info[i].mask.page.y=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.x=(ssize_t) ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) ( ReadBlobSignedLong(image)-layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) ( ReadBlobSignedLong(image)-layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width, (double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } /* Layer name. 
*/ length=(MagickSizeType) (unsigned char) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; if (length > GetBlobSize(image)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "InsufficientImageDataInFile",image->filename); } layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. 
*/ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping == MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=0; j < (ssize_t) layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i, (MagickSizeType) number_layers); if (status == MagickFalse) break; } } if (status != MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers > 0) { for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; } layer_info=(LayerInfo *) 
RelinquishMagickMemory(layer_info);
        }
      else
        layer_info=DestroyLayerInfo(layer_info,number_layers);
    }
  return(status);
}

/*
  ReadPSDLayers(): policy-checked public entry point for reading the PSD
  layer-and-mask block.  Consults the coder security policy for the "PSD"
  coder first and refuses (returns MagickFalse) when read rights are not
  authorized; otherwise delegates the actual parsing to
  ReadPSDLayersInternal().
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  PolicyDomain
    domain;

  PolicyRights
    rights;

  domain=CoderPolicyDomain;
  rights=ReadPolicyRights;
  /* honor the security policy before touching the blob */
  if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse)
    return(MagickFalse);
  return(ReadPSDLayersInternal(image,image_info,psd_info,skip_layers,
    exception));
}

/*
  ReadPSDMergedImage(): read the flattened (pre-composited) image data that
  follows the layer blocks.  The section starts with a 2-byte compression
  code; only Raw and RLE are accepted here (anything else raises a
  CompressionNotSupported warning and returns MagickFalse).  For RLE the
  per-row byte counts (one table entry per row per channel) are read up
  front via ReadPSDRLESizes().  Each channel is then decoded in turn and
  progress is reported per channel.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image* image,const PSDInfo* psd_info,ExceptionInfo *exception)
{
  MagickOffsetType
    *sizes;

  MagickBooleanType
    status;

  PSDCompressionType
    compression;

  register ssize_t
    i;

  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if (compression != Raw && compression != RLE)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* one RLE scanline-length entry per row per channel */
      sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels);
      if (sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (i=0; i < (ssize_t) psd_info->channels; i++)
  {
    ssize_t
      type;

    type=i;
    /* second channel of a 2-channel image is passed as type -1 —
       presumably alpha of a gray+alpha image; the channel reader
       interprets negative types specially (TODO confirm) */
    if ((type == 1) && (psd_info->channels == 2))
      type=-1;
    if (compression == RLE)
      status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows),
        exception);
    else
      status=ReadPSDChannelRaw(image,psd_info->channels,type,exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,(MagickOffsetType) i,
        psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  /* PSD stores CMYK inverted relative to MagickCore's convention */
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateImage(image,MagickFalse);
  if (status != MagickFalse)
status=CorrectPSDAlphaBlend(image_info,image,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); return(status); } static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType has_merged_image, skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t imageListLength; ssize_t count; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read image header. */ image->endian=MSBEndian; count=ReadBlob(image,4,(unsigned char *) psd_info.signature); psd_info.version=ReadBlobMSBShort(image); if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlob(image,6,psd_info.reserved); psd_info.channels=ReadBlobMSBShort(image); if (psd_info.channels < 1) ThrowReaderException(CorruptImageError,"MissingImageChannel"); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); psd_info.rows=ReadBlobMSBLong(image); psd_info.columns=ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.depth=ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16) && (psd_info.depth != 32)) 
ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.mode=ReadBlobMSBShort(image); if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double) psd_info.columns,(double) psd_info.rows,(double) psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType) psd_info.mode)); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize image. */ image->depth=psd_info.depth; image->columns=psd_info.columns; image->rows=psd_info.rows; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } status=ResetImagePixels(image,exception); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } psd_info.min_channels=3; if (psd_info.mode == LabMode) (void) SetImageColorspace(image,LabColorspace); if (psd_info.mode == CMYKMode) { psd_info.min_channels=4; (void) SetImageColorspace(image,CMYKColorspace); } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { if (psd_info.depth != 32) { status=AcquireImageColormap(image,MagickMin((size_t) (psd_info.depth < 16 ? 256 : 65536), MaxColormapSize)); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image colormap allocated"); } psd_info.min_channels=1; (void) SetImageColorspace(image,GRAYColorspace); } if (psd_info.channels < psd_info.min_channels) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Read PSD raster colormap only present for indexed and duotone images. 
*/ length=ReadBlobMSBLong(image); if ((psd_info.mode == IndexedMode) && (length < 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (length != 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading colormap"); if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32)) { /* Duotone image data; the format of this data is undocumented. */ (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR); } else { size_t number_colors; /* Read PSD raster colormap. */ number_colors=(size_t) length/3; if (number_colors > 65536) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireImageColormap(image,number_colors) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].red=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].green=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].blue=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->matte=MagickFalse; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); has_merged_image=MagickTrue; profile=(StringInfo *) NULL; length=ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* Image resources block. 
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading image resource blocks - %.20g bytes",(double) ((MagickOffsetType) length)); if (length > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); blocks=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,(size_t) length,blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) { blocks=(unsigned char *) RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } profile=ParseImageResourceBlocks(image,blocks,(size_t) length, &has_merged_image); blocks=(unsigned char *) RelinquishMagickMemory(blocks); } /* Layer and mask block. */ length=GetPSDSize(&psd_info,image); if (length == 8) { length=ReadBlobMSBLong(image); length=ReadBlobMSBLong(image); } offset=TellBlob(image); skip_layers=MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " read composite only"); skip_layers=MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers, exception) != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } /* Skip the rest of the layer and mask information. */ (void) SeekBlob(image,offset+length,SEEK_SET); } /* If we are only "pinging" the image, then we're done - so return. 
*/ if (EOFBlob(image) != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); } if (image_info->ping != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Read the precombined layer, present for PSD < 4 compatibility. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading the precombined layer"); imageListLength=GetImageListLength(image); if (has_merged_image != MagickFalse || imageListLength == 1) has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image, &psd_info,exception); if ((has_merged_image == MagickFalse) && (imageListLength == 1) && (length != 0)) { (void) SeekBlob(image,offset,SEEK_SET); status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse, exception); if (status != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } } if (has_merged_image == MagickFalse) { Image *merged; if (imageListLength == 1) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); } image->background_color.opacity=TransparentOpacity; (void) SetImageBackgroundColor(image); merged=MergeImageLayers(image,FlattenLayer,exception); ReplaceImageInList(&image,merged); } if (profile != (StringInfo *) NULL) { Image *next; next=image; while (next != (Image *) NULL) { (void) SetImageProfile(next,GetStringInfoName(profile),profile); next=next->next; } profile=DestroyStringInfo(profile); } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P S D I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterPSDImage() adds properties for the PSD image format to
%  the list of supported formats.  The properties include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterPSDImage method is:
%
%      size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
  MagickInfo
    *entry;

  /* PSB: the large-document (version 2) variant of the format */
  entry=SetMagickInfo("PSB");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  /* the coder seeks backwards/forwards in the blob, so it needs a
     seekable stream */
  entry->seekable_stream=MagickTrue;
  entry->description=ConstantString("Adobe Large Document Format");
  entry->magick_module=ConstantString("PSD");
  (void) RegisterMagickInfo(entry);
  /* PSD: the classic (version 1) Photoshop format */
  entry=SetMagickInfo("PSD");
  entry->decoder=(DecodeImageHandler *) ReadPSDImage;
  entry->encoder=(EncodeImageHandler *) WritePSDImage;
  entry->magick=(IsImageFormatHandler *) IsPSD;
  entry->seekable_stream=MagickTrue;
  entry->description=ConstantString("Adobe Photoshop bitmap");
  entry->magick_module=ConstantString("PSD");
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r P S D I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterPSDImage() removes format registrations made by the
%  PSD module from the list of supported formats.
%
%  The format of the UnregisterPSDImage method is:
%
%      UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
  (void) UnregisterMagickInfo("PSB");
  (void) UnregisterMagickInfo("PSD");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e P S D I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
%  The format of the WritePSDImage method is:
%
%      MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image:  The image.
%
*/

/*
  SetPSDOffset(): write a row-length placeholder at the current blob
  position.  PSD (version 1) uses 2-byte offsets, PSB (version 2) uses
  4-byte offsets.  Returns the byte count written.
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBShort(image,(unsigned short) offset));
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}

/*
  WritePSDOffset(): back-patch a previously written offset field.  Seeks to
  `offset`, rewrites the value at version-dependent width (2 bytes for PSD,
  4 for PSB), then restores the original blob position.
*/
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBShort(image,(unsigned short) size);
  else
    result=WriteBlobMSBLong(image,(unsigned int) size);
  /* restore the stream so sequential writing can continue */
  (void) SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}

/*
  SetPSDSize(): write a section-length field at the current blob position.
  PSD (version 1) sizes are 4 bytes, PSB (version 2) sizes are 8 bytes.
*/
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBLong(image,(unsigned int) size));
  return(WriteBlobMSBLongLong(image,size));
}

/*
  WritePSDSize(): back-patch a previously written size field (same
  seek/rewrite/restore pattern as WritePSDOffset, but at size-field width).
*/
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickOffsetType offset)
{
  MagickOffsetType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  (void) SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBLong(image,(unsigned int) size);
  else
result=WriteBlobMSBLongLong(image,size); (void) SeekBlob(image,current_offset,SEEK_SET); return(result); } static size_t PSDPackbitsEncodeImage(Image *image,const size_t length, const unsigned char *pixels,unsigned char *compact_pixels) { int count; register ssize_t i, j; register unsigned char *q; unsigned char *packbits; /* Compress pixels with Packbits encoding. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pixels != (unsigned char *) NULL); assert(compact_pixels != (unsigned char *) NULL); packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits)); if (packbits == (unsigned char *) NULL) ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", image->filename); q=compact_pixels; for (i=(ssize_t) length; i != 0; ) { switch (i) { case 1: { i--; *q++=(unsigned char) 0; *q++=(*pixels); break; } case 2: { i-=2; *q++=(unsigned char) 1; *q++=(*pixels); *q++=pixels[1]; break; } case 3: { i-=3; if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { *q++=(unsigned char) ((256-3)+1); *q++=(*pixels); break; } *q++=(unsigned char) 2; *q++=(*pixels); *q++=pixels[1]; *q++=pixels[2]; break; } default: { if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { /* Packed run. */ count=3; while (((ssize_t) count < i) && (*pixels == *(pixels+count))) { count++; if (count >= 127) break; } i-=count; *q++=(unsigned char) ((256-count)+1); *q++=(*pixels); pixels+=count; break; } /* Literal run. 
*/ count=0; while ((*(pixels+count) != *(pixels+count+1)) || (*(pixels+count+1) != *(pixels+count+2))) { packbits[count+1]=pixels[count]; count++; if (((ssize_t) count >= (i-3)) || (count >= 127)) break; } i-=count; *packbits=(unsigned char) (count-1); for (j=0; j <= (ssize_t) count; j++) *q++=packbits[j]; pixels+=count; break; } } } *q++=(unsigned char) 128; /* EOD marker */ packbits=(unsigned char *) RelinquishMagickMemory(packbits); return((size_t) (q-compact_pixels)); } static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image, const Image *next_image,const ssize_t channels) { ssize_t i, offset, y; if (next_image->compression == RLECompression) { offset=WriteBlobMSBShort(image,RLE); for (i=0; i < channels; i++) for (y=0; y < (ssize_t) next_image->rows; y++) offset+=SetPSDOffset(psd_info,image,0); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (next_image->compression == ZipCompression) offset=WriteBlobMSBShort(image,ZipWithoutPrediction); #endif else offset=WriteBlobMSBShort(image,Raw); return((size_t) offset); } static size_t WritePSDChannel(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, const QuantumType quantum_type, unsigned char *compact_pixels, MagickOffsetType size_offset,const MagickBooleanType separate) { MagickBooleanType monochrome; QuantumInfo *quantum_info; register const PixelPacket *p; register ssize_t i; size_t count, length; ssize_t y; unsigned char *pixels; #ifdef MAGICKCORE_ZLIB_DELEGATE int flush, level; unsigned char *compressed_pixels; z_stream stream; compressed_pixels=(unsigned char *) NULL; flush=Z_NO_FLUSH; #endif count=0; if (separate != MagickFalse) { size_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,1); } if (next_image->depth > 8) next_image->depth=16; monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1) ? 
MagickTrue : MagickFalse; quantum_info=AcquireQuantumInfo(image_info,next_image); if (quantum_info == (QuantumInfo *) NULL) return(0); pixels=GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (next_image->compression == ZipCompression) { compressed_pixels=(unsigned char *) AcquireQuantumMemory( MagickMinBufferExtent,sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *) NULL) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; level=Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level=(int) image_info->quality; if (deflateInit(&stream,level) != Z_OK) { quantum_info=DestroyQuantumInfo(quantum_info); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); return(0); } } #endif for (y=0; y < (ssize_t) next_image->rows; y++) { p=GetVirtualPixels(next_image,0,y,next_image->columns,1,&image->exception); if (p == (const PixelPacket *) NULL) break; length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info, quantum_type,pixels,&image->exception); if (monochrome != MagickFalse) for (i=0; i < (ssize_t) length; i++) pixels[i]=(~pixels[i]); if (next_image->compression == RLECompression) { length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels); count+=WriteBlob(image,length,compact_pixels); size_offset+=WritePSDOffset(psd_info,image,length,size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (next_image->compression == ZipCompression) { stream.avail_in=(uInt) length; stream.next_in=(Bytef *) pixels; if (y == (ssize_t) next_image->rows-1) flush=Z_FINISH; do { stream.avail_out=(uInt) MagickMinBufferExtent; stream.next_out=(Bytef *) compressed_pixels; if (deflate(&stream,flush) == Z_STREAM_ERROR) break; length=(size_t) MagickMinBufferExtent-stream.avail_out; if (length > 0) count+=WriteBlob(image,length,compressed_pixels); } while (stream.avail_out == 0); } #endif else 
count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      /* release the zlib stream and the staging buffer */
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}

/*
  AcquireCompactPixels(): allocate the scratch buffer used for RLE
  (PackBits) compression of one scanline.  The packet size is 2 bytes per
  sample for depth > 8, else 1; the buffer is oversized ((9*columns)+1
  packets) so worst-case PackBits expansion cannot overflow it.  On
  allocation failure an exception is recorded on the image and NULL is
  returned — callers must check for NULL.
*/
static unsigned char *AcquireCompactPixels(Image *image)
{
  size_t
    packet_size;

  unsigned char
    *compact_pixels;

  packet_size=image->depth > 8UL ? 2UL : 1UL;
  compact_pixels=(unsigned char *) AcquireQuantumMemory((9*
    image->columns)+1,packet_size*sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        image->filename);
    }
  return(compact_pixels);
}

/*
  WritePSDChannels(): emit all channel data for `next_image`.  When
  `separate` is false (merged image) a single compression header plus the
  combined row-offset table for every channel is written first via
  WriteCompressionStart(); when true, each channel carries its own header.
  Returns the number of bytes written.
*/
static ssize_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate)
{
  Image
    *mask;

  MagickOffsetType
    rows_offset;

  size_t
    channels,
    length,
    offset_length;

  ssize_t
    count;

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  if (next_image->compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* merged image: derive the channel count from class/colorspace/matte */
      if ((next_image->storage_class != PseudoClass) ||
          (IsGrayImage(next_image,&next_image->exception) != MagickFalse))
        {
          if (IsGrayImage(next_image,&next_image->exception) == MagickFalse)
            channels=(size_t) (next_image->colorspace == CMYKColorspace ? 4 :
              3);
          if (next_image->matte != MagickFalse)
            channels++;
        }
      /* +2 skips the compression code so rows_offset points at the
         row-length table that WritePSDOffset later back-patches */
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,(ssize_t)
        channels);
      offset_length=(next_image->rows*(psd_info->version == 1 ?
2 : 4)); } size_offset+=2; if ((next_image->storage_class == PseudoClass) && (IsGrayImage(next_image,&next_image->exception) == MagickFalse)) { length=WritePSDChannel(psd_info,image_info,image,next_image, IndexQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (IsGrayImage(next_image,&next_image->exception) != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, GrayQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (next_image->colorspace == CMYKColorspace) (void) NegateImage(next_image,MagickFalse); length=WritePSDChannel(psd_info,image_info,image,next_image, RedQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, GreenQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, BlueQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; if (next_image->colorspace == CMYKColorspace) { length=WritePSDChannel(psd_info,image_info,image,next_image, BlackQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } if (next_image->matte != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, 
AlphaQuantum,compact_pixels,rows_offset,separate); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateImage(next_image,MagickFalse); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, &image->exception); if (mask != (Image *) NULL) { if (mask->compression == RLECompression) { compact_pixels=AcquireCompactPixels(mask); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); } static size_t WritePascalString(Image *image,const char *value,size_t padding) { size_t count, length; register ssize_t i; /* Max length is 255. */ count=0; length=(strlen(value) > 255UL ) ? 
255UL : strlen(value);
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      /* Pascal string: one length byte, then the characters */
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  /* account for the length byte when padding to the requested multiple */
  length++;
  if ((length % padding) == 0)
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}

/*
  WriteResolutionResourceBlock(): emit the 8BIM image-resource block
  0x03ED (resolution info).  Resolutions are written as 16.16 fixed-point
  pixels-per-inch (centimeter values are converted by the 2.54 factor);
  the unit field is 2 for cm, 1 for inch.
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    x_resolution,
    y_resolution;

  unsigned short
    units;

  if (image->units == PixelsPerCentimeterResolution)
    {
      /* convert pixels/cm to the fixed-point pixels/inch the block stores */
      x_resolution=2.54*65536.0*image->x_resolution+0.5;
      y_resolution=2.54*65536.0*image->y_resolution+0.5;
      units=2;
    }
  else
    {
      x_resolution=65536.0*image->x_resolution+0.5;
      y_resolution=65536.0*image->y_resolution+0.5;
      units=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,units); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5));
  (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,units); /* height unit */
}

/*
  WriteChannelSize(): write a layer channel record — the signed channel
  type id followed by a zero size placeholder (version-dependent width)
  that is back-patched later once the channel data length is known.
*/
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  ssize_t
    count;

  count=WriteBlobMSBSignedShort(image,channel);
  count+=SetPSDSize(psd_info,image,0);
  return((size_t) count);
}

/*
  RemoveICCProfileFromResourceBlock(): scan an 8BIM resource block list
  and splice out the ICC profile resource (id 0x040f) in place, shrinking
  the StringInfo length accordingly.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
  register const unsigned char
    *p;

  size_t
    length;

  unsigned char
    *datum;

  unsigned int
    count,
    long_sans;

  unsigned short
    id,
    short_sans;

  length=GetStringInfoLength(bim_profile);
  /* too small to hold even one resource header */
  if (length < 16)
    return;
  datum=GetStringInfoDatum(bim_profile);
  for (p=datum; (p >= datum) && (p < (datum+length-16)); )
  {
    register unsigned char
      *q;

    q=(unsigned char *) p;
    if (LocaleNCompare((const
char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if (id == 0x0000040f) { ssize_t quantum; quantum=PSDQuantum(count)+12; if ((quantum >= 12) && (quantum < (ssize_t) length)) { if ((q+quantum < (datum+length-16))) (void) memmove(q,q+quantum,length-quantum-(q-datum)); SetStringInfoLength(bim_profile,length-quantum); } break; } p+=count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; ssize_t cnt; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) return; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); cnt=PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) && ((ssize_t) length-(cnt+12)-(q-datum)) > 0) { (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum)); SetStringInfoLength(bim_profile,length-(cnt+12)); break; } p+=count; if ((count & 0x01) != 0) p++; } } static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info, Image *image) { #define PSDKeySize 5 #define PSDAllowedLength 36 char key[PSDKeySize]; /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", 
"PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo *info; MagickBooleanType found; register size_t i; size_t remaining_length, length; StringInfo *profile; unsigned char *p; unsigned int size; info=GetImageProfile(image,"psd:additional-info"); if (info == (const StringInfo *) NULL) return((const StringInfo *) NULL); option=GetImageOption(image_info,"psd:additional-info"); if (LocaleCompare(option,"all") == 0) return(info); if (LocaleCompare(option,"selective") != 0) { profile=RemoveImageProfile(image,"psd:additional-info"); return(DestroyStringInfo(profile)); } length=GetStringInfoLength(info); p=GetStringInfoDatum(info); remaining_length=length; length=0; while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(char) (*p++); key[1]=(char) (*p++); key[2]=(char) (*p++); key[3]=(char) (*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) return((const StringInfo *) NULL); found=MagickFalse; for (i=0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0) continue; found=MagickTrue; break; } remaining_length-=(size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p=(unsigned char *) memmove(p-12,p+size,remaining_length); continue; } length+=(size_t) size+12; p+=size; } profile=RemoveImageProfile(image,"psd:additional-info"); if (length == 0) return(DestroyStringInfo(profile)); SetStringInfoLength(profile,(const size_t) length); (void) SetImageProfile(image,"psd:additional-info",info); return(profile); } static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image) { char layer_name[MaxTextExtent]; const char *property; const StringInfo *icc_profile, *info; Image *base_image, *next_image; MagickBooleanType status; MagickOffsetType *layer_size_offsets, size_offset; PSDInfo 
psd_info; register ssize_t i; size_t layer_count, layer_index, length, name_length, num_channels, packet_size, rounded_size, size; StringInfo *bim_profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->matte != MagickFalse) packet_size+=image->depth > 8 ? 2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,&image->exception) != MagickFalse)) num_channels=(image->matte != MagickFalse ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorMatteType) && (image->storage_class == PseudoClass)) num_channels=(image->matte != MagickFalse ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass); if (image->colorspace != CMYKColorspace) num_channels=(image->matte != MagickFalse ? 4UL : 3UL); else num_channels=(image->matte != MagickFalse ? 
5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsGrayImage(image,&image->exception) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsGrayImage(image,&image->exception) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. */ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar( image->colormap[i].green)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. 
*/ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((ssize_t) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } base_image=GetNextImageInList(image); if (base_image == (Image *)NULL) base_image=image; size=0; size_offset=TellBlob(image); (void) SetPSDSize(&psd_info,image,0); (void) SetPSDSize(&psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->matte != MagickFalse) size+=WriteBlobMSBShort(image,-(unsigned short) layer_count); else size+=WriteBlobMSBShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for 
(next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, &image->exception); default_color=(unsigned char) (strlen(property) == 9 ? 255 : 0); } size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y); size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x); size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+ next_image->rows)); size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+ next_image->columns)); channels=1; if ((next_image->storage_class != PseudoClass) && (IsGrayImage(next_image,&next_image->exception) == MagickFalse)) channels=(unsigned short) (next_image->colorspace == CMYKColorspace ? 4 : 3); total_channels=channels; if (next_image->matte != MagickFalse) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobMSBShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(&psd_info,image,(signed short) i); if (next_image->matte != MagickFalse) size+=WriteChannelSize(&psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(&psd_info,image,-2); size+=WriteBlob(image,4,(const unsigned char *) "8BIM"); size+=WriteBlob(image,4,(const unsigned char *) CompositeOperatorToPSDBlendMode(next_image)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue, &image->exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,(unsigned char) (next_image->compose == NoCompositeOp ? 
1 << 0x02 : 1)); /* layer properties - visible, etc. */ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image); property=(const char *) GetImageProperty(next_image,"label"); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MaxTextExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobMSBLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobMSBLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,&image->exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobMSBLong(image,20); size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.y); size+=WriteBlobMSBSignedLong(image,(const signed int) mask->page.x); size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->rows+ mask->page.y)); size+=WriteBlobMSBSignedLong(image,(const signed int) (mask->columns+ mask->page.x)); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,(unsigned char) ( mask->compose == NoCompositeOp ? 2 : 0)); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobMSBLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info),GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! 
*/ next_image=base_image; layer_index=0; while (next_image != NULL) { length=(size_t) WritePSDChannels(&psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) (void) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } /* Write the total size */ size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 8 : 12),size_offset); if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(&psd_info,image,rounded_size,size_offset); layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (WritePSDChannels(&psd_info,image_info,image,image,0, MagickFalse) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
GB_unop__sin_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__sin_fp32_fp32)
// op(A') function:  GB (_unop_tran__sin_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = sinf (aij)

// type of the A matrix entries
#define GB_ATYPE \
    float

// type of the C matrix entries
#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = sinf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = sinf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SIN || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies z = sinf (x) elementwise over anz entries, in parallel across
// nthreads OpenMP threads.  Returns GrB_NO_VALUE when the operator/type
// combination is compiled out via GB_DISABLE.
GrB_Info GB (_unop_apply__sin_fp32_fp32)
(
    float *Cx,               // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = sinf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = sinf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose body is shared by all generated unary ops and is pulled in
// from GB_unop_transpose.c, specialized via the GB_* macros defined above.
GrB_Info GB (_unop_tran__sin_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_3x3.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; int nn_outch = outch >> 1; int remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p + 1); const float bias0 = bias ? bias[p] : 0.f; const float bias1 = bias ? 
bias[p + 1] : 0.f; out0.fill(bias0); out1.fill(bias1); const float* k0 = kernel + p * inch * 9; const float* k1 = kernel + (p + 1) * inch * 9; for (int q = 0; q < inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr0n = outptr0 + outw; float* outptr1n = outptr1 + outw; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; #if __ARM_NEON float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k03 = vld1q_f32(k0 + 3); float32x4_t _k06 = vld1q_f32(k0 + 6); float32x4_t _k10 = vld1q_f32(k1); float32x4_t _k13 = vld1q_f32(k1 + 3); float32x4_t _k16 = vld1q_f32(k1 + 6); #endif // __ARM_NEON int i = 0; for (; i + 1 < outh; i += 2) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%5, #256] \n" "ld1 {v8.4s, v9.4s}, [%5] \n" // r0 "add %5, %5, #16 \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v14.4s, v15.4s}, [%8] \n" // r3 "add %8, %8, #16 \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v14.16b, v15.16b, #8 \n" "0: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v6.4s}, [%1] \n" // _sum0 "prfm pldl1keep, [%2, #128] \n" "ld1 {v7.4s}, [%2] \n" // _sum1 "fmla v6.4s, v8.4s, %18.s[0] \n" "fmla v7.4s, v8.4s, %21.s[0] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v12.4s}, [%3] \n" // _sum0n "prfm pldl1keep, [%4, #128] \n" "ld1 {v13.4s}, [%4] \n" // _sum1n "fmla v12.4s, v14.4s, %20.s[0] \n" "fmla v13.4s, v14.4s, %23.s[0] \n" "ext v8.16b, v8.16b, v9.16b, #8 \n" "ext v9.16b, v14.16b, v15.16b, #4 \n" "fmla v6.4s, v10.4s, %18.s[1] \n" "fmla v7.4s, v10.4s, %21.s[1] \n" "fmla v12.4s, v11.4s, %20.s[2] \n" "fmla v13.4s, v11.4s, %23.s[2] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v14.4s, v15.4s}, [%6] \n" // r1 "add %6, %6, #16 \n" "fmla v6.4s, v8.4s, %18.s[2] \n" "fmla v7.4s, v8.4s, %21.s[2] \n" "fmla v12.4s, v9.4s, %20.s[1] \n" "fmla v13.4s, v9.4s, 
%23.s[1] \n" "ext v10.16b, v14.16b, v15.16b, #4 \n" "fmla v6.4s, v14.4s, %19.s[0] \n" "fmla v7.4s, v14.4s, %22.s[0] \n" "fmla v12.4s, v14.4s, %18.s[0] \n" "fmla v13.4s, v14.4s, %21.s[0] \n" "ext v11.16b, v14.16b, v15.16b, #8 \n" "fmla v6.4s, v10.4s, %19.s[1] \n" "fmla v7.4s, v10.4s, %22.s[1] \n" "fmla v12.4s, v10.4s, %18.s[1] \n" "fmla v13.4s, v10.4s, %21.s[1] \n" "prfm pldl1keep, [%7, #256] \n" "ld1 {v8.4s, v9.4s}, [%7] \n" // r2 "add %7, %7, #16 \n" "fmla v6.4s, v11.4s, %19.s[2] \n" "fmla v7.4s, v11.4s, %22.s[2] \n" "fmla v12.4s, v11.4s, %18.s[2] \n" "fmla v13.4s, v11.4s, %21.s[2] \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "fmla v6.4s, v8.4s, %20.s[0] \n" "fmla v7.4s, v8.4s, %23.s[0] \n" "fmla v12.4s, v8.4s, %19.s[0] \n" "fmla v13.4s, v8.4s, %22.s[0] \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "fmla v6.4s, v10.4s, %20.s[1] \n" "fmla v7.4s, v10.4s, %23.s[1] \n" "fmla v12.4s, v10.4s, %19.s[1] \n" "fmla v13.4s, v10.4s, %22.s[1] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v8.4s, v9.4s}, [%5] \n" // r0 "add %5, %5, #16 \n" "fmla v6.4s, v11.4s, %20.s[2] \n" "fmla v7.4s, v11.4s, %23.s[2] \n" "fmla v12.4s, v11.4s, %19.s[2] \n" "fmla v13.4s, v11.4s, %22.s[2] \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v14.4s, v15.4s}, [%8] \n" // r3 "add %8, %8, #16 \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v7.4s}, [%2], #16 \n" "ext v11.16b, v14.16b, v15.16b, #8 \n" "st1 {v12.4s}, [%3], #16 \n" "st1 {v13.4s}, [%4], #16 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" "sub %5, %5, #16 \n" "sub %8, %8, #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr0n), // %3 "=r"(outptr1n), // %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr0n), "4"(outptr1n), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k00), // %18 "w"(_k03), // %19 "w"(_k06), // %20 "w"(_k10), // %21 "w"(_k13), // %22 "w"(_k16) // %23 : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else if 
(nn > 0) { asm volatile( "pld [%5, #192] \n" "vld1.f32 {d16-d18}, [%5 :64] \n" // r0 "add %5, #16 \n" "pld [%8, #192] \n" "vld1.f32 {d28-d30}, [%8] \n" // r3 "add %8, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q14, q15, #2 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1 :64] \n" // _sum0 "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2 :64] \n" // _sum1 "vmla.f32 q6, q8, %e18[0] \n" "vmla.f32 q7, q8, %e21[0] \n" "pld [%3, #128] \n" "vld1.f32 {d24-d25}, [%3] \n" // _sum0n "pld [%4, #128] \n" "vld1.f32 {d26-d27}, [%4] \n" // _sum1n "vmla.f32 q12, q14, %e20[0] \n" "vmla.f32 q13, q14, %e23[0] \n" "vext.32 q8, q8, q9, #2 \n" "vext.32 q9, q14, q15, #1 \n" "vmla.f32 q6, q10, %e18[1] \n" "vmla.f32 q7, q10, %e21[1] \n" "vmla.f32 q12, q11, %f20[0] \n" "vmla.f32 q13, q11, %f23[0] \n" "pld [%6, #192] \n" "vld1.f32 {d28-d30}, [%6] \n" // r1 "add %6, #16 \n" "vmla.f32 q6, q8, %f18[0] \n" "vmla.f32 q7, q8, %f21[0] \n" "vmla.f32 q12, q9, %e20[1] \n" "vmla.f32 q13, q9, %e23[1] \n" "vext.32 q10, q14, q15, #1 \n" "vmla.f32 q6, q14, %e19[0] \n" "vmla.f32 q7, q14, %e22[0] \n" "vmla.f32 q12, q14, %e18[0] \n" "vmla.f32 q13, q14, %e21[0] \n" "vext.32 q11, q14, q15, #2 \n" "vmla.f32 q6, q10, %e19[1] \n" "vmla.f32 q7, q10, %e22[1] \n" "vmla.f32 q12, q10, %e18[1] \n" "vmla.f32 q13, q10, %e21[1] \n" "pld [%7, #192] \n" "vld1.f32 {d16-d18}, [%7 :64] \n" // r2 "add %7, #16 \n" "vmla.f32 q6, q11, %f19[0] \n" "vmla.f32 q7, q11, %f22[0] \n" "vmla.f32 q12, q11, %f18[0] \n" "vmla.f32 q13, q11, %f21[0] \n" "vext.32 q10, q8, q9, #1 \n" "vmla.f32 q6, q8, %e20[0] \n" "vmla.f32 q7, q8, %e23[0] \n" "vmla.f32 q12, q8, %e19[0] \n" "vmla.f32 q13, q8, %e22[0] \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q6, q10, %e20[1] \n" "vmla.f32 q7, q10, %e23[1] \n" "vmla.f32 q12, q10, %e19[1] \n" "vmla.f32 q13, q10, %e22[1] \n" "pld [%5, #192] \n" "vld1.f32 {d16-d18}, [%5 :64] \n" // r0 "add %5, #16 \n" "vmla.f32 q6, q11, %f20[0] \n" "vmla.f32 q7, q11, %f23[0] \n" "vmla.f32 q12, q11, %f19[0] \n" "vmla.f32 
q13, q11, %f22[0] \n" "pld [%8, #192] \n" "vld1.f32 {d28-d30}, [%8] \n" // r3 "add %8, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vst1.f32 {d12-d13}, [%1 : 64]!\n" "vst1.f32 {d14-d15}, [%2 : 64]!\n" "vext.32 q11, q14, q15, #2 \n" "vst1.f32 {d24-d25}, [%3]! \n" "vst1.f32 {d26-d27}, [%4]! \n" "subs %0, #1 \n" "bne 0b \n" "sub %5, #16 \n" "sub %8, #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr0n), // %3 "=r"(outptr1n), // %4 "=r"(r0), // %5 "=r"(r1), // %6 "=r"(r2), // %7 "=r"(r3) // %8 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr0n), "4"(outptr1n), "5"(r0), "6"(r1), "7"(r2), "8"(r3), "w"(_k00), // %18 "w"(_k03), // %19 "w"(_k06), // %20 "w"(_k10), // %21 "w"(_k13), // %22 "w"(_k16) // %23 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _sum0 = vmulq_f32(_r00, _k00); float32x4_t _sum1 = vmulq_f32(_r00, _k10); _sum0 = vmlaq_f32(_sum0, _r10, _k03); _sum1 = vmlaq_f32(_sum1, _r10, _k13); _sum0 = vmlaq_f32(_sum0, _r20, _k06); _sum1 = vmlaq_f32(_sum1, _r20, _k16); float32x4_t _sum0n = vmulq_f32(_r10, _k00); float32x4_t _sum1n = vmulq_f32(_r10, _k10); _sum0n = vmlaq_f32(_sum0n, _r20, _k03); _sum1n = vmlaq_f32(_sum1n, _r20, _k13); _sum0n = vmlaq_f32(_sum0n, _r30, _k06); _sum1n = vmlaq_f32(_sum1n, _r30, _k16); _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3); _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3); _sum0n = vsetq_lane_f32(*outptr0n, _sum0n, 3); _sum1n = vsetq_lane_f32(*outptr1n, _sum1n, 3); #if __aarch64__ *outptr0 = vaddvq_f32(_sum0); *outptr1 = vaddvq_f32(_sum1); *outptr0n = vaddvq_f32(_sum0n); *outptr1n = vaddvq_f32(_sum1n); #else float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1)); 
float32x2_t _ss0n = vadd_f32(vget_low_f32(_sum0n), vget_high_f32(_sum0n)); float32x2_t _ss1n = vadd_f32(vget_low_f32(_sum1n), vget_high_f32(_sum1n)); float32x2_t _ss01 = vpadd_f32(_ss0, _ss1); float32x2_t _ss01n = vpadd_f32(_ss0n, _ss1n); *outptr0 = vget_lane_f32(_ss01, 0); *outptr1 = vget_lane_f32(_ss01, 1); *outptr0n = vget_lane_f32(_ss01n, 0); *outptr1n = vget_lane_f32(_ss01n, 1); #endif // __aarch64__ #else float sum0 = 0.f; float sum0n = 0.f; float sum1 = 0.f; float sum1n = 0.f; sum0 += r0[0] * k0[0]; sum0 += r0[1] * k0[1]; sum0 += r0[2] * k0[2]; sum0 += r1[0] * k0[3]; sum0 += r1[1] * k0[4]; sum0 += r1[2] * k0[5]; sum0 += r2[0] * k0[6]; sum0 += r2[1] * k0[7]; sum0 += r2[2] * k0[8]; sum1 += r0[0] * k1[0]; sum1 += r0[1] * k1[1]; sum1 += r0[2] * k1[2]; sum1 += r1[0] * k1[3]; sum1 += r1[1] * k1[4]; sum1 += r1[2] * k1[5]; sum1 += r2[0] * k1[6]; sum1 += r2[1] * k1[7]; sum1 += r2[2] * k1[8]; sum0n += r1[0] * k0[0]; sum0n += r1[1] * k0[1]; sum0n += r1[2] * k0[2]; sum0n += r2[0] * k0[3]; sum0n += r2[1] * k0[4]; sum0n += r2[2] * k0[5]; sum0n += r3[0] * k0[6]; sum0n += r3[1] * k0[7]; sum0n += r3[2] * k0[8]; sum1n += r1[0] * k1[0]; sum1n += r1[1] * k1[1]; sum1n += r1[2] * k1[2]; sum1n += r2[0] * k1[3]; sum1n += r2[1] * k1[4]; sum1n += r2[2] * k1[5]; sum1n += r3[0] * k1[6]; sum1n += r3[1] * k1[7]; sum1n += r3[2] * k1[8]; *outptr0 += sum0; *outptr1 += sum1; *outptr0n += sum0n; *outptr1n += sum1n; #endif // __ARM_NEON r0++; r1++; r2++; r3++; outptr0++; outptr1++; outptr0n++; outptr1n++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr0 += outw; outptr1 += outw; outptr0n += outw; outptr1n += outw; } for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4s, v9.4s}, [%3] \n" // r0 "add %3, %3, #16 \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v6.4s}, [%1] \n" // _sum0 "prfm pldl1keep, [%2, 
#128] \n" "ld1 {v7.4s}, [%2] \n" // _sum1 "fmul v14.4s, v8.4s, %12.s[0] \n" "fmul v15.4s, v8.4s, %15.s[0] \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "fmla v6.4s, v10.4s, %12.s[1] \n" "fmla v7.4s, v10.4s, %15.s[1] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v8.4s, v9.4s}, [%4] \n" // r1 "add %4, %4, #16 \n" "fmla v14.4s, v11.4s, %12.s[2] \n" "fmla v15.4s, v11.4s, %15.s[2] \n" "fmla v6.4s, v8.4s, %13.s[0] \n" "fmla v7.4s, v8.4s, %16.s[0] \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "fmla v14.4s, v10.4s, %13.s[1] \n" "fmla v15.4s, v10.4s, %16.s[1] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v8.4s, v9.4s}, [%5] \n" // r2 "add %5, %5, #16 \n" "fmla v6.4s, v11.4s, %13.s[2] \n" "fmla v7.4s, v11.4s, %16.s[2] \n" "fmla v14.4s, v8.4s, %14.s[0] \n" "fmla v15.4s, v8.4s, %17.s[0] \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "fmla v6.4s, v10.4s, %14.s[1] \n" "fmla v7.4s, v10.4s, %17.s[1] \n" "fmla v14.4s, v11.4s, %14.s[2] \n" "fmla v15.4s, v11.4s, %17.s[2] \n" "fadd v6.4s, v6.4s, v14.4s \n" "fadd v7.4s, v7.4s, v15.4s \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v7.4s}, [%2], #16 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(r0), "4"(r1), "5"(r2), "w"(_k00), // %12 "w"(_k03), // %13 "w"(_k06), // %14 "w"(_k10), // %15 "w"(_k13), // %16 "w"(_k16) // %17 : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else if (nn > 0) { asm volatile( "0: \n" "pld [%3, #192] \n" "vld1.f32 {d16-d18}, [%3] \n" // r0 "add %3, #16 \n" "pld [%1, #128] \n" "vld1.f32 {d12-d13}, [%1] \n" // _sum0 "pld [%2, #128] \n" "vld1.f32 {d14-d15}, [%2] \n" // _sum1 "vmul.f32 q14, q8, %e12[0] \n" "vmul.f32 q15, q8, %e15[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q6, q10, %e12[1] \n" "vmla.f32 q7, q10, %e15[1] \n" "pld 
[%4, #192] \n" "vld1.f32 {d16-d18}, [%4] \n" // r1 "add %4, #16 \n" "vmla.f32 q14, q11, %f12[0] \n" "vmla.f32 q15, q11, %f15[0] \n" "vmla.f32 q6, q8, %e13[0] \n" "vmla.f32 q7, q8, %e16[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q14, q10, %e13[1] \n" "vmla.f32 q15, q10, %e16[1] \n" "pld [%5, #192] \n" "vld1.f32 {d16-d18}, [%5] \n" // r2 "add %5, #16 \n" "vmla.f32 q6, q11, %f13[0] \n" "vmla.f32 q7, q11, %f16[0] \n" "vmla.f32 q14, q8, %e14[0] \n" "vmla.f32 q15, q8, %e17[0] \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "vmla.f32 q6, q10, %e14[1] \n" "vmla.f32 q7, q10, %e17[1] \n" "vmla.f32 q14, q11, %f14[0] \n" "vmla.f32 q15, q11, %f17[0] \n" "vadd.f32 q6, q6, q14 \n" "vadd.f32 q7, q7, q15 \n" "vst1.f32 {d12-d13}, [%1]! \n" "vst1.f32 {d14-d15}, [%2]! \n" "subs %0, #1 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(r0), "4"(r1), "5"(r2), "w"(_k00), // %12 "w"(_k03), // %13 "w"(_k06), // %14 "w"(_k10), // %15 "w"(_k13), // %16 "w"(_k16) // %17 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum0 = vmulq_f32(_r00, _k00); float32x4_t _sum1 = vmulq_f32(_r00, _k10); _sum0 = vmlaq_f32(_sum0, _r10, _k03); _sum1 = vmlaq_f32(_sum1, _r10, _k13); _sum0 = vmlaq_f32(_sum0, _r20, _k06); _sum1 = vmlaq_f32(_sum1, _r20, _k16); _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3); _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3); #if __aarch64__ *outptr0 = vaddvq_f32(_sum0); *outptr1 = vaddvq_f32(_sum1); #else float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1)); float32x2_t _ss01 = vpadd_f32(_ss0, _ss1); *outptr0 
= vget_lane_f32(_ss01, 0); *outptr1 = vget_lane_f32(_ss01, 1); #endif // __aarch64__ #else float sum0 = 0.f; float sum1 = 0.f; sum0 += r0[0] * k0[0]; sum0 += r0[1] * k0[1]; sum0 += r0[2] * k0[2]; sum0 += r1[0] * k0[3]; sum0 += r1[1] * k0[4]; sum0 += r1[2] * k0[5]; sum0 += r2[0] * k0[6]; sum0 += r2[1] * k0[7]; sum0 += r2[2] * k0[8]; sum1 += r0[0] * k1[0]; sum1 += r0[1] * k1[1]; sum1 += r0[2] * k1[2]; sum1 += r1[0] * k1[3]; sum1 += r1[1] * k1[4]; sum1 += r1[2] * k1[5]; sum1 += r2[0] * k1[6]; sum1 += r2[1] * k1[7]; sum1 += r2[2] * k1[8]; *outptr0 += sum0; *outptr1 += sum1; #endif // __ARM_NEON r0++; r1++; r2++; outptr0++; outptr1++; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9; k1 += 9; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? bias[p] : 0.f; out.fill(bias0); const float* kernel0 = kernel + p * inch * 9; for (int q = 0; q < inch; q++) { float* outptr = out; float* outptr2 = outptr + outw; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(kernel0); float32x4_t _k3456 = vld1q_f32(kernel0 + 3); float32x4_t _k6789 = vld1q_f32(kernel0 + 6); #else const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; #endif // __ARM_NEON int i = 0; for (; i + 1 < outh; i += 2) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%3, #256] \n" "ld1 {v9.4s, v10.4s}, [%3] \n" // r0 "add %3, %3, #16 \n" "ext v11.16b, v9.16b, v10.16b, #4 \n" "ext v12.16b, v9.16b, v10.16b, #8 \n" "0: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v7.4s}, [%1] \n" // _sum "fmla v7.4s, v9.4s, %14.s[0] \n" "fmul v6.4s, v11.4s, %14.s[1] \n" "fmul v13.4s, v12.4s, %14.s[2] \n" "prfm 
pldl1keep, [%4, #256] \n" "ld1 {v9.4s, v10.4s}, [%4] \n" // r1 "add %4, %4, #16 \n" "fmla v7.4s, v9.4s, %15.s[0] \n" "ext v11.16b, v9.16b, v10.16b, #4 \n" "ext v12.16b, v9.16b, v10.16b, #8 \n" "fmla v6.4s, v11.4s, %15.s[1] \n" "fmla v13.4s, v12.4s, %15.s[2] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v8.4s}, [%2] \n" // _sum2 "fmla v8.4s, v9.4s, %14.s[0] \n" "fmul v14.4s, v11.4s, %14.s[1] \n" "fmul v15.4s, v12.4s, %14.s[2] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v9.4s, v10.4s}, [%5] \n" // r2 "add %5, %5, #16 \n" "fmla v7.4s, v9.4s, %16.s[0] \n" "ext v11.16b, v9.16b, v10.16b, #4 \n" "ext v12.16b, v9.16b, v10.16b, #8 \n" "fmla v6.4s, v11.4s, %16.s[1] \n" "fmla v13.4s, v12.4s, %16.s[2] \n" "fmla v8.4s, v9.4s, %15.s[0] \n" "fmla v14.4s, v11.4s, %15.s[1] \n" "fmla v15.4s, v12.4s, %15.s[2] \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v9.4s, v10.4s}, [%6] \n" // r3 "add %6, %6, #16 \n" "fmla v8.4s, v9.4s, %16.s[0] \n" "ext v11.16b, v9.16b, v10.16b, #4 \n" "ext v12.16b, v9.16b, v10.16b, #8 \n" "fmla v14.4s, v11.4s, %16.s[1] \n" "fmla v15.4s, v12.4s, %16.s[2] \n" "fadd v7.4s, v7.4s, v6.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v9.4s, v10.4s}, [%3] \n" // r0 "fadd v8.4s, v8.4s, v14.4s \n" "fadd v7.4s, v7.4s, v13.4s \n" "fadd v8.4s, v8.4s, v15.4s \n" "ext v11.16b, v9.16b, v10.16b, #4 \n" "ext v12.16b, v9.16b, v10.16b, #8 \n" "add %3, %3, #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v8.4s}, [%2], #16 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" "sub %3, %3, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3) // %6 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "w"(_k0123), // %14 "w"(_k3456), // %15 "w"(_k6789) // %16 : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else if (nn > 0) { asm volatile( "pld [%3, #192] \n" "vld1.f32 {d18-d20}, [%3 :64] \n" // r0 "add %3, #16 \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "0: \n" "pld 
[%1, #128] \n" "vld1.f32 {d14-d15}, [%1 :64] \n" // _sum "vmla.f32 q7, q9, %e14[0] \n" "vmul.f32 q6, q11, %e14[1] \n" "vmul.f32 q13, q12, %f14[0] \n" "pld [%4, #192] \n" "vld1.f32 {d18-d20}, [%4] \n" // r1 "add %4, #16 \n" "vmla.f32 q7, q9, %e15[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q6, q11, %e15[1] \n" "vmla.f32 q13, q12, %f15[0] \n" "pld [%2, #128] \n" "vld1.f32 {d16-d17}, [%2] \n" // _sum2 "vmla.f32 q8, q9, %e14[0] \n" "vmul.f32 q14, q11, %e14[1] \n" "vmul.f32 q15, q12, %f14[0] \n" "pld [%5, #192] \n" "vld1.f32 {d18-d20}, [%5 :64] \n" // r2 "add %5, #16 \n" "vmla.f32 q7, q9, %e16[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q6, q11, %e16[1] \n" "vmla.f32 q13, q12, %f16[0] \n" "vmla.f32 q8, q9, %e15[0] \n" "vmla.f32 q14, q11, %e15[1] \n" "vmla.f32 q15, q12, %f15[0] \n" "pld [%6, #192] \n" "vld1.f32 {d18-d20}, [%6] \n" // r3 "add %6, #16 \n" "vmla.f32 q8, q9, %e16[0] \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "vmla.f32 q14, q11, %e16[1] \n" "vmla.f32 q15, q12, %f16[0] \n" "vadd.f32 q7, q7, q6 \n" "pld [%3, #192] \n" "vld1.f32 {d18-d20}, [%3 :64] \n" // r0 "vadd.f32 q8, q8, q14 \n" "vadd.f32 q7, q7, q13 \n" "vadd.f32 q8, q8, q15 \n" "vext.32 q11, q9, q10, #1 \n" "vext.32 q12, q9, q10, #2 \n" "add %3, #16 \n" "vst1.f32 {d14-d15}, [%1]! \n" "vst1.f32 {d16-d17}, [%2]! 
\n" "subs %0, #1 \n" "bne 0b \n" "sub %3, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3) // %6 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "w"(_k0123), // %14 "w"(_k3456), // %15 "w"(_k6789) // %16 : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r30 = vld1q_f32(r3); float32x4_t _sum = vmulq_f32(_r00, _k0123); _sum = vmlaq_f32(_sum, _r10, _k3456); _sum = vmlaq_f32(_sum, _r20, _k6789); float32x4_t _sum2 = vmulq_f32(_r10, _k0123); _sum2 = vmlaq_f32(_sum2, _r20, _k3456); _sum2 = vmlaq_f32(_sum2, _r30, _k6789); _sum = vsetq_lane_f32(*outptr, _sum, 3); _sum2 = vsetq_lane_f32(*outptr2, _sum2, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); *outptr2 = vaddvq_f32(_sum2); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2)); float32x2_t _sss2 = vpadd_f32(_ss, _ss2); *outptr = vget_lane_f32(_sss2, 0); *outptr2 = vget_lane_f32(_sss2, 1); #endif // __aarch64__ #else float sum = 0; float sum2 = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; *outptr += sum; *outptr2 += sum2; #endif r0++; r1++; r2++; r3++; outptr++; outptr2++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 
3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #256] \n" "ld1 {v8.4s, v9.4s}, [%2] \n" // r0 "add %2, %2, #16 \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "0: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v7.4s}, [%1] \n" // _sum "fmla v7.4s, v8.4s, %10.s[0] \n" "fmul v13.4s, v10.4s, %10.s[1] \n" "fmul v14.4s, v11.4s, %10.s[2] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v8.4s, v9.4s}, [%3] \n" // r1 "add %3, %3, #16 \n" "fmla v7.4s, v8.4s, %11.s[0] \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "fmla v13.4s, v10.4s, %11.s[1] \n" "fmla v14.4s, v11.4s, %11.s[2] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v8.4s, v9.4s}, [%4] \n" // r2 "add %4, %4, #16 \n" "fmla v7.4s, v8.4s, %12.s[0] \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "fmla v13.4s, v10.4s, %12.s[1] \n" "fmla v14.4s, v11.4s, %12.s[2] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v8.4s, v9.4s}, [%2] \n" // r0 "add %2, %2, #16 \n" "fadd v7.4s, v7.4s, v13.4s \n" "fadd v7.4s, v7.4s, v14.4s \n" "ext v10.16b, v8.16b, v9.16b, #4 \n" "ext v11.16b, v8.16b, v9.16b, #8 \n" "st1 {v7.4s}, [%1], #16 \n" "subs %w0, %w0, #1 \n" "bne 0b \n" "sub %2, %2, #16 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k3456), // %11 "w"(_k6789) // %12 : "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else if (nn > 0) { asm volatile( "pld [%2, #192] \n" "vld1.f32 {d16-d18}, [%2] \n" // r0 "add %2, #16 \n" "vext.32 q10, q8, q9, #1 \n" "vext.32 q11, q8, q9, #2 \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d14-d15}, [%1] \n" // _sum "vmla.f32 q7, q8, %e10[0] \n" "vmul.f32 q13, q10, %e10[1] \n" "vmul.f32 q14, q11, %f10[0] \n" "pld [%3, #192] \n" "vld1.f32 {d16-d18}, [%3] \n" // r1 "add %3, #16 \n" "vmla.f32 q7, q8, %e11[0] \n" "vext.32 
q10, q8, q9, #1 \n"
                "vext.32 q11, q8, q9, #2 \n"
                "vmla.f32 q13, q10, %e11[1] \n"
                "vmla.f32 q14, q11, %f11[0] \n"

                // preload and load r2, then advance its pointer
                "pld [%4, #192] \n"
                "vld1.f32 {d16-d18}, [%4] \n" // r2
                "add %4, #16 \n"

                "vmla.f32 q7, q8, %e12[0] \n"
                "vext.32 q10, q8, q9, #1 \n"
                "vext.32 q11, q8, q9, #2 \n"
                "vmla.f32 q13, q10, %e12[1] \n"
                "vmla.f32 q14, q11, %f12[0] \n"

                // speculatively load the next r0 block for the following iteration
                "pld [%2, #192] \n"
                "vld1.f32 {d16-d18}, [%2] \n" // r0
                "add %2, #16 \n"

                // combine the two partial accumulators into the output vector
                "vadd.f32 q7, q7, q13 \n"
                "vadd.f32 q7, q7, q14 \n"

                "vext.32 q10, q8, q9, #1 \n"
                "vext.32 q11, q8, q9, #2 \n"

                "vst1.f32 {d14-d15}, [%1]! \n"

                "subs %0, #1 \n"
                "bne 0b \n"

                // undo the speculative r0 advance made on the last iteration
                "sub %2, #16 \n"

                : "=r"(nn),     // %0
                "=r"(outptr), // %1
                "=r"(r0),     // %2
                "=r"(r1),     // %3
                "=r"(r2)      // %4
                : "0"(nn),
                "1"(outptr),
                "2"(r0),
                "3"(r1),
                "4"(r2),
                "w"(_k0123), // %10
                "w"(_k3456), // %11
                "w"(_k6789)  // %12
                : "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
        }
#endif // __aarch64__
#endif // __ARM_NEON
        // tail columns (outw % 4): one output element per pass
        for (; remain > 0; remain--)
        {
#if __ARM_NEON
            float32x4_t _r00 = vld1q_f32(r0);
            float32x4_t _r10 = vld1q_f32(r1);
            float32x4_t _r20 = vld1q_f32(r2);

            // per-row multiply-accumulate against the three kernel rows;
            // only lanes 0..2 of each _k vector carry kernel taps
            float32x4_t _sum = vmulq_f32(_r00, _k0123);
            _sum = vmlaq_f32(_sum, _r10, _k3456);
            _sum = vmlaq_f32(_sum, _r20, _k6789);

            // put the previous output value into lane 3 so the horizontal
            // add below accumulates into the destination
            _sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
            *outptr = vaddvq_f32(_sum);
#else
            float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
            _ss = vpadd_f32(_ss, _ss);
            *outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
            float sum = 0;

            sum += r0[0] * k0[0];
            sum += r0[1] * k0[1];
            sum += r0[2] * k0[2];
            sum += r1[0] * k1[0];
            sum += r1[1] * k1[1];
            sum += r1[2] * k1[2];
            sum += r2[0] * k2[0];
            sum += r2[1] * k2[1];
            sum += r2[2] * k2[2];

            *outptr += sum;
#endif
            r0++;
            r1++;
            r2++;
            outptr++;
        }

        // step over the 2-column overlap so the rows start at the next output row
        r0 += 2;
        r1 += 2;
        r2 += 2;
    }

    kernel0 += 9;
}
}
}

// Transform 3x3 convolution kernels for the Winograd F(6x6, 3x3) fast
// convolution and interleave them for the 4-output-channel blocked compute
// path ("winograd4" layout).
//
// Each 3x3 kernel is expanded into an 8x8 tile via the two-sided transform
// G * k * G^T, where G is the 8x3 matrix `ktm` below; the tiles are written
// into an intermediate kernel_tm (8*8 x inch x outch), then repacked into
// kernel_tm2 so that groups of 4 output channels (with 4/2/1 input channels
// per step on aarch64) are contiguous for the SIMD loads of the compute
// kernel. On return, kernel_tm holds the repacked layout.
static void conv3x3s1_winograd64_transform_kernel_neon(const Mat& kernel, Mat& kernel_tm, int inch, int outch, const Option& opt)
{
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix of the Winograd F(6x6, 3x3) kernel transform, one row per
    // output row of the 8x8 tile
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : tmp = ktm * k   (8x3 intermediate, one column per kernel row)
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : kernel_tm0 = tmp * ktm^T  (final 8x8 tile, row-major)
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // optimized layout for winograd4
    // interleave weights
    int nn_outch = outch >> 2;
    int remain_outch_start = nn_outch << 2;

    Mat kernel_tm2(8 * 8 * inch * 4, 1, nn_outch + (outch % 4 + 3) / 4);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 4;

        float* ktm2 = kernel_tm2.channel(pp);

        const Mat kernel0_tm = kernel_tm.channel(p);
        const Mat kernel1_tm = kernel_tm.channel(p + 1);
        const Mat kernel2_tm = kernel_tm.channel(p + 2);
        const Mat kernel3_tm = kernel_tm.channel(p + 3);

        int q = 0;

#if __ARM_NEON && __aarch64__
        // interleave 4 input channels x 4 output channels per step;
        // the asm post-increments every source pointer and ktm2 as it copies
        for (; q + 3 < inch; q += 4)
        {
            const float* k00 = kernel0_tm.row(q);
            const float* k01 = kernel0_tm.row(q + 1);
            const float* k02 = kernel0_tm.row(q + 2);
            const float* k03 = kernel0_tm.row(q + 3);
            const float* k10 = kernel1_tm.row(q);
            const float* k11 = kernel1_tm.row(q + 1);
            const float* k12 = kernel1_tm.row(q + 2);
            const float* k13 = kernel1_tm.row(q + 3);
            const float* k20 = kernel2_tm.row(q);
            const float* k21 = kernel2_tm.row(q + 1);
            const float* k22 = kernel2_tm.row(q + 2);
            const float* k23 = kernel2_tm.row(q + 3);
            const float* k30 = kernel3_tm.row(q);
            const float* k31 = kernel3_tm.row(q + 1);
            const float* k32 = kernel3_tm.row(q + 2);
            const float* k33 = kernel3_tm.row(q + 3);

            for (int r = 0; r < 16; r++)
            {
                // split into two asm blocks for gcc reject over 30 oprands :(
                asm volatile(
                    "ld1 {v0.4s}, [%1], #16 \n"
                    "ld1 {v1.4s}, [%2], #16 \n"
                    "ld1 {v2.4s}, [%3], #16 \n"
                    "ld1 {v3.4s}, [%4], #16 \n"
                    "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
                    "ld1 {v0.4s}, [%5], #16 \n"
                    "ld1 {v1.4s}, [%6], #16 \n"
                    "ld1 {v2.4s}, [%7], #16 \n"
                    "ld1 {v3.4s}, [%8], #16 \n"
                    "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
                    : "=r"(ktm2), // %0
                    "=r"(k00), // %1
                    "=r"(k01), // %2
                    "=r"(k02), // %3
                    "=r"(k03), // %4
                    "=r"(k10), // %5
                    "=r"(k11), // %6
                    "=r"(k12), // %7
                    "=r"(k13) // %8
                    : "0"(ktm2),
                    "1"(k00),
                    "2"(k01),
                    "3"(k02),
                    "4"(k03),
                    "5"(k10),
                    "6"(k11),
                    "7"(k12),
                    "8"(k13)
                    : "cc", "memory", "v0", "v1", "v2", "v3");
                asm volatile(
                    "ld1 {v0.4s}, [%1], #16 \n"
                    "ld1 {v1.4s}, [%2], #16 \n"
                    "ld1 {v2.4s}, [%3], #16 \n"
                    "ld1 {v3.4s}, [%4], #16 \n"
                    "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
                    "ld1 {v0.4s}, [%5], #16 \n"
                    "ld1 {v1.4s}, [%6], #16 \n"
                    "ld1 {v2.4s}, [%7], #16 \n"
                    "ld1 {v3.4s}, [%8], #16 \n"
                    "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
                    : "=r"(ktm2), // %0
                    "=r"(k20), // %1
                    "=r"(k21), // %2
                    "=r"(k22), // %3
                    "=r"(k23), // %4
                    "=r"(k30), // %5
                    "=r"(k31), // %6
                    "=r"(k32), // %7
                    "=r"(k33) // %8
                    : "0"(ktm2),
                    "1"(k20),
                    "2"(k21),
                    "3"(k22),
                    "4"(k23),
                    "5"(k30),
                    "6"(k31),
                    "7"(k32),
                    "8"(k33)
                    : "cc", "memory", "v0", "v1", "v2", "v3");
            }
        }
#endif // __ARM_NEON && __aarch64__
        // interleave 2 input channels x 4 output channels per step
        for (; q + 1 < inch; q += 2)
        {
            const float* k00 = kernel0_tm.row(q);
            const float* k01 = kernel0_tm.row(q + 1);
            const float* k10 = kernel1_tm.row(q);
            const float* k11 = kernel1_tm.row(q + 1);
            const float* k20 = kernel2_tm.row(q);
            const float* k21 = kernel2_tm.row(q + 1);
            const float* k30 = kernel3_tm.row(q);
            const float* k31 = kernel3_tm.row(q + 1);

            for (int r = 0; r < 16; r++)
            {
#if __ARM_NEON
#if __aarch64__
                asm volatile(
                    "ld1 {v0.4s}, [%1], #16 \n"
                    "ld1 {v1.4s}, [%2], #16 \n"
                    "st1 {v0.4s, v1.4s}, [%0], #32 \n"
                    "ld1 {v0.4s}, [%3], #16 \n"
                    "ld1 {v1.4s}, [%4], #16 \n"
                    "st1 {v0.4s, v1.4s}, [%0], #32 \n"
                    "ld1 {v0.4s}, [%5], #16 \n"
                    "ld1 {v1.4s}, [%6], #16 \n"
                    "st1 {v0.4s, v1.4s}, [%0], #32 \n"
                    "ld1 {v0.4s}, [%7], #16 \n"
                    "ld1 {v1.4s}, [%8], #16 \n"
                    "st1 {v0.4s, v1.4s}, [%0], #32 \n"
                    : "=r"(ktm2), // %0
                    "=r"(k00), // %1
                    "=r"(k01), // %2
                    "=r"(k10), // %3
                    "=r"(k11), // %4
                    "=r"(k20), // %5
                    "=r"(k21), // %6
                    "=r"(k30), // %7
                    "=r"(k31) // %8
                    : "0"(ktm2),
                    "1"(k00),
                    "2"(k01),
                    "3"(k10),
                    "4"(k11),
                    "5"(k20),
                    "6"(k21),
                    "7"(k30),
                    "8"(k31)
                    : "cc", "memory", "v0", "v1");
#else
                asm volatile(
                    "vld1.f32 {d0-d1}, [%1 :128]! \n"
                    "vld1.f32 {d2-d3}, [%2 :128]! \n"
                    "vst1.f32 {d0-d3}, [%0 :128]! \n"
                    "vld1.f32 {d0-d1}, [%3 :128]! \n"
                    "vld1.f32 {d2-d3}, [%4 :128]! \n"
                    "vst1.f32 {d0-d3}, [%0 :128]! \n"
                    "vld1.f32 {d0-d1}, [%5 :128]! \n"
                    "vld1.f32 {d2-d3}, [%6 :128]! \n"
                    "vst1.f32 {d0-d3}, [%0 :128]! \n"
                    "vld1.f32 {d0-d1}, [%7 :128]! \n"
                    "vld1.f32 {d2-d3}, [%8 :128]! \n"
                    "vst1.f32 {d0-d3}, [%0 :128]! \n"
                    : "=r"(ktm2), // %0
                    "=r"(k00), // %1
                    "=r"(k01), // %2
                    "=r"(k10), // %3
                    "=r"(k11), // %4
                    "=r"(k20), // %5
                    "=r"(k21), // %6
                    "=r"(k30), // %7
                    "=r"(k31) // %8
                    : "0"(ktm2),
                    "1"(k00),
                    "2"(k01),
                    "3"(k10),
                    "4"(k11),
                    "5"(k20),
                    "6"(k21),
                    "7"(k30),
                    "8"(k31)
                    : "cc", "memory", "q0", "q1");
#endif // __aarch64__
#else
                // scalar fallback: same interleave, pointers advanced manually
                for (int m = 0; m < 4; m++)
                {
                    ktm2[0 + m] = k00[m];
                    ktm2[4 + m] = k01[m];
                    ktm2[8 + m] = k10[m];
                    ktm2[12 + m] = k11[m];
                    ktm2[16 + m] = k20[m];
                    ktm2[20 + m] = k21[m];
                    ktm2[24 + m] = k30[m];
                    ktm2[28 + m] = k31[m];
                }

                k00 += 4;
                k01 += 4;
                k10 += 4;
                k11 += 4;
                k20 += 4;
                k21 += 4;
                k30 += 4;
                k31 += 4;
                ktm2 += 32;
#endif // __ARM_NEON
            }
        }
        // leftover single input channel x 4 output channels
        for (; q < inch; q++)
        {
            const float* k00 = kernel0_tm.row(q);
            const float* k10 = kernel1_tm.row(q);
            const float* k20 = kernel2_tm.row(q);
            const float* k30 = kernel3_tm.row(q);

            for (int r = 0; r < 16; r++)
            {
#if __ARM_NEON
#if __aarch64__
                asm volatile(
                    "ld1 {v0.4s}, [%1], #16 \n"
                    "ld1 {v1.4s}, [%2], #16 \n"
                    "st1 {v0.4s, v1.4s}, [%0], #32 \n"
                    "ld1 {v0.4s}, [%3], #16 \n"
                    "ld1 {v1.4s}, [%4], #16 \n"
                    "st1 {v0.4s, v1.4s}, [%0], #32 \n"
                    : "=r"(ktm2), // %0
                    "=r"(k00), // %1
                    "=r"(k10), // %2
                    "=r"(k20), // %3
                    "=r"(k30) // %4
                    : "0"(ktm2),
                    "1"(k00),
                    "2"(k10),
                    "3"(k20),
                    "4"(k30)
                    : "cc", "memory", "v0", "v1");
#else
                asm volatile(
                    "vld1.f32 {d0-d1}, [%1 :128]! \n"
                    "vld1.f32 {d2-d3}, [%2 :128]! \n"
                    "vst1.f32 {d0-d3}, [%0 :128]! \n"
                    "vld1.f32 {d0-d1}, [%3 :128]! \n"
                    "vld1.f32 {d2-d3}, [%4 :128]! \n"
                    "vst1.f32 {d0-d3}, [%0 :128]! \n"
                    : "=r"(ktm2), // %0
                    "=r"(k00), // %1
                    "=r"(k10), // %2
                    "=r"(k20), // %3
                    "=r"(k30) // %4
                    : "0"(ktm2),
                    "1"(k00),
                    "2"(k10),
                    "3"(k20),
                    "4"(k30)
                    : "cc", "memory", "q0", "q1");
#endif // __aarch64__
#else
                for (int m = 0; m < 4; m++)
                {
                    ktm2[0 + m] = k00[m];
                    ktm2[4 + m] = k10[m];
                    ktm2[8 + m] = k20[m];
                    ktm2[12 + m] = k30[m];
                }

                k00 += 4;
                k10 += 4;
                k20 += 4;
                k30 += 4;
                ktm2 += 16;
#endif // __ARM_NEON
            }
        }
    }

    // remaining output channels (outch % 4): copied one channel at a time
    // into the tail channel of kernel_tm2
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = remain_outch_start; p < outch; p++)
    {
        float* ktm2 = (float*)kernel_tm2.channel(nn_outch) + 8 * 8 * inch * (p - remain_outch_start);

        const Mat kernel0_tm = kernel_tm.channel(p);

        int q = 0;

        for (; q < inch; q++)
        {
            const float* k00 = kernel0_tm.row(q);

            for (int r = 0; r < 16; r++)
            {
#if __ARM_NEON
#if __aarch64__
                asm volatile(
                    "ld1 {v0.4s}, [%1], #16 \n"
                    "st1 {v0.4s}, [%0], #16 \n"
                    : "=r"(ktm2), // %0
                    "=r"(k00) // %1
                    : "0"(ktm2),
                    "1"(k00)
                    : "cc", "memory", "v0");
#else
                asm volatile(
                    "vld1.f32 {d0-d1}, [%1 :128]! \n"
                    "vst1.f32 {d0-d1}, [%0 :128]! \n"
                    : "=r"(ktm2), // %0
                    "=r"(k00) // %1
                    : "0"(ktm2),
                    "1"(k00)
                    : "cc", "memory", "q0");
#endif // __aarch64__
#else
                for (int m = 0; m < 4; m++)
                {
                    ktm2[m] = k00[m];
                }

                k00 += 4;
                ktm2 += 4;
#endif // __ARM_NEON
            }
        }
    }

    kernel_tm = kernel_tm2;
}

// Same Winograd F(6x6, 3x3) kernel transform as above, but repacked for the
// "winograd5" compute path: the 8x8 (=64) transformed elements become the
// row dimension, and within each element row the weights are interleaved per
// group of 8 (aarch64 only), 4 or 1 output channels across all input
// channels. On return, kernel_tm holds the repacked layout.
static void conv3x3s1_winograd64_transform_kernel_neon5(const Mat& kernel, Mat& kernel_tm, int inch, int outch, const Option& opt)
{
    kernel_tm.create(8 * 8, inch, outch);

    // G matrix of the Winograd F(6x6, 3x3) kernel transform
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            // h : tmp = ktm * k   (8x3 intermediate)
            float tmp[8][3];
            for (int i = 0; i < 8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : kernel_tm0 = tmp * ktm^T  (final 8x8 tile)
            for (int j = 0; j < 8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 8; i++)
                {
                    kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // optimized layout for winograd5
    // interleave weights
    // Mat kernel_tm2(8*8, inch, outch);
    // Mat kernel_tm2(inch, 64, outch);
#if __ARM_NEON && __aarch64__
    Mat kernel_tm2(8 * 4 * (inch / 4) + 8 * (inch % 4), 64, outch / 8 + (outch % 8) / 4 + outch % 4);
#else
    Mat kernel_tm2(4 * 4 * (inch / 4) + 4 * (inch % 4), 64, outch / 4 + outch % 4);
#endif

    int p = 0;
#if __aarch64__
    // blocks of 8 output channels: for each transformed element r,
    // lay out [oc0..oc7] contiguously for every input channel
    for (; p + 7 < outch; p += 8)
    {
        const Mat kernel0_tm = kernel_tm.channel(p);
        const Mat kernel1_tm = kernel_tm.channel(p + 1);
        const Mat kernel2_tm = kernel_tm.channel(p + 2);
        const Mat kernel3_tm = kernel_tm.channel(p + 3);
        const Mat kernel4_tm = kernel_tm.channel(p + 4);
        const Mat kernel5_tm = kernel_tm.channel(p + 5);
        const Mat kernel6_tm = kernel_tm.channel(p + 6);
        const Mat kernel7_tm = kernel_tm.channel(p + 7);

        Mat ktm2 = kernel_tm2.channel(p / 8);

        for (int r = 0; r < 64; r++)
        {
            float* ktm2p = ktm2.row(r);

            for (int q = 0; q < inch; q++)
            {
                const float* ktm0_0 = kernel0_tm.row(q);
                const float* ktm1_0 = kernel1_tm.row(q);
                const float* ktm2_0 = kernel2_tm.row(q);
                const float* ktm3_0 = kernel3_tm.row(q);
                const float* ktm4_0 = kernel4_tm.row(q);
                const float* ktm5_0 = kernel5_tm.row(q);
                const float* ktm6_0 = kernel6_tm.row(q);
                const float* ktm7_0 = kernel7_tm.row(q);

                ktm2p[0] = ktm0_0[r];
                ktm2p[1] = ktm1_0[r];
                ktm2p[2] = ktm2_0[r];
                ktm2p[3] = ktm3_0[r];
                ktm2p[4] = ktm4_0[r];
                ktm2p[5] = ktm5_0[r];
                ktm2p[6] = ktm6_0[r];
                ktm2p[7] = ktm7_0[r];

                ktm2p += 8;
            }
        }
    }
#endif // __aarch64__
    // blocks of 4 output channels
    for (; p + 3 < outch; p += 4)
    {
        const Mat kernel0_tm = kernel_tm.channel(p);
        const Mat kernel1_tm = kernel_tm.channel(p + 1);
        const Mat kernel2_tm = kernel_tm.channel(p + 2);
        const Mat kernel3_tm = kernel_tm.channel(p + 3);

#if __ARM_NEON && __aarch64__
        Mat ktm2 = kernel_tm2.channel(p / 8 + (p % 8) / 4);
#else
        Mat ktm2 = kernel_tm2.channel(p / 4);
#endif

        for (int r = 0; r < 64; r++)
        {
            float* ktm2p = ktm2.row(r);

            for (int q = 0; q < inch; q++)
            {
                const float* ktm0_0 = kernel0_tm.row(q);
                const float* ktm1_0 = kernel1_tm.row(q);
                const float* ktm2_0 = kernel2_tm.row(q);
                const float* ktm3_0 = kernel3_tm.row(q);

                ktm2p[0] = ktm0_0[r];
                ktm2p[1] = ktm1_0[r];
                ktm2p[2] = ktm2_0[r];
                ktm2p[3] = ktm3_0[r];

                ktm2p += 4;
            }
        }
    }
    // remaining single output channels
    for (; p < outch; p++)
    {
        const Mat kernel0_tm = kernel_tm.channel(p);

#if __ARM_NEON && __aarch64__
        Mat ktm2 = kernel_tm2.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
        Mat ktm2 = kernel_tm2.channel(p / 4 + p % 4);
#endif

        for (int r = 0; r < 64; r++)
        {
            float* ktm2p = ktm2.row(r);

            for (int q = 0; q < inch; q++)
            {
                const float* ktm0_0 = kernel0_tm.row(q);

                ktm2p[0] = ktm0_0[r];

                ktm2p += 1;
            }
        }
    }

    kernel_tm = kernel_tm2;
}

// Winograd F(6x6, 3x3) forward convolution, 4-channel-blocked compute path.
// NOTE(review): expects kernel_tm pre-transformed by one of the
// conv3x3s1_winograd64_transform_kernel_* helpers above — confirm which
// variant the caller pairs with this function.
static void conv3x3s1_winograd64_neon4(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 6n+2
    Mat bottom_blob_bordered = bottom_blob;

    outw = (outw + 5) / 6 * 6;
    outh = (outh + 5) / 6 * 6;

    w = outw + 2;
    h = outh + 2;
    Option opt_b = opt;
    opt_b.blob_allocator = opt.workspace_allocator;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);

    const float* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 6 * 8;
        int h_tm = outh / 6 * 8;

        bottom_blob_tm.create(4, 16 * w_tm / 8 * h_tm / 8, inch, 4u, opt.workspace_allocator);
        const int tiles = w_tm / 8 * h_tm / 8;

        // const float itm[8][8] = {
        //     {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
        //
        //     {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
        //     {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
        //
        //     {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
        //     {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
        //
        //     {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
        //     {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
        //
        //     {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
        // };

        // 0 = r00 - r06 + (r04 - r02) * 5.25
        // 7 = r07 - r01 + (r03 - r05) * 5.25

        // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
        // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

        // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
        // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

        // reuse r04 * 1.25
        // reuse r03 * 2.5
        // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
        // 6 = (r06 + (r02 - r04 * 1.25) * 4) -
(r01 * 2 - r03 * 2.5 + r05 * 0.5) #if __ARM_NEON const float coeff[8] = { 0.25f, 0.5f, -1.25f, 2.f, -2.5f, 4.f, 4.25f, 5.25f }; float32x4_t _coeff0 = vld1q_f32(coeff); float32x4_t _coeff1 = vld1q_f32(coeff + 4); #endif // __ARM_NEON #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { #if __ARM_NEON const float* r0 = img0.row(i * 6) + j * 6; const float* r1 = r0 + w; const float* r2 = r0 + w * 2; const float* r3 = r0 + w * 3; // the assembly block for armv7 input transform requires 13 general registers // old gcc may fail to allocate register on debug build without -fomit-frame-pointer // so, fallback to intrinsic version for armv7 debug build --- nihui #if __aarch64__ || !defined(NDEBUG) for (int m = 0; m + 3 < 8; m += 4) { float32x4_t _r0_0123 = vld1q_f32(r0); float32x4_t _r0_4567 = vld1q_f32(r0 + 4); float32x4_t _r1_0123 = vld1q_f32(r1); float32x4_t _r1_4567 = vld1q_f32(r1 + 4); float32x4_t _r2_0123 = vld1q_f32(r2); float32x4_t _r2_4567 = vld1q_f32(r2 + 4); float32x4_t _r3_0123 = vld1q_f32(r3); float32x4_t _r3_4567 = vld1q_f32(r3 + 4); float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123); float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567); float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123); float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567); // no vswp intrinsic :( float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0])); float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1])); float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0])); float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1])); float32x4_t _r_44 = 
vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0])); float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1])); float32x4_t _r_66 = vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0])); float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1])); float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66); float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11); float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22); float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55); float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[7][m], _tmp7); float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66); float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55); float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0); float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[2][m], _tmp2); float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0); float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1); float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(&tmp[3][m], _tmp3); vst1q_f32(&tmp[4][m], _tmp4); // reuse r04 * 1.25 // reuse r03 * 2.5 float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t 
_tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1); float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6 = vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(&tmp[5][m], _tmp5); vst1q_f32(&tmp[6][m], _tmp6); r0 += w * 4; r1 += w * 4; r2 += w * 4; r3 += w * 4; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; const float* t2 = tmp[2]; const float* t3 = tmp[3]; float* r0_tm0_0 = img0_tm.row(i * w_tm / 8 + j); float* r0_tm0_4 = img0_tm.row(i * w_tm / 8 + j + tiles); float* r0_tm1_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 2); float* r0_tm1_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 3); float* r0_tm2_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 4); float* r0_tm2_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 5); float* r0_tm3_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 6); float* r0_tm3_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 7); for (int m = 0; m + 3 < 8; m += 4) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0 + 4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1 + 4); float32x4_t _t2_0123 = vld1q_f32(t2); float32x4_t _t2_4567 = vld1q_f32(t2 + 4); float32x4_t _t3_0123 = vld1q_f32(t3); float32x4_t _t3_4567 = vld1q_f32(t3 + 4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123); float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567); // no vswp intrinsic :( float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0])); float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1])); float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0])); float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1])); float32x4_t _t_44 = 
vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0])); float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1])); float32x4_t _t_66 = vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0])); float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1])); float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66); float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11); float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22); float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55); float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1); r0_tm0_0[0] = vgetq_lane_f32(_r0_tm_0_0, 0); r0_tm1_0[0] = vgetq_lane_f32(_r0_tm_0_0, 1); r0_tm2_0[0] = vgetq_lane_f32(_r0_tm_0_0, 2); r0_tm3_0[0] = vgetq_lane_f32(_r0_tm_0_0, 3); r0_tm0_4[3] = vgetq_lane_f32(_r0_tm_4_3, 0); r0_tm1_4[3] = vgetq_lane_f32(_r0_tm_4_3, 1); r0_tm2_4[3] = vgetq_lane_f32(_r0_tm_4_3, 2); r0_tm3_4[3] = vgetq_lane_f32(_r0_tm_4_3, 3); float32x4_t _t_2_m_6 = vaddq_f32(_t_22, _t_66); float32x4_t _t_1_m_5 = vaddq_f32(_t_11, _t_55); float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_m_6, _t_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_m_5, _t_33, vget_high_f32(_coeff1), 0); float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b); r0_tm0_0[1] = vgetq_lane_f32(_r0_tm_0_1, 0); r0_tm1_0[1] = vgetq_lane_f32(_r0_tm_0_1, 1); r0_tm2_0[1] = vgetq_lane_f32(_r0_tm_0_1, 2); r0_tm3_0[1] = vgetq_lane_f32(_r0_tm_0_1, 3); r0_tm0_0[2] = vgetq_lane_f32(_r0_tm_0_2, 0); r0_tm1_0[2] = vgetq_lane_f32(_r0_tm_0_2, 1); r0_tm2_0[2] = vgetq_lane_f32(_r0_tm_0_2, 2); r0_tm3_0[2] = vgetq_lane_f32(_r0_tm_0_2, 3); float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0); float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, 
vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1); float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b); r0_tm0_0[3] = vgetq_lane_f32(_r0_tm_0_3, 0); r0_tm1_0[3] = vgetq_lane_f32(_r0_tm_0_3, 1); r0_tm2_0[3] = vgetq_lane_f32(_r0_tm_0_3, 2); r0_tm3_0[3] = vgetq_lane_f32(_r0_tm_0_3, 3); r0_tm0_4[0] = vgetq_lane_f32(_r0_tm_4_0, 0); r0_tm1_4[0] = vgetq_lane_f32(_r0_tm_4_0, 1); r0_tm2_4[0] = vgetq_lane_f32(_r0_tm_4_0, 2); r0_tm3_4[0] = vgetq_lane_f32(_r0_tm_4_0, 3); float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1); float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b); r0_tm0_4[1] = vgetq_lane_f32(_r0_tm_4_1, 0); r0_tm1_4[1] = vgetq_lane_f32(_r0_tm_4_1, 1); r0_tm2_4[1] = vgetq_lane_f32(_r0_tm_4_1, 2); r0_tm3_4[1] = vgetq_lane_f32(_r0_tm_4_1, 3); r0_tm0_4[2] = vgetq_lane_f32(_r0_tm_4_2, 0); r0_tm1_4[2] = vgetq_lane_f32(_r0_tm_4_2, 1); r0_tm2_4[2] = vgetq_lane_f32(_r0_tm_4_2, 2); r0_tm3_4[2] = vgetq_lane_f32(_r0_tm_4_2, 3); t0 += 8 * 4; t1 += 8 * 4; t2 += 8 * 4; t3 += 8 * 4; r0_tm0_0 += img0_tm.w * tiles * 2 * 4; r0_tm0_4 += img0_tm.w * tiles * 2 * 4; r0_tm1_0 += img0_tm.w * tiles * 2 * 4; r0_tm1_4 += img0_tm.w * tiles * 2 * 4; r0_tm2_0 += img0_tm.w * tiles * 2 * 4; r0_tm2_4 += img0_tm.w * tiles * 2 * 4; r0_tm3_0 += img0_tm.w * tiles * 2 * 4; r0_tm3_4 += img0_tm.w * tiles * 2 * 4; } #else // __aarch64__ float* t0 = tmp[0]; float* t1 = tmp[1]; float* t2 = tmp[2]; float* t3 = tmp[3]; float* t4 = 
tmp[4]; float* t5 = tmp[5]; float* t6 = tmp[6]; float* t7 = tmp[7]; int stepw = w * 4 * 4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%8], %26 \n" "vld1.f32 {d20-d23}, [%9], %26 \n" "vld1.f32 {d24-d27}, [%10], %26 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11], %26 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n" // q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n" // q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4-d5}, [%0]! \n" // tmp[0][m] "vmov q3, q7 \n" // use q7 "vadd.f32 q2, q13, q6 \n" // use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n" // use q7 "vadd.f32 q6, q12, q6 \n" // use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16-d17}, [%1]! \n" // tmp[1][m] "vmla.f32 q4, q6, %e25[1] \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18-d19}, [%2]! \n" // tmp[2][m] "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3]! \n" // tmp[3][m] "vst1.f32 {d18-d19}, [%4]! \n" // tmp[4][m] "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d4-d5}, [%5]! \n" // tmp[5][m] "vst1.f32 {d6-d7}, [%6]! \n" // tmp[6][m] "vst1.f32 {d12-d13}, [%7]! 
\n" // tmp[7][m] // loop1 "vld1.f32 {d16-d19}, [%8] \n" "vld1.f32 {d20-d23}, [%9] \n" "vld1.f32 {d24-d27}, [%10] \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n" // q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n" // q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4-d5}, [%0]! \n" // tmp[0][m] "vmov q3, q7 \n" // use q7 "vadd.f32 q2, q13, q6 \n" // use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n" // use q7 "vadd.f32 q6, q12, q6 \n" // use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16-d17}, [%1]! \n" // tmp[1][m] "vmla.f32 q4, q6, %e25[1] \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18-d19}, [%2]! \n" // tmp[2][m] "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3]! \n" // tmp[3][m] "vst1.f32 {d18-d19}, [%4]! \n" // tmp[4][m] "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d4-d5}, [%5]! \n" // tmp[5][m] "vst1.f32 {d6-d7}, [%6]! \n" // tmp[6][m] "vst1.f32 {d12-d13}, [%7]! 
\n" // tmp[7][m] : "=r"(t0), // %0 "=r"(t1), // %1 "=r"(t2), // %2 "=r"(t3), // %3 "=r"(t4), // %4 "=r"(t5), // %5 "=r"(t6), // %6 "=r"(t7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(r3) // %11 : "0"(t0), "1"(t1), "2"(t2), "3"(t3), "4"(t4), "5"(t5), "6"(t6), "7"(t7), "8"(r0), "9"(r1), "10"(r2), "11"(r3), "w"(_coeff0), // %24 "w"(_coeff1), // %25 "r"(stepw) // %26 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); t0 = tmp[0]; t1 = tmp[1]; t2 = tmp[2]; t3 = tmp[3]; float* r0_tm0_0 = img0_tm.row(i * w_tm / 8 + j); float* r0_tm0_4 = img0_tm.row(i * w_tm / 8 + j + tiles); float* r0_tm1_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 2); float* r0_tm1_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 3); float* r0_tm2_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 4); float* r0_tm2_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 5); float* r0_tm3_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 6); float* r0_tm3_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 7); int step = img0_tm.w * tiles * 2 * 4 * 4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%8] \n" "add %8, %8, #128 \n" "vld1.f32 {d20-d23}, [%9] \n" "add %9, %9, #128 \n" "vld1.f32 {d24-d27}, [%10] \n" "add %10, %10, #128 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "add %11, %11, #128 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n" // q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n" // q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4[0]}, [%0]! \n" "vst1.f32 {d4[1]}, [%2]! \n" "vmov q3, q7 \n" // use q7 "vst1.f32 {d5[0]}, [%4]! \n" "vst1.f32 {d5[1]}, [%6]! 
\n" "vadd.f32 q2, q13, q6 \n" // use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n" // use q7 "vadd.f32 q6, q12, q6 \n" // use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16[0]}, [%0]! \n" "vst1.f32 {d16[1]}, [%2]! \n" "vmla.f32 q4, q6, %e25[1] \n" "vst1.f32 {d17[0]}, [%4]! \n" "vst1.f32 {d17[1]}, [%6]! \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18[0]}, [%0]! \n" "vst1.f32 {d18[1]}, [%2]! \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%4]! \n" "vst1.f32 {d19[1]}, [%6]! \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16[0]}, [%0], %26 \n" "vst1.f32 {d16[1]}, [%2], %26 \n" "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d17[0]}, [%4], %26 \n" "vst1.f32 {d17[1]}, [%6], %26 \n" "vtrn.32 q9, q2 \n" "vtrn.32 q3, q6 \n" "sub %0, %0, #12 \n" "sub %2, %2, #12 \n" "sub %4, %4, #12 \n" "sub %6, %6, #12 \n" "vswp d19, d6 \n" "vswp d5, d12 \n" "vst1.f32 {d18-d19}, [%1], %26 \n" "vst1.f32 {d4-d5}, [%3], %26 \n" "vst1.f32 {d6-d7}, [%5], %26 \n" "vst1.f32 {d12-d13}, [%7], %26 \n" // loop1 "vld1.f32 {d16-d19}, [%8] \n" "vld1.f32 {d20-d23}, [%9] \n" "vld1.f32 {d24-d27}, [%10] \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n" // q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n" // q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4[0]}, [%0]! \n" "vst1.f32 {d4[1]}, [%2]! \n" "vmov q3, q7 \n" // use q7 "vst1.f32 {d5[0]}, [%4]! 
\n" "vst1.f32 {d5[1]}, [%6]! \n" "vadd.f32 q2, q13, q6 \n" // use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n" // use q7 "vadd.f32 q6, q12, q6 \n" // use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16[0]}, [%0]! \n" "vst1.f32 {d16[1]}, [%2]! \n" "vmla.f32 q4, q6, %e25[1] \n" "vst1.f32 {d17[0]}, [%4]! \n" "vst1.f32 {d17[1]}, [%6]! \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18[0]}, [%0]! \n" "vst1.f32 {d18[1]}, [%2]! \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%4]! \n" "vst1.f32 {d19[1]}, [%6]! \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16[0]}, [%0] \n" "vst1.f32 {d16[1]}, [%2] \n" "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d17[0]}, [%4] \n" "vst1.f32 {d17[1]}, [%6] \n" "vtrn.32 q9, q2 \n" "vtrn.32 q3, q6 \n" "vswp d19, d6 \n" "vswp d5, d12 \n" "vst1.f32 {d18-d19}, [%1] \n" "vst1.f32 {d4-d5}, [%3] \n" "vst1.f32 {d6-d7}, [%5] \n" "vst1.f32 {d12-d13}, [%7] \n" : "=r"(r0_tm0_0), // %0 "=r"(r0_tm0_4), // %1 "=r"(r0_tm1_0), // %2 "=r"(r0_tm1_4), // %3 "=r"(r0_tm2_0), // %4 "=r"(r0_tm2_4), // %5 "=r"(r0_tm3_0), // %6 "=r"(r0_tm3_4), // %7 "=r"(t0), // %8 "=r"(t1), // %9 "=r"(t2), // %10 "=r"(t3) // %11 : "0"(r0_tm0_0), "1"(r0_tm0_4), "2"(r0_tm1_0), "3"(r0_tm1_4), "4"(r0_tm2_0), "5"(r0_tm2_4), "6"(r0_tm3_0), "7"(r0_tm3_4), "8"(t0), "9"(t1), "10"(t2), "11"(t3), "w"(_coeff0), // %24 "w"(_coeff1), // %25 "r"(step) // %26 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else const float* r0 = img0.row(i * 6) + j * 6; for (int m = 0; m < 8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f); tmp[1][m] = 
tmp12a + tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f); float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f); float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } float* r0_tm_0 = img0_tm.row(i * w_tm / 8 + j); float* r0_tm_4 = img0_tm.row(i * w_tm / 8 + j + tiles); for (int m = 0; m < 8; m++) { const float* tmp0 = tmp[m]; r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25f; r0_tm_4[3] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f); float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]); r0_tm_0[1] = tmp12a + tmp12b; r0_tm_0[2] = tmp12a - tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f); float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f); r0_tm_0[3] = tmp34a + tmp34b; r0_tm_4[0] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f); float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f); r0_tm_4[1] = tmp56a + tmp56b; r0_tm_4[2] = tmp56a - tmp56b; r0_tm_0 += img0_tm.w * tiles * 2; r0_tm_4 += img0_tm.w * tiles * 2; } #endif // __ARM_NEON } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; top_blob_tm.create(4, 16 * w_tm / 8 * h_tm / 8, outch, 4u, opt.workspace_allocator); const int tiles = h_tm / 8 * w_tm / 8; int nn_outch = outch >> 2; int remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p + 1); Mat out2_tm = top_blob_tm.channel(p + 2); Mat out3_tm = top_blob_tm.channel(p + 3); const float* ktm = kernel_tm.channel(pp); out0_tm.fill(0.f); 
out1_tm.fill(0.f); out2_tm.fill(0.f); out3_tm.fill(0.f); int q = 0; #if __ARM_NEON && __aarch64__ for (; q + 3 < inch; q += 4) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = bottom_blob_tm.channel(q + 1); const float* r2 = bottom_blob_tm.channel(q + 2); const float* r3 = bottom_blob_tm.channel(q + 3); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; asm volatile( "mov w0, #16 \n" // w0 = r = 16 "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%8], #64 \n" // v0 v1 v2 v3 = _k00 _k01 _k02 _k03 "prfm pldl1keep, [%8, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%8], #64 \n" // v4 v5 v6 v7 = _k10 _k11 _k12 _k13 "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" // v8 v9 v10 v11 = _k20 _k21 _k22 _k23 "prfm pldl1keep, [%8, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n" // v12 v13 v14 v15 = _k30 _k31 _k32 _k33 // tile loop "lsr w1, %w18, #2 \n" // w1 = nn = tiles >> 2 "cmp w1, #0 \n" "beq 2f \n" //BEGIN tile loop "prfm pldl1keep, [%4, #128] \n" // "ld1 {v16.4s}, [%4], #16 \n" "1: \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" "add x4, %0, #16 \n" // x4 = %0 next "fmla v20.4s, v16.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v21.4s}, [%1] \n" "add x5, %1, #16 \n" // x5 = %1 next "fmla v21.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v22.4s}, [%2] \n" "add x6, %2, #16 \n" // x6 = %2 next "fmla v22.4s, v16.4s, v8.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v23.4s}, [%3] \n" "add x7, %3, #16 \n" // x7 = %3 next "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v23.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [x4, #128] \n" "ld1 {v24.4s}, [x4] \n" "fmla v20.4s, v17.4s, v1.4s \n" "fmla v21.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v22.4s, v17.4s, v9.4s \n" "fmla v23.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [x5, #128] \n" "ld1 
{v25.4s}, [x5] \n" "fmla v20.4s, v18.4s, v2.4s \n" "fmla v21.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v22.4s, v18.4s, v10.4s \n" "fmla v23.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [x6, #128] \n" "ld1 {v26.4s}, [x6] \n" "fmla v20.4s, v19.4s, v3.4s \n" "fmla v21.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v22.4s, v19.4s, v11.4s \n" "fmla v23.4s, v19.4s, v15.4s \n" /////// "prfm pldl1keep, [x7, #128] \n" "ld1 {v27.4s}, [x7] \n" "st1 {v20.4s}, [%0] \n" "add %0, %0, #32 \n" "fmla v24.4s, v16.4s, v0.4s \n" "fmla v25.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v26.4s, v16.4s, v8.4s \n" "fmla v27.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" "st1 {v21.4s}, [%1] \n" "add %1, %1, #32 \n" "fmla v24.4s, v17.4s, v1.4s \n" "fmla v25.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v26.4s, v17.4s, v9.4s \n" "fmla v27.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v21.4s}, [%1] \n" "st1 {v22.4s}, [%2] \n" "add %2, %2, #32 \n" "fmla v24.4s, v18.4s, v2.4s \n" "fmla v25.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v26.4s, v18.4s, v10.4s \n" "fmla v27.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v22.4s}, [%2] \n" "st1 {v23.4s}, [%3] \n" "add %3, %3, #32 \n" "fmla v24.4s, v19.4s, v3.4s \n" "fmla v25.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v26.4s, v19.4s, v11.4s \n" "fmla v27.4s, v19.4s, v15.4s \n" /////// "prfm pldl1keep, [%3, #128] \n" "ld1 {v23.4s}, [%3] \n" "st1 {v24.4s}, [x4] \n" "add x4, x4, #32 \n" "fmla v20.4s, v16.4s, v0.4s \n" "fmla v21.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v22.4s, v16.4s, v8.4s \n" "fmla v23.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [x4, #128] \n" "ld1 {v24.4s}, [x4] \n" "st1 
{v25.4s}, [x5] \n" "add x5, x5, #32 \n" "fmla v20.4s, v17.4s, v1.4s \n" "fmla v21.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v22.4s, v17.4s, v9.4s \n" "fmla v23.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [x5, #128] \n" "ld1 {v25.4s}, [x5] \n" "st1 {v26.4s}, [x6] \n" "add x6, x6, #32 \n" "fmla v20.4s, v18.4s, v2.4s \n" "fmla v21.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v22.4s, v18.4s, v10.4s \n" "fmla v23.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [x6, #128] \n" "ld1 {v26.4s}, [x6] \n" "st1 {v27.4s}, [x7] \n" "add x7, x7, #32 \n" "fmla v20.4s, v19.4s, v3.4s \n" "fmla v21.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v22.4s, v19.4s, v11.4s \n" "fmla v23.4s, v19.4s, v15.4s \n" /////// "prfm pldl1keep, [x7, #128] \n" "ld1 {v27.4s}, [x7] \n" "st1 {v20.4s}, [%0] \n" "fmla v24.4s, v16.4s, v0.4s \n" "fmla v25.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v26.4s, v16.4s, v8.4s \n" "fmla v27.4s, v16.4s, v12.4s \n" "st1 {v21.4s}, [%1] \n" "fmla v24.4s, v17.4s, v1.4s \n" "fmla v25.4s, v17.4s, v5.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v26.4s, v17.4s, v9.4s \n" "fmla v27.4s, v17.4s, v13.4s \n" "st1 {v22.4s}, [%2] \n" "fmla v24.4s, v18.4s, v2.4s \n" "fmla v25.4s, v18.4s, v6.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v26.4s, v18.4s, v10.4s \n" "fmla v27.4s, v18.4s, v14.4s \n" "st1 {v23.4s}, [%3] \n" "fmla v24.4s, v19.4s, v3.4s \n" "fmla v25.4s, v19.4s, v7.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "fmla v26.4s, v19.4s, v11.4s \n" "fmla v27.4s, v19.4s, v15.4s \n" "st1 {v24.4s}, [x4], #16 \n" "mov %0, x4 \n" "st1 {v25.4s}, [x5], #16 \n" "mov %1, x5 \n" "subs w1, w1, #1 \n" "st1 {v26.4s}, [x6], #16 \n" "mov %2, x6 \n" "st1 {v27.4s}, [x7], #16 \n" "mov %3, x7 \n" "bne 1b \n" "sub %4, %4, #16 \n" //END tile loop "2: \n" // 
remain loop "and w1, %w18, #3 \n" // w1 = remain = tiles & 3 "cmp w1, #0 \n" "beq 4f \n" //BEGIN remain loop "3: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v20.4s}, [%0] \n" "fmla v20.4s, v16.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v21.4s}, [%1] \n" "fmla v21.4s, v16.4s, v4.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v22.4s}, [%2] \n" "fmla v22.4s, v16.4s, v8.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v23.4s}, [%3] \n" "fmla v23.4s, v16.4s, v12.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v17.4s}, [%5], #16 \n" "fmla v20.4s, v17.4s, v1.4s \n" "fmla v21.4s, v17.4s, v5.4s \n" "fmla v22.4s, v17.4s, v9.4s \n" "fmla v23.4s, v17.4s, v13.4s \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v18.4s}, [%6], #16 \n" "fmla v20.4s, v18.4s, v2.4s \n" "fmla v21.4s, v18.4s, v6.4s \n" "fmla v22.4s, v18.4s, v10.4s \n" "fmla v23.4s, v18.4s, v14.4s \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v19.4s}, [%7], #16 \n" "fmla v20.4s, v19.4s, v3.4s \n" "fmla v21.4s, v19.4s, v7.4s \n" "fmla v22.4s, v19.4s, v11.4s \n" "fmla v23.4s, v19.4s, v15.4s \n" "st1 {v20.4s}, [%0], #16 \n" "st1 {v21.4s}, [%1], #16 \n" "subs w1, w1, #1 \n" "st1 {v22.4s}, [%2], #16 \n" "st1 {v23.4s}, [%3], #16 \n" "bne 3b \n" //END remain loop "4: \n" "subs w0, w0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(r2), // %6 "=r"(r3), // %7 "=r"(ktm) // %8 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(r2), "7"(r3), "8"(ktm), "r"(tiles) // %18 : "cc", "memory", "x0", "x1", "x4", "x5", "x6", "x7", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif // __ARM_NEON && __aarch64__ for (; q + 1 < inch; q += 2) { const float* r0 = bottom_blob_tm.channel(q); const float* r1 = 
bottom_blob_tm.channel(q + 1); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; #if __ARM_NEON #if __aarch64__ asm volatile( "mov w0, #16 \n" // w0 = r = 16 "0: \n" "prfm pldl1keep, [%6, #256] \n" "ld1 {v0.4s, v1.4s}, [%6], #32 \n" // v0 v1 = _k00 _k01 "prfm pldl1keep, [%6, #256] \n" "ld1 {v2.4s, v3.4s}, [%6], #32 \n" // v2 v3 = _k10 _k11 "prfm pldl1keep, [%6, #256] \n" "ld1 {v4.4s, v5.4s}, [%6], #32 \n" // v4 v5 = _k20 _k21 "prfm pldl1keep, [%6, #256] \n" "ld1 {v6.4s, v7.4s}, [%6], #32 \n" // v6 v7 = _k30 _k31 // tile loop "lsr w1, %w14, #2 \n" // w1 = nn = tiles >> 2 "cmp w1, #0 \n" "beq 2f \n" //BEGIN tile loop "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "1: \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" //// "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 
{v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" //// "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" //// "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "subs w1, w1, #1 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "bne 1b \n" "sub %4, %4, #16 \n" //END tile loop "2: \n" // remain loop "and w1, %w14, #3 \n" // w1 = remain = tiles & 3 "cmp w1, #0 \n" "beq 4f \n" //BEGIN remain loop "3: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v20.4s}, [%4], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v20.4s, v0.4s \n" 
"prfm pldl1keep, [%1, #128] \n" "ld1 {v17.4s}, [%1] \n" "fmla v17.4s, v20.4s, v2.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v18.4s}, [%2] \n" "fmla v18.4s, v20.4s, v4.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v19.4s}, [%3] \n" "fmla v19.4s, v20.4s, v6.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v21.4s}, [%5], #16 \n" "fmla v16.4s, v21.4s, v1.4s \n" "fmla v17.4s, v21.4s, v3.4s \n" "fmla v18.4s, v21.4s, v5.4s \n" "fmla v19.4s, v21.4s, v7.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "subs w1, w1, #1 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "bne 3b \n" //END remain loop "4: \n" "subs w0, w0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(ktm) // %6 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(ktm), "r"(tiles) // %14 : "cc", "memory", "x0", "x1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21"); #else asm volatile( "mov r0, #16 \n" // r0 = r = 16 "0: \n" "pld [%6, #256] \n" "vld1.f32 {d0-d3}, [%6 :128]! \n" // q0 q1 = _k00 _k01 "pld [%6, #256] \n" "vld1.f32 {d4-d7}, [%6 :128]! \n" // q2 q3 = _k10 _k11 "pld [%6, #256] \n" "vld1.f32 {d8-d11}, [%6 :128]! \n" // q4 q5 = _k20 _k21 "pld [%6, #256] \n" "vld1.f32 {d12-d15}, [%6 :128]! \n" // q6 q7 = _k30 _k31 // tile loop "lsr r1, %14, #2 \n" // r1 = nn = tiles >> 2 "cmp r1, #0 \n" "beq 2f \n" //BEGIN tile loop "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! 
\n" // q12 = _r0 "1: \n" "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n" // q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n" // q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" //// "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n" // q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n" // q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" //// "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! 
\n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n" // q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n" // q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" //// "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n" // q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! \n" // q12 = _r0 "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "subs r1, #1 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "bne 1b \n" "sub %4, %4, #16 \n" //END tile loop "2: \n" // remain loop "and r1, %14, #3 \n" // r1 = remain = tiles & 3 "cmp r1, #0 \n" "beq 4f \n" //BEGIN remain loop "3: \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! 
\n" // q12 = _r0 "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm "vmla.f32 q9, q12, q2 \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm "vmla.f32 q10, q12, q4 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm "vmla.f32 q11, q12, q6 \n" "pld [%5, #128] \n" "vld1.f32 {d26-d27}, [%5 :128]! \n" // q13 = _r1 "vmla.f32 q8, q13, q1 \n" "vmla.f32 q9, q13, q3 \n" "vmla.f32 q10, q13, q5 \n" "vmla.f32 q11, q13, q7 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "subs r1, #1 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "bne 3b \n" //END remain loop "4: \n" "subs r0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(r1), // %5 "=r"(ktm) // %6 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(r1), "6"(ktm), "r"(tiles) // %14 : "cc", "memory", "r0", "r1", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13"); #endif // __aarch64__ #else for (int r = 0; r < 16; r++) { for (int t = 0; t < tiles; t++) { for (int m = 0; m < 4; m++) { output0_tm[m] += r0[m] * ktm[0 + m]; output0_tm[m] += r1[m] * ktm[4 + m]; output1_tm[m] += r0[m] * ktm[8 + m]; output1_tm[m] += r1[m] * ktm[12 + m]; output2_tm[m] += r0[m] * ktm[16 + m]; output2_tm[m] += r1[m] * ktm[20 + m]; output3_tm[m] += r0[m] * ktm[24 + m]; output3_tm[m] += r1[m] * ktm[28 + m]; } r0 += 4; r1 += 4; output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; } ktm += 32; } #endif // __ARM_NEON } for (; q < inch; q++) { const float* r0 = bottom_blob_tm.channel(q); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; #if __ARM_NEON #if __aarch64__ asm volatile( "mov w0, 
#16 \n" // w0 = r = 16 "0: \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4s, v1.4s}, [%5], #32 \n" // v0 v1 = _k00 _k10 "prfm pldl1keep, [%5, #256] \n" "ld1 {v2.4s, v3.4s}, [%5], #32 \n" // v2 v3 = _k20 _k30 // tile loop "mov w1, %w12 \n" // w1 = tiles "cmp w1, #0 \n" "beq 2f \n" //BEGIN tile loop "1: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v16.4s}, [%4], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v17.4s}, [%0] \n" "fmla v17.4s, v16.4s, v0.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v18.4s}, [%1] \n" "fmla v18.4s, v16.4s, v1.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v19.4s}, [%2] \n" "fmla v19.4s, v16.4s, v2.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v20.4s}, [%3] \n" "fmla v20.4s, v16.4s, v3.4s \n" "st1 {v17.4s}, [%0], #16 \n" "st1 {v18.4s}, [%1], #16 \n" "subs w1, w1, #1 \n" "st1 {v19.4s}, [%2], #16 \n" "st1 {v20.4s}, [%3], #16 \n" "bne 1b \n" //END tile loop "2: \n" "subs w0, w0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(ktm) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(ktm), "r"(tiles) // %12 : "cc", "memory", "x0", "x1", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20"); #else asm volatile( "mov r0, #16 \n" // r0 = r = 16 "0: \n" "pld [%5, #256] \n" "vld1.f32 {d0-d3}, [%5 :128]! \n" // q0 q1 = _k00 _k10 "pld [%5, #256] \n" "vld1.f32 {d4-d7}, [%5 :128]! \n" // q2 q3 = _k20 _k30 // tile loop "mov r1, %12 \n" // r1 = tiles "cmp r1, #0 \n" "beq 2f \n" //BEGIN tile loop "1: \n" "pld [%4, #128] \n" "vld1.f32 {d24-d25}, [%4 :128]! 
\n" // q12 = _r0 "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm "vmla.f32 q8, q12, q0 \n" "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128] \n" // q9 = _output1_tm "vmla.f32 q9, q12, q1 \n" "pld [%2, #128] \n" "vld1.f32 {d20-d21}, [%2 :128] \n" // q10 = _output2_tm "vmla.f32 q10, q12, q2 \n" "pld [%3, #128] \n" "vld1.f32 {d22-d23}, [%3 :128] \n" // q11 = _output3_tm "vmla.f32 q11, q12, q3 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" "vst1.f32 {d18-d19}, [%1 :128]! \n" "subs r1, #1 \n" "vst1.f32 {d20-d21}, [%2 :128]! \n" "vst1.f32 {d22-d23}, [%3 :128]! \n" "bne 1b \n" //END tile loop "2: \n" "subs r0, #1 \n" "bne 0b \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(r0), // %4 "=r"(ktm) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(r0), "5"(ktm), "r"(tiles) // %12 : "cc", "memory", "r0", "r1", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13"); #endif // __aarch64__ #else for (int r = 0; r < 16; r++) { for (int t = 0; t < tiles; t++) { for (int m = 0; m < 4; m++) { output0_tm[m] += r0[m] * ktm[0 + m]; output1_tm[m] += r0[m] * ktm[4 + m]; output2_tm[m] += r0[m] * ktm[8 + m]; output3_tm[m] += r0[m] * ktm[12 + m]; } r0 += 4; output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; } ktm += 16; } #endif // __ARM_NEON } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out0_tm = top_blob_tm.channel(p); const float* ktm = (const float*)kernel_tm.channel(nn_outch) + 8 * 8 * inch * (p - remain_outch_start); out0_tm.fill(0.f); int q = 0; for (; q < inch; q++) { const float* r0 = bottom_blob_tm.channel(q); float* output0_tm = out0_tm; for (int r = 0; r < 16; r++) { #if __ARM_NEON float32x4_t _k00 = vld1q_f32(ktm); ktm += 4; #endif // __ARM_NEON // tile for (int i = 0; i < tiles; i++) { #if __ARM_NEON #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #128] 
\n" "ld1 {v17.4s}, [%1], #16 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v16.4s}, [%0] \n" "fmla v16.4s, v17.4s, %4.4s \n" "st1 {v16.4s}, [%0], #16 \n" : "=r"(output0_tm), // %0 "=r"(r0) // %1 : "0"(output0_tm), "1"(r0), "w"(_k00) // %4 : "cc", "memory", "v16", "v17"); #else asm volatile( "pld [%1, #128] \n" "vld1.f32 {d18-d19}, [%1 :128]! \n" // q9 = _r0 "pld [%0, #128] \n" "vld1.f32 {d16-d17}, [%0 :128] \n" // q8 = _output0_tm "vmla.f32 q8, q9, %q4 \n" "vst1.f32 {d16-d17}, [%0 :128]! \n" : "=r"(output0_tm), // %0 "=r"(r0) // %1 : "0"(output0_tm), "1"(r0), "w"(_k00) // %4 : "cc", "memory", "q8", "q9"); #endif // __aarch64__ #else for (int m = 0; m < 4; m++) { output0_tm[m] += r0[m] * ktm[m]; } r0 += 4; output0_tm += 4; #endif // __ARM_NEON } #if !__ARM_NEON ktm += 4; #endif // __ARM_NEON } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) #if __ARM_NEON const float coeff[4] = {4.f, 8.f, 16.f, 32.f}; float32x4_t _coeff = vld1q_f32(coeff); #endif // __ARM_NEON int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = 
top_blob_bordered.channel(p); const float bias0 = bias ? bias[p] : 0.f; #if __ARM_NEON float32x2_t _bias0 = vdup_n_f32(bias0); #endif // __ARM_NEON float tmp[6][8]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { #if __ARM_NEON const float* output0_tm0_0 = out0_tm.row(i * w_tm / 8 + j); const float* output0_tm0_4 = out0_tm.row(i * w_tm / 8 + j + tiles); const float* output0_tm1_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 2); const float* output0_tm1_4 = out0_tm.row(i * w_tm / 8 + j + tiles * 3); const float* output0_tm2_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 4); const float* output0_tm2_4 = out0_tm.row(i * w_tm / 8 + j + tiles * 5); const float* output0_tm3_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 6); const float* output0_tm3_4 = out0_tm.row(i * w_tm / 8 + j + tiles * 7); #if __aarch64__ for (int m = 0; m + 3 < 8; m += 4) { float32x4_t _output0_tm0_0123 = vld1q_f32(output0_tm0_0); float32x4_t _output0_tm0_4567 = vld1q_f32(output0_tm0_4); float32x4_t _output0_tm1_0123 = vld1q_f32(output0_tm1_0); float32x4_t _output0_tm1_4567 = vld1q_f32(output0_tm1_4); float32x4_t _output0_tm2_0123 = vld1q_f32(output0_tm2_0); float32x4_t _output0_tm2_4567 = vld1q_f32(output0_tm2_4); float32x4_t _output0_tm3_0123 = vld1q_f32(output0_tm3_0); float32x4_t _output0_tm3_4567 = vld1q_f32(output0_tm3_4); float32x4x2_t _output0_tm01_00221133 = vtrnq_f32(_output0_tm0_0123, _output0_tm1_0123); float32x4x2_t _output0_tm01_44665577 = vtrnq_f32(_output0_tm0_4567, _output0_tm1_4567); float32x4x2_t _output0_tm23_00221133 = vtrnq_f32(_output0_tm2_0123, _output0_tm3_0123); float32x4x2_t _output0_tm23_44665577 = vtrnq_f32(_output0_tm2_4567, _output0_tm3_4567); // no vswp intrinsic :( float32x4_t _output0_tm_00 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[0]), vget_low_f32(_output0_tm23_00221133.val[0])); float32x4_t _output0_tm_11 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[1]), vget_low_f32(_output0_tm23_00221133.val[1])); float32x4_t 
_output0_tm_22 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[0]), vget_high_f32(_output0_tm23_00221133.val[0])); float32x4_t _output0_tm_33 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[1]), vget_high_f32(_output0_tm23_00221133.val[1])); float32x4_t _output0_tm_44 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[0]), vget_low_f32(_output0_tm23_44665577.val[0])); float32x4_t _output0_tm_55 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[1]), vget_low_f32(_output0_tm23_44665577.val[1])); float32x4_t _output0_tm_66 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[0]), vget_high_f32(_output0_tm23_44665577.val[0])); float32x4_t _output0_tm_77 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[1]), vget_high_f32(_output0_tm23_44665577.val[1])); float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22); float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22); float32x4_t _tmp024b = vaddq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a); _tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1); _tmp0 = vaddq_f32(_tmp0, _tmp024b); float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0); _tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1); float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _tmp4 = vaddq_f32(_tmp4, _tmp024c); _tmp4 = vaddq_f32(_tmp4, _tmp024c); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[2][m], _tmp2); vst1q_f32(&tmp[4][m], _tmp4); float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _tmp1 = vaddq_f32(_tmp1, _tmp135b); _tmp1 = vaddq_f32(_tmp1, _tmp135b); float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _tmp3 = 
vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0); float32x4_t _tmp5 = vaddq_f32(_output0_tm_77, _tmp135a); _tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 1); _tmp5 = vaddq_f32(_tmp5, _tmp135c); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[3][m], _tmp3); vst1q_f32(&tmp[5][m], _tmp5); output0_tm0_0 += out0_tm.w * tiles * 2 * 4; output0_tm0_4 += out0_tm.w * tiles * 2 * 4; output0_tm1_0 += out0_tm.w * tiles * 2 * 4; output0_tm1_4 += out0_tm.w * tiles * 2 * 4; output0_tm2_0 += out0_tm.w * tiles * 2 * 4; output0_tm2_4 += out0_tm.w * tiles * 2 * 4; output0_tm3_0 += out0_tm.w * tiles * 2 * 4; output0_tm3_4 += out0_tm.w * tiles * 2 * 4; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; float* output0 = out0.row(i * 6) + j * 6; float* output1 = output0 + outw; for (int m = 0; m + 1 < 6; m += 2) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0 + 4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1 + 4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]); float32x2_t _t_11 = vget_low_f32(_t01_00221133.val[1]); float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]); float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]); float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]); float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]); float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]); float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]); float32x2_t _tmp024a = vadd_f32(_t_11, _t_22); float32x2_t _tmp135a = vsub_f32(_t_11, _t_22); float32x2_t _tmp024b = vadd_f32(_t_33, _t_44); float32x2_t _tmp135b = vsub_f32(_t_33, _t_44); float32x2_t _tmp024c = vadd_f32(_t_55, _t_66); float32x2_t _tmp135c = vsub_f32(_t_55, _t_66); float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a); _output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1); _output_0 = vadd_f32(_output_0, 
_tmp024b); _output_0 = vadd_f32(_output_0, _bias0); float32x2_t _output_2 = vmla_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0); _output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1); _output_2 = vadd_f32(_output_2, _bias0); float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _output_4 = vadd_f32(_output_4, _tmp024c); _output_4 = vadd_f32(_output_4, _tmp024c); _output_4 = vadd_f32(_output_4, _bias0); output0[0] = vget_lane_f32(_output_0, 0); output1[0] = vget_lane_f32(_output_0, 1); output0[2] = vget_lane_f32(_output_2, 0); output1[2] = vget_lane_f32(_output_2, 1); output0[4] = vget_lane_f32(_output_4, 0); output1[4] = vget_lane_f32(_output_4, 1); float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _bias0); float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _output_3 = vmla_lane_f32(_output_3, _tmp135c, vget_low_f32(_coeff), 0); _output_3 = vadd_f32(_output_3, _bias0); float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a); _output_5 = vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1); _output_5 = vadd_f32(_output_5, _tmp135c); _output_5 = vadd_f32(_output_5, _bias0); output0[1] = vget_lane_f32(_output_1, 0); output1[1] = vget_lane_f32(_output_1, 1); output0[3] = vget_lane_f32(_output_3, 0); output1[3] = vget_lane_f32(_output_3, 1); output0[5] = vget_lane_f32(_output_5, 0); output1[5] = vget_lane_f32(_output_5, 1); t0 += 8 * 2; t1 += 8 * 2; output0 += outw * 2; output1 += outw * 2; } #else // __aarch64__ float* t0 = tmp[0]; float* t1 = tmp[1]; int step = out0_tm.w * tiles * 2 * 4 * 4; asm volatile( // loop0 "vld1.f32 {d16-d17}, [%2], %21 \n" "vld1.f32 {d18-d19}, [%3], %21 \n" "vld1.f32 {d20-d21}, [%4], %21 \n" "vld1.f32 {d22-d23}, [%5], %21 \n" "vld1.f32 {d24-d25}, [%6], %21 \n" "vld1.f32 {d26-d27}, [%7], %21 \n" 
"vld1.f32 {d28-d29}, [%8], %21 \n" "vld1.f32 {d30-d31}, [%9], %21 \n" "vtrn.32 q8, q10 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vadd.f32 q2, q10, q12 \n" "vsub.f32 q3, q10, q12 \n" "vadd.f32 q4, q14, q9 \n" "vsub.f32 q5, q14, q9 \n" "vadd.f32 q6, q11, q13 \n" "vsub.f32 q7, q11, q13 \n" // spare q9 q10 q11 q12 q13 q14 "vmov q9, q3 \n" "vadd.f32 q8, q8, q2 \n" "vmla.f32 q9, q7, %f20[0] \n" "vmov q12, q2 \n" "vmov q10, q2 \n" "vmov q11, q3 \n" "vmla.f32 q12, q4, %f20[0] \n" "vadd.f32 q15, q15, q3 \n" "vmla.f32 q8, q6, %f20[1] \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q4, %e20[0] \n" "vmla.f32 q11, q5, %e20[1] \n" "vadd.f32 q12, q12, q6 \n" "vmla.f32 q15, q5, %f20[1] \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q6, %e20[1] \n" "vmla.f32 q11, q7, %e20[0] \n" "vadd.f32 q12, q12, q6 \n" "vadd.f32 q15, q15, q7 \n" "vst1.f32 {d16-d17}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d18-d19}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d20-d21}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d22-d23}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d24-d25}, [%0] \n" "sub %0, %0, #112 \n" "vst1.f32 {d30-d31}, [%1] \n" "sub %1, %1, #112 \n" // loop1 "vld1.f32 {d16-d17}, [%2] \n" "vld1.f32 {d18-d19}, [%3] \n" "vld1.f32 {d20-d21}, [%4] \n" "vld1.f32 {d22-d23}, [%5] \n" "vld1.f32 {d24-d25}, [%6] \n" "vld1.f32 {d26-d27}, [%7] \n" "vld1.f32 {d28-d29}, [%8] \n" "vld1.f32 {d30-d31}, [%9] \n" "vtrn.32 q8, q10 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vadd.f32 q2, q10, q12 \n" "vsub.f32 q3, q10, q12 \n" "vadd.f32 q4, q14, q9 \n" "vsub.f32 q5, q14, q9 \n" "vadd.f32 q6, q11, q13 \n" "vsub.f32 q7, q11, q13 \n" // spare q9 q10 q11 
q12 q13 q14 "vmov q9, q3 \n" "vadd.f32 q8, q8, q2 \n" "vmla.f32 q9, q7, %f20[0] \n" "vmov q12, q2 \n" "vmov q10, q2 \n" "vmov q11, q3 \n" "vmla.f32 q12, q4, %f20[0] \n" "vadd.f32 q15, q15, q3 \n" "vmla.f32 q8, q6, %f20[1] \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q4, %e20[0] \n" "vmla.f32 q11, q5, %e20[1] \n" "vadd.f32 q12, q12, q6 \n" "vmla.f32 q15, q5, %f20[1] \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q6, %e20[1] \n" "vmla.f32 q11, q7, %e20[0] \n" "vadd.f32 q12, q12, q6 \n" "vadd.f32 q15, q15, q7 \n" "vst1.f32 {d16-d17}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d18-d19}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d20-d21}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d22-d23}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d24-d25}, [%0] \n" "vst1.f32 {d30-d31}, [%1] \n" : "=r"(t0), // %0 "=r"(t1), // %1 "=r"(output0_tm0_0), // %2 "=r"(output0_tm0_4), // %3 "=r"(output0_tm1_0), // %4 "=r"(output0_tm1_4), // %5 "=r"(output0_tm2_0), // %6 "=r"(output0_tm2_4), // %7 "=r"(output0_tm3_0), // %8 "=r"(output0_tm3_4) // %9 : "0"(t0), "1"(t1), "2"(output0_tm0_0), "3"(output0_tm0_4), "4"(output0_tm1_0), "5"(output0_tm1_4), "6"(output0_tm2_0), "7"(output0_tm2_4), "8"(output0_tm3_0), "9"(output0_tm3_4), "w"(_coeff), // %20 "r"(step) // %21 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); t0 = tmp[0]; t1 = tmp[1]; float* output0 = out0.row(i * 6) + j * 6; float* output1 = output0 + outw; int stepw = outw * 2 * 4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" 
"vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n" // _bias0 "vadd.f32 d20, d20, %P9 \n" // _bias0 "vadd.f32 d17, d17, %P9 \n" // _bias0 "vadd.f32 d21, d21, %P9 \n" // _bias0 "vadd.f32 d18, d18, %P9 \n" // _bias0 "vadd.f32 d22, d22, %P9 \n" // _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" // loop1 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n" // _bias0 "vadd.f32 d20, d20, %P9 \n" // _bias0 "vadd.f32 d17, d17, %P9 \n" // _bias0 "vadd.f32 d21, d21, %P9 \n" // _bias0 "vadd.f32 d18, d18, %P9 \n" // _bias0 "vadd.f32 d22, d22, %P9 \n" // _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, 
[%1], %10 \n" // loop2 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n" // _bias0 "vadd.f32 d20, d20, %P9 \n" // _bias0 "vadd.f32 d17, d17, %P9 \n" // _bias0 "vadd.f32 d21, d21, %P9 \n" // _bias0 "vadd.f32 d18, d18, %P9 \n" // _bias0 "vadd.f32 d22, d22, %P9 \n" // _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(t0), // %2 "=r"(t1) // %3 : "0"(output0), "1"(output1), "2"(t0), "3"(t1), "w"(_coeff), // %8 "w"(_bias0), // %9 "r"(stepw) // %10 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else const float* output0_tm_0 = out0_tm.row(i * w_tm / 8 + j); const float* output0_tm_4 = out0_tm.row(i * w_tm / 8 + j + tiles); for (int m = 0; m < 8; m++) { float tmp024a = output0_tm_0[1] + output0_tm_0[2]; float tmp135a = output0_tm_0[1] - output0_tm_0[2]; float tmp024b = output0_tm_0[3] + output0_tm_4[0]; float tmp135b = output0_tm_0[3] - output0_tm_4[0]; float tmp024c = output0_tm_4[1] + output0_tm_4[2]; float tmp135c = 
output0_tm_4[1] - output0_tm_4[2]; tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32; tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; tmp[5][m] = output0_tm_4[3] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += out0_tm.w * tiles * 2; output0_tm_4 += out0_tm.w * tiles * 2; } float* output0 = out0.row(i * 6) + j * 6; for (int m = 0; m < 6; m++) { const float* tmp0 = tmp[m]; float tmp024a = tmp0[1] + tmp0[2]; float tmp135a = tmp0[1] - tmp0[2]; float tmp024b = tmp0[3] + tmp0[4]; float tmp135b = tmp0[3] - tmp0[4]; float tmp024c = tmp0[5] + tmp0[6]; float tmp135c = tmp0[5] - tmp0[6]; output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw; } #endif // __ARM_NEON } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); } static void conv3x3s1_winograd64_neon5(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; Option opt_b = opt; opt_b.blob_allocator = opt.workspace_allocator; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b); const float* bias = _bias; // BEGIN transform 
input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; bottom_blob_tm.create(1, 64 * tiles, inch, 4u, opt.workspace_allocator); // bottom_blob_tm.create(inch, tiles, 64); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #if __ARM_NEON const float coeff[8] = { 0.25f, 0.5f, -1.25f, 2.f, -2.5f, 4.f, 4.25f, 5.25f }; float32x4_t _coeff0 = vld1q_f32(coeff); float32x4_t _coeff1 = vld1q_f32(coeff + 4); #endif // __ARM_NEON #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { #if __ARM_NEON const float* r0 = img0.row(i * 6) + j * 6; const float* r1 = r0 + w; const float* r2 = r0 + w * 2; const float* r3 = r0 + w * 3; // the assembly block for armv7 input transform requires 13 general registers // old gcc may 
fail to allocate register on debug build without -fomit-frame-pointer // so, fallback to intrinsic version for armv7 debug build --- nihui #if __aarch64__ || !defined(NDEBUG) for (int m = 0; m + 3 < 8; m += 4) { float32x4_t _r0_0123 = vld1q_f32(r0); float32x4_t _r0_4567 = vld1q_f32(r0 + 4); float32x4_t _r1_0123 = vld1q_f32(r1); float32x4_t _r1_4567 = vld1q_f32(r1 + 4); float32x4_t _r2_0123 = vld1q_f32(r2); float32x4_t _r2_4567 = vld1q_f32(r2 + 4); float32x4_t _r3_0123 = vld1q_f32(r3); float32x4_t _r3_4567 = vld1q_f32(r3 + 4); float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123); float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567); float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123); float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567); // no vswp intrinsic :( float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0])); float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1])); float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0])); float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1])); float32x4_t _r_44 = vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0])); float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1])); float32x4_t _r_66 = vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0])); float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1])); float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66); float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11); float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22); float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55); float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, 
vget_high_f32(_coeff1), 1); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[7][m], _tmp7); float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66); float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55); float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0); float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[2][m], _tmp2); float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0); float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1); float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b); vst1q_f32(&tmp[3][m], _tmp3); vst1q_f32(&tmp[4][m], _tmp4); // reuse r04 * 1.25 // reuse r03 * 2.5 float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t _tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1); float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _tmp6 = vsubq_f32(_tmp56a, _tmp56b); vst1q_f32(&tmp[5][m], _tmp5); vst1q_f32(&tmp[6][m], _tmp6); r0 += w * 4; r1 += w * 4; r2 += w * 4; r3 += w * 4; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; const float* t2 = tmp[2]; const float* t3 = tmp[3]; float* r0_tm0 = img0_tm.row(i * w_tm / 8 + j); float* r0_tm1 = img0_tm.row(i * w_tm / 8 + j + tiles * 8); float* r0_tm2 = img0_tm.row(i * w_tm / 8 + j + tiles * 16); float* r0_tm3 = img0_tm.row(i * w_tm / 8 + j + tiles * 24); for (int m = 0; m + 3 < 8; m += 4) { float32x4_t _t0_0123 
= vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0 + 4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1 + 4); float32x4_t _t2_0123 = vld1q_f32(t2); float32x4_t _t2_4567 = vld1q_f32(t2 + 4); float32x4_t _t3_0123 = vld1q_f32(t3); float32x4_t _t3_4567 = vld1q_f32(t3 + 4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123); float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567); // no vswp intrinsic :( float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0])); float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1])); float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0])); float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1])); float32x4_t _t_44 = vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0])); float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1])); float32x4_t _t_66 = vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0])); float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1])); float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66); float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11); float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22); float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55); float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1); float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1); r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_0, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_0, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_0, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_0, 3); r0_tm0 += img0_tm.w * tiles; r0_tm1 
+= img0_tm.w * tiles; r0_tm2 += img0_tm.w * tiles; r0_tm3 += img0_tm.w * tiles; float32x4_t _t_2_m_6 = vaddq_f32(_t_22, _t_66); float32x4_t _t_1_m_5 = vaddq_f32(_t_11, _t_55); float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_m_6, _t_44, vget_high_f32(_coeff1), 0); float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_m_5, _t_33, vget_high_f32(_coeff1), 0); float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b); float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b); r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_1, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_1, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_1, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_1, 3); r0_tm0 += img0_tm.w * tiles; r0_tm1 += img0_tm.w * tiles; r0_tm2 += img0_tm.w * tiles; r0_tm3 += img0_tm.w * tiles; r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_2, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_2, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_2, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_2, 3); r0_tm0 += img0_tm.w * tiles; r0_tm1 += img0_tm.w * tiles; r0_tm2 += img0_tm.w * tiles; r0_tm3 += img0_tm.w * tiles; float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0); float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, vget_low_f32(_coeff1), 0); float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c); _tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0); float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1); _tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1); float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b); float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b); r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_3, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_3, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_3, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_3, 3); r0_tm0 += img0_tm.w * tiles; r0_tm1 += img0_tm.w * tiles; r0_tm2 += img0_tm.w * tiles; r0_tm3 += img0_tm.w * tiles; r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_0, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_0, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_0, 2); r0_tm3[0] = 
vgetq_lane_f32(_r0_tm_4_0, 3); r0_tm0 += img0_tm.w * tiles; r0_tm1 += img0_tm.w * tiles; r0_tm2 += img0_tm.w * tiles; r0_tm3 += img0_tm.w * tiles; float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c); float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1); float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1); _tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1); float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b); float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b); r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_1, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_1, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_1, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_1, 3); r0_tm0 += img0_tm.w * tiles; r0_tm1 += img0_tm.w * tiles; r0_tm2 += img0_tm.w * tiles; r0_tm3 += img0_tm.w * tiles; r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_2, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_2, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_2, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_2, 3); r0_tm0 += img0_tm.w * tiles; r0_tm1 += img0_tm.w * tiles; r0_tm2 += img0_tm.w * tiles; r0_tm3 += img0_tm.w * tiles; r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_3, 0); r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_3, 1); r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_3, 2); r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_3, 3); t0 += 8 * 4; t1 += 8 * 4; t2 += 8 * 4; t3 += 8 * 4; r0_tm0 += img0_tm.w * tiles * 25; r0_tm1 += img0_tm.w * tiles * 25; r0_tm2 += img0_tm.w * tiles * 25; r0_tm3 += img0_tm.w * tiles * 25; } #else // __aarch64__ float* t0 = tmp[0]; float* t1 = tmp[1]; float* t2 = tmp[2]; float* t3 = tmp[3]; float* t4 = tmp[4]; float* t5 = tmp[5]; float* t6 = tmp[6]; float* t7 = tmp[7]; int stepw = w * 4 * 4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%8], %26 \n" "vld1.f32 {d20-d23}, [%9], %26 \n" "vld1.f32 {d24-d27}, [%10], %26 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11], %26 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 
q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n" // q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n" // q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4-d5}, [%0]! \n" // tmp[0][m] "vmov q3, q7 \n" // use q7 "vadd.f32 q2, q13, q6 \n" // use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n" // use q7 "vadd.f32 q6, q12, q6 \n" // use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16-d17}, [%1]! \n" // tmp[1][m] "vmla.f32 q4, q6, %e25[1] \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18-d19}, [%2]! \n" // tmp[2][m] "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3]! \n" // tmp[3][m] "vst1.f32 {d18-d19}, [%4]! \n" // tmp[4][m] "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d4-d5}, [%5]! \n" // tmp[5][m] "vst1.f32 {d6-d7}, [%6]! \n" // tmp[6][m] "vst1.f32 {d12-d13}, [%7]! \n" // tmp[7][m] // loop1 "vld1.f32 {d16-d19}, [%8] \n" "vld1.f32 {d20-d23}, [%9] \n" "vld1.f32 {d24-d27}, [%10] \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n" // q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n" // q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4-d5}, [%0]! 
\n" // tmp[0][m] "vmov q3, q7 \n" // use q7 "vadd.f32 q2, q13, q6 \n" // use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n" // use q7 "vadd.f32 q6, q12, q6 \n" // use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16-d17}, [%1]! \n" // tmp[1][m] "vmla.f32 q4, q6, %e25[1] \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18-d19}, [%2]! \n" // tmp[2][m] "vadd.f32 q8, q2, q3 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vadd.f32 q2, q4, q5 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d16-d17}, [%3]! \n" // tmp[3][m] "vst1.f32 {d18-d19}, [%4]! \n" // tmp[4][m] "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d4-d5}, [%5]! \n" // tmp[5][m] "vst1.f32 {d6-d7}, [%6]! \n" // tmp[6][m] "vst1.f32 {d12-d13}, [%7]! \n" // tmp[7][m] : "=r"(t0), // %0 "=r"(t1), // %1 "=r"(t2), // %2 "=r"(t3), // %3 "=r"(t4), // %4 "=r"(t5), // %5 "=r"(t6), // %6 "=r"(t7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(r3) // %11 : "0"(t0), "1"(t1), "2"(t2), "3"(t3), "4"(t4), "5"(t5), "6"(t6), "7"(t7), "8"(r0), "9"(r1), "10"(r2), "11"(r3), "w"(_coeff0), // %24 "w"(_coeff1), // %25 "r"(stepw) // %26 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); t0 = tmp[0]; t1 = tmp[1]; t2 = tmp[2]; t3 = tmp[3]; float* r0_tm0_0 = img0_tm.row(i * w_tm / 8 + j); float* r0_tm1_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 8); float* r0_tm2_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 16); float* r0_tm3_0 = img0_tm.row(i * w_tm / 8 + j + tiles * 24); float* r0_tm0_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 32); float* r0_tm1_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 40); float* r0_tm2_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 48); float* r0_tm3_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 56); int step = img0_tm.w * tiles * 4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%8] \n" "add %8, %8, #128 \n" 
"vld1.f32 {d20-d23}, [%9] \n" "add %9, %9, #128 \n" "vld1.f32 {d24-d27}, [%10] \n" "add %10, %10, #128 \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "add %11, %11, #128 \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n" // q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n" // q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4[0]}, [%0], %26 \n" "vst1.f32 {d4[1]}, [%1], %26 \n" "vmov q3, q7 \n" // use q7 "vst1.f32 {d5[0]}, [%2], %26 \n" "vst1.f32 {d5[1]}, [%3], %26 \n" "vadd.f32 q2, q13, q6 \n" // use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n" // use q7 "vadd.f32 q6, q12, q6 \n" // use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16[0]}, [%0], %26 \n" "vst1.f32 {d16[1]}, [%1], %26 \n" "vmla.f32 q4, q6, %e25[1] \n" "vst1.f32 {d17[0]}, [%2], %26 \n" "vst1.f32 {d17[1]}, [%3], %26 \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18[0]}, [%0], %26 \n" "vst1.f32 {d18[1]}, [%1], %26 \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%2], %26 \n" "vst1.f32 {d19[1]}, [%3], %26 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vst1.f32 {d16[0]}, [%0], %26 \n" "vst1.f32 {d16[1]}, [%1], %26 \n" "vst1.f32 {d17[0]}, [%2], %26 \n" "vst1.f32 {d17[1]}, [%3], %26 \n" "vadd.f32 q2, q4, q5 \n" "vst1.f32 {d18[0]}, [%0], %26 \n" "vst1.f32 {d18[1]}, [%1], %26 \n" "vst1.f32 {d19[0]}, [%2], %26 \n" "vst1.f32 {d19[1]}, [%3], %26 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d4[0]}, [%0], %26 \n" "vst1.f32 {d4[1]}, [%1], %26 \n" "vst1.f32 {d5[0]}, [%2], %26 \n" 
"vst1.f32 {d5[1]}, [%3], %26 \n" "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d6[0]}, [%0], %26 \n" "vst1.f32 {d6[1]}, [%1], %26 \n" "vst1.f32 {d7[0]}, [%2], %26 \n" "vst1.f32 {d7[1]}, [%3], %26 \n" "vst1.f32 {d12[0]}, [%0] \n" "vst1.f32 {d12[1]}, [%1] \n" "vst1.f32 {d13[0]}, [%2] \n" "vst1.f32 {d13[1]}, [%3] \n" // loop1 "vld1.f32 {d16-d19}, [%8] \n" "vld1.f32 {d20-d23}, [%9] \n" "vld1.f32 {d24-d27}, [%10] \n" "vtrn.32 q8, q10 \n" "vld1.f32 {d28-d31}, [%11] \n" "vtrn.32 q9, q11 \n" "vtrn.32 q12, q14 \n" "vtrn.32 q13, q15 \n" "vswp d17, d24 \n" "vswp d19, d26 \n" "vswp d21, d28 \n" // q8 = 00 q9 = 44 q10 = 11 q11 = 55 "vswp d23, d30 \n" // q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vsub.f32 q2, q8, q13 \n" "vsub.f32 q3, q9, q12 \n" "vadd.f32 q4, q12, q13 \n" "vadd.f32 q5, q10, q11 \n" "vmla.f32 q2, q3, %f25[1] \n" "vmul.f32 q7, q14, %e25[0] \n" // q7 = _r_3_x_c "vmul.f32 q6, q9, %f24[0] \n" // q6 = _r_4_x_c "vmls.f32 q4, q9, %f25[0] \n" "vmls.f32 q5, q14, %f25[0] \n" "vst1.f32 {d4[0]}, [%4], %26 \n" "vst1.f32 {d4[1]}, [%5], %26 \n" "vmov q3, q7 \n" // use q7 "vst1.f32 {d5[0]}, [%6], %26 \n" "vst1.f32 {d5[1]}, [%7], %26 \n" "vadd.f32 q2, q13, q6 \n" // use q6 "vmla.f32 q3, q10, %e24[1] \n" "vadd.f32 q8, q4, q5 \n" "vsub.f32 q9, q4, q5 \n" "vmov q5, q7 \n" // use q7 "vadd.f32 q6, q12, q6 \n" // use q6 "vmla.f32 q5, q10, %f24[1] \n" "vmov q4, q13 \n" "vmla.f32 q2, q12, %e24[0] \n" "vmla.f32 q3, q11, %f24[1] \n" "vst1.f32 {d16[0]}, [%4], %26 \n" "vst1.f32 {d16[1]}, [%5], %26 \n" "vmla.f32 q4, q6, %e25[1] \n" "vst1.f32 {d17[0]}, [%6], %26 \n" "vst1.f32 {d17[1]}, [%7], %26 \n" "vmla.f32 q5, q11, %e24[1] \n" "vst1.f32 {d18[0]}, [%4], %26 \n" "vst1.f32 {d18[1]}, [%5], %26 \n" "vadd.f32 q8, q2, q3 \n" "vst1.f32 {d19[0]}, [%6], %26 \n" "vst1.f32 {d19[1]}, [%7], %26 \n" "vsub.f32 q9, q2, q3 \n" "vsub.f32 q6, q15, q10 \n" "vsub.f32 q7, q14, q11 \n" "vst1.f32 {d16[0]}, [%4], %26 \n" "vst1.f32 {d16[1]}, [%5], %26 \n" "vst1.f32 {d17[0]}, [%6], %26 \n" "vst1.f32 {d17[1]}, [%7], %26 \n" 
"vadd.f32 q2, q4, q5 \n" "vst1.f32 {d18[0]}, [%4], %26 \n" "vst1.f32 {d18[1]}, [%5], %26 \n" "vst1.f32 {d19[0]}, [%6], %26 \n" "vst1.f32 {d19[1]}, [%7], %26 \n" "vsub.f32 q3, q4, q5 \n" "vst1.f32 {d4[0]}, [%4], %26 \n" "vst1.f32 {d4[1]}, [%5], %26 \n" "vst1.f32 {d5[0]}, [%6], %26 \n" "vst1.f32 {d5[1]}, [%7], %26 \n" "vmla.f32 q6, q7, %f25[1] \n" "vst1.f32 {d6[0]}, [%4], %26 \n" "vst1.f32 {d6[1]}, [%5], %26 \n" "vst1.f32 {d7[0]}, [%6], %26 \n" "vst1.f32 {d7[1]}, [%7], %26 \n" "vst1.f32 {d12[0]}, [%4] \n" "vst1.f32 {d12[1]}, [%5] \n" "vst1.f32 {d13[0]}, [%6] \n" "vst1.f32 {d13[1]}, [%7] \n" : "=r"(r0_tm0_0), // %0 "=r"(r0_tm1_0), // %1 "=r"(r0_tm2_0), // %2 "=r"(r0_tm3_0), // %3 "=r"(r0_tm0_4), // %4 "=r"(r0_tm1_4), // %5 "=r"(r0_tm2_4), // %6 "=r"(r0_tm3_4), // %7 "=r"(t0), // %8 "=r"(t1), // %9 "=r"(t2), // %10 "=r"(t3) // %11 : "0"(r0_tm0_0), "1"(r0_tm1_0), "2"(r0_tm2_0), "3"(r0_tm3_0), "4"(r0_tm0_4), "5"(r0_tm1_4), "6"(r0_tm2_4), "7"(r0_tm3_4), "8"(t0), "9"(t1), "10"(t2), "11"(t3), "w"(_coeff0), // %24 "w"(_coeff1), // %25 "r"(step) // %26 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else const float* r0 = img0.row(i * 6) + j * 6; for (int m = 0; m < 8; m++) { tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f; tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f; float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f); float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f); tmp[1][m] = tmp12a + tmp12b; tmp[2][m] = tmp12a - tmp12b; float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f); float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f); tmp[3][m] = tmp34a + tmp34b; tmp[4][m] = tmp34a - tmp34b; float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f); float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f); tmp[5][m] = tmp56a + tmp56b; tmp[6][m] = tmp56a - tmp56b; r0 += w; } float* r0_tm_0 = img0_tm.row(i * w_tm / 8 + j); float* r0_tm_1 = img0_tm.row(i * w_tm / 8 + j + tiles); float* r0_tm_2 
= img0_tm.row(i * w_tm / 8 + j + tiles * 2); float* r0_tm_3 = img0_tm.row(i * w_tm / 8 + j + tiles * 3); float* r0_tm_4 = img0_tm.row(i * w_tm / 8 + j + tiles * 4); float* r0_tm_5 = img0_tm.row(i * w_tm / 8 + j + tiles * 5); float* r0_tm_6 = img0_tm.row(i * w_tm / 8 + j + tiles * 6); float* r0_tm_7 = img0_tm.row(i * w_tm / 8 + j + tiles * 7); for (int m = 0; m < 8; m++) { const float* tmp0 = tmp[m]; r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25f; r0_tm_7[0] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f; float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f); float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]); r0_tm_1[0] = tmp12a + tmp12b; r0_tm_2[0] = tmp12a - tmp12b; float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f); float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f); r0_tm_3[0] = tmp34a + tmp34b; r0_tm_4[0] = tmp34a - tmp34b; float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f); float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f); r0_tm_5[0] = tmp56a + tmp56b; r0_tm_6[0] = tmp56a - tmp56b; r0_tm_0 += img0_tm.w * tiles * 8; r0_tm_1 += img0_tm.w * tiles * 8; r0_tm_2 += img0_tm.w * tiles * 8; r0_tm_3 += img0_tm.w * tiles * 8; r0_tm_4 += img0_tm.w * tiles * 8; r0_tm_5 += img0_tm.w * tiles * 8; r0_tm_6 += img0_tm.w * tiles * 8; r0_tm_7 += img0_tm.w * tiles * 8; } #endif // __ARM_NEON } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; // permute // bottom_blob_tm.create(1, 64 * tiles, inch); // Mat bottom_blob_tm2(inch, tiles, 64); Mat bottom_blob_tm2(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 4u, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { float* tm2p = tm2.row(i / 8); const float* r0 = bottom_blob_tm; r0 += r * 
tiles + i; for (int q = 0; q < inch; q++) { #if __ARM_NEON float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r0n = vld1q_f32(r0 + 4); vst1q_f32(tm2p, _r0); vst1q_f32(tm2p + 4, _r0n); #else tm2p[0] = r0[0]; tm2p[1] = r0[1]; tm2p[2] = r0[2]; tm2p[3] = r0[3]; tm2p[4] = r0[4]; tm2p[5] = r0[5]; tm2p[6] = r0[6]; tm2p[7] = r0[7]; #endif // __ARM_NEON r0 += bottom_blob_tm.cstep; tm2p += 8; } } for (; i + 3 < tiles; i += 4) { float* tm2p = tm2.row(i / 8 + (i % 8) / 4); const float* r0 = bottom_blob_tm; r0 += r * tiles + i; for (int q = 0; q < inch; q++) { #if __ARM_NEON float32x4_t _r0 = vld1q_f32(r0); vst1q_f32(tm2p, _r0); #else tm2p[0] = r0[0]; tm2p[1] = r0[1]; tm2p[2] = r0[2]; tm2p[3] = r0[3]; #endif // __ARM_NEON r0 += bottom_blob_tm.cstep; tm2p += 4; } } for (; i < tiles; i++) { float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + i % 4); const float* r0 = bottom_blob_tm; r0 += r * tiles + i; for (int q = 0; q < inch; q++) { tm2p[0] = r0[0]; r0 += bottom_blob_tm.cstep; tm2p += 1; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(1, 64 * tiles, outch); int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; const Mat kernel_tm0 = kernel_tm.channel(p / 8); Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p + 1); Mat out2_tm = top_blob_tm.channel(p + 2); Mat out3_tm = top_blob_tm.channel(p + 3); Mat out4_tm = top_blob_tm.channel(p + 4); Mat out5_tm = top_blob_tm.channel(p + 5); Mat out6_tm = top_blob_tm.channel(p + 6); Mat out7_tm = top_blob_tm.channel(p + 7); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; float* output4_tm = out4_tm; float* output5_tm = out5_tm; float* output6_tm = out6_tm; float* output7_tm = out7_tm; for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); 
// tile int i = 0; for (; i + 7 < tiles; i += 8) { const float* bb2p0 = bb2.row(i / 8); const float* ktm0 = kernel_tm0.row(r); asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" // inch loop "lsr w4, %w20, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v22.4s, v8.4s, v0.s[3] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, v1.s[2] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v30.4s, v8.4s, v1.s[3] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "fmla v16.4s, v10.4s, v2.s[0] \n" "fmla v17.4s, v11.4s, v2.s[0] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v11.4s, v2.s[1] \n" "fmla v20.4s, v10.4s, v2.s[2] \n" "fmla v21.4s, v11.4s, v2.s[2] \n" "fmla v22.4s, v10.4s, v2.s[3] \n" "fmla v23.4s, v11.4s, v2.s[3] \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n" "fmla v24.4s, v10.4s, v3.s[0] \n" "fmla v25.4s, v11.4s, v3.s[0] \n" "fmla 
v26.4s, v10.4s, v3.s[1] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v28.4s, v10.4s, v3.s[2] \n" "fmla v29.4s, v11.4s, v3.s[2] \n" "fmla v30.4s, v10.4s, v3.s[3] \n" "fmla v31.4s, v11.4s, v3.s[3] \n" "fmla v16.4s, v12.4s, v4.s[0] \n" "fmla v17.4s, v13.4s, v4.s[0] \n" "fmla v18.4s, v12.4s, v4.s[1] \n" "fmla v19.4s, v13.4s, v4.s[1] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla v21.4s, v13.4s, v4.s[2] \n" "fmla v22.4s, v12.4s, v4.s[3] \n" "fmla v23.4s, v13.4s, v4.s[3] \n" "fmla v24.4s, v12.4s, v5.s[0] \n" "fmla v25.4s, v13.4s, v5.s[0] \n" "fmla v26.4s, v12.4s, v5.s[1] \n" "fmla v27.4s, v13.4s, v5.s[1] \n" "fmla v28.4s, v12.4s, v5.s[2] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v30.4s, v12.4s, v5.s[3] \n" "fmla v31.4s, v13.4s, v5.s[3] \n" "fmla v16.4s, v14.4s, v6.s[0] \n" "fmla v17.4s, v15.4s, v6.s[0] \n" "fmla v18.4s, v14.4s, v6.s[1] \n" "fmla v19.4s, v15.4s, v6.s[1] \n" "fmla v20.4s, v14.4s, v6.s[2] \n" "fmla v21.4s, v15.4s, v6.s[2] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v23.4s, v15.4s, v6.s[3] \n" "subs w4, w4, #1 \n" "fmla v24.4s, v14.4s, v7.s[0] \n" "fmla v25.4s, v15.4s, v7.s[0] \n" "fmla v26.4s, v14.4s, v7.s[1] \n" "fmla v27.4s, v15.4s, v7.s[1] \n" "fmla v28.4s, v14.4s, v7.s[2] \n" "fmla v29.4s, v15.4s, v7.s[2] \n" "fmla v30.4s, v14.4s, v7.s[3] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w20, #3 \n" // w4 = remain = tiles & 3 "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #256] \n" "ld1 {v8.4s, v9.4s}, [%8], #32 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "fmla v18.4s, v8.4s, v0.s[1] \n" "fmla v19.4s, v9.4s, v0.s[1] \n" "fmla v20.4s, v8.4s, v0.s[2] \n" "fmla v21.4s, v9.4s, v0.s[2] \n" "fmla v22.4s, v8.4s, v0.s[3] \n" "fmla v23.4s, v9.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v24.4s, v8.4s, v1.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v8.4s, v1.s[1] \n" "fmla v27.4s, v9.4s, v1.s[1] \n" "fmla v28.4s, v8.4s, 
v1.s[2] \n" "fmla v29.4s, v9.4s, v1.s[2] \n" "fmla v30.4s, v8.4s, v1.s[3] \n" "fmla v31.4s, v9.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0], #32 \n" "st1 {v18.4s, v19.4s}, [%1], #32 \n" "st1 {v20.4s, v21.4s}, [%2], #32 \n" "st1 {v22.4s, v23.4s}, [%3], #32 \n" "st1 {v24.4s, v25.4s}, [%4], #32 \n" "st1 {v26.4s, v27.4s}, [%5], #32 \n" "st1 {v28.4s, v29.4s}, [%6], #32 \n" "st1 {v30.4s, v31.4s}, [%7], #32 \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(bb2p0), // %8 "=r"(ktm0) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(bb2p0), "9"(ktm0), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4); const float* ktm0 = kernel_tm0.row(r); asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" // inch loop "lsr w4, %w20, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%8, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, 
v8.4s, v1.s[3] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n" "fmla v16.4s, v9.4s, v2.s[0] \n" "fmla v17.4s, v9.4s, v2.s[1] \n" "fmla v18.4s, v9.4s, v2.s[2] \n" "fmla v19.4s, v9.4s, v2.s[3] \n" "fmla v20.4s, v9.4s, v3.s[0] \n" "fmla v21.4s, v9.4s, v3.s[1] \n" "fmla v22.4s, v9.4s, v3.s[2] \n" "fmla v23.4s, v9.4s, v3.s[3] \n" "fmla v16.4s, v10.4s, v4.s[0] \n" "fmla v17.4s, v10.4s, v4.s[1] \n" "fmla v18.4s, v10.4s, v4.s[2] \n" "fmla v19.4s, v10.4s, v4.s[3] \n" "fmla v20.4s, v10.4s, v5.s[0] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v5.s[2] \n" "fmla v23.4s, v10.4s, v5.s[3] \n" "subs w4, w4, #1 \n" "fmla v16.4s, v11.4s, v6.s[0] \n" "fmla v17.4s, v11.4s, v6.s[1] \n" "fmla v18.4s, v11.4s, v6.s[2] \n" "fmla v19.4s, v11.4s, v6.s[3] \n" "fmla v20.4s, v11.4s, v7.s[0] \n" "fmla v21.4s, v11.4s, v7.s[1] \n" "fmla v22.4s, v11.4s, v7.s[2] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w20, #3 \n" // w4 = remain = tiles & 3 "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v8.4s}, [%8], #16 \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v0.4s, v1.4s}, [%9], #32 \n" "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v0.s[1] \n" "fmla v18.4s, v8.4s, v0.s[2] \n" "fmla v19.4s, v8.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "fmla v20.4s, v8.4s, v1.s[0] \n" "fmla v21.4s, v8.4s, v1.s[1] \n" "fmla v22.4s, v8.4s, v1.s[2] \n" "fmla v23.4s, v8.4s, v1.s[3] \n" "bne 2b \n" "3: \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "st1 {v20.4s}, [%4], #16 \n" "st1 {v21.4s}, [%5], #16 \n" "st1 {v22.4s}, [%6], #16 \n" "st1 {v23.4s}, [%7], #16 \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(output4_tm), // %4 "=r"(output5_tm), // %5 "=r"(output6_tm), // %6 "=r"(output7_tm), // %7 "=r"(bb2p0), // %8 "=r"(ktm0) // %9 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), 
"3"(output3_tm), "4"(output4_tm), "5"(output5_tm), "6"(output6_tm), "7"(output7_tm), "8"(bb2p0), "9"(ktm0), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i < tiles; i++) { const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4); const float* ktm0 = kernel_tm0.row(r); float32x4_t _sum0123 = vdupq_n_f32(0.f); float32x4_t _sum4567 = vdupq_n_f32(0.f); int q = 0; for (; q + 3 < inch; q += 4) { // asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); bb2p0 += 4; // asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0 + 0); float32x4_t _ktm1 = vld1q_f32(ktm0 + 4); float32x4_t _ktm2 = vld1q_f32(ktm0 + 8); float32x4_t _ktm3 = vld1q_f32(ktm0 + 12); ktm0 += 16; _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm0, _bb2p0, 0); _sum4567 = vmlaq_laneq_f32(_sum4567, _ktm1, _bb2p0, 0); _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm2, _bb2p0, 1); _sum4567 = vmlaq_laneq_f32(_sum4567, _ktm3, _bb2p0, 1); // asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm4 = vld1q_f32(ktm0 + 0); float32x4_t _ktm5 = vld1q_f32(ktm0 + 4); float32x4_t _ktm6 = vld1q_f32(ktm0 + 8); float32x4_t _ktm7 = vld1q_f32(ktm0 + 12); ktm0 += 16; _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm4, _bb2p0, 2); _sum4567 = vmlaq_laneq_f32(_sum4567, _ktm5, _bb2p0, 2); _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm6, _bb2p0, 3); _sum4567 = vmlaq_laneq_f32(_sum4567, _ktm7, _bb2p0, 3); } for (; q < inch; q++) { float32x4_t _bb2p0 = vld1q_dup_f32(bb2p0); float32x4_t _ktm0123 = vld1q_f32(ktm0 + 0); float32x4_t _ktm4567 = vld1q_f32(ktm0 + 4); _sum0123 = vmlaq_f32(_sum0123, _bb2p0, _ktm0123); _sum4567 = vmlaq_f32(_sum4567, _bb2p0, _ktm4567); bb2p0 += 1; ktm0 += 8; } float sum0 = vgetq_lane_f32(_sum0123, 0); float sum1 = vgetq_lane_f32(_sum0123, 1); float sum2 = vgetq_lane_f32(_sum0123, 2); float sum3 = 
vgetq_lane_f32(_sum0123, 3); float sum4 = vgetq_lane_f32(_sum4567, 0); float sum5 = vgetq_lane_f32(_sum4567, 1); float sum6 = vgetq_lane_f32(_sum4567, 2); float sum7 = vgetq_lane_f32(_sum4567, 3); output0_tm[0] = sum0; output1_tm[0] = sum1; output2_tm[0] = sum2; output3_tm[0] = sum3; output4_tm[0] = sum4; output5_tm[0] = sum5; output6_tm[0] = sum6; output7_tm[0] = sum7; output0_tm += 1; output1_tm += 1; output2_tm += 1; output3_tm += 1; output4_tm += 1; output5_tm += 1; output6_tm += 1; output7_tm += 1; } } } #endif // __aarch64__ nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; #if __ARM_NEON && __aarch64__ const Mat kernel_tm0 = kernel_tm.channel(p / 8 + (p % 8) / 4); #else const Mat kernel_tm0 = kernel_tm.channel(p / 4); #endif Mat out0_tm = top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p + 1); Mat out2_tm = top_blob_tm.channel(p + 2); Mat out3_tm = top_blob_tm.channel(p + 3); float* output0_tm = out0_tm; float* output1_tm = out1_tm; float* output2_tm = out2_tm; float* output3_tm = out3_tm; for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { const float* bb2p0 = bb2.row(i / 8); const float* ktm0 = kernel_tm0.row(r); #if __ARM_NEON #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" "eor v12.16b, v12.16b, v12.16b \n" "eor v13.16b, v13.16b, v13.16b \n" "eor v14.16b, v14.16b, v14.16b \n" "eor v15.16b, v15.16b, v15.16b \n" // inch loop "lsr w4, %w12, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla 
v10.4s, v4.4s, v0.s[1] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v9.4s, v7.4s, v1.s[0] \n" "fmla v10.4s, v6.4s, v1.s[1] \n" "fmla v11.4s, v7.4s, v1.s[1] \n" "fmla v12.4s, v6.4s, v1.s[2] \n" "fmla v13.4s, v7.4s, v1.s[2] \n" "fmla v14.4s, v6.4s, v1.s[3] \n" "fmla v15.4s, v7.4s, v1.s[3] \n" "fmla v8.4s, v16.4s, v2.s[0] \n" "fmla v9.4s, v17.4s, v2.s[0] \n" "fmla v10.4s, v16.4s, v2.s[1] \n" "fmla v11.4s, v17.4s, v2.s[1] \n" "fmla v12.4s, v16.4s, v2.s[2] \n" "fmla v13.4s, v17.4s, v2.s[2] \n" "fmla v14.4s, v16.4s, v2.s[3] \n" "fmla v15.4s, v17.4s, v2.s[3] \n" "fmla v8.4s, v18.4s, v3.s[0] \n" "fmla v9.4s, v19.4s, v3.s[0] \n" "fmla v10.4s, v18.4s, v3.s[1] \n" "fmla v11.4s, v19.4s, v3.s[1] \n" "fmla v12.4s, v18.4s, v3.s[2] \n" "fmla v13.4s, v19.4s, v3.s[2] \n" "fmla v14.4s, v18.4s, v3.s[3] \n" "fmla v15.4s, v19.4s, v3.s[3] \n" "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w12, #3 \n" // w4 = remain = tiles & 3 "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v4.4s, v5.4s}, [%4], #32 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v10.4s, v4.4s, v0.s[1] \n" "fmla v11.4s, v5.4s, v0.s[1] \n" "fmla v12.4s, v4.4s, v0.s[2] \n" "fmla v13.4s, v5.4s, v0.s[2] \n" "fmla v14.4s, v4.4s, v0.s[3] \n" "fmla v15.4s, v5.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v8.4s, v9.4s}, [%0], #32 \n" "st1 {v10.4s, v11.4s}, [%1], #32 \n" "st1 {v12.4s, v13.4s}, [%2], #32 \n" "st1 {v14.4s, v15.4s}, [%3], #32 \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(bb2p0), // %4 "=r"(ktm0) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), 
"4"(bb2p0), "5"(ktm0), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); #else // __aarch64__ asm volatile( "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "veor q10, q10, q10 \n" "veor q11, q11, q11 \n" "veor q12, q12, q12 \n" "veor q13, q13, q13 \n" "veor q14, q14, q14 \n" "veor q15, q15, q15 \n" // inch loop "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! \n" // "vld1.f32 {d4-d7}, [%5 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q11, q5, d0[1] \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "vmla.f32 q15, q5, d1[1] \n" "vmla.f32 q8, q6, d2[0] \n" "vmla.f32 q9, q7, d2[0] \n" "vmla.f32 q10, q6, d2[1] \n" "vmla.f32 q11, q7, d2[1] \n" "vmla.f32 q12, q6, d3[0] \n" "vmla.f32 q13, q7, d3[0] \n" "vmla.f32 q14, q6, d3[1] \n" "vmla.f32 q15, q7, d3[1] \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "vmla.f32 q8, q4, d4[0] \n" "vmla.f32 q9, q5, d4[0] \n" "vmla.f32 q10, q4, d4[1] \n" "vmla.f32 q11, q5, d4[1] \n" "vmla.f32 q12, q4, d5[0] \n" "vmla.f32 q13, q5, d5[0] \n" "vmla.f32 q14, q4, d5[1] \n" "vmla.f32 q15, q5, d5[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d6[0] \n" "vmla.f32 q9, q7, d6[0] \n" "vmla.f32 q10, q6, d6[1] \n" "vmla.f32 q11, q7, d6[1] \n" "vmla.f32 q12, q6, d7[0] \n" "vmla.f32 q13, q7, d7[0] \n" "vmla.f32 q14, q6, d7[1] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = tiles & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #256] \n" "vld1.f32 {d8-d11}, [%4 :128]! 
\n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q10, q4, d0[1] \n" "vmla.f32 q11, q5, d0[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q12, q4, d1[0] \n" "vmla.f32 q13, q5, d1[0] \n" "vmla.f32 q14, q4, d1[1] \n" "vmla.f32 q15, q5, d1[1] \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0]! \n" "vst1.f32 {d20-d23}, [%1]! \n" "vst1.f32 {d24-d27}, [%2]! \n" "vst1.f32 {d28-d31}, [%3]! \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(bb2p0), // %4 "=r"(ktm0) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(bb2p0), "5"(ktm0), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else float sum0_0 = 0.f; float sum0_1 = 0.f; float sum0_2 = 0.f; float sum0_3 = 0.f; float sum0_4 = 0.f; float sum0_5 = 0.f; float sum0_6 = 0.f; float sum0_7 = 0.f; float sum1_0 = 0.f; float sum1_1 = 0.f; float sum1_2 = 0.f; float sum1_3 = 0.f; float sum1_4 = 0.f; float sum1_5 = 0.f; float sum1_6 = 0.f; float sum1_7 = 0.f; float sum2_0 = 0.f; float sum2_1 = 0.f; float sum2_2 = 0.f; float sum2_3 = 0.f; float sum2_4 = 0.f; float sum2_5 = 0.f; float sum2_6 = 0.f; float sum2_7 = 0.f; float sum3_0 = 0.f; float sum3_1 = 0.f; float sum3_2 = 0.f; float sum3_3 = 0.f; float sum3_4 = 0.f; float sum3_5 = 0.f; float sum3_6 = 0.f; float sum3_7 = 0.f; for (int q = 0; q < inch; q++) { sum0_0 += bb2p0[0] * ktm0[0]; sum0_1 += bb2p0[1] * ktm0[0]; sum0_2 += bb2p0[2] * ktm0[0]; sum0_3 += bb2p0[3] * ktm0[0]; sum0_4 += bb2p0[4] * ktm0[0]; sum0_5 += bb2p0[5] * ktm0[0]; sum0_6 += bb2p0[6] * ktm0[0]; sum0_7 += bb2p0[7] * ktm0[0]; sum1_0 += bb2p0[0] * ktm0[1]; sum1_1 += bb2p0[1] * ktm0[1]; sum1_2 += bb2p0[2] * ktm0[1]; sum1_3 += bb2p0[3] * ktm0[1]; sum1_4 += bb2p0[4] * ktm0[1]; sum1_5 += bb2p0[5] * ktm0[1]; sum1_6 += bb2p0[6] * ktm0[1]; sum1_7 += bb2p0[7] * 
ktm0[1]; sum2_0 += bb2p0[0] * ktm0[2]; sum2_1 += bb2p0[1] * ktm0[2]; sum2_2 += bb2p0[2] * ktm0[2]; sum2_3 += bb2p0[3] * ktm0[2]; sum2_4 += bb2p0[4] * ktm0[2]; sum2_5 += bb2p0[5] * ktm0[2]; sum2_6 += bb2p0[6] * ktm0[2]; sum2_7 += bb2p0[7] * ktm0[2]; sum3_0 += bb2p0[0] * ktm0[3]; sum3_1 += bb2p0[1] * ktm0[3]; sum3_2 += bb2p0[2] * ktm0[3]; sum3_3 += bb2p0[3] * ktm0[3]; sum3_4 += bb2p0[4] * ktm0[3]; sum3_5 += bb2p0[5] * ktm0[3]; sum3_6 += bb2p0[6] * ktm0[3]; sum3_7 += bb2p0[7] * ktm0[3]; bb2p0 += 8; ktm0 += 4; } output0_tm[0] = sum0_0; output0_tm[1] = sum0_1; output0_tm[2] = sum0_2; output0_tm[3] = sum0_3; output0_tm[4] = sum0_4; output0_tm[5] = sum0_5; output0_tm[6] = sum0_6; output0_tm[7] = sum0_7; output1_tm[0] = sum1_0; output1_tm[1] = sum1_1; output1_tm[2] = sum1_2; output1_tm[3] = sum1_3; output1_tm[4] = sum1_4; output1_tm[5] = sum1_5; output1_tm[6] = sum1_6; output1_tm[7] = sum1_7; output2_tm[0] = sum2_0; output2_tm[1] = sum2_1; output2_tm[2] = sum2_2; output2_tm[3] = sum2_3; output2_tm[4] = sum2_4; output2_tm[5] = sum2_5; output2_tm[6] = sum2_6; output2_tm[7] = sum2_7; output3_tm[0] = sum3_0; output3_tm[1] = sum3_1; output3_tm[2] = sum3_2; output3_tm[3] = sum3_3; output3_tm[4] = sum3_4; output3_tm[5] = sum3_5; output3_tm[6] = sum3_6; output3_tm[7] = sum3_7; output0_tm += 8; output1_tm += 8; output2_tm += 8; output3_tm += 8; #endif // __ARM_NEON } for (; i + 3 < tiles; i += 4) { const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4); const float* ktm0 = kernel_tm0.row(r); #if __ARM_NEON #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" "eor v10.16b, v10.16b, v10.16b \n" "eor v11.16b, v11.16b, v11.16b \n" // inch loop "lsr w4, %w12, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla 
v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v8.4s, v5.4s, v1.s[0] \n" "fmla v9.4s, v5.4s, v1.s[1] \n" "fmla v10.4s, v5.4s, v1.s[2] \n" "fmla v11.4s, v5.4s, v1.s[3] \n" "fmla v8.4s, v6.4s, v2.s[0] \n" "fmla v9.4s, v6.4s, v2.s[1] \n" "fmla v10.4s, v6.4s, v2.s[2] \n" "fmla v11.4s, v6.4s, v2.s[3] \n" "fmla v8.4s, v7.4s, v3.s[0] \n" "fmla v9.4s, v7.4s, v3.s[1] \n" "fmla v10.4s, v7.4s, v3.s[2] \n" "fmla v11.4s, v7.4s, v3.s[3] \n" "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w12, #3 \n" // w4 = remain = tiles & 3 "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4s}, [%4], #16 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v8.4s}, [%0], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v10.4s}, [%2], #16 \n" "st1 {v11.4s}, [%3], #16 \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(bb2p0), // %4 "=r"(ktm0) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(bb2p0), "5"(ktm0), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); #else // __aarch64__ asm volatile( "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "veor q10, q10, q10 \n" "veor q11, q11, q11 \n" // inch loop "lsr r4, %12, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "pld [%5, #512] \n" "vldm %5!, {d0-d7} \n" // "vld1.f32 {d0-d3}, [%5 :128]! \n" // "vld1.f32 {d4-d7}, [%5 :128]! 
\n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q8, q5, d2[0] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d3[0] \n" "vmla.f32 q11, q5, d3[1] \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q6, d4[0] \n" "vmla.f32 q9, q6, d4[1] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d5[1] \n" "vmla.f32 q8, q7, d6[0] \n" "vmla.f32 q9, q7, d6[1] \n" "vmla.f32 q10, q7, d7[0] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %12, #3 \n" // r4 = remain = tiles & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4 :128]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d17}, [%0]! \n" "vst1.f32 {d18-d19}, [%1]! \n" "vst1.f32 {d20-d21}, [%2]! \n" "vst1.f32 {d22-d23}, [%3]! \n" : "=r"(output0_tm), // %0 "=r"(output1_tm), // %1 "=r"(output2_tm), // %2 "=r"(output3_tm), // %3 "=r"(bb2p0), // %4 "=r"(ktm0) // %5 : "0"(output0_tm), "1"(output1_tm), "2"(output2_tm), "3"(output3_tm), "4"(bb2p0), "5"(ktm0), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif // __aarch64__ #else float sum0_0 = 0.f; float sum0_1 = 0.f; float sum0_2 = 0.f; float sum0_3 = 0.f; float sum1_0 = 0.f; float sum1_1 = 0.f; float sum1_2 = 0.f; float sum1_3 = 0.f; float sum2_0 = 0.f; float sum2_1 = 0.f; float sum2_2 = 0.f; float sum2_3 = 0.f; float sum3_0 = 0.f; float sum3_1 = 0.f; float sum3_2 = 0.f; float sum3_3 = 0.f; for (int q = 0; q < inch; q++) { sum0_0 += bb2p0[0] * ktm0[0]; sum0_1 += bb2p0[1] * ktm0[0]; sum0_2 += bb2p0[2] * ktm0[0]; sum0_3 += bb2p0[3] * ktm0[0]; sum1_0 += bb2p0[0] * ktm0[1]; sum1_1 += bb2p0[1] * ktm0[1]; sum1_2 += bb2p0[2] * ktm0[1]; sum1_3 += bb2p0[3] * ktm0[1]; sum2_0 += bb2p0[0] * ktm0[2]; sum2_1 += bb2p0[1] 
* ktm0[2]; sum2_2 += bb2p0[2] * ktm0[2]; sum2_3 += bb2p0[3] * ktm0[2]; sum3_0 += bb2p0[0] * ktm0[3]; sum3_1 += bb2p0[1] * ktm0[3]; sum3_2 += bb2p0[2] * ktm0[3]; sum3_3 += bb2p0[3] * ktm0[3]; bb2p0 += 4; ktm0 += 4; } output0_tm[0] = sum0_0; output0_tm[1] = sum0_1; output0_tm[2] = sum0_2; output0_tm[3] = sum0_3; output1_tm[0] = sum1_0; output1_tm[1] = sum1_1; output1_tm[2] = sum1_2; output1_tm[3] = sum1_3; output2_tm[0] = sum2_0; output2_tm[1] = sum2_1; output2_tm[2] = sum2_2; output2_tm[3] = sum2_3; output3_tm[0] = sum3_0; output3_tm[1] = sum3_1; output3_tm[2] = sum3_2; output3_tm[3] = sum3_3; output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; #endif // __ARM_NEON } for (; i < tiles; i++) { const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4); const float* ktm0 = kernel_tm0.row(r); #if __ARM_NEON float32x4_t _sum0123 = vdupq_n_f32(0.f); int q = 0; for (; q + 3 < inch; q += 4) { // asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); bb2p0 += 4; // asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :); float32x4_t _ktm0 = vld1q_f32(ktm0 + 0); float32x4_t _ktm1 = vld1q_f32(ktm0 + 4); float32x4_t _ktm2 = vld1q_f32(ktm0 + 8); float32x4_t _ktm3 = vld1q_f32(ktm0 + 12); ktm0 += 16; #if __aarch64__ _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm0, _bb2p0, 0); _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm1, _bb2p0, 1); _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm2, _bb2p0, 2); _sum0123 = vmlaq_laneq_f32(_sum0123, _ktm3, _bb2p0, 3); #else _sum0123 = vmlaq_lane_f32(_sum0123, _ktm0, vget_low_f32(_bb2p0), 0); _sum0123 = vmlaq_lane_f32(_sum0123, _ktm1, vget_low_f32(_bb2p0), 1); _sum0123 = vmlaq_lane_f32(_sum0123, _ktm2, vget_high_f32(_bb2p0), 0); _sum0123 = vmlaq_lane_f32(_sum0123, _ktm3, vget_high_f32(_bb2p0), 1); #endif // __aarch64__ } for (; q < inch; q++) { float32x4_t _bb2p0 = vld1q_dup_f32(bb2p0); float32x4_t _ktm0 = vld1q_f32(ktm0); _sum0123 = vmlaq_f32(_sum0123, _bb2p0, _ktm0); bb2p0 += 1; ktm0 += 4; } 
float sum0 = vgetq_lane_f32(_sum0123, 0); float sum1 = vgetq_lane_f32(_sum0123, 1); float sum2 = vgetq_lane_f32(_sum0123, 2); float sum3 = vgetq_lane_f32(_sum0123, 3); #else float sum0 = 0.f; float sum1 = 0.f; float sum2 = 0.f; float sum3 = 0.f; for (int q = 0; q < inch; q++) { sum0 += bb2p0[0] * ktm0[0]; sum1 += bb2p0[0] * ktm0[1]; sum2 += bb2p0[0] * ktm0[2]; sum3 += bb2p0[0] * ktm0[3]; bb2p0 += 1; ktm0 += 4; } #endif // __ARM_NEON output0_tm[0] = sum0; output1_tm[0] = sum1; output2_tm[0] = sum2; output3_tm[0] = sum3; output0_tm += 1; output1_tm += 1; output2_tm += 1; output3_tm += 1; } } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { #if __ARM_NEON && __aarch64__ const Mat kernel_tm0 = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4); #else const Mat kernel_tm0 = kernel_tm.channel(p / 4 + p % 4); #endif Mat out0_tm = top_blob_tm.channel(p); float* output0_tm = out0_tm; for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { const float* bb2p0 = bb2.row(i / 8); const float* ktm0 = kernel_tm0.row(r); #if __ARM_NEON #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" "eor v9.16b, v9.16b, v9.16b \n" // inch loop "lsr w4, %w6, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[0] \n" "fmla v8.4s, v6.4s, v0.s[1] \n" "fmla v9.4s, v7.4s, v0.s[1] \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "fmla v8.4s, v12.4s, v0.s[2] \n" "fmla v9.4s, v13.4s, v0.s[2] \n" "fmla v8.4s, v14.4s, v0.s[3] \n" "fmla v9.4s, v15.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w6, #3 \n" // w4 = remain = tiles & 3 "cmp w4, #0 \n" "beq 3f 
\n" "2: \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v4.4s, v5.4s}, [%1], #32 \n" "prfm pldl1keep, [%2, #32] \n" "ld1r {v0.4s}, [%2], #4 \n" "fmla v8.4s, v4.4s, v0.4s \n" "fmla v9.4s, v5.4s, v0.4s \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v8.4s, v9.4s}, [%0], #32 \n" : "=r"(output0_tm), // %0 "=r"(bb2p0), // %1 "=r"(ktm0) // %2 : "0"(output0_tm), "1"(bb2p0), "2"(ktm0), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15"); #else // __aarch64__ asm volatile( "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" // inch loop "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%1, #512] \n" "vldm %1!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%1 :128]! \n" // "vld1.f32 {d12-d15}, [%1 :128]! \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q5, d0[0] \n" "vmla.f32 q8, q6, d0[1] \n" "vmla.f32 q9, q7, d0[1] \n" "pld [%1, #512] \n" "vldm %1!, {d24-d31} \n" // "vld1.f32 {d24-d27}, [%1 :128]! \n" // "vld1.f32 {d28-d31}, [%1 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q12, d1[0] \n" "vmla.f32 q9, q13, d1[0] \n" "vmla.f32 q8, q14, d1[1] \n" "vmla.f32 q9, q15, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %6, #3 \n" // r4 = remain = tiles & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%1, #256] \n" "vld1.f32 {d8-d11}, [%1 :128]! \n" "pld [%2, #32] \n" "vld1.f32 {d0[],d1[]}, [%2]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "vmla.f32 q9, q5, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d19}, [%0]! 
\n" : "=r"(output0_tm), // %0 "=r"(bb2p0), // %1 "=r"(ktm0) // %2 : "0"(output0_tm), "1"(bb2p0), "2"(ktm0), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else float sum0 = 0.f; float sum1 = 0.f; float sum2 = 0.f; float sum3 = 0.f; float sum4 = 0.f; float sum5 = 0.f; float sum6 = 0.f; float sum7 = 0.f; for (int q = 0; q < inch; q++) { sum0 += bb2p0[0] * ktm0[0]; sum1 += bb2p0[1] * ktm0[0]; sum2 += bb2p0[2] * ktm0[0]; sum3 += bb2p0[3] * ktm0[0]; sum4 += bb2p0[4] * ktm0[0]; sum5 += bb2p0[5] * ktm0[0]; sum6 += bb2p0[6] * ktm0[0]; sum7 += bb2p0[7] * ktm0[0]; bb2p0 += 8; ktm0 += 1; } output0_tm[0] = sum0; output0_tm[1] = sum1; output0_tm[2] = sum2; output0_tm[3] = sum3; output0_tm[4] = sum4; output0_tm[5] = sum5; output0_tm[6] = sum6; output0_tm[7] = sum7; output0_tm += 8; #endif // __ARM_NEON } for (; i + 3 < tiles; i += 4) { const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4); const float* ktm0 = kernel_tm0.row(r); #if __ARM_NEON #if __aarch64__ asm volatile( "eor v8.16b, v8.16b, v8.16b \n" // inch loop "lsr w4, %w6, #2 \n" // w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.4s}, [%5], #16 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v8.4s, v5.4s, v0.s[1] \n" "fmla v8.4s, v6.4s, v0.s[2] \n" "fmla v8.4s, v7.4s, v0.s[3] \n" "subs w4, w4, #1 \n" "bne 0b \n" "1: \n" // remain loop "and w4, %w6, #3 \n" // w4 = remain = tiles & 3 "cmp w4, #0 \n" "beq 3f \n" "2: \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v4.4s}, [%4], #16 \n" "prfm pldl1keep, [%5, #32] \n" "ld1r {v0.4s}, [%5], #4 \n" "fmla v8.4s, v4.4s, v0.4s \n" "subs w4, w4, #1 \n" "bne 2b \n" "3: \n" "st1 {v8.4s}, [%0], #16 \n" : "=r"(output0_tm), // %0 "=r"(bb2p0), // %1 "=r"(ktm0) // %2 : "0"(output0_tm), "1"(bb2p0), "2"(ktm0), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8"); #else // 
__aarch64__ asm volatile( "veor q8, q8, q8 \n" // inch loop "lsr r4, %6, #2 \n" // r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n" "pld [%4, #512] \n" "vldm %4!, {d8-d15} \n" // "vld1.f32 {d8-d11}, [%4 :128]! \n" // "vld1.f32 {d12-d15}, [%4 :128]! \n" "pld [%5, #128] \n" "vld1.f32 {d0-d1}, [%5 :128]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "1: \n" // remain loop "and r4, %6, #3 \n" // r4 = remain = tiles & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n" "pld [%4, #128] \n" "vld1.f32 {d8-d9}, [%4]! \n" "pld [%5, #32] \n" "vld1.f32 {d0[],d1[]}, [%5]! \n" "subs r4, r4, #1 \n" "vmla.f32 q8, q4, q0 \n" "bne 2b \n" "3: \n" "vst1.f32 {d16-d17}, [%0]! \n" : "=r"(output0_tm), // %0 "=r"(bb2p0), // %1 "=r"(ktm0) // %2 : "0"(output0_tm), "1"(bb2p0), "2"(ktm0), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8"); #endif // __aarch64__ #else float sum0 = 0.f; float sum1 = 0.f; float sum2 = 0.f; float sum3 = 0.f; for (int q = 0; q < inch; q++) { sum0 += bb2p0[0] * ktm0[0]; sum1 += bb2p0[1] * ktm0[0]; sum2 += bb2p0[2] * ktm0[0]; sum3 += bb2p0[3] * ktm0[0]; bb2p0 += 4; ktm0 += 1; } output0_tm[0] = sum0; output0_tm[1] = sum1; output0_tm[2] = sum2; output0_tm[3] = sum3; output0_tm += 4; #endif // __ARM_NEON } for (; i < tiles; i++) { const float* bb2p0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4); const float* ktm0 = kernel_tm0.row(r); int q = 0; #if __ARM_NEON float32x4_t _sum0 = vdupq_n_f32(0.f); for (; q + 3 < inch; q += 4) { // asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :); float32x4_t _bb2p0 = vld1q_f32(bb2p0); bb2p0 += 4; float32x4_t _ktm0 = vld1q_f32(ktm0); ktm0 += 4; _sum0 = vmlaq_f32(_sum0, _bb2p0, _ktm0); } #if __aarch64__ float sum0 = vaddvq_f32(_sum0); #else float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0)); float sum0 = vget_lane_f32(vpadd_f32(_ss0, _ss0), 0); #endif // __aarch64__ #else float sum0 = 0.f; 
#endif for (; q < inch; q++) { sum0 += bb2p0[0] * ktm0[0]; bb2p0 += 1; ktm0 += 1; } output0_tm[0] = sum0; output0_tm += 1; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) #if __ARM_NEON const float coeff[4] = {4.f, 8.f, 16.f, 32.f}; float32x4_t _coeff = vld1q_f32(coeff); #endif // __ARM_NEON int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; #if __ARM_NEON float32x2_t _bias0 = vdup_n_f32(bias0); #endif // __ARM_NEON float tmp[6][8]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { #if __ARM_NEON #if __aarch64__ const float* output0_tm0 = out0_tm.row(i * w_tm / 8 + j); const float* output0_tm1 = out0_tm.row(i * w_tm / 8 + j + tiles * 8); const float* output0_tm2 = out0_tm.row(i * w_tm / 8 + j + tiles * 16); const float* output0_tm3 = out0_tm.row(i * w_tm / 8 + j + tiles * 24); for (int m = 0; m + 3 < 8; m += 4) { float32x4_t _output0_tm_00 = {}; float32x4_t _output0_tm_11 = {}; float32x4_t _output0_tm_22 = {}; float32x4_t _output0_tm_33 = {}; float32x4_t _output0_tm_44 = {}; float32x4_t _output0_tm_55 = {}; float32x4_t _output0_tm_66 = {}; float32x4_t _output0_tm_77 = {}; _output0_tm_00 = vsetq_lane_f32(output0_tm0[0], _output0_tm_00, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_00 = vsetq_lane_f32(output0_tm1[0], _output0_tm_00, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_00 = vsetq_lane_f32(output0_tm2[0], _output0_tm_00, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_00 = vsetq_lane_f32(output0_tm3[0], _output0_tm_00, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_11 = vsetq_lane_f32(output0_tm0[0], _output0_tm_11, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_11 = vsetq_lane_f32(output0_tm1[0], _output0_tm_11, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_11 = vsetq_lane_f32(output0_tm2[0], _output0_tm_11, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_11 = vsetq_lane_f32(output0_tm3[0], _output0_tm_11, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_22 = vsetq_lane_f32(output0_tm0[0], _output0_tm_22, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_22 = vsetq_lane_f32(output0_tm1[0], _output0_tm_22, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_22 = vsetq_lane_f32(output0_tm2[0], _output0_tm_22, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_22 = vsetq_lane_f32(output0_tm3[0], _output0_tm_22, 3); output0_tm3 += 
out0_tm.w * tiles; _output0_tm_33 = vsetq_lane_f32(output0_tm0[0], _output0_tm_33, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_33 = vsetq_lane_f32(output0_tm1[0], _output0_tm_33, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_33 = vsetq_lane_f32(output0_tm2[0], _output0_tm_33, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_33 = vsetq_lane_f32(output0_tm3[0], _output0_tm_33, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_44 = vsetq_lane_f32(output0_tm0[0], _output0_tm_44, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_44 = vsetq_lane_f32(output0_tm1[0], _output0_tm_44, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_44 = vsetq_lane_f32(output0_tm2[0], _output0_tm_44, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_44 = vsetq_lane_f32(output0_tm3[0], _output0_tm_44, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_55 = vsetq_lane_f32(output0_tm0[0], _output0_tm_55, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_55 = vsetq_lane_f32(output0_tm1[0], _output0_tm_55, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_55 = vsetq_lane_f32(output0_tm2[0], _output0_tm_55, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_55 = vsetq_lane_f32(output0_tm3[0], _output0_tm_55, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_66 = vsetq_lane_f32(output0_tm0[0], _output0_tm_66, 0); output0_tm0 += out0_tm.w * tiles; _output0_tm_66 = vsetq_lane_f32(output0_tm1[0], _output0_tm_66, 1); output0_tm1 += out0_tm.w * tiles; _output0_tm_66 = vsetq_lane_f32(output0_tm2[0], _output0_tm_66, 2); output0_tm2 += out0_tm.w * tiles; _output0_tm_66 = vsetq_lane_f32(output0_tm3[0], _output0_tm_66, 3); output0_tm3 += out0_tm.w * tiles; _output0_tm_77 = vsetq_lane_f32(output0_tm0[0], _output0_tm_77, 0); _output0_tm_77 = vsetq_lane_f32(output0_tm1[0], _output0_tm_77, 1); _output0_tm_77 = vsetq_lane_f32(output0_tm2[0], _output0_tm_77, 2); _output0_tm_77 = vsetq_lane_f32(output0_tm3[0], _output0_tm_77, 3); float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22); 
float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22); float32x4_t _tmp024b = vaddq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44); float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66); float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a); _tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1); _tmp0 = vaddq_f32(_tmp0, _tmp024b); float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0); _tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1); float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _tmp4 = vaddq_f32(_tmp4, _tmp024c); _tmp4 = vaddq_f32(_tmp4, _tmp024c); vst1q_f32(&tmp[0][m], _tmp0); vst1q_f32(&tmp[2][m], _tmp2); vst1q_f32(&tmp[4][m], _tmp4); float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _tmp1 = vaddq_f32(_tmp1, _tmp135b); _tmp1 = vaddq_f32(_tmp1, _tmp135b); float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _tmp3 = vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0); float32x4_t _tmp5 = vaddq_f32(_output0_tm_77, _tmp135a); _tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 1); _tmp5 = vaddq_f32(_tmp5, _tmp135c); vst1q_f32(&tmp[1][m], _tmp1); vst1q_f32(&tmp[3][m], _tmp3); vst1q_f32(&tmp[5][m], _tmp5); output0_tm0 += out0_tm.w * tiles * 25; output0_tm1 += out0_tm.w * tiles * 25; output0_tm2 += out0_tm.w * tiles * 25; output0_tm3 += out0_tm.w * tiles * 25; } const float* t0 = tmp[0]; const float* t1 = tmp[1]; float* output0 = out0.row(i * 6) + j * 6; float* output1 = output0 + outw; for (int m = 0; m + 1 < 6; m += 2) { float32x4_t _t0_0123 = vld1q_f32(t0); float32x4_t _t0_4567 = vld1q_f32(t0 + 4); float32x4_t _t1_0123 = vld1q_f32(t1); float32x4_t _t1_4567 = vld1q_f32(t1 + 4); float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123); float32x4x2_t 
_t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567); float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]); float32x2_t _t_11 = vget_low_f32(_t01_00221133.val[1]); float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]); float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]); float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]); float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]); float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]); float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]); float32x2_t _tmp024a = vadd_f32(_t_11, _t_22); float32x2_t _tmp135a = vsub_f32(_t_11, _t_22); float32x2_t _tmp024b = vadd_f32(_t_33, _t_44); float32x2_t _tmp135b = vsub_f32(_t_33, _t_44); float32x2_t _tmp024c = vadd_f32(_t_55, _t_66); float32x2_t _tmp135c = vsub_f32(_t_55, _t_66); float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a); _output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1); _output_0 = vadd_f32(_output_0, _tmp024b); _output_0 = vadd_f32(_output_0, _bias0); float32x2_t _output_2 = vmla_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0); _output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1); _output_2 = vadd_f32(_output_2, _bias0); float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0); _output_4 = vadd_f32(_output_4, _tmp024c); _output_4 = vadd_f32(_output_4, _tmp024c); _output_4 = vadd_f32(_output_4, _bias0); output0[0] = vget_lane_f32(_output_0, 0); output1[0] = vget_lane_f32(_output_0, 1); output0[2] = vget_lane_f32(_output_2, 0); output1[2] = vget_lane_f32(_output_2, 1); output0[4] = vget_lane_f32(_output_4, 0); output1[4] = vget_lane_f32(_output_4, 1); float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _tmp135b); _output_1 = vadd_f32(_output_1, _bias0); float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1); _output_3 = vmla_lane_f32(_output_3, _tmp135c, 
vget_low_f32(_coeff), 0); _output_3 = vadd_f32(_output_3, _bias0); float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a); _output_5 = vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1); _output_5 = vadd_f32(_output_5, _tmp135c); _output_5 = vadd_f32(_output_5, _bias0); output0[1] = vget_lane_f32(_output_1, 0); output1[1] = vget_lane_f32(_output_1, 1); output0[3] = vget_lane_f32(_output_3, 0); output1[3] = vget_lane_f32(_output_3, 1); output0[5] = vget_lane_f32(_output_5, 0); output1[5] = vget_lane_f32(_output_5, 1); t0 += 8 * 2; t1 += 8 * 2; output0 += outw * 2; output1 += outw * 2; } #else // __aarch64__ const float* output0_tm0_0 = out0_tm.row(i * w_tm / 8 + j); const float* output0_tm1_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 8); const float* output0_tm2_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 16); const float* output0_tm3_0 = out0_tm.row(i * w_tm / 8 + j + tiles * 24); const float* output0_tm0_4 = out0_tm.row(i * w_tm / 8 + j + tiles * 32); const float* output0_tm1_4 = out0_tm.row(i * w_tm / 8 + j + tiles * 40); const float* output0_tm2_4 = out0_tm.row(i * w_tm / 8 + j + tiles * 48); const float* output0_tm3_4 = out0_tm.row(i * w_tm / 8 + j + tiles * 56); float* t0 = tmp[0]; float* t1 = tmp[1]; // int step = out0_tm.w * tiles * 2*4 *4; int step = out0_tm.w * tiles * 4; asm volatile( // loop0 // "vld1.f32 {d16-d17}, [%2], %21 \n" // "vld1.f32 {d18-d19}, [%3], %21 \n" // "vld1.f32 {d20-d21}, [%4], %21 \n" // "vld1.f32 {d22-d23}, [%5], %21 \n" // "vld1.f32 {d24-d25}, [%6], %21 \n" // "vld1.f32 {d26-d27}, [%7], %21 \n" // "vld1.f32 {d28-d29}, [%8], %21 \n" // "vld1.f32 {d30-d31}, [%9], %21 \n" // "vtrn.32 q8, q10 \n" // "vtrn.32 q9, q11 \n" // "vtrn.32 q12, q14 \n" // "vtrn.32 q13, q15 \n" // "vswp d17, d24 \n" // "vswp d19, d26 \n" // "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 // "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vld1.f32 {d16[0]}, [%2], %21 \n" "vld1.f32 {d16[1]}, [%3], %21 \n" "vld1.f32 {d17[0]}, [%4], %21 \n" 
"vld1.f32 {d17[1]}, [%5], %21 \n" "vld1.f32 {d20[0]}, [%2], %21 \n" "vld1.f32 {d20[1]}, [%3], %21 \n" "vld1.f32 {d21[0]}, [%4], %21 \n" "vld1.f32 {d21[1]}, [%5], %21 \n" "vld1.f32 {d24[0]}, [%2], %21 \n" "vld1.f32 {d24[1]}, [%3], %21 \n" "vld1.f32 {d25[0]}, [%4], %21 \n" "vld1.f32 {d25[1]}, [%5], %21 \n" "vadd.f32 q2, q10, q12 \n" "vsub.f32 q3, q10, q12 \n" "vld1.f32 {d28[0]}, [%2], %21 \n" "vld1.f32 {d28[1]}, [%3], %21 \n" "vld1.f32 {d29[0]}, [%4], %21 \n" "vld1.f32 {d29[1]}, [%5], %21 \n" "vld1.f32 {d18[0]}, [%2], %21 \n" "vld1.f32 {d18[1]}, [%3], %21 \n" "vld1.f32 {d19[0]}, [%4], %21 \n" "vld1.f32 {d19[1]}, [%5], %21 \n" "vadd.f32 q4, q14, q9 \n" "vsub.f32 q5, q14, q9 \n" "vld1.f32 {d22[0]}, [%2], %21 \n" "vld1.f32 {d22[1]}, [%3], %21 \n" "vld1.f32 {d23[0]}, [%4], %21 \n" "vld1.f32 {d23[1]}, [%5], %21 \n" "vld1.f32 {d26[0]}, [%2], %21 \n" "vld1.f32 {d26[1]}, [%3], %21 \n" "vld1.f32 {d27[0]}, [%4], %21 \n" "vld1.f32 {d27[1]}, [%5], %21 \n" "vadd.f32 q6, q11, q13 \n" "vsub.f32 q7, q11, q13 \n" // spare q9 q10 q11 q12 q13 q14 "vld1.f32 {d30[0]}, [%2] \n" "vld1.f32 {d30[1]}, [%3] \n" "vld1.f32 {d31[0]}, [%4] \n" "vld1.f32 {d31[1]}, [%5] \n" "vmov q9, q3 \n" "vadd.f32 q8, q8, q2 \n" "vmla.f32 q9, q7, %f20[0] \n" "vmov q12, q2 \n" "vmov q10, q2 \n" "vmov q11, q3 \n" "vmla.f32 q12, q4, %f20[0] \n" "vadd.f32 q15, q15, q3 \n" "vmla.f32 q8, q6, %f20[1] \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q4, %e20[0] \n" "vmla.f32 q11, q5, %e20[1] \n" "vadd.f32 q12, q12, q6 \n" "vmla.f32 q15, q5, %f20[1] \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q6, %e20[1] \n" "vmla.f32 q11, q7, %e20[0] \n" "vadd.f32 q12, q12, q6 \n" "vadd.f32 q15, q15, q7 \n" "vst1.f32 {d16-d17}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d18-d19}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d20-d21}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d22-d23}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d24-d25}, [%0] \n" "sub %0, %0, #112 \n" "vst1.f32 {d30-d31}, [%1] \n" "sub %1, %1, #112 \n" // loop1 
// "vld1.f32 {d16-d17}, [%2] \n" // "vld1.f32 {d18-d19}, [%3] \n" // "vld1.f32 {d20-d21}, [%4] \n" // "vld1.f32 {d22-d23}, [%5] \n" // "vld1.f32 {d24-d25}, [%6] \n" // "vld1.f32 {d26-d27}, [%7] \n" // "vld1.f32 {d28-d29}, [%8] \n" // "vld1.f32 {d30-d31}, [%9] \n" // "vtrn.32 q8, q10 \n" // "vtrn.32 q9, q11 \n" // "vtrn.32 q12, q14 \n" // "vtrn.32 q13, q15 \n" // "vswp d17, d24 \n" // "vswp d19, d26 \n" // "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55 // "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77 "vld1.f32 {d16[0]}, [%6], %21 \n" "vld1.f32 {d16[1]}, [%7], %21 \n" "vld1.f32 {d17[0]}, [%8], %21 \n" "vld1.f32 {d17[1]}, [%9], %21 \n" "vld1.f32 {d20[0]}, [%6], %21 \n" "vld1.f32 {d20[1]}, [%7], %21 \n" "vld1.f32 {d21[0]}, [%8], %21 \n" "vld1.f32 {d21[1]}, [%9], %21 \n" "vld1.f32 {d24[0]}, [%6], %21 \n" "vld1.f32 {d24[1]}, [%7], %21 \n" "vld1.f32 {d25[0]}, [%8], %21 \n" "vld1.f32 {d25[1]}, [%9], %21 \n" "vadd.f32 q2, q10, q12 \n" "vsub.f32 q3, q10, q12 \n" "vld1.f32 {d28[0]}, [%6], %21 \n" "vld1.f32 {d28[1]}, [%7], %21 \n" "vld1.f32 {d29[0]}, [%8], %21 \n" "vld1.f32 {d29[1]}, [%9], %21 \n" "vld1.f32 {d18[0]}, [%6], %21 \n" "vld1.f32 {d18[1]}, [%7], %21 \n" "vld1.f32 {d19[0]}, [%8], %21 \n" "vld1.f32 {d19[1]}, [%9], %21 \n" "vadd.f32 q4, q14, q9 \n" "vsub.f32 q5, q14, q9 \n" "vld1.f32 {d22[0]}, [%6], %21 \n" "vld1.f32 {d22[1]}, [%7], %21 \n" "vld1.f32 {d23[0]}, [%8], %21 \n" "vld1.f32 {d23[1]}, [%9], %21 \n" "vld1.f32 {d26[0]}, [%6], %21 \n" "vld1.f32 {d26[1]}, [%7], %21 \n" "vld1.f32 {d27[0]}, [%8], %21 \n" "vld1.f32 {d27[1]}, [%9], %21 \n" "vadd.f32 q6, q11, q13 \n" "vsub.f32 q7, q11, q13 \n" // spare q9 q10 q11 q12 q13 q14 "vld1.f32 {d30[0]}, [%6] \n" "vld1.f32 {d30[1]}, [%7] \n" "vld1.f32 {d31[0]}, [%8] \n" "vld1.f32 {d31[1]}, [%9] \n" "vmov q9, q3 \n" "vadd.f32 q8, q8, q2 \n" "vmla.f32 q9, q7, %f20[0] \n" "vmov q12, q2 \n" "vmov q10, q2 \n" "vmov q11, q3 \n" "vmla.f32 q12, q4, %f20[0] \n" "vadd.f32 q15, q15, q3 \n" "vmla.f32 q8, q6, %f20[1] \n" 
"vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q4, %e20[0] \n" "vmla.f32 q11, q5, %e20[1] \n" "vadd.f32 q12, q12, q6 \n" "vmla.f32 q15, q5, %f20[1] \n" "vadd.f32 q8, q8, q4 \n" "vadd.f32 q9, q9, q5 \n" "vmla.f32 q10, q6, %e20[1] \n" "vmla.f32 q11, q7, %e20[0] \n" "vadd.f32 q12, q12, q6 \n" "vadd.f32 q15, q15, q7 \n" "vst1.f32 {d16-d17}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d18-d19}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d20-d21}, [%0] \n" "add %0, %0, #64 \n" "vst1.f32 {d22-d23}, [%1] \n" "add %1, %1, #64 \n" "vst1.f32 {d24-d25}, [%0] \n" "vst1.f32 {d30-d31}, [%1] \n" : "=r"(t0), // %0 "=r"(t1), // %1 "=r"(output0_tm0_0), // %2 "=r"(output0_tm1_0), // %3 "=r"(output0_tm2_0), // %4 "=r"(output0_tm3_0), // %5 "=r"(output0_tm0_4), // %6 "=r"(output0_tm1_4), // %7 "=r"(output0_tm2_4), // %8 "=r"(output0_tm3_4) // %9 : "0"(t0), "1"(t1), "2"(output0_tm0_0), "3"(output0_tm1_0), "4"(output0_tm2_0), "5"(output0_tm3_0), "6"(output0_tm0_4), "7"(output0_tm1_4), "8"(output0_tm2_4), "9"(output0_tm3_4), "w"(_coeff), // %20 "r"(step) // %21 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); t0 = tmp[0]; t1 = tmp[1]; float* output0 = out0.row(i * 6) + j * 6; float* output1 = output0 + outw; int stepw = outw * 2 * 4; asm volatile( // loop0 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" 
"vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n" // _bias0 "vadd.f32 d20, d20, %P9 \n" // _bias0 "vadd.f32 d17, d17, %P9 \n" // _bias0 "vadd.f32 d21, d21, %P9 \n" // _bias0 "vadd.f32 d18, d18, %P9 \n" // _bias0 "vadd.f32 d22, d22, %P9 \n" // _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" // loop1 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7 "vadd.f32 d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n" // _bias0 "vadd.f32 d20, d20, %P9 \n" // _bias0 "vadd.f32 d17, d17, %P9 \n" // _bias0 "vadd.f32 d21, d21, %P9 \n" // _bias0 "vadd.f32 d18, d18, %P9 \n" // _bias0 "vadd.f32 d22, d22, %P9 \n" // _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" // loop2 "vld1.f32 {d16-d19}, [%2] \n" "vld1.f32 {d20-d23}, [%3] \n" "add %2, %2, #64 \n" "add %3, %3, #64 \n" "vtrn.32 q8, q10 \n" // q8 = 0 2 q10 = 1 3 "vtrn.32 q9, q11 \n" // q9 = 4 6 q11 = 5 7 "vadd.f32 
d4, d20, d17 \n" "vsub.f32 d5, d20, d17 \n" "vadd.f32 d6, d21, d18 \n" "vsub.f32 d7, d21, d18 \n" "vadd.f32 d8, d22, d19 \n" "vsub.f32 d9, d22, d19 \n" // spare d17 ~ d22 "vmov d20, d5 \n" "vmov d18, d4 \n" "vadd.f32 d16, d16, d4 \n" "vmla.f32 d20, d9, %f8[0] \n" "vmov d17, d4 \n" "vmov d21, d5 \n" "vmla.f32 d18, d6, %f8[0] \n" "vadd.f32 d22, d23, d5 \n" "vmla.f32 d16, d8, %f8[1] \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d6, %e8[0] \n" "vmla.f32 d21, d7, %e8[1] \n" "vadd.f32 d18, d18, d8 \n" "vmla.f32 d22, d7, %f8[1] \n" "vadd.f32 d16, d16, d6 \n" "vadd.f32 d20, d20, d7 \n" "vmla.f32 d17, d8, %e8[1] \n" "vmla.f32 d21, d9, %e8[0] \n" "vadd.f32 d18, d18, d8 \n" "vadd.f32 d22, d22, d9 \n" "vadd.f32 d16, d16, %P9 \n" // _bias0 "vadd.f32 d20, d20, %P9 \n" // _bias0 "vadd.f32 d17, d17, %P9 \n" // _bias0 "vadd.f32 d21, d21, %P9 \n" // _bias0 "vadd.f32 d18, d18, %P9 \n" // _bias0 "vadd.f32 d22, d22, %P9 \n" // _bias0 "vtrn.f32 q8, q10 \n" "vtrn.f32 d18, d22 \n" "vst1.f32 {d16-d18}, [%0], %10 \n" "vst1.f32 {d20-d22}, [%1], %10 \n" : "=r"(output0), // %0 "=r"(output1), // %1 "=r"(t0), // %2 "=r"(t1) // %3 : "0"(output0), "1"(output1), "2"(t0), "3"(t1), "w"(_coeff), // %8 "w"(_bias0), // %9 "r"(stepw) // %10 : "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else const float* output0_tm_0 = out0_tm.row(i * w_tm / 8 + j); const float* output0_tm_1 = out0_tm.row(i * w_tm / 8 + j + tiles); const float* output0_tm_2 = out0_tm.row(i * w_tm / 8 + j + tiles * 2); const float* output0_tm_3 = out0_tm.row(i * w_tm / 8 + j + tiles * 3); const float* output0_tm_4 = out0_tm.row(i * w_tm / 8 + j + tiles * 4); const float* output0_tm_5 = out0_tm.row(i * w_tm / 8 + j + tiles * 5); const float* output0_tm_6 = out0_tm.row(i * w_tm / 8 + j + tiles * 6); const float* output0_tm_7 = out0_tm.row(i * w_tm / 8 + j + tiles * 7); for (int m = 0; m < 8; m++) { float tmp024a = output0_tm_1[0] + output0_tm_2[0]; float tmp135a 
= output0_tm_1[0] - output0_tm_2[0];
float tmp024b = output0_tm_3[0] + output0_tm_4[0];
float tmp135b = output0_tm_3[0] - output0_tm_4[0];
float tmp024c = output0_tm_5[0] + output0_tm_6[0];
float tmp135c = output0_tm_5[0] - output0_tm_6[0];
// presumably the vertical pass of the inverse Winograd output transform:
// fold 8 transform rows into 6 output rows (scalar fallback path) — TODO confirm against the NEON path above
tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c;
// advance all 8 row pointers to the next transform row of this tile
output0_tm_0 += out0_tm.w * tiles * 8;
output0_tm_1 += out0_tm.w * tiles * 8;
output0_tm_2 += out0_tm.w * tiles * 8;
output0_tm_3 += out0_tm.w * tiles * 8;
output0_tm_4 += out0_tm.w * tiles * 8;
output0_tm_5 += out0_tm.w * tiles * 8;
output0_tm_6 += out0_tm.w * tiles * 8;
output0_tm_7 += out0_tm.w * tiles * 8;
}

float* output0 = out0.row(i * 6) + j * 6;

// horizontal pass: fold the 8 columns of each tmp row into 6 output
// pixels and add the channel bias
for (int m = 0; m < 6; m++)
{
    const float* tmp0 = tmp[m];

    float tmp024a = tmp0[1] + tmp0[2];
    float tmp135a = tmp0[1] - tmp0[2];
    float tmp024b = tmp0[3] + tmp0[4];
    float tmp135b = tmp0[3] - tmp0[4];
    float tmp024c = tmp0[5] + tmp0[6];
    float tmp135c = tmp0[5] - tmp0[6];

    output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
    output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
    output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;

    output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
    output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
    output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;

    output0 += outw;
}
#endif // __ARM_NEON
}
}
}
}
// END transform output

// cut result pad
if (top_blob_bordered.w != top_blob.w || top_blob_bordered.h != top_blob.h)
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

// conv3x3s2_neon: direct 3x3 convolution with stride 2 (unpacked kernel
// layout). Output channels are processed in pairs (p, p+1) so each input
// row is loaded once and multiplied against two kernels; any leftover
// (odd) output channel is handled by the second loop below. The inner
// loop computes 4 output pixels per NEON-assembly iteration with a
// scalar tail for the remaining 0-3 pixels of each row.
static void conv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // each output consumes 2 input columns, so after outw outputs the row
    // pointers have advanced 2*outw floats; skip the rest of this input row
    // plus one full row (vertical stride 2)
    const int tailstep = w - 2 * outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // pair up output channels; remainder handled one at a time
    int nn_outch = outch >> 1;
    int remain_outch_start = nn_outch << 1;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 2;

        Mat out0 = top_blob.channel(p);
        Mat out1 = top_blob.channel(p + 1);

        const float bias0 = bias ? bias[p] : 0.f;
        const float bias1 = bias ? bias[p + 1] : 0.f;

        out0.fill(bias0);
        out1.fill(bias1);

        const float* k0 = kernel + p * inch * 9;
        const float* k1 = kernel + (p + 1) * inch * 9;

        for (int q = 0; q < inch; q++)
        {
            float* outptr0 = out0;
            float* outptr1 = out1;

            const float* img0 = bottom_blob.channel(q);

            // three consecutive input rows feeding one output row
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w * 2;

#if __ARM_NEON
            // 3x3 kernel rows for both output channels (4th lane unused)
            float32x4_t _k00 = vld1q_f32(k0);
            float32x4_t _k03 = vld1q_f32(k0 + 3);
            float32x4_t _k06 = vld1q_f32(k0 + 6);

            float32x4_t _k10 = vld1q_f32(k1);
            float32x4_t _k13 = vld1q_f32(k1 + 3);
            float32x4_t _k16 = vld1q_f32(k1 + 6);
#endif // __ARM_NEON

            int i = 0;

            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;     // 4 output pixels per asm iteration
                int remain = outw & 3;  // scalar tail
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                    // ld2 deinterleaves even/odd input columns (stride-2 taps);
                    // ext builds the third, shifted-by-one column vector
                    asm volatile(
                        "prfm pldl1keep, [%3, #256]       \n"
                        "ld2 {v8.4s, v9.4s}, [%3], #32    \n" // v8 v9 = r0

                        "0:                               \n"

                        "prfm pldl1keep, [%1, #128]       \n"
                        "ld1 {v6.4s}, [%1]                \n" // v6 = _sum0

                        "fmul v12.4s, v8.4s, %12.s[0]     \n"

                        "prfm pldl1keep, [%2, #128]       \n"
                        "ld1 {v7.4s}, [%2]                \n" // v7 = _sum1

                        "fmul v13.4s, v8.4s, %15.s[0]     \n"

                        "prfm pldl1keep, [%3, #128]       \n"
                        "ld2 {v10.4s, v11.4s}, [%3]       \n" // v10

                        "fmla v6.4s, v9.4s, %12.s[1]      \n"

                        "ext v14.16b, v8.16b, v10.16b, #4\n"

                        "fmla v7.4s, v9.4s, %15.s[1]      \n"

                        "prfm pldl1keep, [%4, #256]       \n"
                        "ld2 {v8.4s, v9.4s}, [%4], #32    \n" // r1

                        "fmla v12.4s, v14.4s, %12.s[2]    \n"
                        "fmla v13.4s, v14.4s, %15.s[2]    \n"

                        "prfm pldl1keep, [%4, #128]       \n"
                        "ld2 {v10.4s, v11.4s}, [%4]       \n"

                        "fmla v6.4s, v8.4s, %13.s[0]      \n"
                        "fmla v7.4s, v8.4s, %16.s[0]      \n"

                        "ext v14.16b, v8.16b, v10.16b, #4\n"

                        "fmla v12.4s, v9.4s, %13.s[1]     \n"
                        "fmla v13.4s, v9.4s, %16.s[1]     \n"

                        "prfm pldl1keep, [%5, #256]       \n"
                        "ld2 {v8.4s, v9.4s}, [%5], #32    \n" // r2

                        "fmla v6.4s, v14.4s, %13.s[2]     \n"
                        "fmla v7.4s, v14.4s, %16.s[2]     \n"

                        "prfm pldl1keep, [%5, #128]       \n"
                        "ld2 {v10.4s, v11.4s}, [%5]       \n"

                        "fmla v12.4s, v8.4s, %14.s[0]     \n"
                        "fmla v13.4s, v8.4s, %17.s[0]     \n"

                        "ext v14.16b, v8.16b, v10.16b, #4\n"

                        "fmla v6.4s, v9.4s, %14.s[1]      \n"
                        "fmla v7.4s, v9.4s, %17.s[1]      \n"

                        "fmla v12.4s, v14.4s, %14.s[2]    \n"
                        "fmla v13.4s, v14.4s, %17.s[2]    \n"

                        "prfm pldl1keep, [%3, #256]       \n"
                        "ld2 {v8.4s, v9.4s}, [%3], #32    \n" // v8 v9 = r0

                        "fadd v6.4s, v6.4s, v12.4s        \n"
                        "fadd v7.4s, v7.4s, v13.4s        \n"

                        "subs %w0, %w0, #1                \n"

                        "st1 {v6.4s}, [%1], #16           \n"
                        "st1 {v7.4s}, [%2], #16           \n"

                        "bne 0b                           \n"
                        "sub %3, %3, #32                  \n" // undo the pre-loaded r0 advance
                        : "=r"(nn),      // %0
                        "=r"(outptr0), // %1
                        "=r"(outptr1), // %2
                        "=r"(r0),      // %3
                        "=r"(r1),      // %4
                        "=r"(r2)       // %5
                        : "0"(nn),
                        "1"(outptr0),
                        "2"(outptr1),
                        "3"(r0),
                        "4"(r1),
                        "5"(r2),
                        "w"(_k00), // %12
                        "w"(_k03), // %13
                        "w"(_k06), // %14
                        "w"(_k10), // %15
                        "w"(_k13), // %16
                        "w"(_k16)  // %17
                        : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
                }
#else
                if (nn > 0)
                {
                    // armv7 variant of the same loop: vld2 deinterleaves the
                    // stride-2 columns, vext forms the +1-shifted column
                    asm volatile(
                        "pld        [%3, #256]          \n"
                        "vld2.f32   {d16-d19}, [%3]!    \n" // q8 q9 = r0

                        "0:                             \n"

                        "pld        [%1, #128]          \n"
                        "vld1.f32   {d12-d13}, [%1]     \n" // q6 = _sum0

                        "vmul.f32   q12, q8, %e12[0]    \n"

                        "pld        [%2, #128]          \n"
                        "vld1.f32   {d14-d15}, [%2]     \n" // q7 = _sum1

                        "vmul.f32   q13, q8, %e15[0]    \n"

                        "pld        [%3, #128]          \n"
                        "vld2.f32   {d20-d21}, [%3]     \n" // q10

                        "vmla.f32   q6, q9, %e12[1]     \n"

                        "vext.32    q11, q8, q10, #1    \n"

                        "vmla.f32   q7, q9, %e15[1]     \n"

                        "pld        [%4, #256]          \n"
                        "vld2.f32   {d16-d19}, [%4]!    \n" // r1

                        "vmla.f32   q12, q11, %f12[0]   \n"
                        "vmla.f32   q13, q11, %f15[0]   \n"

                        "pld        [%4, #128]          \n"
                        "vld2.f32   {d20-d21}, [%4]     \n"

                        "vmla.f32   q6, q8, %e13[0]     \n"
                        "vmla.f32   q7, q8, %e16[0]     \n"

                        "vext.32    q11, q8, q10, #1    \n"

                        "vmla.f32   q12, q9, %e13[1]    \n"
                        "vmla.f32   q13, q9, %e16[1]    \n"

                        "pld        [%5, #256]          \n"
                        "vld2.f32   {d16-d19}, [%5]!    \n" // r2

                        "vmla.f32   q6, q11, %f13[0]    \n"
                        "vmla.f32   q7, q11, %f16[0]    \n"

                        "pld        [%5, #128]          \n"
                        "vld2.f32   {d20-d21}, [%5]     \n"

                        "vmla.f32   q12, q8, %e14[0]    \n"
                        "vmla.f32   q13, q8, %e17[0]    \n"

                        "vext.32    q11, q8, q10, #1    \n"

                        "vmla.f32   q6, q9, %e14[1]     \n"
                        "vmla.f32   q7, q9, %e17[1]     \n"

                        "vmla.f32   q12, q11, %f14[0]   \n"
                        "vmla.f32   q13, q11, %f17[0]   \n"

                        "pld        [%3, #256]          \n"
                        "vld2.f32   {d16-d19}, [%3]!    \n" // q8 q9 = r0

                        "vadd.f32   q6, q6, q12         \n"
                        "vadd.f32   q7, q7, q13         \n"

                        "subs       %0, #1              \n"

                        "vst1.f32   {d12-d13}, [%1]!    \n"
                        "vst1.f32   {d14-d15}, [%2]!    \n"

                        "bne        0b                  \n"
                        "sub        %3, #32             \n" // undo the pre-loaded r0 advance
                        : "=r"(nn),      // %0
                        "=r"(outptr0), // %1
                        "=r"(outptr1), // %2
                        "=r"(r0),      // %3
                        "=r"(r1),      // %4
                        "=r"(r2)       // %5
                        : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(r0), "4"(r1), "5"(r2),
                        "w"(_k00), // %12
                        "w"(_k03), // %13
                        "w"(_k06), // %14
                        "w"(_k10), // %15
                        "w"(_k13), // %16
                        "w"(_k16)  // %17
                        : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // scalar tail: one output pixel per iteration. The NEON
                // variant multiplies whole kernel rows and masks lane 3 with
                // the accumulated output before the horizontal add.
                for (; remain > 0; remain--)
                {
#if __ARM_NEON
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r20 = vld1q_f32(r2);

                    float32x4_t _sum0 = vmulq_f32(_r00, _k00);
                    float32x4_t _sum1 = vmulq_f32(_r00, _k10);
                    _sum0 = vmlaq_f32(_sum0, _r10, _k03);
                    _sum1 = vmlaq_f32(_sum1, _r10, _k13);
                    _sum0 = vmlaq_f32(_sum0, _r20, _k06);
                    _sum1 = vmlaq_f32(_sum1, _r20, _k16);

                    // lane 3 carries the running output value so the
                    // horizontal add below includes it
                    _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
                    _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
#if __aarch64__
                    *outptr0 = vaddvq_f32(_sum0);
                    *outptr1 = vaddvq_f32(_sum1);
#else
                    float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
                    float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
\n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k3456), // %11 "w"(_k6789) // %12 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum = vmulq_f32(_r00, _k0123); _sum = vmlaq_f32(_sum, _r10, _k3456); _sum = vmlaq_f32(_sum, _r20, _k6789); _sum = vsetq_lane_f32(*outptr, _sum, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); *outptr = vget_lane_f32(_ss, 0); #endif // __aarch64__ #else float sum = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr += sum; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } kernel0 += 9; } } } static void conv3x3s2_transform_kernel_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch) { kernel_tm.create(8 * 9, inch, outch / 8 + outch % 8); const float* kernel = _kernel; int p = 0; for (; p + 7 < outch; p += 8) { const float* k0 = kernel + (p + 0) * inch * 9; const float* k1 = kernel + (p + 1) * inch * 9; const float* k2 = kernel + (p + 2) * inch * 9; const float* k3 = kernel + (p + 3) * inch * 9; const float* k4 = kernel + (p + 4) * inch * 9; const float* k5 = kernel + (p + 5) * inch * 9; const float* k6 = kernel + (p + 6) * inch * 9; const float* k7 = kernel + (p + 7) * inch * 9; float* ktmp = kernel_tm.channel(p / 8); for (int q = 0; q < inch; q++) { for (int k = 0; k < 9; k++) { ktmp[0] = k0[k]; ktmp[1] = k1[k]; ktmp[2] = 
k2[k]; ktmp[3] = k3[k]; ktmp[4] = k4[k]; ktmp[5] = k5[k]; ktmp[6] = k6[k]; ktmp[7] = k7[k]; ktmp += 8; } k0 += 9; k1 += 9; k2 += 9; k3 += 9; k4 += 9; k5 += 9; k6 += 9; k7 += 9; } } for (; p < outch; p++) { const float* k0 = kernel + (p + 0) * inch * 9; float* ktmp = kernel_tm.channel(p / 8 + p % 8); for (int q = 0; q < inch; q++) { for (int k = 0; k < 9; k++) { ktmp[k] = k0[k]; } ktmp += 9; k0 += 9; } } } static void conv3x3s2_packed_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2 * outw + w; // const float* kernel = _kernel; const float* bias = _bias; int nn_outch = outch >> 3; int remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; Mat out0 = top_blob.channel(p + 0); Mat out1 = top_blob.channel(p + 1); Mat out2 = top_blob.channel(p + 2); Mat out3 = top_blob.channel(p + 3); Mat out4 = top_blob.channel(p + 4); Mat out5 = top_blob.channel(p + 5); Mat out6 = top_blob.channel(p + 6); Mat out7 = top_blob.channel(p + 7); const float bias0 = bias ? bias[p + 0] : 0.f; const float bias1 = bias ? bias[p + 1] : 0.f; const float bias2 = bias ? bias[p + 2] : 0.f; const float bias3 = bias ? bias[p + 3] : 0.f; const float bias4 = bias ? bias[p + 4] : 0.f; const float bias5 = bias ? bias[p + 5] : 0.f; const float bias6 = bias ? bias[p + 6] : 0.f; const float bias7 = bias ? 
bias[p + 7] : 0.f; out0.fill(bias0); out1.fill(bias1); out2.fill(bias2); out3.fill(bias3); out4.fill(bias4); out5.fill(bias5); out6.fill(bias6); out7.fill(bias7); const float* ktmp = _kernel.channel(p / 8); for (int q = 0; q < inch; q++) { float* outptr0 = out0; float* outptr1 = out1; float* outptr2 = out2; float* outptr3 = out3; float* outptr4 = out4; float* outptr5 = out5; float* outptr6 = out6; float* outptr7 = out7; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v8.4s}, [%1] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v9.4s}, [%2] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v10.4s}, [%3] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v11.4s}, [%4] \n" /// "prfm pldl1keep, [%9, #256] \n" "ld2 {v4.4s, v5.4s}, [%9], #32 \n" // v4=00 v5=01 "ld1 {v0.4s, v1.4s}, [%12], #32 \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v12.4s}, [%5] \n" "prfm pldl1keep, [%6, #128] \n" "ld1 {v13.4s}, [%6] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "prfm pldl1keep, [%7, #128] \n" "ld1 {v14.4s}, [%7] \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v15.4s}, [%8] \n" "ld1 {v2.4s, v3.4s}, [%12], #32 \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "prfm pldl1keep, [%9, #256] \n" "ld2 {v6.4s, v7.4s}, [%9] \n" // v6 "fmla v8.4s, v5.4s, v2.s[0] \n" "fmla v9.4s, v5.4s, v2.s[1] \n" "fmla v10.4s, v5.4s, v2.s[2] \n" "fmla v11.4s, v5.4s, v2.s[3] \n" "ext v6.16b, v4.16b, v6.16b, #4 \n" // v6=02 "ld1 {v0.4s, v1.4s}, [%12], #32 \n" "fmla v12.4s, v5.4s, v3.s[0] \n" "fmla v13.4s, v5.4s, v3.s[1] \n" "fmla v14.4s, v5.4s, v3.s[2] 
\n" "fmla v15.4s, v5.4s, v3.s[3] \n" /// "prfm pldl1keep, [%10, #256] \n" "ld2 {v4.4s, v5.4s}, [%10], #32 \n" // v4=10 v5=11 "fmla v8.4s, v6.4s, v0.s[0] \n" "fmla v9.4s, v6.4s, v0.s[1] \n" "fmla v10.4s, v6.4s, v0.s[2] \n" "fmla v11.4s, v6.4s, v0.s[3] \n" "ld1 {v2.4s, v3.4s}, [%12], #32 \n" "fmla v12.4s, v6.4s, v1.s[0] \n" "fmla v13.4s, v6.4s, v1.s[1] \n" "fmla v14.4s, v6.4s, v1.s[2] \n" "fmla v15.4s, v6.4s, v1.s[3] \n" "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "ld1 {v0.4s, v1.4s}, [%12], #32 \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "prfm pldl1keep, [%10, #256] \n" "ld2 {v6.4s, v7.4s}, [%10] \n" // v6 "fmla v8.4s, v5.4s, v0.s[0] \n" "fmla v9.4s, v5.4s, v0.s[1] \n" "fmla v10.4s, v5.4s, v0.s[2] \n" "fmla v11.4s, v5.4s, v0.s[3] \n" "ld1 {v2.4s, v3.4s}, [%12], #32 \n" "ext v6.16b, v4.16b, v6.16b, #4 \n" // v6=12 "fmla v12.4s, v5.4s, v1.s[0] \n" "fmla v13.4s, v5.4s, v1.s[1] \n" "fmla v14.4s, v5.4s, v1.s[2] \n" "fmla v15.4s, v5.4s, v1.s[3] \n" /// "prfm pldl1keep, [%11, #256] \n" "ld2 {v4.4s, v5.4s}, [%11], #32 \n" // v4=20 v5=21 "fmla v8.4s, v6.4s, v2.s[0] \n" "fmla v9.4s, v6.4s, v2.s[1] \n" "fmla v10.4s, v6.4s, v2.s[2] \n" "fmla v11.4s, v6.4s, v2.s[3] \n" "ld1 {v0.4s, v1.4s}, [%12], #32 \n" "fmla v12.4s, v6.4s, v3.s[0] \n" "fmla v13.4s, v6.4s, v3.s[1] \n" "fmla v14.4s, v6.4s, v3.s[2] \n" "fmla v15.4s, v6.4s, v3.s[3] \n" "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "ld1 {v2.4s, v3.4s}, [%12], #32 \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "prfm pldl1keep, [%11, #256] \n" "ld2 {v6.4s, v7.4s}, [%11] \n" // v6 "fmla v8.4s, v5.4s, v2.s[0] \n" "fmla v9.4s, v5.4s, v2.s[1] \n" "fmla v10.4s, v5.4s, v2.s[2] \n" "fmla v11.4s, 
v5.4s, v2.s[3] \n" "ext v6.16b, v4.16b, v6.16b, #4 \n" // v6=22 "ld1 {v0.4s, v1.4s}, [%12], #32 \n" "fmla v12.4s, v5.4s, v3.s[0] \n" "fmla v13.4s, v5.4s, v3.s[1] \n" "fmla v14.4s, v5.4s, v3.s[2] \n" "fmla v15.4s, v5.4s, v3.s[3] \n" "fmla v8.4s, v6.4s, v0.s[0] \n" "fmla v9.4s, v6.4s, v0.s[1] \n" "fmla v10.4s, v6.4s, v0.s[2] \n" "fmla v11.4s, v6.4s, v0.s[3] \n" "fmla v12.4s, v6.4s, v1.s[0] \n" "fmla v13.4s, v6.4s, v1.s[1] \n" "st1 {v8.4s}, [%1], #16 \n" "st1 {v9.4s}, [%2], #16 \n" "fmla v14.4s, v6.4s, v1.s[2] \n" "fmla v15.4s, v6.4s, v1.s[3] \n" "st1 {v10.4s}, [%3], #16 \n" "st1 {v11.4s}, [%4], #16 \n" "sub %12, %12, #288 \n" "st1 {v12.4s}, [%5], #16 \n" "st1 {v13.4s}, [%6], #16 \n" "subs %w0, %w0, #1 \n" "st1 {v14.4s}, [%7], #16 \n" "st1 {v15.4s}, [%8], #16 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else // __aarch64__ if (nn > 0) { asm volatile( "0: \n" "pld [%1, #128] \n" "vld1.f32 {d16-d17}, [%1] \n" "pld [%2, #128] \n" "vld1.f32 {d18-d19}, [%2] \n" "pld [%3, #128] \n" "vld1.f32 {d20-d21}, [%3] \n" "pld [%4, #128] \n" "vld1.f32 {d22-d23}, [%4] \n" /// "pld [%9, #256] \n" "vld2.f32 {d8-d11}, [%9]! \n" // q4=00 q5=01 "vld1.f32 {d0-d3}, [%12 :128]! 
\n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "pld [%5, #128] \n" "vld1.f32 {d24-d25}, [%5] \n" "pld [%6, #128] \n" "vld1.f32 {d26-d27}, [%6] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "pld [%7, #128] \n" "vld1.f32 {d28-d29}, [%7] \n" "pld [%8, #128] \n" "vld1.f32 {d30-d31}, [%8] \n" "vld1.f32 {d4-d7}, [%12 :128]! \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "pld [%9, #128] \n" "vld2.f32 {d12-d13}, [%9] \n" // q6 "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vext.f32 q6, q4, q6, #1 \n" // q6=02 "vld1.f32 {d0-d3}, [%12 :128]! \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" /// "pld [%10, #256] \n" "vld2.f32 {d8-d11}, [%10]! \n" // q4=10 q5=11 "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vld1.f32 {d4-d7}, [%12 :128]! \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "vmla.f32 q8, q4, d4[0] \n" "vmla.f32 q9, q4, d4[1] \n" "vmla.f32 q10, q4, d5[0] \n" "vmla.f32 q11, q4, d5[1] \n" "vld1.f32 {d0-d3}, [%12 :128]! \n" "vmla.f32 q12, q4, d6[0] \n" "vmla.f32 q13, q4, d6[1] \n" "vmla.f32 q14, q4, d7[0] \n" "vmla.f32 q15, q4, d7[1] \n" "pld [%10, #128] \n" "vld2.f32 {d12-d13}, [%10] \n" // q6 "vmla.f32 q8, q5, d0[0] \n" "vmla.f32 q9, q5, d0[1] \n" "vmla.f32 q10, q5, d1[0] \n" "vmla.f32 q11, q5, d1[1] \n" "vld1.f32 {d4-d7}, [%12 :128]! \n" "vext.f32 q6, q4, q6, #1 \n" // q6=12 "vmla.f32 q12, q5, d2[0] \n" "vmla.f32 q13, q5, d2[1] \n" "vmla.f32 q14, q5, d3[0] \n" "vmla.f32 q15, q5, d3[1] \n" /// "pld [%11, #256] \n" "vld2.f32 {d8-d11}, [%11]! 
\n" // q4=20 q5=21 "vmla.f32 q8, q6, d4[0] \n" "vmla.f32 q9, q6, d4[1] \n" "vmla.f32 q10, q6, d5[0] \n" "vmla.f32 q11, q6, d5[1] \n" "vld1.f32 {d0-d3}, [%12 :128]! \n" "vmla.f32 q12, q6, d6[0] \n" "vmla.f32 q13, q6, d6[1] \n" "vmla.f32 q14, q6, d7[0] \n" "vmla.f32 q15, q6, d7[1] \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vld1.f32 {d4-d7}, [%12 :128]! \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "pld [%11, #128] \n" "vld2.f32 {d12-d13}, [%11] \n" // q6 "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vext.f32 q6, q4, q6, #1 \n" // q6=22 "vld1.f32 {d0-d3}, [%12 :128]! \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vst1.f32 {d16-d17}, [%1]! \n" "vst1.f32 {d18-d19}, [%2]! \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "vst1.f32 {d20-d21}, [%3]! \n" "vst1.f32 {d22-d23}, [%4]! \n" "sub %12, %12, #288 \n" "vst1.f32 {d24-d25}, [%5]! \n" "vst1.f32 {d26-d27}, [%6]! \n" "subs %0, #1 \n" "vst1.f32 {d28-d29}, [%7]! \n" "vst1.f32 {d30-d31}, [%8]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(r0), // %9 "=r"(r1), // %10 "=r"(r2), // %11 "=r"(ktmp) // %12 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(r0), "10"(r1), "11"(r2), "12"(ktmp) : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { #if __ARM_NEON #if __aarch64__ asm volatile( "ld1 {v10.4s, v11.4s}, [%11], #32 \n" "prfm pldl1keep, [%8, #128] \n" "ld1 {v0.4s}, [%8] \n" "ld1 {v12.4s, v13.4s}, [%11], #32 \n" "ld1 {v8.s}[0], [%0] \n" "ld1 {v8.s}[1], [%1] \n" "ld1 {v8.s}[2], [%2] \n" "ld1 {v8.s}[3], [%3] \n" "fmul v14.4s, v10.4s, v0.s[0] \n" "fmul v15.4s, v11.4s, v0.s[0] \n" "ld1 {v9.s}[0], [%4] \n" "ld1 {v9.s}[1], [%5] \n" "ld1 {v9.s}[2], [%6] \n" "ld1 {v9.s}[3], [%7] \n" "ld1 {v10.4s, v11.4s}, [%11], #32 \n" "fmla v8.4s, v12.4s, v0.s[1] \n" "fmla v9.4s, v13.4s, v0.s[1] \n" "ld1 {v12.4s, v13.4s}, [%11], #32 \n" "fmla v14.4s, v10.4s, v0.s[2] \n" "fmla v15.4s, v11.4s, v0.s[2] \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v1.4s}, [%9] \n" "ld1 {v10.4s, v11.4s}, [%11], #32 \n" "fmla v8.4s, v12.4s, v1.s[0] \n" "fmla v9.4s, v13.4s, v1.s[0] \n" "ld1 {v12.4s, v13.4s}, [%11], #32 \n" "fmla v14.4s, v10.4s, v1.s[1] \n" "fmla v15.4s, v11.4s, v1.s[1] \n" "ld1 {v10.4s, v11.4s}, [%11], #32 \n" "fmla v8.4s, v12.4s, v1.s[2] \n" "fmla v9.4s, v13.4s, v1.s[2] \n" "prfm pldl1keep, [%10, #128] \n" "ld1 {v0.4s}, [%10] \n" "ld1 {v12.4s, v13.4s}, [%11], #32 \n" "fmla v14.4s, v10.4s, v0.s[0] \n" "fmla v15.4s, v11.4s, v0.s[0] \n" "ld1 {v10.4s, v11.4s}, [%11], #32 \n" "fmla v8.4s, v12.4s, v0.s[1] \n" "fmla v9.4s, v13.4s, v0.s[1] \n" "fmla v14.4s, v10.4s, v0.s[2] \n" "fmla v15.4s, v11.4s, v0.s[2] \n" "fadd 
v8.4s, v8.4s, v14.4s \n" "fadd v9.4s, v9.4s, v15.4s \n" "sub %11, %11, #288 \n" "st1 {v8.s}[0], [%0], #4 \n" "st1 {v8.s}[1], [%1], #4 \n" "st1 {v8.s}[2], [%2], #4 \n" "st1 {v8.s}[3], [%3], #4 \n" "st1 {v9.s}[0], [%4], #4 \n" "st1 {v9.s}[1], [%5], #4 \n" "st1 {v9.s}[2], [%6], #4 \n" "st1 {v9.s}[3], [%7], #4 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(ktmp) // %11 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(r0), "9"(r1), "10"(r2), "11"(ktmp) : "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); #else // __aarch64__ asm volatile( "vld1.f32 {d20-d23}, [%11 :128]! \n" "pld [%8, #128] \n" "vld1.f32 {d0-d1}, [%8] \n" "vld1.f32 {d24-d27}, [%11 :128]! \n" "vld1.f32 {d16[0]}, [%0] \n" "vld1.f32 {d16[1]}, [%1] \n" "vld1.f32 {d17[0]}, [%2] \n" "vld1.f32 {d17[1]}, [%3] \n" "vmul.f32 q14, q10, d0[0] \n" "vmul.f32 q15, q11, d0[0] \n" "vld1.f32 {d18[0]}, [%4] \n" "vld1.f32 {d18[1]}, [%5] \n" "vld1.f32 {d19[0]}, [%6] \n" "vld1.f32 {d19[1]}, [%7] \n" "vld1.f32 {d20-d23}, [%11 :128]! \n" "vmla.f32 q8, q12, d0[1] \n" "vmla.f32 q9, q13, d0[1] \n" "vld1.f32 {d24-d27}, [%11 :128]! \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q11, d1[0] \n" "pld [%9, #128] \n" "vld1.f32 {d2-d3}, [%9] \n" "vld1.f32 {d20-d23}, [%11 :128]! \n" "vmla.f32 q8, q12, d2[0] \n" "vmla.f32 q9, q13, d2[0] \n" "vld1.f32 {d24-d27}, [%11 :128]! \n" "vmla.f32 q14, q10, d2[1] \n" "vmla.f32 q15, q11, d2[1] \n" "vld1.f32 {d20-d23}, [%11 :128]! \n" "vmla.f32 q8, q12, d3[0] \n" "vmla.f32 q9, q13, d3[0] \n" "pld [%10, #128] \n" "vld1.f32 {d0-d1}, [%10] \n" "vld1.f32 {d24-d27}, [%11 :128]! \n" "vmla.f32 q14, q10, d0[0] \n" "vmla.f32 q15, q11, d0[0] \n" "vld1.f32 {d20-d23}, [%11 :128]! 
\n" "vmla.f32 q8, q12, d0[1] \n" "vmla.f32 q9, q13, d0[1] \n" "vmla.f32 q14, q10, d1[0] \n" "vmla.f32 q15, q11, d1[0] \n" "vadd.f32 q8, q8, q14 \n" "vadd.f32 q9, q9, q15 \n" "sub %11, %11, #288 \n" "vst1.f32 {d16[0]}, [%0]! \n" "vst1.f32 {d16[1]}, [%1]! \n" "vst1.f32 {d17[0]}, [%2]! \n" "vst1.f32 {d17[1]}, [%3]! \n" "vst1.f32 {d18[0]}, [%4]! \n" "vst1.f32 {d18[1]}, [%5]! \n" "vst1.f32 {d19[0]}, [%6]! \n" "vst1.f32 {d19[1]}, [%7]! \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(r0), // %8 "=r"(r1), // %9 "=r"(r2), // %10 "=r"(ktmp) // %11 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(r0), "9"(r1), "10"(r2), "11"(ktmp) : "memory", "q0", "q1", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else // __ARM_NEON float sum0 = 0.f; float sum1 = 0.f; float sum2 = 0.f; float sum3 = 0.f; float sum4 = 0.f; float sum5 = 0.f; float sum6 = 0.f; float sum7 = 0.f; sum0 += r0[0] * ktmp[0]; sum1 += r0[0] * ktmp[1]; sum2 += r0[0] * ktmp[2]; sum3 += r0[0] * ktmp[3]; sum4 += r0[0] * ktmp[4]; sum5 += r0[0] * ktmp[5]; sum6 += r0[0] * ktmp[6]; sum7 += r0[0] * ktmp[7]; ktmp += 8; sum0 += r0[1] * ktmp[0]; sum1 += r0[1] * ktmp[1]; sum2 += r0[1] * ktmp[2]; sum3 += r0[1] * ktmp[3]; sum4 += r0[1] * ktmp[4]; sum5 += r0[1] * ktmp[5]; sum6 += r0[1] * ktmp[6]; sum7 += r0[1] * ktmp[7]; ktmp += 8; sum0 += r0[2] * ktmp[0]; sum1 += r0[2] * ktmp[1]; sum2 += r0[2] * ktmp[2]; sum3 += r0[2] * ktmp[3]; sum4 += r0[2] * ktmp[4]; sum5 += r0[2] * ktmp[5]; sum6 += r0[2] * ktmp[6]; sum7 += r0[2] * ktmp[7]; ktmp += 8; sum0 += r1[0] * ktmp[0]; sum1 += r1[0] * ktmp[1]; sum2 += r1[0] * ktmp[2]; sum3 += r1[0] * ktmp[3]; sum4 += r1[0] * ktmp[4]; sum5 += r1[0] * ktmp[5]; sum6 += r1[0] * ktmp[6]; sum7 += r1[0] * ktmp[7]; ktmp += 8; sum0 += r1[1] * ktmp[0]; sum1 += r1[1] * ktmp[1]; sum2 += 
r1[1] * ktmp[2]; sum3 += r1[1] * ktmp[3]; sum4 += r1[1] * ktmp[4]; sum5 += r1[1] * ktmp[5]; sum6 += r1[1] * ktmp[6]; sum7 += r1[1] * ktmp[7]; ktmp += 8; sum0 += r1[2] * ktmp[0]; sum1 += r1[2] * ktmp[1]; sum2 += r1[2] * ktmp[2]; sum3 += r1[2] * ktmp[3]; sum4 += r1[2] * ktmp[4]; sum5 += r1[2] * ktmp[5]; sum6 += r1[2] * ktmp[6]; sum7 += r1[2] * ktmp[7]; ktmp += 8; sum0 += r2[0] * ktmp[0]; sum1 += r2[0] * ktmp[1]; sum2 += r2[0] * ktmp[2]; sum3 += r2[0] * ktmp[3]; sum4 += r2[0] * ktmp[4]; sum5 += r2[0] * ktmp[5]; sum6 += r2[0] * ktmp[6]; sum7 += r2[0] * ktmp[7]; ktmp += 8; sum0 += r2[1] * ktmp[0]; sum1 += r2[1] * ktmp[1]; sum2 += r2[1] * ktmp[2]; sum3 += r2[1] * ktmp[3]; sum4 += r2[1] * ktmp[4]; sum5 += r2[1] * ktmp[5]; sum6 += r2[1] * ktmp[6]; sum7 += r2[1] * ktmp[7]; ktmp += 8; sum0 += r2[2] * ktmp[0]; sum1 += r2[2] * ktmp[1]; sum2 += r2[2] * ktmp[2]; sum3 += r2[2] * ktmp[3]; sum4 += r2[2] * ktmp[4]; sum5 += r2[2] * ktmp[5]; sum6 += r2[2] * ktmp[6]; sum7 += r2[2] * ktmp[7]; ktmp += 8; *outptr0 += sum0; *outptr1 += sum1; *outptr2 += sum2; *outptr3 += sum3; *outptr4 += sum4; *outptr5 += sum5; *outptr6 += sum6; *outptr7 += sum7; ktmp -= 8 * 9; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 8 * 9; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); const float* ktmp = _kernel.channel(p / 8 + p % 8); for (int q = 0; q < inch; q++) { float* outptr = out; const float* img0 = bottom_blob.channel(q); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* k0 = ktmp; const float* k1 = ktmp + 3; const float* k2 = ktmp + 6; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(k0); float32x4_t _k3456 = vld1q_f32(k1); float32x4_t _k6789 = vld1q_f32(k2); #endif // __ARM_NEON int i = 0; for (; i < outh; i++) { #if __ARM_NEON int nn = outw >> 2; int remain = outw & 3; #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( "prfm pldl1keep, [%2, #256] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "0: \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4s}, [%1] \n" "fmla v0.4s, v2.4s, %10.s[0] \n" "fmul v10.4s, v3.4s, %10.s[1] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v8.4s, v9.4s}, [%2] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmul v11.4s, v1.4s, %10.s[2] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v2.4s, v3.4s}, [%3], #32 \n" "fmla v0.4s, v2.4s, %11.s[0] \n" "fmla v10.4s, v3.4s, %11.s[1] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v8.4s, v9.4s}, [%3] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmla v11.4s, v1.4s, %11.s[2] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v2.4s, v3.4s}, [%4], #32 \n" "fmla v0.4s, v2.4s, %12.s[0] \n" "fmla v10.4s, v3.4s, %12.s[1] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v8.4s, v9.4s}, [%4] \n" "ext v1.16b, v2.16b, v8.16b, #4 \n" "fmla v11.4s, v1.4s, %12.s[2] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v2.4s, v3.4s}, [%2], #32 \n" "fadd v0.4s, v0.4s, v10.4s \n" "fadd v0.4s, v0.4s, v11.4s \n" "subs %w0, %w0, #1 \n" "st1 {v0.4s}, [%1], #16 \n" "bne 0b \n" "sub %2, %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k3456), // %11 "w"(_k6789) // %12 : "cc", "memory", "v0", "v1", "v2", "v3", 
"v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"); } #else if (nn > 0) { asm volatile( "pld [%2, #256] \n" "vld2.f32 {d4-d7}, [%2]! \n" "0: \n" "pld [%1, #128] \n" "vld1.f32 {d0-d1}, [%1] \n" "vmla.f32 q0, q2, %e10[0] \n" "vmul.f32 q10, q3, %e10[1] \n" "pld [%2, #128] \n" "vld2.f32 {d16-d17}, [%2] \n" "vext.32 q1, q2, q8, #1 \n" "vmul.f32 q11, q1, %f10[0] \n" "pld [%3, #256] \n" "vld2.f32 {d4-d7}, [%3]! \n" "vmla.f32 q0, q2, %e11[0] \n" "vmla.f32 q10, q3, %e11[1] \n" "pld [%3, #128] \n" "vld2.f32 {d16-d17}, [%3] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f11[0] \n" "pld [%4, #256] \n" "vld2.f32 {d4-d7}, [%4]! \n" "vmla.f32 q0, q2, %e12[0] \n" "vmla.f32 q10, q3, %e12[1] \n" "pld [%4, #128] \n" "vld2.f32 {d16-d17}, [%4] \n" "vext.32 q1, q2, q8, #1 \n" "vmla.f32 q11, q1, %f12[0] \n" "pld [%2, #256] \n" "vld2.f32 {d4-d7}, [%2]! \n" "vadd.f32 q0, q0, q10 \n" "vadd.f32 q0, q0, q11 \n" "subs %0, #1 \n" "vst1.f32 {d0-d1}, [%1]! \n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "w"(_k0123), // %10 "w"(_k3456), // %11 "w"(_k6789) // %12 : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { #if __ARM_NEON float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _sum = vmulq_f32(_r00, _k0123); _sum = vmlaq_f32(_sum, _r10, _k3456); _sum = vmlaq_f32(_sum, _r20, _k6789); _sum = vsetq_lane_f32(*outptr, _sum, 3); #if __aarch64__ *outptr = vaddvq_f32(_sum); #else float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); *outptr = vget_lane_f32(_ss, 0); #endif // __aarch64__ #else float sum = 0; sum += r0[0] * ktmp[0]; sum += r0[1] * ktmp[1]; sum += r0[2] * ktmp[2]; sum += r1[0] * ktmp[3]; sum += r1[1] * ktmp[4]; sum += r1[2] * ktmp[5]; sum += 
r2[0] * ktmp[6]; sum += r2[1] * ktmp[7]; sum += r2[2] * ktmp[8]; *outptr += sum; #endif // __ARM_NEON r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } ktmp += 9; } } }
GB_binop__rdiv_int32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__rdiv_int32)
// A.*B function (eWiseMult):       GB (_AemultB_08__rdiv_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__rdiv_int32)
// A.*B function (eWiseMult):       GB (_AemultB_04__rdiv_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__rdiv_int32)
// A*D function (colscale):         GB (_AxD__rdiv_int32)
// D*A function (rowscale):         GB (_DxB__rdiv_int32)
// C+=B function (dense accum):     GB (_Cdense_accumB__rdiv_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__rdiv_int32)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__rdiv_int32)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__rdiv_int32)
// C=scalar+B                       GB (_bind1st__rdiv_int32)
// C=scalar+B'                      GB (_bind1st_tran__rdiv_int32)
// C=A+scalar                       GB (_bind2nd__rdiv_int32)
// C=A'+scalar                      GB (_bind2nd_tran__rdiv_int32)

// C type:    int32_t
// A type:    int32_t
// A pattern? 0
// B type:    int32_t
// B pattern? 0

// BinaryOp:  cij = GB_IDIV_SIGNED (bij, aij, 32)
// This is RDIV ("reverse divide"): z = f(x,y) = y / x, in signed 32-bit
// integer arithmetic.  GB_IDIV_SIGNED is GraphBLAS's signed integer
// divide (declared in GB.h; presumably guards divide-by-zero and
// INT32_MIN/-1 -- confirm there).

// operand and result types
#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    int32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: note the swapped arguments (y before x), giving z = y/x
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IDIV_SIGNED (y, x, 32) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RDIV || GxB_NO_INT32 || GxB_NO_RDIV_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Each function below is a thin shell: the numerical loop lives in the
// template file it #includes, specialized by the GB_* macros defined above.
// The operator throughout is RDIV on int32: z = y / x (signed 32-bit).

void GB (_Cdense_ewise3_accum__rdiv_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__rdiv_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__rdiv_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rdiv_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // (unreachable; kept as generated)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rdiv_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rdiv_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rdiv_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces freed by GB_FREE_WORKSPACE below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read for eWiseUnion (fill values for entries
    // present in just one of A or B)
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__rdiv_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rdiv_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (GB_BINOP_FLIP is 0 for rdiv, so this branch is compiled.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__rdiv_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rdiv_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// bind1st with rdiv: Cx [p] = rdiv (x, Bx [p]) = Bx [p] / x

GrB_Info GB (_bind1st__rdiv_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B; entry p exists if GBB(Bb,p)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IDIV_SIGNED (bij, x, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// bind2nd with rdiv: Cx [p] = rdiv (Ax [p], y) = y / Ax [p]

GrB_Info GB (_bind2nd__rdiv_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A; entry p exists if GBB(Ab,p)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IDIV_SIGNED (y, aij, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int32_t aij = GBX (Ax, pA, false) ;             \
    Cx [pC] = GB_IDIV_SIGNED (aij, x, 32) ;         \
}

GrB_Info GB (_bind1st_tran__rdiv_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (same type here; kept as generated)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int32_t aij = GBX (Ax, pA, false) ;             \
    Cx [pC] = GB_IDIV_SIGNED (y, aij, 32) ;         \
}

GrB_Info GB (_bind2nd_tran__rdiv_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
dgemm.c
/* Copyright (c) 2013, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*******************************************************************************

NAME:    dgemm

PURPOSE: This program tests the efficiency with which a dense matrix
         dense multiplication is carried out

USAGE:   The program takes as input the number of threads, the matrix order,
         the number of times the matrix-matrix multiplication is carried out,
         and, optionally, a tile size for matrix blocking

         <progname> <# threads> <# iterations> <matrix order> [<tile size>]

         The output consists of diagnostics to make sure the algorithm worked,
         and of timing statistics.

FUNCTIONS CALLED:

         Other than OpenMP or standard C functions, the following
         functions are used in this program:

         wtime()
         bail_out()

HISTORY: Written by Rob Van der Wijngaart, September 2006.
         Made array dimensioning dynamic, October 2007
         Allowed arbitrary block size, November 2007
         Removed reverse-engineered MKL source code option, November 2007
         Changed from row- to column-major storage order, November 2007
         Stored blocks of B in transpose form, November 2007

*******************************************************************************/

#include <par-res-kern_general.h>
#include <par-res-kern_omp.h>

#if MKL
#include <mkl_cblas.h>
#endif

/* column-major indexing into the per-thread tiles AA/BB/CC; the tile
   leading dimension is padded by BOFFSET (defined elsewhere -- presumably
   to reduce cache-associativity conflicts; confirm in the build headers) */
#define AA_arr(i,j) AA[(i)+(block+BOFFSET)*(j)]
#define BB_arr(i,j) BB[(i)+(block+BOFFSET)*(j)]
#define CC_arr(i,j) CC[(i)+(block+BOFFSET)*(j)]
/* column-major indexing into the global order x order matrices */
#define A_arr(i,j)  A[(i)+(order)*(j)]
#define B_arr(i,j)  B[(i)+(order)*(j)]
#define C_arr(i,j)  C[(i)+(order)*(j)]

/* matrix order as a double, for flop and checksum arithmetic */
#define forder (1.0*order)

int main(int argc, char **argv){

  int     iter, i,ii,j,jj,k,kk,ig,jg,kg; /* dummies                            */
  int     iterations;     /* number of times the multiplication is done        */
  double  dgemm_time,     /* timing parameters                                 */
          avgtime;
  double  checksum = 0.0, /* checksum of result                                */
          ref_checksum;
  double  epsilon = 1.e-8;/* error tolerance                                   */
  int     nthread_input,  /* thread parameters                                 */
          nthread;
  int     num_error=0;    /* flag that signals that requested and obtained
                             numbers of threads are the same                   */
  static
  double  * RESTRICT A,   /* input (A,B) and output (C) matrices               */
          * RESTRICT B,
          * RESTRICT C;
  long    order;          /* number of rows and columns of matrices            */
  int     block;          /* tile size of matrices                             */
  int     shortcut;       /* true if only doing initialization                 */

  printf("Parallel Research Kernels version %s\n", PRKVERSION);
  printf("OpenMP Dense matrix-matrix multiplication\n");

  /* NOTE(review): the braces of this if-statement span the #if/#else/#endif;
     only the condition and usage message differ between MKL and non-MKL
     builds (the non-MKL build accepts an optional tile-size argument) */
#if !MKL
  if (argc != 4 && argc != 5) {
    printf("Usage: %s <# threads> <# iterations> <matrix order> [tile size]\n",*argv);
#else
  if (argc != 4) {
    printf("Usage: %s <# threads> <# iterations> <matrix order>\n",*argv);
#endif
    exit(EXIT_FAILURE);
  }

  /* Take number of threads to request from command line */
  nthread_input = atoi(*++argv);
  if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
    printf("ERROR: Invalid number of threads: %d\n", nthread_input);
    exit(EXIT_FAILURE);
  }
  omp_set_num_threads(nthread_input);

  iterations = atoi(*++argv);
  if (iterations < 1){
    printf("ERROR: Iterations must be positive : %d \n", iterations);
    exit(EXIT_FAILURE);
  }

  /* a negative order requests the "shortcut": initialize only, then quit */
  order = atol(*++argv);
  if (order < 0) {
    shortcut = 1;
    order    = -order;
  }
  else shortcut = 0;
  if (order < 1) {
    printf("ERROR: Matrix order must be positive: %ld\n", order);
    exit(EXIT_FAILURE);
  }

  A = (double *) prk_malloc(order*order*sizeof(double));
  B = (double *) prk_malloc(order*order*sizeof(double));
  C = (double *) prk_malloc(order*order*sizeof(double));
  if (!A || !B || !C) {
    printf("ERROR: Could not allocate space for global matrices\n");
    exit(EXIT_FAILURE);
  }

  /* with A(i,j)=B(i,j)=j (set below), one multiply adds j*n*(n-1)/2 to each
     C(i,j); summed over all i,j that is 0.25*n^3*(n-1)^2 */
  ref_checksum = (0.25*forder*forder*forder*(forder-1.0)*(forder-1.0));

  /* initialize A and B to the column index; C to zero */
  #pragma omp parallel for private(i,j)
  for(j = 0; j < order; j++)
    for(i = 0; i < order; i++) {
      A_arr(i,j) = B_arr(i,j) = (double) j;
      C_arr(i,j) = 0.0;
    }

#if !MKL
  if (argc == 5) {
    block = atoi(*++argv);
  }
  else block = DEFAULTBLOCK;

  #pragma omp parallel private (i,j,k,ii,jj,kk,ig,jg,kg,iter)
  {
  double * RESTRICT AA, * RESTRICT BB, * RESTRICT CC;

  if (block > 0) {
    /* matrix blocks for local temporary copies: one allocation per thread
       holds the AA, BB and CC tiles back-to-back */
    AA = (double *) prk_malloc(block*(block+BOFFSET)*3*sizeof(double));
    if (!AA) {
      num_error = 1;
      printf("Could not allocate space for matrix tiles on thread %d\n",
             omp_get_thread_num());
    }
    bail_out(num_error);
    BB = AA + block*(block+BOFFSET);
    CC = BB + block*(block+BOFFSET);
  }

  /* one thread prints the run parameters and verifies the thread count */
  #pragma omp master
  {
  nthread = omp_get_num_threads();
  if (nthread != nthread_input) {
    num_error = 1;
    printf("ERROR: number of requested threads %d does not equal ",
           nthread_input);
    printf("number of spawned threads %d\n", nthread);
  }
  else {
    printf("Matrix order = %ld\n", order);
    if (shortcut) printf("Only doing initialization\n");
    printf("Number of threads = %d\n", nthread_input);
    if (block>0) printf("Blocking factor = %d\n", block);
    else printf("No blocking\n");
    printf("Block offset = %d\n", BOFFSET);
    printf("Number of iterations = %d\n", iterations);
    printf("Using MKL library = off\n");
  }
  }
  bail_out(num_error);

  /* NOTE(review): exit() inside an OpenMP parallel region -- intentional
     for the init-only shortcut (the process exits, so A/B/C/AA are
     reclaimed by the OS rather than freed) */
  if (shortcut) exit(EXIT_SUCCESS);

  /* iteration 0 is an untimed warm-up: the clock starts at iter==1 */
  for (iter=0; iter<=iterations; iter++) {

    if (iter==1) {
      #pragma omp barrier
      #pragma omp master
      {
        dgemm_time = wtime();
      }
    }

    if (block > 0) {

      /* blocked multiply; the jj loop is shared among the threads */
      #pragma omp for
      for(jj = 0; jj < order; jj+=block){
        for(kk = 0; kk < order; kk+=block) {

          /* copy the B tile in transpose form, so the innermost product
             below reads both AA and BB with unit stride */
          for (jg=jj,j=0; jg<MIN(jj+block,order); j++,jg++)
            for (kg=kk,k=0; kg<MIN(kk+block,order); k++,kg++)
              BB_arr(j,k) = B_arr(kg,jg);

          for(ii = 0; ii < order; ii+=block){

            /* copy the A tile */
            for (kg=kk,k=0; kg<MIN(kk+block,order); k++,kg++)
              for (ig=ii,i=0; ig<MIN(ii+block,order); i++,ig++)
                AA_arr(i,k) = A_arr(ig,kg);

            /* zero the result tile */
            for (jg=jj,j=0; jg<MIN(jj+block,order); j++,jg++)
              for (ig=ii,i=0; ig<MIN(ii+block,order); i++,ig++)
                CC_arr(i,j) = 0.0;

            /* tile product: CC(i,j) += sum_k AA(i,k)*BB(j,k)
               (BB holds the transposed B tile) */
            for (kg=kk,k=0; kg<MIN(kk+block,order); k++,kg++)
              for (jg=jj,j=0; jg<MIN(jj+block,order); j++,jg++)
                for (ig=ii,i=0; ig<MIN(ii+block,order); i++,ig++)
                  CC_arr(i,j) += AA_arr(i,k)*BB_arr(j,k);

            /* accumulate the tile into the global result */
            for (jg=jj,j=0; jg<MIN(jj+block,order); j++,jg++)
              for (ig=ii,i=0; ig<MIN(ii+block,order); i++,ig++)
                C_arr(ig,jg) += CC_arr(i,j);
          }
        }
      }
    }
    else {
      /* unblocked column-major j-k-i triple loop, j shared among threads */
      #pragma omp for
      for (jg=0; jg<order; jg++)
        for (kg=0; kg<order; kg++)
          for (ig=0; ig<order; ig++)
            C_arr(ig,jg) += A_arr(ig,kg)*B_arr(kg,jg);
    }
  } /* end of iterations */

  #pragma omp barrier
  #pragma omp master
  {
    dgemm_time = wtime() - dgemm_time;
  }

  } /* end of parallel region */

#else

  printf("Matrix size = %ldx%ld\n", order, order);
  printf("Number of threads = %d\n", nthread_input);
  printf("Using MKL library = on\n");
  printf("Number of iterations = %d\n", iterations);

  for (iter=0; iter<=iterations; iter++) {
    /* iteration 0 is an untimed warm-up: the clock starts at iter==1 */
    if (iter==1) dgemm_time = wtime();
    /* NOTE(review): CblasRowMajor on column-major storage effectively reads
       A(i,j)=i instead of j; the total checksum is the same by symmetry of
       the sum, so validation still holds -- confirm if results are reused */
    cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                order, order, order, 1.0, &(A_arr(0,0)), order,
                &(B_arr(0,0)), order, 1.0, &(C_arr(0,0)), order);
  }
  dgemm_time = wtime()-dgemm_time;

#endif

  /* verification test */
  for(checksum=0.0,j = 0; j < order; j++)
    for(i = 0; i < order; i++)
      checksum += C_arr(i,j);

  /* C accumulates over the warm-up iteration as well, hence the +1 */
  ref_checksum *= (iterations+1);

  if (ABS((checksum - ref_checksum)/ref_checksum) > epsilon) {
    printf("ERROR: Checksum = %lf, Reference checksum = %lf\n",
           checksum, ref_checksum);
    exit(EXIT_FAILURE);
  }
  else {
    printf("Solution validates\n");
#if VERBOSE
    printf("Reference checksum = %lf, checksum = %lf\n",
           ref_checksum, checksum);
#endif
  }

  /* 2*n^3 flops per multiply; average excludes the untimed warm-up */
  double nflops = 2.0*forder*forder*forder;
  avgtime = dgemm_time/iterations;
  printf("Rate (MFlops/s): %lf Avg time (s): %lf\n",
         1.0E-06 *nflops/avgtime, avgtime);

  exit(EXIT_SUCCESS);
}
DeclOpenMP.h
//===- DeclOpenMP.h - Classes for representing OpenMP directives -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines OpenMP nodes for declarative directives.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_AST_DECLOPENMP_H
#define LLVM_CLANG_AST_DECLOPENMP_H

#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/TrailingObjects.h"

namespace clang {

/// This represents '#pragma omp threadprivate ...' directive.
/// For example, in the following, both 'a' and 'A::b' are threadprivate:
///
/// \code
/// int a;
/// #pragma omp threadprivate(a)
/// struct A {
///   static int b;
/// #pragma omp threadprivate(b)
/// };
/// \endcode
///
class OMPThreadPrivateDecl final
    : public Decl,
      private llvm::TrailingObjects<OMPThreadPrivateDecl, Expr *> {
  friend class ASTDeclReader;
  friend TrailingObjects;

  /// Number of variable expressions listed in this 'threadprivate'
  /// directive; the expressions themselves live in the trailing storage.
  unsigned NumVars;

  virtual void anchor();

  OMPThreadPrivateDecl(Kind DK, DeclContext *DC, SourceLocation L)
      : Decl(DK, DC, L), NumVars(0) { }

  /// Returns the (immutable) list of threadprivate variable expressions.
  ArrayRef<const Expr *> getVars() const {
    return llvm::makeArrayRef(getTrailingObjects<Expr *>(), NumVars);
  }

  /// Returns the (mutable) list of threadprivate variable expressions.
  MutableArrayRef<Expr *> getVars() {
    return MutableArrayRef<Expr *>(getTrailingObjects<Expr *>(), NumVars);
  }

  /// Copies \p VL into the trailing variable-expression storage.
  void setVars(ArrayRef<Expr *> VL);

public:
  static OMPThreadPrivateDecl *Create(ASTContext &C, DeclContext *DC,
                                      SourceLocation L,
                                      ArrayRef<Expr *> VL);
  static OMPThreadPrivateDecl *CreateDeserialized(ASTContext &C,
                                                  unsigned ID, unsigned N);

  typedef MutableArrayRef<Expr *>::iterator varlist_iterator;
  typedef ArrayRef<const Expr *>::iterator varlist_const_iterator;
  typedef llvm::iterator_range<varlist_iterator> varlist_range;
  typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range;

  unsigned varlist_size() const { return NumVars; }
  bool varlist_empty() const { return NumVars == 0; }

  varlist_range varlists() {
    return varlist_range(varlist_begin(), varlist_end());
  }
  varlist_const_range varlists() const {
    return varlist_const_range(varlist_begin(), varlist_end());
  }
  varlist_iterator varlist_begin() { return getVars().begin(); }
  varlist_iterator varlist_end() { return getVars().end(); }
  varlist_const_iterator varlist_begin() const { return getVars().begin(); }
  varlist_const_iterator varlist_end() const { return getVars().end(); }

  // Implement isa/cast/dyncast/etc.
  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPThreadPrivate; }
};

/// This represents '#pragma omp declare reduction ...' directive.
/// For example, in the following, declared reduction 'foo' for types 'int' and
/// 'float':
///
/// \code
/// #pragma omp declare reduction (foo : int,float : omp_out += omp_in) \
///                     initializer (omp_priv = 0)
/// \endcode
///
/// Here 'omp_out += omp_in' is a combiner and 'omp_priv = 0' is an initializer.
class OMPDeclareReductionDecl final : public ValueDecl, public DeclContext {
  // This class stores some data in DeclContext::OMPDeclareReductionDeclBits
  // to save some space. Use the provided accessors to access it.
public:
  enum InitKind {
    CallInit,   // Initialized by function call.
    DirectInit, // omp_priv(<expr>)
    CopyInit    // omp_priv = <expr>
  };

private:
  friend class ASTDeclReader;
  /// Combiner for declare reduction construct.
  Expr *Combiner = nullptr;
  /// Initializer for declare reduction construct.
  Expr *Initializer = nullptr;
  /// In parameter of the combiner.
  Expr *In = nullptr;
  /// Out parameter of the combiner.
  Expr *Out = nullptr;
  /// Priv parameter of the initializer.
  Expr *Priv = nullptr;
  /// Orig parameter of the initializer.
  Expr *Orig = nullptr;

  /// Reference to the previous declare reduction construct in the same
  /// scope with the same name. Required for proper templates instantiation if
  /// the declare reduction construct is declared inside compound statement.
  LazyDeclPtr PrevDeclInScope;

  virtual void anchor();

  OMPDeclareReductionDecl(Kind DK, DeclContext *DC, SourceLocation L,
                          DeclarationName Name, QualType Ty,
                          OMPDeclareReductionDecl *PrevDeclInScope);

  void setPrevDeclInScope(OMPDeclareReductionDecl *Prev) {
    PrevDeclInScope = Prev;
  }

public:
  /// Create declare reduction node.
  static OMPDeclareReductionDecl *
  Create(ASTContext &C, DeclContext *DC, SourceLocation L,
         DeclarationName Name, QualType T,
         OMPDeclareReductionDecl *PrevDeclInScope);
  /// Create deserialized declare reduction node.
  static OMPDeclareReductionDecl *CreateDeserialized(ASTContext &C,
                                                     unsigned ID);

  /// Get combiner expression of the declare reduction construct.
  Expr *getCombiner() { return Combiner; }
  const Expr *getCombiner() const { return Combiner; }

  /// Get In variable of the combiner.
  Expr *getCombinerIn() { return In; }
  const Expr *getCombinerIn() const { return In; }

  /// Get Out variable of the combiner.
  Expr *getCombinerOut() { return Out; }
  const Expr *getCombinerOut() const { return Out; }

  /// Set combiner expression for the declare reduction construct.
  void setCombiner(Expr *E) { Combiner = E; }

  /// Set combiner In and Out vars.
  void setCombinerData(Expr *InE, Expr *OutE) {
    In = InE;
    Out = OutE;
  }

  /// Get initializer expression (if specified) of the declare reduction
  /// construct.
  Expr *getInitializer() { return Initializer; }
  const Expr *getInitializer() const { return Initializer; }

  /// Get initializer kind.
  InitKind getInitializerKind() const {
    return static_cast<InitKind>(OMPDeclareReductionDeclBits.InitializerKind);
  }

  /// Get Orig variable of the initializer.
  Expr *getInitOrig() { return Orig; }
  const Expr *getInitOrig() const { return Orig; }

  /// Get Priv variable of the initializer.
  Expr *getInitPriv() { return Priv; }
  const Expr *getInitPriv() const { return Priv; }

  /// Set initializer expression for the declare reduction construct.
  void setInitializer(Expr *E, InitKind IK) {
    Initializer = E;
    OMPDeclareReductionDeclBits.InitializerKind = IK;
  }

  /// Set initializer Orig and Priv vars.
  void setInitializerData(Expr *OrigE, Expr *PrivE) {
    Orig = OrigE;
    Priv = PrivE;
  }

  /// Get reference to previous declare reduction construct in the same
  /// scope with the same name.
  OMPDeclareReductionDecl *getPrevDeclInScope();
  const OMPDeclareReductionDecl *getPrevDeclInScope() const;

  // Implement isa/cast/dyncast/etc.
  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPDeclareReduction; }
  static DeclContext *castToDeclContext(const OMPDeclareReductionDecl *D) {
    return static_cast<DeclContext *>(const_cast<OMPDeclareReductionDecl *>(D));
  }
  static OMPDeclareReductionDecl *castFromDeclContext(const DeclContext *DC) {
    return static_cast<OMPDeclareReductionDecl *>(
        const_cast<DeclContext *>(DC));
  }
};

/// This represents '#pragma omp declare mapper ...' directive. Map clauses are
/// allowed to use with this directive. The following example declares a user
/// defined mapper for the type 'struct vec'. This example instructs the fields
/// 'len' and 'data' should be mapped when mapping instances of 'struct vec'.
///
/// \code
/// #pragma omp declare mapper(mid: struct vec v) map(v.len, v.data[0:N])
/// \endcode
class OMPDeclareMapperDecl final : public ValueDecl, public DeclContext {
  friend class ASTDeclReader;

  /// Clauses associated with this mapper declaration
  MutableArrayRef<OMPClause *> Clauses;

  /// Mapper variable, which is 'v' in the example above
  Expr *MapperVarRef = nullptr;

  /// Name of the mapper variable
  DeclarationName VarName;

  /// Reference to the previous declare mapper construct in the same scope
  /// with the same name (lazily deserialized).
  LazyDeclPtr PrevDeclInScope;

  virtual void anchor();

  OMPDeclareMapperDecl(Kind DK, DeclContext *DC, SourceLocation L,
                       DeclarationName Name, QualType Ty,
                       DeclarationName VarName,
                       OMPDeclareMapperDecl *PrevDeclInScope)
      : ValueDecl(DK, DC, L, Name, Ty), DeclContext(DK), VarName(VarName),
        PrevDeclInScope(PrevDeclInScope) {}

  void setPrevDeclInScope(OMPDeclareMapperDecl *Prev) {
    PrevDeclInScope = Prev;
  }

  /// Sets an array of clauses to this mapper declaration
  void setClauses(ArrayRef<OMPClause *> CL);

public:
  /// Creates declare mapper node.
  static OMPDeclareMapperDecl *Create(ASTContext &C, DeclContext *DC,
                                      SourceLocation L, DeclarationName Name,
                                      QualType T, DeclarationName VarName,
                                      OMPDeclareMapperDecl *PrevDeclInScope);
  /// Creates deserialized declare mapper node.
  static OMPDeclareMapperDecl *CreateDeserialized(ASTContext &C, unsigned ID,
                                                  unsigned N);

  /// Creates an array of clauses to this mapper declaration and initializes
  /// them.
  void CreateClauses(ASTContext &C, ArrayRef<OMPClause *> CL);

  using clauselist_iterator = MutableArrayRef<OMPClause *>::iterator;
  using clauselist_const_iterator = ArrayRef<const OMPClause *>::iterator;
  using clauselist_range = llvm::iterator_range<clauselist_iterator>;
  using clauselist_const_range =
      llvm::iterator_range<clauselist_const_iterator>;

  unsigned clauselist_size() const { return Clauses.size(); }
  bool clauselist_empty() const { return Clauses.empty(); }

  clauselist_range clauselists() {
    return clauselist_range(clauselist_begin(), clauselist_end());
  }
  clauselist_const_range clauselists() const {
    return clauselist_const_range(clauselist_begin(), clauselist_end());
  }
  clauselist_iterator clauselist_begin() { return Clauses.begin(); }
  clauselist_iterator clauselist_end() { return Clauses.end(); }
  clauselist_const_iterator clauselist_begin() const { return Clauses.begin(); }
  clauselist_const_iterator clauselist_end() const { return Clauses.end(); }

  /// Get the variable declared in the mapper
  Expr *getMapperVarRef() { return MapperVarRef; }
  const Expr *getMapperVarRef() const { return MapperVarRef; }
  /// Set the variable declared in the mapper
  void setMapperVarRef(Expr *MapperVarRefE) { MapperVarRef = MapperVarRefE; }

  /// Get the name of the variable declared in the mapper
  DeclarationName getVarName() { return VarName; }

  /// Get reference to previous declare mapper construct in the same
  /// scope with the same name.
  OMPDeclareMapperDecl *getPrevDeclInScope();
  const OMPDeclareMapperDecl *getPrevDeclInScope() const;

  // Implement isa/cast/dyncast/etc.
  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPDeclareMapper; }
  static DeclContext *castToDeclContext(const OMPDeclareMapperDecl *D) {
    return static_cast<DeclContext *>(const_cast<OMPDeclareMapperDecl *>(D));
  }
  static OMPDeclareMapperDecl *castFromDeclContext(const DeclContext *DC) {
    return static_cast<OMPDeclareMapperDecl *>(const_cast<DeclContext *>(DC));
  }
};

/// Pseudo declaration for capturing expressions. Also is used for capturing of
/// non-static data members in non-static member functions.
///
/// Clang supports capturing of variables only, but OpenMP 4.5 allows to
/// privatize non-static members of current class in non-static member
/// functions. This pseudo-declaration allows properly handle this kind of
/// capture by wrapping captured expression into a variable-like declaration.
class OMPCapturedExprDecl final : public VarDecl {
  friend class ASTDeclReader;
  void anchor() override;

  OMPCapturedExprDecl(ASTContext &C, DeclContext *DC, IdentifierInfo *Id,
                      QualType Type, TypeSourceInfo *TInfo,
                      SourceLocation StartLoc)
      : VarDecl(OMPCapturedExpr, C, DC, StartLoc, StartLoc, Id, Type, TInfo,
                SC_None) {
    // Captures are compiler-generated, never spelled in user code.
    setImplicit();
  }

public:
  static OMPCapturedExprDecl *Create(ASTContext &C, DeclContext *DC,
                                     IdentifierInfo *Id, QualType T,
                                     SourceLocation StartLoc);

  static OMPCapturedExprDecl *CreateDeserialized(ASTContext &C, unsigned ID);

  SourceRange getSourceRange() const override LLVM_READONLY;

  // Implement isa/cast/dyncast/etc.
  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPCapturedExpr; }
};

/// This represents '#pragma omp requires...' directive.
/// For example
///
/// \code
/// #pragma omp requires unified_address
/// \endcode
///
class OMPRequiresDecl final
    : public Decl,
      private llvm::TrailingObjects<OMPRequiresDecl, OMPClause *> {
  friend class ASTDeclReader;
  friend TrailingObjects;

  // Number of clauses associated with this requires declaration
  unsigned NumClauses = 0;

  virtual void anchor();

  OMPRequiresDecl(Kind DK, DeclContext *DC, SourceLocation L)
      : Decl(DK, DC, L), NumClauses(0) {}

  /// Returns an array of immutable clauses associated with this requires
  /// declaration
  ArrayRef<const OMPClause *> getClauses() const {
    return llvm::makeArrayRef(getTrailingObjects<OMPClause *>(), NumClauses);
  }

  /// Returns an array of clauses associated with this requires declaration
  MutableArrayRef<OMPClause *> getClauses() {
    return MutableArrayRef<OMPClause *>(getTrailingObjects<OMPClause *>(),
                                        NumClauses);
  }

  /// Sets an array of clauses to this requires declaration
  void setClauses(ArrayRef<OMPClause *> CL);

public:
  /// Create requires node.
  static OMPRequiresDecl *Create(ASTContext &C, DeclContext *DC,
                                 SourceLocation L, ArrayRef<OMPClause *> CL);
  /// Create deserialized requires node.
  static OMPRequiresDecl *CreateDeserialized(ASTContext &C, unsigned ID,
                                             unsigned N);

  using clauselist_iterator = MutableArrayRef<OMPClause *>::iterator;
  using clauselist_const_iterator = ArrayRef<const OMPClause *>::iterator;
  using clauselist_range = llvm::iterator_range<clauselist_iterator>;
  using clauselist_const_range =
      llvm::iterator_range<clauselist_const_iterator>;

  unsigned clauselist_size() const { return NumClauses; }
  bool clauselist_empty() const { return NumClauses == 0; }

  clauselist_range clauselists() {
    return clauselist_range(clauselist_begin(), clauselist_end());
  }
  clauselist_const_range clauselists() const {
    return clauselist_const_range(clauselist_begin(), clauselist_end());
  }
  clauselist_iterator clauselist_begin() { return getClauses().begin(); }
  clauselist_iterator clauselist_end() { return getClauses().end(); }
  clauselist_const_iterator clauselist_begin() const {
    return getClauses().begin();
  }
  clauselist_const_iterator clauselist_end() const {
    return getClauses().end();
  }

  // Implement isa/cast/dyncast/etc.
  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPRequires; }
};

} // end namespace clang

#endif
GB_unaryop__identity_uint8_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__identity_uint8_uint64
// op(A') function: GB_tran__identity_uint8_uint64

// C type:   uint8_t
// A type:   uint64_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = aij

// The "identity" op performs no numerical change; the only transformation
// is the uint64_t -> uint8_t cast (value reduced modulo 2^8, per C unsigned
// conversion rules).

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator elementwise to the anz entries of Ax, writing to Cx.
// The loop is embarrassingly parallel; each iteration touches only index p.
GrB_Info GB_unop__identity_uint8_uint64
(
    uint8_t *Cx,        // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the shared template
// GB_unaryop_transpose.c, specialized here via the GB_* macros above.
GrB_Info GB_tran__identity_uint8_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
abs.c
#include <math.h> #include "../cdefs.h" #include "../types.h" #include "../misc/safeomp.h" void bib_mabs(mat_r x) { const int m = NROWS(x); const int n = NCOLS(x); #pragma omp parallel for if(m*n > OMP_MIN_SIZE) for (int j=0; j<n; j++) { SAFE_SIMD for (int i=0; i<m; i++) DATA(x)[i + n*j] = fabs(DATA(x)[i + n*j]); } }
imginputfileconn.h
/** * DeepDetect * Copyright (c) 2014 Emmanuel Benazera * Author: Emmanuel Benazera <beniz@droidnik.fr> * * This file is part of deepdetect. * * deepdetect is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * deepdetect is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with deepdetect. If not, see <http://www.gnu.org/licenses/>. */ #ifndef IMGINPUTFILECONN_H #define IMGINPUTFILECONN_H #include "inputconnectorstrategy.h" #include <opencv2/opencv.hpp> #ifdef USE_CUDA_CV #include <opencv2/cudaimgproc.hpp> #include <opencv2/cudawarping.hpp> #include <opencv2/cudaarithm.hpp> #endif #if CV_VERSION_MAJOR >= 3 #define CV_LOAD_IMAGE_COLOR cv::IMREAD_COLOR #define CV_LOAD_IMAGE_GRAYSCALE cv::IMREAD_GRAYSCALE #define CV_LOAD_IMAGE_UNCHANGED cv::IMREAD_UNCHANGED #define CV_BGR2RGB cv::COLOR_BGR2RGB #define CV_BGR2GRAY cv::COLOR_BGR2GRAY #define CV_GRAY2RGB cv::COLOR_GRAY2RGB #define CV_YCrCb2RGB cv::COLOR_YCrCb2RGB #define CV_YCrCb2BGR cv::COLOR_YCrCb2BGR #define CV_BGR2YCrCb cv::COLOR_BGR2YCrCb #define CV_INTER_CUBIC cv::INTER_CUBIC #endif #include "ext/base64/base64.h" #include "utils/apitools.h" #include <random> #include "dto/input_connector.hpp" namespace dd { class DDImg { public: DDImg() { } ~DDImg() { } // base64 detection bool is_within_base64_range(char c) const { if ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || (c == '+' || c == '/' || c == '=')) return true; else return false; } bool possibly_base64(const std::string &s) const { bool ism = is_multiple_four(s); if (!ism) return 
false; for (char c : s) { bool within_64 = is_within_base64_range(c); if (!within_64) return false; } return true; } bool is_multiple_four(const std::string &s) const { if (s.length() % 4 == 0) return true; else return false; } /** apply preprocessing to image */ void prepare(const cv::Mat &src, cv::Mat &dst, const std::string &img_name) const { try { if (_scaled) scale(src, dst); else if (_width == 0 || _height == 0) { if (_width == 0 && _height == 0) { // Do nothing and keep native resolution. May cause issues if // batched images are different resolutions dst = src; } else { // Resize so that the larger dimension is set to whichever // (width or height) is non-zero, maintaining aspect ratio // XXX - This may cause issues if batch images are different // resolutions size_t currMaxDim = std::max(src.rows, src.cols); double scale = static_cast<double>(std::max(_width, _height)) / static_cast<double>(currMaxDim); cv::resize(src, dst, cv::Size(), scale, scale, select_cv_interp()); } } else { // Resize normally to the specified width and height cv::resize(src, dst, cv::Size(_width, _height), 0, 0, select_cv_interp()); } } catch (...) { throw InputConnectorBadParamException("failed resizing image " + img_name); } // cropping if (_crop_width != 0 && _crop_height != 0) { int widthBorder = (_width - _crop_width) / 2; int heightBorder = (_height - _crop_height) / 2; try { dst = dst(cv::Rect(widthBorder, heightBorder, _crop_width, _crop_height)); } catch (...) { throw InputConnectorBadParamException("failed cropping image " + img_name); } } // color adjustments if (_bw && dst.channels() > 1) { cv::cvtColor(dst, dst, CV_BGR2GRAY); } if (_histogram_equalization) { if (_bw) { cv::equalizeHist(dst, dst); if (_rgb) cv::cvtColor(dst, dst, CV_GRAY2RGB); } else { // We don't apply equalizeHist on each BGR channels to keep // the color balance of the image. 
equalizeHist(V) of HSV can // works too, the result is almost the same cv::cvtColor(dst, dst, CV_BGR2YCrCb); std::vector<cv::Mat> vec_channels; cv::split(dst, vec_channels); cv::equalizeHist(vec_channels[0], vec_channels[0]); cv::merge(vec_channels, dst); if (_rgb) cv::cvtColor(dst, dst, CV_YCrCb2RGB); else cv::cvtColor(dst, dst, CV_YCrCb2BGR); } } else if (_rgb) { if (_bw) cv::cvtColor(dst, dst, CV_GRAY2RGB); else cv::cvtColor(dst, dst, CV_BGR2RGB); } } #ifdef USE_CUDA_CV /** apply preprocessing to cuda image */ void prepare_cuda(const cv::cuda::GpuMat &src, cv::cuda::GpuMat &dst, const std::string &img_name) const { try { if (_scaled) scale_cuda(src, dst); else if (_width == 0 || _height == 0) { if (_width == 0 && _height == 0) { // Do nothing and keep native resolution. May cause issues if // batched images are different resolutions dst = src; } else { // Resize so that the larger dimension is set to whichever // (width or height) is non-zero, maintaining aspect ratio // XXX - This may cause issues if batch images are different // resolutions size_t currMaxDim = std::max(src.rows, src.cols); double scale = static_cast<double>(std::max(_width, _height)) / static_cast<double>(currMaxDim); cv::cuda::resize(src, dst, cv::Size(), scale, scale, select_cv_interp(), *_cuda_stream); } } else { // Resize normally to the specified width and height cv::cuda::resize(src, dst, cv::Size(_width, _height), 0, 0, select_cv_interp(), *_cuda_stream); } } catch (...) { throw InputConnectorBadParamException("failed resizing image " + img_name); } // cropping if (_crop_width != 0 && _crop_height != 0) { int widthBorder = (_width - _crop_width) / 2; int heightBorder = (_height - _crop_height) / 2; try { // TODO cuda crop with stream dst = dst(cv::Rect(widthBorder, heightBorder, _crop_width, _crop_height)); } catch (...) 
{ throw InputConnectorBadParamException("failed cropping image " + img_name); } } // color adjustment if (_bw && dst.channels() > 1) { cv::cuda::cvtColor(dst, dst, CV_BGR2GRAY, 0, *_cuda_stream); } if (_histogram_equalization) { if (_bw) { cv::cuda::equalizeHist(dst, dst, *_cuda_stream); if (_rgb) cv::cuda::cvtColor(dst, dst, CV_GRAY2RGB, 0, *_cuda_stream); } else { // We don't apply equalizeHist on each BGR channels to keep // the color balance of the image. equalizeHist(V) of HSV can // works too, the result is almost the same cv::cuda::cvtColor(dst, dst, CV_BGR2YCrCb, 0, *_cuda_stream); std::vector<cv::cuda::GpuMat> vec_channels; cv::cuda::split(dst, vec_channels, *_cuda_stream); cv::cuda::equalizeHist(vec_channels[0], vec_channels[0], *_cuda_stream); cv::cuda::merge(vec_channels, dst, *_cuda_stream); if (_rgb) cv::cuda::cvtColor(dst, dst, CV_YCrCb2RGB, 0, *_cuda_stream); else cv::cuda::cvtColor(dst, dst, CV_YCrCb2BGR, 0, *_cuda_stream); } } else if (_rgb) { if (_bw) cv::cuda::cvtColor(dst, dst, CV_GRAY2RGB, 0, *_cuda_stream); else cv::cuda::cvtColor(dst, dst, CV_BGR2RGB, 0, *_cuda_stream); } } #endif void scale(const cv::Mat &src, cv::Mat &dst) const { float coef = std::min( static_cast<float>(_scale_max) / std::max(src.rows, src.cols), static_cast<float>(_scale_min) / std::min(src.rows, src.cols)); cv::resize(src, dst, cv::Size(), coef, coef, select_cv_interp()); } #ifdef USE_CUDA_CV void scale_cuda(const cv::cuda::GpuMat &src, cv::cuda::GpuMat &dst) const { float coef = std::min( static_cast<float>(_scale_max) / std::max(src.rows, src.cols), static_cast<float>(_scale_min) / std::min(src.rows, src.cols)); cv::cuda::resize(src, dst, cv::Size(), coef, coef, select_cv_interp(), *_cuda_stream); } #endif /// Apply preprocessing to image and add it to the list of images /// img_name: name of the image as displayed in error messages int add_image(const cv::Mat &img, const std::string &img_name) { if (img.empty()) { _logger->error("empty image {}", img_name); return 
-1; } _imgs_size.push_back(std::pair<int, int>(img.rows, img.cols)); #ifdef USE_CUDA_CV if (_cuda) { cv::cuda::GpuMat d_src; d_src.upload(img); if (_keep_orig) _cuda_orig_imgs.push_back(d_src); cv::cuda::GpuMat d_dst; prepare_cuda(d_src, d_dst, img_name); _cuda_imgs.push_back(std::move(d_dst)); } else #endif { if (_keep_orig) _orig_imgs.push_back(img); cv::Mat rimg; prepare(img, rimg, img_name); _imgs.push_back(std::move(rimg)); } return 0; } #ifdef USE_CUDA_CV /// add_image but directly from a cv::cuda::GpuMat int add_image_cuda(const cv::cuda::GpuMat &d_src, const std::string &img_name) { _imgs_size.push_back(std::pair<int, int>(d_src.rows, d_src.cols)); if (_keep_orig) _cuda_orig_imgs.push_back(d_src); cv::cuda::GpuMat d_dst; prepare_cuda(d_src, d_dst, img_name); _cuda_imgs.push_back(std::move(d_dst)); return 0; } #endif // decode image void decode(const std::string &str) { std::vector<unsigned char> vdat(str.begin(), str.end()); cv::Mat img = cv::Mat(cv::imdecode( cv::Mat(vdat, false), _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED : (_bw ? CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR))); add_image(img, "base64 image"); } // deserialize image, independent of format void deserialize(std::stringstream &input) { size_t size = 0; input.seekg(0, input.end); size = input.tellg(); input.seekg(0, input.beg); char *data = new char[size]; input.read(data, size); std::string str(data, data + size); delete[] data; decode(str); } // data acquisition int read_file(const std::string &fname, int test_id) { (void)test_id; cv::Mat img = cv::imread(fname, _unchanged_data ? CV_LOAD_IMAGE_UNCHANGED : (_bw ? 
CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR)); return add_image(img, fname); } int read_db(const std::string &fname) { _db_fname = fname; return 0; } int read_mem(const std::string &content) { _in_mem = true; cv::Mat timg; _b64 = possibly_base64(content); if (_b64) { std::string ccontent; Base64::Decode(content, &ccontent); std::stringstream sstr; sstr << ccontent; deserialize(sstr); } else { decode(content); } if (_imgs.at(0).empty()) return -1; return 0; } int read_dir(const std::string &dir, int test_id) { (void)test_id; // list directories in dir std::unordered_set<std::string> subdirs; if (fileops::list_directory(dir, false, true, false, subdirs)) throw InputConnectorBadParamException( "failed reading text subdirectories in data directory " + dir); _logger->info("imginputfileconn: list subdirs size={}", subdirs.size()); // list files and classes std::vector<std::pair<std::string, int>> lfiles; // labeled files std::unordered_map<int, std::string> hcorresp; // correspondence class number / class name if (!subdirs.empty()) { int cl = 0; auto uit = subdirs.begin(); while (uit != subdirs.end()) { std::unordered_set<std::string> subdir_files; if (fileops::list_directory((*uit), true, false, true, subdir_files)) throw InputConnectorBadParamException( "failed reading image data sub-directory " + (*uit)); auto fit = subdir_files.begin(); while (fit != subdir_files.end()) // XXX: re-iterating the file // is not optimal { lfiles.push_back(std::pair<std::string, int>((*fit), cl)); ++fit; } ++cl; ++uit; } } else { std::unordered_set<std::string> test_files; fileops::list_directory(dir, true, false, false, test_files); auto fit = test_files.begin(); while (fit != test_files.end()) { lfiles.push_back( std::pair<std::string, int>((*fit), -1)); // -1 for no class ++fit; } } // read images _imgs.reserve(lfiles.size()); _img_files.reserve(lfiles.size()); _labels.reserve(lfiles.size()); for (std::pair<std::string, int> &p : lfiles) { cv::Mat img = cv::imread( p.first, 
_unchanged_data ? CV_LOAD_IMAGE_UNCHANGED : (_bw ? CV_LOAD_IMAGE_GRAYSCALE : CV_LOAD_IMAGE_COLOR)); add_image(img, p.first); _img_files.push_back(p.first); if (p.second >= 0) _labels.push_back(p.second); if (_imgs.size() % 1000 == 0) _logger->info("read {} images", _imgs.size()); } return 0; } int select_cv_interp() const { if (_interp == "nearest") return cv::INTER_NEAREST; else if (_interp == "linear") return cv::INTER_LINEAR; else if (_interp == "area") return cv::INTER_AREA; else if (_interp == "lanczos4") return cv::INTER_LANCZOS4; else /* if (_interp == "cubic") */ return cv::INTER_CUBIC; // default } std::vector<cv::Mat> _imgs; std::vector<cv::Mat> _orig_imgs; std::vector<std::string> _img_files; std::vector<std::pair<int, int>> _imgs_size; bool _bw = false; bool _rgb = false; bool _histogram_equalization = false; bool _in_mem = false; bool _unchanged_data = false; std::vector<int> _labels; int _width = 224; int _height = 224; int _crop_width = 0; int _crop_height = 0; float _scale = 1.0; bool _scaled = false; int _scale_min = 600; int _scale_max = 1000; bool _keep_orig = false; bool _b64 = false; std::string _interp = "cubic"; #ifdef USE_CUDA_CV bool _cuda = false; std::vector<cv::cuda::GpuMat> _cuda_imgs; std::vector<cv::cuda::GpuMat> _cuda_orig_imgs; cv::cuda::Stream *_cuda_stream = nullptr; #endif std::string _db_fname; std::shared_ptr<spdlog::logger> _logger; }; class ImgInputFileConn : public InputConnectorStrategy { public: ImgInputFileConn() : InputConnectorStrategy() { } ImgInputFileConn(const ImgInputFileConn &i) : InputConnectorStrategy(i), _width(i._width), _height(i._height), _crop_width(i._crop_width), _crop_height(i._crop_height), _bw(i._bw), _rgb(i._rgb), _unchanged_data(i._unchanged_data), _test_split(i._test_split), _mean(i._mean), _has_mean_scalar(i._has_mean_scalar), _scale(i._scale), _scaled(i._scaled), _scale_min(i._scale_min), _scale_max(i._scale_max), _keep_orig(i._keep_orig), _interp(i._interp) #ifdef USE_CUDA_CV , _cuda(i._cuda) 
#endif { } ~ImgInputFileConn() { } void init(const APIData &ad) { fillup_parameters(ad); } void fillup_parameters(const APIData &ad) { auto params = ad.createSharedDTO<dd::DTO::InputConnector>(); fillup_parameters(params); } void fillup_parameters(oatpp::Object<DTO::InputConnector> params) { // optional parameters. if (params->width) _width = params->width; if (params->height) _height = params->height; if (params->crop_width) { if (params->crop_width > _width) { _logger->error("Crop width must be less than or equal to width"); throw InputConnectorBadParamException( "Crop width must be less than or equal to width"); } _width = params->crop_width; } if (params->crop_height) { if (params->crop_height > _height) { _logger->error( "Crop height must be less than or equal to height"); throw InputConnectorBadParamException( "Crop height must be less than or equal to height"); } _height = params->crop_height; } if (params->bw != nullptr) _bw = params->bw; if (params->rgb != nullptr) _rgb = params->rgb; if (params->histogram_equalization != nullptr) _histogram_equalization = params->histogram_equalization; if (params->unchanged_data != nullptr) _unchanged_data = params->unchanged_data; if (params->shuffle != nullptr) _shuffle = params->shuffle; if (params->seed) _seed = params->seed; if (params->test_split) _test_split = params->test_split; if (params->mean) { // NOTE(sileht): if we have two much of this we can create // an oat++ type that directly handle std::vector<float> instead // of using the oatpp::Vector<oatpp::Float32> _mean = std::vector<float>(); for (auto &v : *params->mean) _mean.push_back(v); _has_mean_scalar = true; } if (params->std) { _std = std::vector<float>(); for (auto &v : *params->std) _std.push_back(v); } // Variable size _scaled |= params->scaled; if (params->scale) try { _scale = params->scale.retrieve<oatpp::Float64>(); } catch (const std::runtime_error &error) { std::string msg = "could not read double value for scale input parameter"; 
_logger->error(msg); throw InputConnectorBadParamException(msg); } if (params->scale_min) { _scaled = true; _scale_min = params->scale_min; } if (params->scale_max) { _scaled = true; _scale_max = params->scale_max; } // whether to keep original image (for chained ops, e.g. cropping) _keep_orig |= params->keep_orig; // image interpolation method if (params->interp) _interp = params->interp->std_str(); // timeout this->set_timeout(params); #ifdef USE_CUDA_CV // image resizing on GPU _cuda |= params->cuda; #endif } void copy_parameters_to(DDImg &dimg) const { dimg._bw = _bw; dimg._rgb = _rgb; dimg._histogram_equalization = _histogram_equalization; dimg._unchanged_data = _unchanged_data; dimg._width = _width; dimg._height = _height; dimg._crop_width = _crop_width; dimg._crop_height = _crop_height; dimg._scale = _scale; dimg._scaled = _scaled; dimg._scale_min = _scale_min; dimg._scale_max = _scale_max; dimg._keep_orig = _keep_orig; dimg._interp = _interp; #ifdef USE_CUDA_CV dimg._cuda = _cuda; dimg._cuda_stream = _cuda_stream; #endif dimg._logger = _logger; } int feature_size() const { if (_bw || _unchanged_data) { // XXX: only valid for single channels if (_crop_width != 0 && _crop_height != 0) return _crop_width * _crop_height; else return _width * _height; } else { // RGB if (_crop_width != 0 && _crop_height != 0) return _crop_width * _crop_height * 3; else return _width * _height * 3; } } int batch_size() const { return _images.size(); } int test_batch_size() const { return _test_images.size(); } // add cuda raw images void add_raw_images(const std::vector<cv::Mat> &imgs #ifdef USE_CUDA_CV , const std::vector<cv::cuda::GpuMat> &cuda_imgs #endif ) { std::vector<std::string> uris; DataEl<DDImg> dimg(this->_input_timeout); copy_parameters_to(dimg._ctype); int i = 0; // preprocess #ifdef USE_CUDA_CV for (auto cuda_img : cuda_imgs) { if (!_ids.empty()) uris.push_back(_ids.at(i)); else { _ids.push_back(std::to_string(i)); uris.push_back(_ids.back()); } 
dimg._ctype.add_image_cuda(cuda_img, _ids.back()); ++i; } #endif for (auto img : imgs) { if (!_ids.empty()) uris.push_back(_ids.at(i)); else { _ids.push_back(std::to_string(i)); uris.push_back(_ids.back()); } dimg._ctype.add_image(img, _ids.back()); ++i; } // add preprocessed images #ifdef USE_CUDA_CV if (_cuda) { if (_keep_orig) _cuda_orig_images.insert(_cuda_orig_images.end(), dimg._ctype._cuda_orig_imgs.begin(), dimg._ctype._cuda_orig_imgs.end()); _cuda_images.insert(_cuda_images.end(), dimg._ctype._cuda_imgs.begin(), dimg._ctype._cuda_imgs.end()); } else #endif { if (_keep_orig) _orig_images = dimg._ctype._orig_imgs; _images = dimg._ctype._imgs; } _images_size.insert(_images_size.end(), dimg._ctype._imgs_size.begin(), dimg._ctype._imgs_size.end()); if (!uris.empty()) _uris = uris; } void get_data(oatpp::Object<DTO::ServicePredict> pred_in) { if (!pred_in->_data_raw_img.empty() #ifdef USE_CUDA_CV || !pred_in->_data_raw_img_cuda.empty() #endif ) { _ids = pred_in->_ids; _meta_uris = pred_in->_meta_uris; _index_uris = pred_in->_index_uris; add_raw_images(pred_in->_data_raw_img #ifdef USE_CUDA_CV , pred_in->_data_raw_img_cuda #endif ); } else InputConnectorStrategy::get_data(pred_in); } void get_data(const APIData &ad) { // check for raw cv::Mat if (ad.has("data_raw_img") #ifdef USE_CUDA_CV || ad.has("data_raw_img_cuda") #endif ) { if (ad.has("ids")) _ids = ad.get("ids").get<std::vector<std::string>>(); if (ad.has("meta_uris")) _meta_uris = ad.get("meta_uris").get<std::vector<std::string>>(); if (ad.has("index_uris")) _index_uris = ad.get("index_uris").get<std::vector<std::string>>(); std::vector<cv::Mat> imgs = ad.has("data_raw_img") ? ad.get("data_raw_img").get<std::vector<cv::Mat>>() : std::vector<cv::Mat>(); #ifdef USE_CUDA_CV std::vector<cv::cuda::GpuMat> cuda_imgs = ad.has("data_raw_img_cuda") ? 
ad.get("data_raw_img_cuda") .get<std::vector<cv::cuda::GpuMat>>() : std::vector<cv::cuda::GpuMat>(); add_raw_images(imgs, cuda_imgs); #else add_raw_images(imgs); #endif } else InputConnectorStrategy::get_data(ad); } void transform(const APIData &ad) { if (ad.has( "parameters")) // hotplug of parameters, overriding the defaults { APIData ad_param = ad.getobj("parameters"); if (ad_param.has("input")) { fillup_parameters(ad_param.getobj("input")); } } get_data(ad); transform(nullptr); } void transform(oatpp::Object<DTO::ServicePredict> input_dto) { if (input_dto != nullptr) // [temporary] == nullptr if called from // transform(APIData) { fillup_parameters(input_dto->parameters->input); get_data(input_dto); } if (!_images.empty() // got ready raw images #ifdef USE_CUDA_CV || !_cuda_images.empty() // got ready cuda images #endif ) { return; } int catch_read = 0; std::string catch_msg; std::vector<std::string> uris; std::vector<std::string> meta_uris; std::vector<std::string> index_uris; std::vector<std::string> failed_uris; #pragma omp parallel for for (size_t i = 0; i < _uris.size(); i++) { bool no_img = false; std::string u = _uris.at(i); DataEl<DDImg> dimg(this->_input_timeout); copy_parameters_to(dimg._ctype); try { if (dimg.read_element(u, this->_logger)) { _logger->error("no data for image {}", u); no_img = true; } if (!dimg._ctype._db_fname.empty()) _db_fname = dimg._ctype._db_fname; } catch (std::exception &e) { #pragma omp critical { ++catch_read; catch_msg = e.what(); failed_uris.push_back(u); no_img = true; } } if (no_img) continue; if (!_db_fname.empty()) continue; #pragma omp critical { #ifdef USE_CUDA_CV if (_cuda) { _cuda_images.insert( _cuda_images.end(), std::make_move_iterator(dimg._ctype._cuda_imgs.begin()), std::make_move_iterator(dimg._ctype._cuda_imgs.end())); _cuda_orig_images.insert( _cuda_orig_images.end(), std::make_move_iterator( dimg._ctype._cuda_orig_imgs.begin()), std::make_move_iterator( dimg._ctype._cuda_orig_imgs.end())); } else #endif { 
_images.insert( _images.end(), std::make_move_iterator(dimg._ctype._imgs.begin()), std::make_move_iterator(dimg._ctype._imgs.end())); if (_keep_orig) _orig_images.insert( _orig_images.end(), std::make_move_iterator(dimg._ctype._orig_imgs.begin()), std::make_move_iterator(dimg._ctype._orig_imgs.end())); } _images_size.insert( _images_size.end(), std::make_move_iterator(dimg._ctype._imgs_size.begin()), std::make_move_iterator(dimg._ctype._imgs_size.end())); if (!dimg._ctype._labels.empty()) _test_labels.insert( _test_labels.end(), std::make_move_iterator(dimg._ctype._labels.begin()), std::make_move_iterator(dimg._ctype._labels.end())); if (!_ids.empty()) uris.push_back(_ids.at(i)); else if (!dimg._ctype._b64 && dimg._ctype._imgs.size() == 1) uris.push_back(u); else if (!dimg._ctype._img_files.empty()) uris.insert( uris.end(), std::make_move_iterator(dimg._ctype._img_files.begin()), std::make_move_iterator(dimg._ctype._img_files.end())); else uris.push_back(std::to_string(i)); if (!_meta_uris.empty()) meta_uris.push_back(_meta_uris.at(i)); if (!_index_uris.empty()) index_uris.push_back(_index_uris.at(i)); } } if (catch_read) { for (auto s : failed_uris) _logger->error("failed reading image {}", s); throw InputConnectorBadParamException(catch_msg); } _uris = uris; _ids = _uris; // since uris may be in different order than before // transform _meta_uris = meta_uris; _index_uris = index_uris; if (!_db_fname.empty()) return; // db filename is passed to backend // shuffle before possible split if (_shuffle) { std::mt19937 g; if (_seed >= 0) g = std::mt19937(_seed); else { std::random_device rd; g = std::mt19937(rd()); } std::shuffle(_images.begin(), _images.end(), g); // XXX beware: labels are not shuffled, i.e. 
let's // not shuffle while testing } // split as required if (_test_split > 0) { int split_size = std::floor(_images.size() * (1.0 - _test_split)); auto chit = _images.begin(); auto dchit = chit; int cpos = 0; while (chit != _images.end()) { if (cpos == split_size) { if (dchit == _images.begin()) dchit = chit; _test_images.push_back((*chit)); } else ++cpos; ++chit; } _images.erase(dchit, _images.end()); _logger->info("data split test size={} / remaining data size={}", _test_images.size(), _images.size()); } if (_images.empty() #ifdef USE_CUDA_CV && _cuda_images.empty() #endif ) throw InputConnectorBadParamException("no image could be found"); } static std::vector<double> img_resize_vector(const std::vector<double> &vals, const int height_net, const int width_net, const int height_dest, const int width_dest, bool resize_nn) { cv::Mat segimg = cv::Mat(height_net, width_net, CV_64FC1); std::memcpy(segimg.data, vals.data(), vals.size() * sizeof(double)); cv::Mat segimg_res; if (resize_nn) cv::resize(segimg, segimg_res, cv::Size(width_dest, height_dest), 0, 0, cv::INTER_NEAREST); else cv::resize(segimg, segimg_res, cv::Size(width_dest, height_dest), 0, 0, cv::INTER_LINEAR); return std::vector<double>((double *)segimg_res.data, (double *)segimg_res.data + segimg_res.rows * segimg_res.cols); } // data std::vector<cv::Mat> _images; std::vector<cv::Mat> _orig_images; /**< stored upon request. */ std::vector<cv::Mat> _test_images; std::vector<int> _test_labels; std::vector<std::pair<int, int>> _images_size; #ifdef USE_CUDA_CV std::vector<cv::cuda::GpuMat> _cuda_images; /**< cuda images for full-GPU processing. */ std::vector<cv::cuda::GpuMat> _cuda_orig_images; /**< original images stored on GPU */ #endif // image parameters int _width = 224; int _height = 224; int _crop_width = 0; int _crop_height = 0; bool _bw = false; /**< whether to convert to black & white. */ bool _rgb = false; /**< whether to convert to rgb. 
*/ bool _histogram_equalization = false; /**< whether to apply histogram equalizer. */ bool _unchanged_data = false; /**< IMREAD_UNCHANGED flag. */ double _test_split = 0.0; /**< auto-split of the dataset. */ int _seed = -1; /**< shuffling seed. */ std::vector<float> _mean; /**< mean image pixels, to be subtracted from images. */ std::vector<float> _std; /**< std, to divide image values. */ bool _has_mean_scalar = false; /**< whether scalar is set. */ std::string _db_fname; double _scale = 1.0; bool _scaled = false; int _scale_min = 600; int _scale_max = 1000; bool _keep_orig = false; std::string _interp = "cubic"; #ifdef USE_CUDA_CV bool _cuda = false; cv::cuda::Stream *_cuda_stream = &cv::cuda::Stream::Null(); #endif }; } #endif
FlopCounterFunctor.h
/** * @file FlopCounterFunctor.h * * @date 22 Jan 2018 * @author tchipevn */ #pragma once #include "autopas/pairwiseFunctors/Functor.h" #include "autopas/utils/ArrayMath.h" namespace autopas { /** * This class helps in getting the number of performed floating point * operations. It is a functor that only calculated the amount of floating point * operations. * @todo this class currently is limited to the following case: * - constant cutoff radius * - constant amount of floating point operations for one kernel call (distance * < cutoff) * @tparam Particle * @tparam ParticleCell */ template <class Particle, class ParticleCell> class FlopCounterFunctor : public Functor<Particle, ParticleCell, typename Particle::SoAArraysType, FlopCounterFunctor<Particle, ParticleCell>> { typedef typename Particle::SoAArraysType SoAArraysType; public: bool isRelevantForTuning() override { return false; } bool allowsNewton3() override { return true; } bool allowsNonNewton3() override { return true; } /** * constructor of FlopCounterFunctor * @param cutoffRadius the cutoff radius */ explicit FlopCounterFunctor<Particle, ParticleCell>(typename Particle::ParticleFloatingPointType cutoffRadius) : autopas::Functor<Particle, ParticleCell, typename Particle::SoAArraysType, FlopCounterFunctor<Particle, ParticleCell>>(cutoffRadius), _cutoffSquare(cutoffRadius * cutoffRadius), _distanceCalculations(0ul), _kernelCalls(0ul) {} void AoSFunctor(Particle &i, Particle &j, bool newton3) override { auto dr = ArrayMath::sub(i.getR(), j.getR()); double dr2 = ArrayMath::dot(dr, dr); #ifdef AUTOPAS_OPENMP #pragma omp critical #endif { ++_distanceCalculations; if (dr2 <= _cutoffSquare) ++_kernelCalls; }; } void SoAFunctor(SoAView<SoAArraysType> soa, bool newton3) override { if (soa.getNumParticles() == 0) return; double *const __restrict__ x1ptr = soa.template begin<Particle::AttributeNames::posX>(); double *const __restrict__ y1ptr = soa.template begin<Particle::AttributeNames::posY>(); double *const 
__restrict__ z1ptr = soa.template begin<Particle::AttributeNames::posZ>(); for (unsigned int i = 0; i < soa.getNumParticles(); ++i) { unsigned long distanceCalculationsAcc = 0; unsigned long kernelCallsAcc = 0; // icpc vectorizes this. // g++ only with -ffast-math or -funsafe-math-optimizations #pragma omp simd reduction(+ : kernelCallsAcc, distanceCalculationsAcc) for (unsigned int j = i + 1; j < soa.getNumParticles(); ++j) { ++distanceCalculationsAcc; const double drx = x1ptr[i] - x1ptr[j]; const double dry = y1ptr[i] - y1ptr[j]; const double drz = z1ptr[i] - z1ptr[j]; const double drx2 = drx * drx; const double dry2 = dry * dry; const double drz2 = drz * drz; const double dr2 = drx2 + dry2 + drz2; if (dr2 <= _cutoffSquare) ++kernelCallsAcc; } #ifdef AUTOPAS_OPENMP #pragma omp critical #endif { _distanceCalculations += distanceCalculationsAcc; _kernelCalls += kernelCallsAcc; } } } void SoAFunctor(SoAView<SoAArraysType> soa1, SoAView<SoAArraysType> soa2, bool newton3) override { double *const __restrict__ x1ptr = soa1.template begin<Particle::AttributeNames::posX>(); double *const __restrict__ y1ptr = soa1.template begin<Particle::AttributeNames::posY>(); double *const __restrict__ z1ptr = soa1.template begin<Particle::AttributeNames::posZ>(); double *const __restrict__ x2ptr = soa2.template begin<Particle::AttributeNames::posX>(); double *const __restrict__ y2ptr = soa2.template begin<Particle::AttributeNames::posY>(); double *const __restrict__ z2ptr = soa2.template begin<Particle::AttributeNames::posZ>(); for (unsigned int i = 0; i < soa1.getNumParticles(); ++i) { unsigned long distanceCalculationsAcc = 0; unsigned long kernelCallsAcc = 0; // icpc vectorizes this. 
// g++ only with -ffast-math or -funsafe-math-optimizations #pragma omp simd reduction(+ : kernelCallsAcc, distanceCalculationsAcc) for (unsigned int j = 0; j < soa2.getNumParticles(); ++j) { ++distanceCalculationsAcc; const double drx = x1ptr[i] - x2ptr[j]; const double dry = y1ptr[i] - y2ptr[j]; const double drz = z1ptr[i] - z2ptr[j]; const double drx2 = drx * drx; const double dry2 = dry * dry; const double drz2 = drz * drz; const double dr2 = drx2 + dry2 + drz2; if (dr2 <= _cutoffSquare) { ++kernelCallsAcc; } } #ifdef AUTOPAS_OPENMP #pragma omp critical #endif { _distanceCalculations += distanceCalculationsAcc; _kernelCalls += kernelCallsAcc; } } } void SoAFunctor(SoAView<SoAArraysType> soa, const std::vector<std::vector<size_t, autopas::AlignedAllocator<size_t>>> &neighborList, size_t iFrom, size_t iTo, bool newton3) override { auto numParts = soa.getNumParticles(); if (numParts == 0) return; double *const __restrict__ xptr = soa.template begin<Particle::AttributeNames::posX>(); double *const __restrict__ yptr = soa.template begin<Particle::AttributeNames::posY>(); double *const __restrict__ zptr = soa.template begin<Particle::AttributeNames::posZ>(); for (size_t i = iFrom; i < iTo; ++i) { const size_t listSizeI = neighborList[i].size(); const size_t *const __restrict__ currentList = neighborList[i].data(); // this is a magic number, that should correspond to at least // vectorization width*N have testet multiple sizes: // 4: small speedup compared to AoS // 8: small speedup compared to AoS // 12: small but best speedup compared to Aos // 16: smaller speedup // in theory this is a variable, we could auto-tune over... #ifdef __AVX512F__ // use a multiple of 8 for avx const size_t vecsize = 16; #else // for everything else 12 is faster const size_t vecsize = 12; #endif size_t joff = 0; // if the size of the verlet list is larger than the given size vecsize, // we will use a vectorized version. 
if (listSizeI >= vecsize) { alignas(64) std::array<double, vecsize> xtmp{}, ytmp{}, ztmp{}, xArr{}, yArr{}, zArr{}; // broadcast of the position of particle i for (size_t tmpj = 0; tmpj < vecsize; tmpj++) { xtmp[tmpj] = xptr[i]; ytmp[tmpj] = yptr[i]; ztmp[tmpj] = zptr[i]; } // loop over the verlet list from 0 to x*vecsize for (; joff < listSizeI - vecsize + 1; joff += vecsize) { unsigned long distanceCalculationsAcc = 0; unsigned long kernelCallsAcc = 0; // in each iteration we calculate the interactions of particle i with // vecsize particles in the neighborlist of particle i starting at // particle joff // gather position of particle j #pragma omp simd safelen(vecsize) for (size_t tmpj = 0; tmpj < vecsize; tmpj++) { xArr[tmpj] = xptr[currentList[joff + tmpj]]; yArr[tmpj] = yptr[currentList[joff + tmpj]]; zArr[tmpj] = zptr[currentList[joff + tmpj]]; } // do omp simd with reduction of the interaction #pragma omp simd reduction(+ : kernelCallsAcc, distanceCalculationsAcc) safelen(vecsize) for (size_t j = 0; j < vecsize; j++) { ++distanceCalculationsAcc; const double drx = xtmp[j] - xArr[j]; const double dry = ytmp[j] - yArr[j]; const double drz = ztmp[j] - zArr[j]; const double drx2 = drx * drx; const double dry2 = dry * dry; const double drz2 = drz * drz; const double dr2 = drx2 + dry2 + drz2; const unsigned long mask = (dr2 <= _cutoffSquare) ? 
1 : 0; kernelCallsAcc += mask; } #ifdef AUTOPAS_OPENMP #pragma omp critical #endif { _distanceCalculations += distanceCalculationsAcc; _kernelCalls += kernelCallsAcc; } } } unsigned long distanceCalculationsAcc = 0; unsigned long kernelCallsAcc = 0; // this loop goes over the remainder and uses no optimizations for (size_t jNeighIndex = joff; jNeighIndex < listSizeI; ++jNeighIndex) { size_t j = neighborList[i][jNeighIndex]; if (i == j) continue; ++distanceCalculationsAcc; const double drx = xptr[i] - xptr[j]; const double dry = yptr[i] - yptr[j]; const double drz = zptr[i] - zptr[j]; const double drx2 = drx * drx; const double dry2 = dry * dry; const double drz2 = drz * drz; const double dr2 = drx2 + dry2 + drz2; if (dr2 <= _cutoffSquare) { ++kernelCallsAcc; } } #ifdef AUTOPAS_OPENMP #pragma omp critical #endif { _distanceCalculations += distanceCalculationsAcc; _kernelCalls += kernelCallsAcc; } } } void CudaFunctor(CudaSoA<typename Particle::CudaDeviceArraysType> &device_handle, bool newton3) override { #if defined(AUTOPAS_CUDA) // estimate flops on GPU size_t size = device_handle.template get<Particle::AttributeNames::posX>().size(); _distanceCalculations += size * size; _kernelCalls += size * size; #else utils::ExceptionHandler::exception("AutoPas was compiled without CUDA support!"); #endif } void CudaFunctor(CudaSoA<typename Particle::CudaDeviceArraysType> &device_handle1, CudaSoA<typename Particle::CudaDeviceArraysType> &device_handle2, bool newton3) override { #if defined(AUTOPAS_CUDA) // estimate flops on GPU size_t size1 = device_handle1.template get<Particle::AttributeNames::posX>().size(); size_t size2 = device_handle2.template get<Particle::AttributeNames::posX>().size(); _distanceCalculations += size1 * size2; _kernelCalls += size1 * size2; #else utils::ExceptionHandler::exception("AutoPas was compiled without CUDA support!"); #endif } /** * @copydoc Functor::deviceSoALoader */ void deviceSoALoader(::autopas::SoA<SoAArraysType> &soa, CudaSoA<typename 
Particle::CudaDeviceArraysType> &device_handle) override { #if defined(AUTOPAS_CUDA) size_t size = soa.getNumParticles(); if (size == 0) return; device_handle.template get<Particle::AttributeNames::posX>().copyHostToDevice( size, soa.template begin<Particle::AttributeNames::posX>()); device_handle.template get<Particle::AttributeNames::posY>().copyHostToDevice( size, soa.template begin<Particle::AttributeNames::posY>()); device_handle.template get<Particle::AttributeNames::posZ>().copyHostToDevice( size, soa.template begin<Particle::AttributeNames::posZ>()); device_handle.template get<Particle::AttributeNames::forceX>().copyHostToDevice( size, soa.template begin<Particle::AttributeNames::forceX>()); device_handle.template get<Particle::AttributeNames::forceY>().copyHostToDevice( size, soa.template begin<Particle::AttributeNames::forceY>()); device_handle.template get<Particle::AttributeNames::forceZ>().copyHostToDevice( size, soa.template begin<Particle::AttributeNames::forceZ>()); #else utils::ExceptionHandler::exception("AutoPas was compiled without CUDA support!"); #endif } /** * @copydoc Functor::deviceSoAExtractor */ void deviceSoAExtractor(::autopas::SoA<SoAArraysType> &soa, CudaSoA<typename Particle::CudaDeviceArraysType> &device_handle) override { #if defined(AUTOPAS_CUDA) size_t size = soa.getNumParticles(); if (size == 0) return; device_handle.template get<Particle::AttributeNames::forceX>().copyDeviceToHost( size, soa.template begin<Particle::AttributeNames::forceX>()); device_handle.template get<Particle::AttributeNames::forceY>().copyDeviceToHost( size, soa.template begin<Particle::AttributeNames::forceY>()); device_handle.template get<Particle::AttributeNames::forceZ>().copyDeviceToHost( size, soa.template begin<Particle::AttributeNames::forceZ>()); #else utils::ExceptionHandler::exception("AutoPas was compiled without CUDA support!"); #endif } /** * @copydoc Functor::getNeededAttr() */ constexpr static const std::array<typename 
Particle::AttributeNames, 3> getNeededAttr() { return std::array<typename Particle::AttributeNames, 3>{ Particle::AttributeNames::posX, Particle::AttributeNames::posY, Particle::AttributeNames::posZ}; } /** * @copydoc Functor::getNeededAttr(std::false_type) */ constexpr static const std::array<typename Particle::AttributeNames, 3> getNeededAttr(std::false_type) { return getNeededAttr(); } /** * @copydoc Functor::getComputedAttr() */ constexpr static const std::array<typename Particle::AttributeNames, 0> getComputedAttr() { return std::array<typename Particle::AttributeNames, 0>{/*Nothing*/}; } /** * get the hit rate of the pair-wise interaction, i.e. the ratio of the number * of kernel calls compared to the number of distance calculations * @return the hit rate */ double getHitRate() { return static_cast<double>(_kernelCalls) / static_cast<double>(_distanceCalculations); } /** * get the total number of flops * @param numFlopsPerKernelCall * @return */ double getFlops(unsigned long numFlopsPerKernelCall) const { const double distFlops = numFlopsPerDistanceCalculation * static_cast<double>(_distanceCalculations); const double kernFlops = numFlopsPerKernelCall * static_cast<double>(_kernelCalls); return distFlops + kernFlops; } /** * get the number of calculated distance operations * @return */ unsigned long getDistanceCalculations() const { return _distanceCalculations; } /** * get the number of kernel calls, i.e. the number of pairs of particles with * a distance not larger than the cutoff * @return */ unsigned long getKernelCalls() const { return _kernelCalls; } /** * number of flops for one distance calculation. * 3 sub + 3 square + 2 add */ static constexpr double numFlopsPerDistanceCalculation = 8.0; private: double _cutoffSquare; unsigned long _distanceCalculations, _kernelCalls; }; } // namespace autopas
bug_set_schedule_0.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <omp.h> #include "omp_testsuite.h" /* Test that the chunk size is set to default (1) when chunk size <= 0 is specified */ int a = 0; int test_set_schedule_0() { int i; a = 0; omp_set_schedule(omp_sched_dynamic,0); #pragma omp parallel { #pragma omp for schedule(runtime) for(i = 0; i < 10; i++) { #pragma omp atomic a++; if(a > 10) exit(1); } } return a==10; } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_set_schedule_0()) { num_failed++; } } return num_failed; }
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/property.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
*/

/*
  Minimal built-in threshold maps: a 1x1 non-dithering threshold and a 2x2
  checkerboard dither.  These are always available, even when no external
  thresholds configuration file can be loaded.
*/
static const char
  *MinimalThresholdMap =
    "<?xml version=\"1.0\"?>"
    "<thresholds>"
    " <threshold map=\"threshold\" alias=\"1x1\">"
    " <description>Threshold 1x1 (non-dither)</description>"
    " <levels width=\"1\" height=\"1\" divisor=\"2\">"
    " 1"
    " </levels>"
    " </threshold>"
    " <threshold map=\"checks\" alias=\"2x1\">"
    " <description>Checkerboard 2x1 (dither)</description>"
    " <levels width=\"2\" height=\"2\" divisor=\"3\">"
    " 1 2"
    " 2 1"
    " </levels>"
    " </threshold>"
    "</thresholds>";

/*
  Forward declarations.
*/
static ThresholdMap
  *GetThresholdMapFile(const char *,const char *,const char *,
    ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d a p t i v e T h r e s h o l d I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveThresholdImage() selects an individual threshold for each pixel
%  based on the range of intensity values in its local neighborhood.  This
%  allows for thresholding of an image whose global intensity histogram
%  doesn't contain distinctive peaks.
%
%  The format of the AdaptiveThresholdImage method is:
%
%      Image *AdaptiveThresholdImage(const Image *image,const size_t width,
%        const size_t height,const double bias,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the width of the local neighborhood.
%
%    o height: the height of the local neighborhood.
%
%    o bias: the mean bias.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
  const size_t width,const size_t height,const double bias,
  ExceptionInfo *exception)
{
#define AdaptiveThresholdImageTag  "AdaptiveThreshold/Image"

  CacheView
    *image_view,
    *threshold_view;

  Image
    *threshold_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickSizeType
    number_pixels;

  ssize_t
    y;

  /*
    Initialize threshold image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  threshold_image=CloneImage(image,0,0,MagickTrue,exception);
  if (threshold_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(threshold_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      threshold_image=DestroyImage(threshold_image);
      return((Image *) NULL);
    }
  /*
    Threshold image.
  */
  status=MagickTrue;
  progress=0;
  number_pixels=(MagickSizeType) width*height;
  image_view=AcquireVirtualCacheView(image,exception);
  threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,threshold_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_bias[MaxPixelChannels],
      channel_sum[MaxPixelChannels];

    register const Quantum
      *magick_restrict p,
      *magick_restrict pixels;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      center,
      u,
      v;

    if (status == MagickFalse)
      continue;
    /*
      Read a full neighborhood-high strip centered on row y; virtual pixels
      supply the out-of-bounds border.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (height/2L),image->columns+width,height,exception);
    q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Offset of the center pixel of the neighborhood within the strip.
    */
    center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+
      GetPixelChannels(image)*(width/2);
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel channel = GetPixelChannelChannel(image,i);
      PixelTrait traits = GetPixelChannelTraits(image,channel);
      PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
        channel);
      if ((traits == UndefinedPixelTrait) ||
  */
  cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*cumulative_histogram));
  black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*black_entropy));
  white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*white_entropy));
  if ((cumulative_histogram == (double *) NULL) ||
      (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL))
    {
      /*
        Release whichever buffers were successfully acquired.
      */
      if (white_entropy != (double *) NULL)
        white_entropy=(double *) RelinquishMagickMemory(white_entropy);
      if (black_entropy != (double *) NULL)
        black_entropy=(double *) RelinquishMagickMemory(black_entropy);
      if (cumulative_histogram != (double *) NULL)
        cumulative_histogram=(double *)
          RelinquishMagickMemory(cumulative_histogram);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Entropy for black and white parts of the histogram.
  */
  cumulative_histogram[0]=histogram[0];
  for (i=1; i <= MaxIntensity; i++)
    cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i];
  epsilon=MagickMinimumValue;
  for (j=0; j <= MaxIntensity; j++)
  {
    /*
      Black entropy.
    */
    black_entropy[j]=0.0;
    if (cumulative_histogram[j] > epsilon)
      {
        entropy=0.0;
        for (i=0; i <= j; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/cumulative_histogram[j]*
              log(histogram[i]/cumulative_histogram[j]);
        black_entropy[j]=entropy;
      }
    /*
      White entropy.
    */
    white_entropy[j]=0.0;
    if ((1.0-cumulative_histogram[j]) > epsilon)
      {
        entropy=0.0;
        for (i=j+1; i <= MaxIntensity; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/(1.0-cumulative_histogram[j])*
              log(histogram[i]/(1.0-cumulative_histogram[j]));
        white_entropy[j]=entropy;
      }
  }
  /*
    Find histogram bin with maximum entropy.
  */
  maximum_entropy=black_entropy[0]+white_entropy[0];
  threshold=0;
  for (j=1; j <= MaxIntensity; j++)
    if ((black_entropy[j]+white_entropy[j]) > maximum_entropy)
      {
        maximum_entropy=black_entropy[j]+white_entropy[j];
        threshold=(size_t) j;
      }
  /*
    Free resources.
  */
  white_entropy=(double *) RelinquishMagickMemory(white_entropy);
  black_entropy=(double *) RelinquishMagickMemory(black_entropy);
  cumulative_histogram=(double *)
    RelinquishMagickMemory(cumulative_histogram);
  /*
    Return the threshold as a percentage of the intensity range.
  */
  return(100.0*threshold/MaxIntensity);
}

static double OTSUThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  double
    max_sigma,
    *myu,
    *omega,
    *probability,
    *sigma,
    threshold;

  register ssize_t
    i;

  /*
    Compute optimal threshold from maximization of inter-class variance.
  */
  myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu));
  omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega));
  probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*probability));
  sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma));
  if ((myu == (double *) NULL) || (omega == (double *) NULL) ||
      (probability == (double *) NULL) || (sigma == (double *) NULL))
    {
      /*
        Release whichever buffers were successfully acquired.
      */
      if (sigma != (double *) NULL)
        sigma=(double *) RelinquishMagickMemory(sigma);
      if (probability != (double *) NULL)
        probability=(double *) RelinquishMagickMemory(probability);
      if (omega != (double *) NULL)
        omega=(double *) RelinquishMagickMemory(omega);
      if (myu != (double *) NULL)
        myu=(double *) RelinquishMagickMemory(myu);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Calculate probability density.
  */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    probability[i]=histogram[i];
  /*
    Generate probability of graylevels and mean value for separation.
  */
  omega[0]=probability[0];
  myu[0]=0.0;
  for (i=1; i <= (ssize_t) MaxIntensity; i++)
  {
    omega[i]=omega[i-1]+probability[i];
    myu[i]=myu[i-1]+i*probability[i];
  }
  /*
    Sigma maximization: inter-class variance and compute optimal threshold.
  */
  threshold=0;
  max_sigma=0.0;
  for (i=0; i < (ssize_t) MaxIntensity; i++)
  {
    sigma[i]=0.0;
    if ((omega[i] != 0.0) && (omega[i] != 1.0))
      sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0-
        omega[i]));
    if (sigma[i] > max_sigma)
      {
        max_sigma=sigma[i];
        threshold=(double) i;
      }
  }
  /*
    Free resources.
  */
  myu=(double *) RelinquishMagickMemory(myu);
  omega=(double *) RelinquishMagickMemory(omega);
  probability=(double *) RelinquishMagickMemory(probability);
  sigma=(double *) RelinquishMagickMemory(sigma);
  return(100.0*threshold/MaxIntensity);
}

static double TriangleThreshold(const double *histogram,
  ExceptionInfo *exception)
{
  double
    a,
    b,
    c,
    count,
    distance,
    inverse_ratio,
    max_distance,
    segment,
    x1,
    x2,
    y1,
    y2;

  register ssize_t
    i;

  ssize_t
    end,
    max,
    start,
    threshold;

  /*
    Compute optimal threshold with triangle algorithm.
  */
  (void) exception;
  start=0;  /* find start bin, first bin not zero count */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > 0.0)
      {
        start=i;
        break;
      }
  end=0;  /* find end bin, last bin not zero count */
  for (i=(ssize_t) MaxIntensity; i >= 0; i--)
    if (histogram[i] > 0.0)
      {
        end=i;
        break;
      }
  max=0;  /* find max bin, bin with largest count */
  count=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > count)
      {
        max=i;
        count=histogram[i];
      }
  /*
    Compute threshold at split point.
  */
  x1=(double) max;
  y1=histogram[max];
  x2=(double) end;
  if ((max-start) >= (end-max))
    x2=(double) start;
  y2=0.0;
  /*
    Line through the histogram peak and the far end of the histogram; the
    threshold is the bin with the greatest perpendicular distance to it.
  */
  a=y1-y2;
  b=x2-x1;
  c=(-1.0)*(a*x1+b*y1);
  inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
  threshold=0;
  max_distance=0.0;
  if (x2 == (double) start)
    for (i=start; i < max; i++)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment > 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  else
    for (i=end; i > max; i--)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment < 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  return(100.0*threshold/MaxIntensity);
}

MagickExport MagickBooleanType AutoThresholdImage(Image *image,
  const AutoThresholdMethod method,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  char
    property[MagickPathExtent];

  double
    gamma,
    *histogram,
    sum,
    threshold;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Form histogram.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double intensity = GetPixelIntensity(image,p);
      histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize histogram.
  */
  sum=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    sum+=histogram[i];
  gamma=PerceptibleReciprocal(sum);
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    histogram[i]=gamma*histogram[i];
  /*
    Discover threshold from histogram.
  */
  switch (method)
  {
    case KapurThresholdMethod:
    {
      threshold=KapurThreshold(image,histogram,exception);
      break;
    }
    case OTSUThresholdMethod:
    default:
    {
      threshold=OTSUThreshold(image,histogram,exception);
      break;
    }
    case TriangleThresholdMethod:
    {
      threshold=TriangleThreshold(histogram,exception);
      break;
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /* a negative threshold signals an allocation failure in the helper */
  if (threshold < 0.0)
    status=MagickFalse;
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Threshold image.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
  (void) SetImageProperty(image,"auto-threshold:threshold",property,exception);
  return(BilevelImage(image,QuantumRange*threshold/100.0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B i l e v e l I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BilevelImage() changes the value of individual pixels based on the
%  intensity of each pixel channel.  The result is a high-contrast image.
%
%  More precisely each channel value of the image is 'thresholded' so that if
%  it is equal to or less than the given value it is set to zero, while any
%  value greater than that given is set to its maximum or QuantumRange.
%
%  This function is what is used to implement the "-threshold" operator for
%  the command line API.
%
%  If the default channel setting is given the image is thresholded using just
%  the gray 'intensity' of the image, rather than the individual channels.
%
%  The format of the BilevelImage method is:
%
%      MagickBooleanType BilevelImage(Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o threshold: define the threshold values. % % o exception: return any errors or warnings in this structure. % % Aside: You can get the same results as operator using LevelImages() % with the 'threshold' value for both the black_point and the white_point. % */ MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold, ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); /* Bilevel threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; q[i]=(Quantum) (pixel <= threshold ? 
0 : QuantumRange); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l a c k T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlackThresholdImage() is like ThresholdImage() but forces all pixels below % the threshold into black while leaving all pixels at or above the threshold % unchanged. % % The format of the BlackThresholdImage method is: % % MagickBooleanType BlackThresholdImage(Image *image, % const char *threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: define the threshold value. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* a NULL threshold string is a no-op, not an error */
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Parse the threshold geometry: rho applies to all channels unless
    per-channel sigma/xi/psi/chi values are supplied.
  */
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      /* in CMYK, psi is the black channel and chi is alpha */
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      /* percentages scale to the quantum range */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    Black threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        /* force below-threshold values to black; leave the rest unchanged */
        if (pixel < GetPixelInfoChannel(&threshold,channel))
          q[i]=(Quantum) 0;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l a m p I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampImage() set each pixel whose value is below zero to zero and any
%  pixel whose value is above the quantum range to the quantum range (e.g.
%  65535) otherwise the pixel value remains unchanged.
%
%  The format of the ClampImage method is:
%
%      MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag  "Clamp/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      /*
        Colormapped image: clamp the colormap entries instead of the pixels.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) ClampPixel(q->red);
        q->green=(double) ClampPixel(q->green);
        q->blue=(double) ClampPixel(q->blue);
        q->alpha=(double) ClampPixel(q->alpha);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Clamp image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampPixel((MagickRealType) q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClampImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y T h r e s h o l d M a p                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyThresholdMap() de-allocate the given ThresholdMap
%
%  The format of the DestroyThresholdMap method is:
%
%      ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
%  A description of each parameter follows.
%
%    o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  assert(map != (ThresholdMap *) NULL);
  /* free the owned strings and level table before the map itself */
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  map=(ThresholdMap *) RelinquishMagickMemory(map);
  return(map);  /* always NULL, for the caller to re-assign */
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G e t T h r e s h o l d M a p                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMap() loads and searches one or more threshold map files for the
%  map matching the given name or alias.
%
%  The format of the GetThresholdMap method is:
%
%      ThresholdMap *GetThresholdMap(const char *map_id,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o map_id: ID of the map to look for.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  ThresholdMap
    *map;

  /*
    Search the built-in maps first, then each configured thresholds file.
  */
  map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
  if (map != (ThresholdMap *) NULL)
    return(map);
#if !defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT)
  {
    const StringInfo
      *option;

    LinkedListInfo
      *options;

    options=GetConfigureOptions(ThresholdsFilename,exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
    while (option != (const StringInfo *) NULL)
    {
      map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
        GetStringInfoPath(option),map_id,exception);
      if (map != (ThresholdMap *) NULL)
        break;
      option=(const StringInfo *) GetNextValueInLinkedList(options);
    }
    options=DestroyConfigureOptions(options);
  }
#endif
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  +  G e t T h r e s h o l d M a p F i l e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMapFile() look for a given threshold map name or alias in the
%  given XML file data, and return the allocated the map when found.
%
%  The format of the GetThresholdMapFile method is:
%
%      ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
%        const char *map_id,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o xml: The threshold map list in XML format.
%
%    o filename: The threshold map XML filename.
%
%    o map_id: ID of the map to look for in XML list.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
  const char *map_id,ExceptionInfo *exception)
{
  char
    *p;

  const char
    *attribute,
    *content;

  double
    value;

  register ssize_t
    i;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  map=(ThresholdMap *) NULL;
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(map);
  /*
    Locate the <threshold> element whose map or alias attribute matches.
  */
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    attribute=GetXMLTreeAttribute(threshold,"map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold,"alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    Allocate the map and fill in its identity before parsing the levels.
  */
  map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map));
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Parse the width*height whitespace-separated level values.
  */
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t)
    map->width,map->height*
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  for (i=0; i < (ssize_t) (map->width*map->height); i++)
  {
    map->levels[i]=(ssize_t) strtol(content,&p,10);
    if (p == content)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
          (double) map->levels[i],map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    content=p;
  }
  /* a trailing parseable value means the map declared too many levels */
  value=(double) strtol(content,&p,10);
  (void) value;
  if (p != content)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  +  L i s t T h r e s h o l d M a p F i l e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMapFile() lists the threshold maps and their descriptions
%  in the given XML file data.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,const char*xml,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file: An pointer to the output FILE.
%
%    o xml: The threshold map list in XML format.
%
%    o filename: The threshold map XML filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *content,
    *map;

  XMLTreeInfo
    *description,
    *threshold,
    *thresholds;

  assert( xml != (char *) NULL );
  assert( file != (FILE *) NULL );
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  threshold=GetXMLTreeChild(thresholds,"threshold");
  for ( ; threshold != (XMLTreeInfo *) NULL;
          threshold=GetNextXMLTreeTag(threshold))
  {
    map=GetXMLTreeAttribute(threshold,"map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias=GetXMLTreeAttribute(threshold,"alias");
    description=GetXMLTreeChild(threshold,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"",map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if (content == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    /* the alias attribute is optional; print an empty column if absent */
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ?
      alias : "",
      content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i s t T h r e s h o l d M a p s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMaps() lists the threshold maps and their descriptions
%  as defined by "threshold.xml" to a file.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file: An pointer to the output FILE.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  option=(const StringInfo *) GetNextValueInLinkedList(options);
  while (option != (const StringInfo *) NULL)
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    /* accumulate failures bit-wise so every file is still listed */
    status&=ListThresholdMapFile(file,(const char *)
      GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O r d e r e d D i t h e r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedDitherImage() will perform an ordered dither based on a number
%  of pre-defined dithering threshold maps, but over multiple intensity
%  levels, which can be different for different channels, according to the
%  input argument.
% % The format of the OrderedDitherImage method is: % % MagickBooleanType OrderedDitherImage(Image *image, % const char *threshold_map,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold_map: A string containing the name of the threshold dither % map to use, followed by zero or more numbers representing the number % of color levels tho dither between. % % Any level number less than 2 will be equivalent to 2, and means only % binary dithering will be applied to each color channel. % % No numbers also means a 2 level (bitmap) dither will be applied to all % channels, while a single number is the number of levels applied to each % channel in sequence. More numbers will be applied in turn to each of % the color channels. % % For example: "o3x3,6" will generate a 6 level posterization of the % image with a ordered 3x3 diffused pixel dither being applied between % each level. While checker,8,8,4 will produce a 332 colormaped image % with only a single checkerboard hash pattern (50% grey) between each % color level, to basically double the number of color levels with % a bare minimim of dithering. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType OrderedDitherImage(Image *image, const char *threshold_map,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; char token[MagickPathExtent]; const char *p; double levels[CompositePixelChannel]; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; ThresholdMap *map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (threshold_map == (const char *) NULL) return(MagickTrue); p=(char *) threshold_map; while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) && (*p != '\0')) p++; threshold_map=p; while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) && (*p != '\0')) { if ((p-threshold_map) >= (MagickPathExtent-1)) break; token[p-threshold_map]=(*p); p++; } token[p-threshold_map]='\0'; map=GetThresholdMap(token,exception); if (map == (ThresholdMap *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","ordered-dither",threshold_map); return(MagickFalse); } for (i=0; i < MaxPixelChannels; i++) levels[i]=2.0; p=strchr((char *) threshold_map,','); if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0)) { GetNextToken(p,&p,MagickPathExtent,token); for (i=0; (i < MaxPixelChannels); i++) levels[i]=StringToDouble(token,(char **) NULL); for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); levels[i]=StringToDouble(token,(char **) NULL); } } for (i=0; i < MaxPixelChannels; i++) if (fabs(levels[i]) >= 1) levels[i]-=1.0; if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; 
image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; ssize_t n; n=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ssize_t level, threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (fabs(levels[n]) < MagickEpsilon) { n++; continue; } threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1)); level=threshold/(map->divisor-1); threshold-=level*(map->divisor-1); q[i]=ClampToQuantum((double) (level+(threshold >= map->levels[(x % map->width)+map->width*(y % map->height)]))* QuantumRange/levels[n]); n++; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,DitherImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); map=DestroyThresholdMap(map); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P e r c e p t i b l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PerceptibleImage() set each pixel whose value is less than |epsilon| to % epsilon or -epsilon (whichever is 
closer) otherwise the pixel value remains
%  unchanged.
%
%  The format of the PerceptibleImage method is:
%
%      MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
%    o exception: return any errors or warnings in this structure.
%
*/
/* Push a near-zero sample out to +/-epsilon, preserving its sign; samples
   already at or beyond epsilon in magnitude pass through unchanged. */
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    sign;

  sign=(double) quantum < 0.0 ? -1.0 : 1.0;
  if ((sign*quantum) >= epsilon)
    return(quantum);
  return((Quantum) (sign*epsilon));
}

MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag  "Perceptible/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      /* Colormapped image: threshold the palette entries directly, then
         push the updated colormap back to the pixels via SyncImage(). */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
          epsilon);
        q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
          epsilon);
        q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
          epsilon);
        q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
          epsilon);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Perceptible image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* Unlike the other thresholds in this file, every defined channel
           is processed here, not just those with UpdatePixelTrait. */
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PerceptibleThreshold(q[i],epsilon);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PerceptibleImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%     R a n d o m T h r e s h o l d I m a g e                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RandomThresholdImage() changes the value of individual pixels based on the
%  intensity of each pixel compared to a random threshold.  The result is a
%  low-contrast, two color image.
%
%  The format of the RandomThresholdImage method is:
%
%      MagickBooleanType RandomThresholdImage(Image *image,
%        const char *thresholds,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o low,high: Specify the high and low thresholds. These values range from % 0 to QuantumRange. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RandomThresholdImage(Image *image, const double min_threshold, const double max_threshold,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); GetPixelInfo(image,&threshold); /* Random threshold image. 
*/ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if ((double) q[i] < min_threshold) threshold=min_threshold; else if ((double) q[i] > max_threshold) threshold=max_threshold; else threshold=(double) (QuantumRange* GetPseudoRandomValue(random_info[id])); q[i]=(double) q[i] <= threshold ? 
0 : QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n g e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RangeThresholdImage() applies soft and hard thresholding. % % The format of the RangeThresholdImage method is: % % MagickBooleanType RangeThresholdImage(Image *image, % const double low_black,const double low_white,const double high_white, % const double high_black,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low_black: Define the minimum threshold value. % % o low_white: Define the maximum threshold value. % % o high_white: Define the minimum threshold value. % % o low_white: Define the maximum threshold value. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType RangeThresholdImage(Image *image, const double low_black,const double low_white,const double high_white, const double high_black,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); /* Range threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel < low_black) q[i]=0; else if ((pixel >= low_black) && (pixel < low_white)) q[i]=ClampToQuantum(QuantumRange* PerceptibleReciprocal(low_white-low_black)*(pixel-low_black)); else if ((pixel >= low_white) && (pixel <= high_white)) q[i]=QuantumRange; else if ((pixel > high_white) && (pixel <= high_black)) 
q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal( high_black-high_white)*(high_black-pixel)); else if (pixel > high_black) q[i]=0; else q[i]=0; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteThresholdImage() is like ThresholdImage() but forces all pixels above % the threshold into white while leaving all pixels at or below the threshold % unchanged. % % The format of the WhiteThresholdImage method is: % % MagickBooleanType WhiteThresholdImage(Image *image, % const char *threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: Define the threshold value. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* No threshold string is a no-op, not an error. */
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  GetPixelInfo(image,&threshold);
  /*
    Parse the geometry string: rho seeds every color channel, then sigma/xi/
    psi selectively override green/blue/alpha; the order of these assignments
    is significant.  NOTE(review): for CMYK, psi is re-purposed as black and
    chi as alpha -- presumably mirroring the argument order convention; the
    alpha default of 100.0 appears to be a percentage scaled below.
  */
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      /* Percent syntax: scale all components from [0,100] to quantum. */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    White threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* Restricted channel mask: threshold each channel on its own
           sample instead of the pixel intensity. */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        /* Above the per-channel threshold goes to white; at or below is
           left unchanged. */
        if (pixel > GetPixelInfoChannel(&threshold,channel))
          q[i]=QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
GB_unaryop__lnot_fp32_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_fp32_uint16
// op(A') function:  GB_tran__lnot_fp32_uint16

// C type:   float
// A type:   uint16_t
// cast:     float cij = (float) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical not: nonzero -> 0, zero -> 1)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    float z = (float) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// anz is the number of entries; Cx and Ax must not alias (restrict).
GrB_Info GB_unop__lnot_fp32_uint16
(
    float *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel: each entry is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_fp32_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose loop body is shared via textual inclusion; the macros
    // above parameterize it for this type/operator combination
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
DRB042-3mm-tile-no.c
/** * 3mm.c: This file is part of the PolyBench/C 3.2 test suite. * with tiling 16x16 and nested SIMD * * Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu> * Web address: http://polybench.sourceforge.net * License: /LICENSE.OSU.txt */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include "polybench/polybench.h" /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "polybench/3mm.h" /* Array initialization. */ static void init_array(int ni,int nj,int nk,int nl,int nm,double A[128 + 0][128 + 0],double B[128 + 0][128 + 0],double C[128 + 0][128 + 0],double D[128 + 0][128 + 0]) { //int i; //int j; { int c3; int c4; int c1; int c2; if (ni >= ((0 > -1 * nj + -1 * nm + 1?0 : -1 * nj + -1 * nm + 1)) && nj >= 0 && nk >= ((0 > -1 * nm + 1?0 : -1 * nm + 1)) && nm >= 0) { #pragma omp parallel for private(c2, c4, c3) for (c1 = 0; c1 <= (((((nk + ni + nj + nm + -1) * 16 < 0?((16 < 0?-((-(nk + ni + nj + nm + -1) + 16 + 1) / 16) : -((-(nk + ni + nj + nm + -1) + 16 - 1) / 16))) : (nk + ni + nj + nm + -1) / 16)) < (((nk + ni + nj + 2 * nm + -2) * 16 < 0?((16 < 0?-((-(nk + ni + nj + 2 * nm + -2) + 16 + 1) / 16) : -((-(nk + ni + nj + 2 * nm + -2) + 16 - 1) / 16))) : (nk + ni + nj + 2 * nm + -2) / 16))?(((nk + ni + nj + nm + -1) * 16 < 0?((16 < 0?-((-(nk + ni + nj + nm + -1) + 16 + 1) / 16) : -((-(nk + ni + nj + nm + -1) + 16 - 1) / 16))) : (nk + ni + nj + nm + -1) / 16)) : (((nk + ni + nj + 2 * nm + -2) * 16 < 0?((16 < 0?-((-(nk + ni + nj + 2 * nm + -2) + 16 + 1) / 16) : -((-(nk + ni + nj + 2 * nm + -2) + 16 - 1) / 16))) : (nk + ni + nj + 2 * nm + -2) / 16)))); c1++) { if (c1 <= (((((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 
+ 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 
16))) : (nj + -1) / 16)))) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) { for (c2 = 0; c2 <= (((((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : 
(nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nk + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nk + -1)) < nm + -1?((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nk + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nk + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nl + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) : nl + -1)) < nm + -1?((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nl + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 
15 : nj + -1)) : nk + -1)) : nl + -1)) : nm + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) : nm + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nl + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (nl > nm?nl : nm); c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)) : nm + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (nj > nl?nj : nl); c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + 
-1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = (nj > nm?nj : nm); c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (((nj > nl?nj : nl)) > nm?((nj > nl?nj : nl)) : nm); c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } #pragma omp simd for (c4 = nk; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (nk > nl?nk : nl); c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = (nk > nm?nk : nm); c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (((nk > nl?nk : nl)) > nm?((nk > nl?nk : nl)) : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = (nj > nk?nj : nk); c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / 
nk; } #pragma omp simd for (c4 = (((nj > nk?nj : nk)) > nl?((nj > nk?nj : nk)) : nl); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nm; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nk + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } if (c1 == c2) { #pragma omp simd for (c4 = nm; c4 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } if (c1 == c2) { #pragma omp simd for (c4 = nj; c4 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } if (c1 == c2) { #pragma omp simd for (c4 = nk; c4 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } for (c3 = nj; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)) < nm + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { A[c3][c4] = ((double 
)c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } if (c1 == c2) { #pragma omp simd for (c4 = nj; c4 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nl + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } if (c1 == c2) { #pragma omp simd for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } if (c1 == c2) { #pragma omp simd for (c4 = nk; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } for (c3 = (nj > nm?nj : nm); c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } if (c1 == c2) { #pragma omp simd for (c4 = nj; c4 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } } for (c3 = nk; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nm + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp 
simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } if (c1 == c2) { #pragma omp simd for (c4 = nk; c4 <= ((((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)) < nm + -1?((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } if (c1 == c2) { #pragma omp simd for (c4 = (nk > nl?nk : nl); c4 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } if (c1 == c2) { #pragma omp simd for (c4 = nm; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } for (c3 = (nk > nm?nk : nm); c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } if (c1 == c2) { #pragma omp simd for (c4 = nm; c4 <= nk + -1; c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } if (c1 == c2) { #pragma omp simd for (c4 = nk; c4 <= nm + -1; c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } for (c3 = (nj > nk?nj : nk); c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } if (c1 
== c2) { #pragma omp simd for (c4 = nk; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } for (c3 = (((nj > nk?nj : nk)) > nm?((nj > nk?nj : nk)) : nm); c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = ni; c3 <= ((((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) < nm + -1?((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (nl > nm?nl : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) 
: nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = (nj > nm?nj : nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = (ni > nm?ni : nm); c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } if (c1 == c2) { #pragma omp simd for (c4 = nm; c4 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } for (c3 = (ni > nj?ni : nj); c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } if (c1 == c2) { #pragma omp simd for (c4 = nj; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } for (c3 = (((ni > nj?ni : nj)) > nm?((ni > nj?ni : nj)) : nm); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = (ni > nk?ni : nk); c3 <= ((((16 * c1 + 
15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } if (c1 == c2) { #pragma omp simd for (c4 = nm; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } for (c3 = (((ni > nk?ni : nk)) > nm?((ni > nk?ni : nk)) : nm); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = (((ni > nj?ni : nj)) > nk?((ni > nj?ni : nj)) : nk); c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 
16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) { for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nk + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= 16 * c2 + 15; c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = nj; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = nk; c3 <= ((((16 * c1 + 
15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= 16 * c2 + 15; c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = (nj > nk?nj : nk); c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = ni; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= 16 * c2 + 15; c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = (ni > nj?ni : nj); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = (ni > nk?ni : nk); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) < (((nk + -1) * 16 < 0?((16 
< 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))))) { for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) 
: (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nk + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) : nm + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = (nj > nm?nj : nm); c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } #pragma omp simd for (c4 = nk; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = (nk > nm?nk : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = (nj > nk?nj : nk); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) 
/ nl; } } for (c3 = nj; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } if (c1 == c2) { #pragma omp simd for (c4 = nj; c4 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } } for (c3 = nk; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } if (c1 == c2) { #pragma omp simd for (c4 = nk; c4 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } for (c3 = (nj > nk?nj : nk); c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = ni; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } 
#pragma omp simd for (c4 = nj; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = (ni > nj?ni : nj); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = (ni > nk?ni : nk); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) { for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) 
/ 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)) < nm + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (nj > nl?nj : nl); c4 <= 16 * c2 + 15; c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = nm; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= 16 * c2 + 15; c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = nk; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + 
-1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= 16 * c2 + 15; c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = (nk > nm?nk : nm); c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = ni; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = (ni > nm?ni : nm); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = (ni > nk?ni : nk); c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + 
-1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) { for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { 
A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= 16 * c2 + 15; c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = nk; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = ni; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) { for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) { #pragma 
omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= 16 * c2 + 15; c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = nk; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = ni; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) { for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + 
-1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) < nl + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)) : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } #pragma omp simd for (c4 = nk; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (nk > nl?nk : nl); c4 <= 
((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = (nj > nk?nj : nk); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nm; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } #pragma omp simd for (c4 = nk; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = ni; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = (ni > nm?ni : nm); c3 <= 16 * c1 + 15; c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 
+ 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) { for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((16 
* c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } #pragma omp simd for (c4 = nk; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = nk; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = ni; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))))) { for (c2 = (((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) { 
for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } #pragma omp simd for (c4 = nk; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = nk; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = ni; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni 
+ -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) { for (c2 = 0; c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) < nm + -1?((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (nk > nl?nk : nl); c4 <= 16 * c2 + 15; c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = nm; 
c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nk; c4 <= 16 * c2 + 15; c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = nj; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } #pragma omp simd for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = (nj > nm?nj : nm); c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = ni; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= 16 * c2 + 15; c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = (ni > nm?ni : nm); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / 
nl; } } for (c3 = (ni > nj?ni : nj); c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) && c1 >= ((((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) { for (c2 = 0; c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) 
/ 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } #pragma omp simd for (c4 = nk; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = nj; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = ni; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)))) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) { for (c2 = (0 > 
((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } #pragma omp simd for (c4 = nk; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = nj; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = ni; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + 
-1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) { for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 
= 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } #pragma omp simd for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nm; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = ni; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) { for (c2 = 0; c2 <= 
(((((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 
0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } } } if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) { for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : 
-((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } } } if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) { for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } } } if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni 
+ -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))))) { for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nk + -1?16 * c2 + 
15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)) : nm + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (nl > nm?nl : nm); c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } #pragma omp simd for (c4 = nk; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (nk > nl?nk : nl); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = (nk > nm?nk : nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nj; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } #pragma omp simd for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 
+ 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = ni; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = (ni > nj?ni : nj); c3 <= 16 * c1 + 15; c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))))) { for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 
0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nj + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nm + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nm + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } #pragma omp simd for (c4 = nk; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = nj; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = ni; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) { for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 
16 : (nj + 16 - 1) / 16))); c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } #pragma omp simd for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nm; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 
* c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = ni; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) { for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + 
-1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } } } if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) { for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } } } if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) { for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 
16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } #pragma omp simd for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nm; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = ni; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : 
(nm + 16 - 1) / 16))))) { for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } } } if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16))) { for (c2 = (((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl 
+ 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } } } if (c1 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) { for (c2 = 0; c2 <= (((((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 
16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) 
/ 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) < nm + -1?((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (nl > nm?nl : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } 
#pragma omp simd for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = (nj > nm?nj : nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nm; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } if (c1 == c2) { #pragma omp simd for (c4 = nm; c4 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } for (c3 = nj; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } if (c1 == c2) { #pragma omp simd for (c4 = nj; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } for (c3 = (nj > nm?nj : nm); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = nk; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + 
-1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } if (c1 == c2) { #pragma omp simd for (c4 = nm; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } for (c3 = (nk > nm?nk : nm); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = (nj > nk?nj : nk); c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) { for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + 
-1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= 16 * c2 + 15; c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = nj; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = nk; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) { for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + 
-1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = nj; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); 
c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = nk; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))))) { for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = 
((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nm; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = nk; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) { for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : 
-((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))))) { for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) 
: (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) { for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 
0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nm; c3 <= 16 * c1 + 15; c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) { for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 
< 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) { for (c2 = (((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * 
c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))) { for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) : nm + -1)); c4++) 
{ B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (nl > nm?nl : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = (nj > nm?nj : nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nk; c3 <= 16 * c1 + 15; c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nk + -1) * 16 < 
0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))) { for (c2 = (((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = nk; c3 <= 16 * c1 + 15; c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 
16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) { for (c2 = (((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nm; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / 
nj; } } for (c3 = nk; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) { for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))) { for (c2 = (((((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl 
+ 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) { for (c2 = 0; c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= 
((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= 16 * c2 + 15; c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = nm; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = nj; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) { for (c2 = 0; c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 
16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) { for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 
0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) { for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + 
-1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nj; c3 <= 16 * c1 + 15; c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) { for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + 
-1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) { for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : 
(((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nm; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = nj; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) { for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 
16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) { for (c2 = (((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) { for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 
16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))); c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nj; c3 <= 16 * c1 + 15; c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) { for (c2 = (((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((((nj * 16 < 0?-(-nj / 
16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) { for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) 
: -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) { for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 
16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) > ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))?((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16)))) : ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))))) { for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); 
c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) { for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) { for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 
0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))) { for (c2 = (((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } } } if (ni >= ((0 > -1 * nj + 1?0 : -1 * nj + 
1)) && nj >= 0 && nk >= 1 && nm <= -1) { #pragma omp parallel for private(c2, c4, c3) for (c1 = 0; c1 <= (((((nk + ni + -1) * 16 < 0?((16 < 0?-((-(nk + ni + -1) + 16 + 1) / 16) : -((-(nk + ni + -1) + 16 - 1) / 16))) : (nk + ni + -1) / 16)) < (((nk + ni + nj + -2) * 16 < 0?((16 < 0?-((-(nk + ni + nj + -2) + 16 + 1) / 16) : -((-(nk + ni + nj + -2) + 16 - 1) / 16))) : (nk + ni + nj + -2) / 16))?(((nk + ni + -1) * 16 < 0?((16 < 0?-((-(nk + ni + -1) + 16 + 1) / 16) : -((-(nk + ni + -1) + 16 - 1) / 16))) : (nk + ni + -1) / 16)) : (((nk + ni + nj + -2) * 16 < 0?((16 < 0?-((-(nk + ni + nj + -2) + 16 + 1) / 16) : -((-(nk + ni + nj + -2) + 16 - 1) / 16))) : (nk + ni + nj + -2) / 16)))); c1++) { if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))))) { for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nk + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nk + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nk + -1)); c4++) { A[c3][c4] = 
((double )c3) * c4 / ni; B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } #pragma omp simd for (c4 = nk; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = nk; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } for (c3 = ni; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) { for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } } } if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16))) { for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((nk + -1) * 16 < 0?((16 < 
0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } } } if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) { for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))) { for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } } } if (ni >= 0 && nj <= -1 && nk >= ((0 > -1 * nm + 1?0 : -1 * nm + 1)) && nm >= 0) { #pragma omp parallel for private(c2, c4, c3) for (c1 = 0; 
c1 <= (((ni + nm + -1) * 16 < 0?((16 < 0?-((-(ni + nm + -1) + 16 + 1) / 16) : -((-(ni + nm + -1) + 16 - 1) / 16))) : (ni + nm + -1) / 16)); c1++) { if (c1 <= (((((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) { for (c2 = 0; c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) < nm + -1?((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) < nl + -1?((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)) : nl + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } #pragma omp simd for (c4 = nk; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nm; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = 
((double )c3) * c4 / ni; } } for (c3 = ni; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) { for (c2 = 0; c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } } } if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16))) { for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; } } } } if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) 
&& c1 >= ((ni * 16 < 0?-(-ni / 16) : ((16 < 0?(-ni + - 16 - 1) / - 16 : (ni + 16 - 1) / 16))))) { for (c2 = 0; c2 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))) { for (c2 = (nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))); c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } } } if (nj <= -1 && nk >= 1 && nm <= -1) { #pragma omp parallel for private(c2, c4, c3) for (c1 = 0; c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)); c1++) { for (c2 = 0; c2 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nk + -1?16 * c2 + 15 : nk + -1)); c4++) { A[c3][c4] = ((double )c3) * c4 / ni; 
} } } } } if (ni >= 0 && nj >= 0 && nk <= -1 && nm >= 1) { #pragma omp parallel for private(c2, c4, c3) for (c1 = 0; c1 <= (((nj + nm + -1) * 16 < 0?((16 < 0?-((-(nj + nm + -1) + 16 + 1) / 16) : -((-(nj + nm + -1) + 16 - 1) / 16))) : (nj + nm + -1) / 16)); c1++) { if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) { for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nm; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + 
-1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = nj; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) { for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) { for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * 
(c4 + 3) / nl; } } } } if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) { for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))) { for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } } } if (ni >= 0 && nj <= -1 && nk <= -1 && nl >= 1) { #pragma omp parallel for private(c2, c4, c3) for (c1 = 0; c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c1++) { for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : 
nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } } if (ni <= -1 && nj >= ((0 > -1 * nm + 1?0 : -1 * nm + 1)) && nk >= ((0 > -1 * nm + 1?0 : -1 * nm + 1)) && nm >= 0) { #pragma omp parallel for private(c2, c4, c3) for (c1 = 0; c1 <= (((((nk + nj + nm + -1) * 16 < 0?((16 < 0?-((-(nk + nj + nm + -1) + 16 + 1) / 16) : -((-(nk + nj + nm + -1) + 16 - 1) / 16))) : (nk + nj + nm + -1) / 16)) < (((nk + nj + 2 * nm + -2) * 16 < 0?((16 < 0?-((-(nk + nj + 2 * nm + -2) + 16 + 1) / 16) : -((-(nk + nj + 2 * nm + -2) + 16 - 1) / 16))) : (nk + nj + 2 * nm + -2) / 16))?(((nk + nj + nm + -1) * 16 < 0?((16 < 0?-((-(nk + nj + nm + -1) + 16 + 1) / 16) : -((-(nk + nj + nm + -1) + 16 - 1) / 16))) : (nk + nj + nm + -1) / 16)) : (((nk + nj + 2 * nm + -2) * 16 < 0?((16 < 0?-((-(nk + nj + 2 * nm + -2) + 16 + 1) / 16) : -((-(nk + nj + 2 * nm + -2) + 16 - 1) / 16))) : (nk + nj + 2 * nm + -2) / 16)))); c1++) { if (c1 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 
1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) { for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) < nm + -1?((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) < nm + -1?((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)) : nm + -1)); c4++) { 
B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (nl > nm?nl : nm); c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = (nj > nl?nj : nl); c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = (nj > nm?nj : nm); c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nm; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } if (c1 == c2) { #pragma omp simd for (c4 = nm; c4 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } for (c3 = nj; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : 
nk + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } if (c1 == c2) { #pragma omp simd for (c4 = nj; c4 <= ((16 * c1 + 15 < nl + -1?16 * c1 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } for (c3 = (nj > nm?nj : nm); c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = nk; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = (nk > nm?nk : nm); c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = (nj > nk?nj : nk); c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double 
)c3) * (c4 + 2) / nk; } } } } if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)))) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) { for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= 16 * c2 + 15; c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = nj; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= 16 * c2 + 15; c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = nk; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((((nj + -1) * 16 < 0?((16 < 
0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))))) { for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nk + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nm + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nm + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = nj; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = 
((double )c3) * (c4 + 1) / nj; } } for (c3 = nk; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) { for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = 
((double )c3) * (c4 + 2) / nk; } } for (c3 = nm; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = nk; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) { for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 
16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) { for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) { 
for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) < nm + -1?((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) < nl + -1?((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)) : nl + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } #pragma omp simd for (c4 = nj; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nm; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } for (c3 = nk; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) { for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 
0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16))) { for (c2 = (((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 
0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) { for (c2 = 0; c2 <= (((((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 
+ 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nm; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = nj; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))))) { for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) { for (c2 = (0 > ((nl * 16 < 0?-(-nl / 
16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) { for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); 
c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nj; c3 <= 16 * c1 + 15; c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) { for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))))) { for (c2 = 0; c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 
0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nk * 16 < 0?-(-nk / 16) : ((16 < 0?(-nk + - 16 - 1) / - 16 : (nk + 16 - 1) / 16))))) { for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) { for (c2 = (nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))); c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 
16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))) { for (c2 = (((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) > ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))?((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16)))) : ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))); c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } } } if (ni <= -1 && nj >= 1 && nm <= -1) { #pragma omp parallel for private(c2, c4, c3) for (c1 = 0; c1 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c1++) { for (c2 = 0; c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nk + -1?16 * c1 + 15 : nk + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c4++) { B[c3][c4] = ((double )c3) * (c4 + 1) / nj; } } } } } if (ni <= -1 && nj <= -1 && nk 
>= 0 && nl >= 1) { #pragma omp parallel for private(c2, c4, c3) for (c1 = 0; c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c1++) { for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } } if (ni <= -1 && nj >= 0 && nk <= -1 && nm >= 1) { #pragma omp parallel for private(c2, c4, c3) for (c1 = 0; c1 <= (((nj + nm + -1) * 16 < 0?((16 < 0?-((-(nj + nm + -1) + 16 + 1) / 16) : -((-(nj + nm + -1) + 16 - 1) / 16))) : (nj + nm + -1) / 16)); c1++) { if (c1 <= (((((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))))) { for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) < nm + -1?((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)) : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 
<= ((((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) < nm + -1?((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)) : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } #pragma omp simd for (c4 = nl; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } #pragma omp simd for (c4 = nm; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } for (c3 = nm; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } for (c3 = nj; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)) && c1 >= ((nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))))) { for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 
16 - 1) / 16))) : (nj + -1) / 16))) { for (c2 = (0 > ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))?0 : ((nl * 16 < 0?-(-nl / 16) : ((16 < 0?(-nl + - 16 - 1) / - 16 : (nl + 16 - 1) / 16))))); c2 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nm + -1?16 * c2 + 15 : nm + -1)); c4++) { C[c3][c4] = ((double )c3) * (c4 + 3) / nl; } } } } if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)) && c1 >= ((nj * 16 < 0?-(-nj / 16) : ((16 < 0?(-nj + - 16 - 1) / - 16 : (nj + 16 - 1) / 16))))) { for (c2 = 0; c2 <= (((((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) < (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))?(((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)) : (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)))); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) { D[c3][c4] = ((double )c3) * (c4 + 2) / nk; } } } } if (c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16))) { for (c2 = (nm * 16 < 0?-(-nm / 16) : ((16 < 0?(-nm + - 16 - 1) / - 16 : (nm + 16 - 1) / 16))); c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) { for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) { #pragma omp simd for (c4 = 
16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
}
}
/* Generated versioning branch: initialise only D (nm x nl) when all other extents are
   non-positive. Dead for the fixed 128 sizes used in main(), kept for domain completeness.
   The nested ?: expressions are the code generator's expanded floord(x, 16) macro. */
if (ni <= -1 && nj <= -1 && nk <= -1 && nl >= 1) {
#pragma omp parallel for private(c2, c4, c3)
for (c1 = 0; c1 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {
for (c3 = 16 * c1; c3 <= ((16 * c1 + 15 < nm + -1?16 * c1 + 15 : nm + -1)); c3++) {
#pragma omp simd
for (c4 = 16 * c2; c4 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c4++) {
D[c3][c4] = ((double )c3) * (c4 + 2) / nk;
}
}
}
}
}
}
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
/* Prints the live-out array G (ni x nl) to stderr, one newline roughly every 20 values.
   NOTE(review): the line-break index is computed as (i * ni + j), i.e. with ni rather than
   nl as the row stride — emitted this way by the generator; affects formatting only. */
static void print_array(int ni,int nl,double G[128 + 0][128 + 0])
{
int i;
int j;
for (i = 0; i < ni; i++)
for (j = 0; j < nl; j++) {
fprintf(stderr,"%0.2lf ",G[i][j]);
if ((i * ni + j) % 20 == 0)
fprintf(stderr,"\n");
}
fprintf(stderr,"\n");
}
/* Main computational kernel. The whole function will be timed, including the call and return.
*/
/* PolyBench "3mm" kernel as emitted by a polyhedral (PLUTO-style) optimizer:
 *   E := A * B   (ni x nj, contracting over nk)
 *   F := C * D   (nj x nl, contracting over nm)
 *   G := E * F   (ni x nl, contracting over nj)
 * preceded by zero-initialisation of E, F and G. Every loop nest is tiled 16x16,
 * the outer tile loop of each nest is parallelised with OpenMP and the innermost
 * point loop is vectorised with `omp simd`. The unreadable nested ?: expressions
 * are the textual expansion of the generator's floord()/min() helpers; the
 * `ni <= -1`-style guarded variants are generated domain versioning and are dead
 * for the 128-sized problem set up in main(). Code left token-identical
 * (generated code; only newlines restored and comments added). */
static void kernel_3mm(int ni,int nj,int nk,int nl,int nm,double E[128 + 0][128 + 0],double A[128 + 0][128 + 0],double B[128 + 0][128 + 0],double F[128 + 0][128 + 0],double C[128 + 0][128 + 0],double D[128 + 0][128 + 0],double G[128 + 0][128 + 0])
{
// int i;
// int j;
// int k;
//#pragma scop
{
int c5;
int c10;
int c2;
int c1;
int c6;
int c7;
/* Fused zero-initialisation of G (ni x nl) and F (nj x nl); the shared c1 tile loop
   runs to max(ni, nj)-ish bound and the two inner `if (c1 <= ...)` guards select
   which array a given tile row still covers. */
if (ni >= 0 && nj >= 0 && nl >= 1) {
#pragma omp parallel for private(c7, c2, c10)
for (c1 = 0; c1 <= (((nj + ni + -1) * 16 < 0?((16 < 0?-((-(nj + ni + -1) + 16 + 1) / 16) : -((-(nj + ni + -1) + 16 - 1) / 16))) : (nj + ni + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {
if (c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16))) {
for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c7++) {
#pragma omp simd
for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c10++) {
G[c10][c7] = 0;
}
}
}
if (c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16))) {
for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c7++) {
#pragma omp simd
for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c10++) {
F[c10][c7] = 0;
}
}
}
}
}
}
/* Versioned variant: F := 0 only (dead for the default sizes). */
if (ni <= -1 && nl >= 1) {
#pragma omp parallel for private(c7, c2, c10)
for (c1 = 0; c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {
for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c7++) {
#pragma omp simd
for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c10++) {
F[c10][c7] = 0;
}
}
}
}
}
/* Versioned variant: G := 0 only (dead for the default sizes). */
if (nj <= -1 && nl >= 1) {
#pragma omp parallel for private(c7, c2, c10)
for (c1 = 0; c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {
for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c7++) {
#pragma omp simd
for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c10++) {
G[c10][c7] = 0;
}
}
}
}
}
/* Second product: F += C * D, contraction dimension nm tiled by (c5, c6). */
if (nl >= 1 && nm >= 1) {
#pragma omp parallel for private(c7, c6, c2, c10, c5)
for (c1 = 0; c1 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c2++) {
for (c5 = 0; c5 <= (((nm + -1) * 16 < 0?((16 < 0?-((-(nm + -1) + 16 + 1) / 16) : -((-(nm + -1) + 16 - 1) / 16))) : (nm + -1) / 16)); c5++) {
for (c6 = 16 * c5; c6 <= ((16 * c5 + 15 < nm + -1?16 * c5 + 15 : nm + -1)); c6++) {
for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nl + -1?16 * c2 + 15 : nl + -1)); c7++) {
#pragma omp simd
for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < nj + -1?16 * c1 + 15 : nj + -1)); c10++) {
F[c10][c7] += C[c10][c6] * D[c6][c7];
}
}
}
}
}
}
}
/* Zero-initialisation of E (ni x nj). */
if (nj >= 1) {
#pragma omp parallel for private(c7, c2, c10)
for (c1 = 0; c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) {
for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c7++) {
#pragma omp simd
for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c10++) {
E[c10][c7] = 0;
}
}
}
}
}
/* First product E += A * B (contraction over nk), then the fused third product
   G += E * F per (c1, c2) tile — the E tile produced above is consumed immediately. */
if (nj >= 1) {
#pragma omp parallel for private(c7, c6, c2, c10, c5)
for (c1 = 0; c1 <= (((ni + -1) * 16 < 0?((16 < 0?-((-(ni + -1) + 16 + 1) / 16) : -((-(ni + -1) + 16 - 1) / 16))) : (ni + -1) / 16)); c1++) {
for (c2 = 0; c2 <= (((nj + -1) * 16 < 0?((16 < 0?-((-(nj + -1) + 16 + 1) / 16) : -((-(nj + -1) + 16 - 1) / 16))) : (nj + -1) / 16)); c2++) {
for (c5 = 0; c5 <= (((nk + -1) * 16 < 0?((16 < 0?-((-(nk + -1) + 16 + 1) / 16) : -((-(nk + -1) + 16 - 1) / 16))) : (nk + -1) / 16)); c5++) {
for (c6 = 16 * c5; c6 <= ((16 * c5 + 15 < nk + -1?16 * c5 + 15 : nk + -1)); c6++) {
for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c7++) {
#pragma omp simd
for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c10++) {
E[c10][c7] += A[c10][c6] * B[c6][c7];
}
}
}
}
for (c5 = 0; c5 <= (((nl + -1) * 16 < 0?((16 < 0?-((-(nl + -1) + 16 + 1) / 16) : -((-(nl + -1) + 16 - 1) / 16))) : (nl + -1) / 16)); c5++) {
for (c6 = 16 * c5; c6 <= ((16 * c5 + 15 < nl + -1?16 * c5 + 15 : nl + -1)); c6++) {
for (c7 = 16 * c2; c7 <= ((16 * c2 + 15 < nj + -1?16 * c2 + 15 : nj + -1)); c7++) {
#pragma omp simd
for (c10 = 16 * c1; c10 <= ((16 * c1 + 15 < ni + -1?16 * c1 + 15 : ni + -1)); c10++) {
G[c10][c6] += E[c10][c7] * F[c7][c6];
}
}
}
}
}
}
}
}
//#pragma endscop
}
/* Driver: allocate the seven 128x128 arrays, initialise inputs, time kernel_3mm,
   and keep G observable behind an always-false-in-practice guard so the compiler
   cannot dead-code-eliminate the computation. */
int main(int argc,char **argv)
{
/* Retrieve problem size. */
int ni = 128;
int nj = 128;
int nk = 128;
int nl = 128;
int nm = 128;
/* Variable declaration/allocation. */
double (*E)[128 + 0][128 + 0];
E = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
;
double (*A)[128 + 0][128 + 0];
A = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
;
double (*B)[128 + 0][128 + 0];
B = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
;
double (*F)[128 + 0][128 + 0];
F = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
;
double (*C)[128 + 0][128 + 0];
C = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
;
double (*D)[128 + 0][128 + 0];
D = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
;
double (*G)[128 + 0][128 + 0];
G = ((double (*)[128 + 0][128 + 0])(polybench_alloc_data(((128 + 0) * (128 + 0)),(sizeof(double )))));
;
/* Initialize array(s). */
init_array(ni,nj,nk,nl,nm, *A, *B, *C, *D);
/* Start timer. */
polybench_timer_start();
;
/* Run kernel. */
kernel_3mm(ni,nj,nk,nl,nm, *E, *A, *B, *F, *C, *D, *G);
/* Stop and print timer. */
polybench_timer_stop();
;
polybench_timer_print();
;
/* Prevent dead-code elimination. All live-out data must be printed
   by the function call in argument. (argc > 42 with an empty argv[0]
   never holds in practice, but the compiler cannot prove it.) */
if (argc > 42 && !strcmp(argv[0],""))
print_array(ni,nl, *G);
/* Be clean. */
free(((void *)E));
;
free(((void *)A));
;
free(((void *)B));
;
free(((void *)F));
;
free(((void *)C));
;
free(((void *)D));
;
free(((void *)G));
;
return 0;
}
remote_matrix.h
#pragma once

#include "adabs/tools/tools.h"
#include "adabs/tools/tile.h"
#include "adabs/matrix_base.h"
#include "adabs/matrix.h"

namespace adabs {

/**
 * This is a tile based remote matrix class: a local, lazily filled
 * read-cache for a tile matrix that lives on another GASNet node
 * (identified by _addr). Tiles are fetched on demand via active
 * messages and kept in _data.
 *
 * Per-tile state is tracked in tile->flag (as used below):
 *   0 = tile not present locally, 1 = fetch request in flight,
 *   2 = tile data valid locally.
 *
 * Note: You should not inherit from this matrix.
 */
template <typename T, int tile_size>
class remote_matrix : public matrix_base {
	/************************ TYPEDEFS ***************************/
	private:
		typedef tools::tile<T, tile_size> tile;
		typedef tile* dataT;
		typedef adabs::matrix<T, tile_size> sourceT;
	public:
		typedef T value_type;

	/************************ VARIABLES **************************/
	private:
		// array of tile pointers, length nb_tiles_x * nb_tiles_y;
		// entries are 0 until the tile is first touched
		dataT *_data;
		const int _nb_tiles_x;
		const int _nb_tiles_y;
		// PGAS address of the owning (source) matrix on the remote node
		const pgas_addr <sourceT> _addr;

	/********************* CON/DESTRUCTORS ***********************/
	private:
	public:
		/**
		 * Copy constructor to create a copy of @param cpy
		 * (tile counts are the element counts rounded up to whole tiles;
		 * the cache starts empty — all tile slots null).
		 */
		remote_matrix (const pgas_addr <sourceT>& addr) : matrix_base(addr), _addr(addr),
		     _nb_tiles_x((get_size_x()%tile_size == 0) ? (get_size_x()/tile_size) : (get_size_x()/tile_size+1)),
		     _nb_tiles_y((get_size_y()%tile_size == 0) ? (get_size_y()/tile_size) : (get_size_y()/tile_size+1))
		 {
			const int tiles_y = get_nb_tile_y();
			const int tiles_x = get_nb_tile_x();

			_data = new dataT[tiles_y*tiles_x];

			for (int i=0; i<tiles_y*tiles_x; ++i)
				_data[i] = 0;
		}

		/**
		 * Destructor, make sure to not delete the object before(!) all
		 * reads to that matrix are completed.
		 */
		~remote_matrix() {
			for (int i = 0; i<get_nb_tile_y()*get_nb_tile_x(); ++i) {
				delete _data[i];
			}
			delete []_data;
		}

	/************************ FUNCTIONS **************************/
	private:
	public:
		/**
		 * Copies the values stored in
		 * @param ptr[0 ... tile_size*tile_size]
		 * to the tile with the coordinates
		 * @param x and @param y
		 * and marks the values as initialized. If @param *ptr is
		 * identical to a pointer returned by get_tile_unitialized
		 * no data will be copied.
		 */
		void set_tile(T const * restrict const ptr, const int x, const int y, const bool sent=true);

		/**
		 * Returns a pointer to the tile with the coordinates
		 * @param x and @param y
		 * . In case the values are not yet written to the matrix, the
		 * calling thread will sleep until the value is returned.
		 */
		T const* get_tile(const int x, const int y);

		/**
		 * Returns the pointer to the matrix internal tile with the
		 * coordinates
		 * @param x and @param y
		 * so one can update the matrix in place. You must(!) still call
		 * set_tile() for this matrix tile!
		 */
		T* get_tile_unitialized(const int x, const int y);

		/**
		 * Returns the tile size
		 */
		static int get_tile_size() {return tile_size;}

		/**
		 * Returns the number of tiles in x-dimension
		 */
		int get_nb_tile_x() const {
			return _nb_tiles_x;
		}

		/**
		 * Returns the number of tiles in y-dimension
		 */
		int get_nb_tile_y() const {
			return _nb_tiles_y;
		}

		// remote_matrix is a cache only; data is pulled via get_tile()
		void* pgas_get(const int x, const int y) const {
			throw "should not be called";
		}

		// Marks tile (x, y) as valid (flag = 2). NOTE(review): presumably
		// invoked once a remotely requested payload has landed — confirm
		// against the matrix_base AM handlers.
		void pgas_mark(const int x, const int y) {
			if (_data[y*_nb_tiles_x + x] == 0) {
				std::cerr << "Error that should not be 12c" << std::endl;
				exit(-1);
			}
			__sync_lock_test_and_set (&(_data[y*_nb_tiles_x + x]->flag), 2);
		}

		// payload size of one tile in bytes
		int pgas_tile_size() const {
			return get_tile_size()*get_tile_size()*sizeof(T);
		}

		// Invalidates all cached tiles (flag = 0); tile storage is kept
		// and will be refilled by later get_tile() calls.
		void clear_cache() {
			for (int i = 0; i<get_nb_tile_y()*get_nb_tile_x(); ++i) {
				if (_data[i] != 0)
					__sync_lock_test_and_set (&(_data[i]->flag), 0);
			}
		}
};

// Lazily allocates the tile slot under an OpenMP critical section and
// returns its raw data buffer. NOTE(review): other members read
// _data[...] without entering this critical section — the allocation
// itself is serialized, but publication of the new pointer to
// concurrent readers relies on that being safe here; confirm.
template <typename T, int tile_size>
T* remote_matrix<T, tile_size>::get_tile_unitialized(const int x, const int y) {
	#pragma omp critical
	{
		if (_data[y*_nb_tiles_x + x] == 0) {
			_data[y*_nb_tiles_x + x] = new tile();
		}
	}

	return _data[y*_nb_tiles_x + x]->data;
}

// Marks tile (x, y) valid locally and, if @param sent, pushes the tile
// payload to the owning node with a long active message addressed at the
// corresponding tile slot of the source matrix.
template <typename T, int tile_size>
void remote_matrix<T, tile_size>::set_tile(T const * restrict const ptr, const int x, const int y, const bool sent) {
	GASNET_BEGIN_FUNCTION();
	using namespace adabs::tools;

	// only in-place updates are supported: ptr must be the buffer
	// previously handed out by get_tile_unitialized()
	if (_data[y*_nb_tiles_x + x] == 0 || _data[y*_nb_tiles_x + x]->data != ptr) {
		throw "Error";
	}

	__sync_lock_test_and_set (&_data[y*_nb_tiles_x + x]->flag, 2);

	if (sent) {
		// dataT is tile*
		tile *dest_ptr1 = static_cast<tile*>(pgas_get_data_ptr());
		dest_ptr1 += y*_nb_tiles_x + x;
		void *dest_ptr2 = (void*)(dest_ptr1);
		// NOTE(review): GASNET_CALL is used without a trailing ';' —
		// presumably the macro supplies its own statement termination.
		GASNET_CALL(gasnet_AMRequestLong4(_addr.get_node(), adabs::impl::MATRIX_BASE_SET,
		                                  (void*)_data[y*_nb_tiles_x + x]->data, pgas_tile_size(), dest_ptr2,
		                                  get_low(_addr.get_ptr()), get_high(_addr.get_ptr()), x, y
		                                 )
		           )
	}
}

// Returns the tile data, fetching it from the owning node on a cache
// miss. The compare-and-swap on flag (0 -> 1) guarantees that exactly
// one thread issues the active-message request; everyone blocks until
// the reply handler sets flag to 2.
template <typename T, int tile_size>
T const* remote_matrix<T, tile_size>::get_tile(const int x, const int y) {
	GASNET_BEGIN_FUNCTION();
	using namespace adabs::tools;

	// in cache?
	if (_data[y*_nb_tiles_x + x]!= 0 && _data[y*_nb_tiles_x + x]->flag == 2) {
		//std::cout << gasnet_mynode() << " cached" << std::endl;
		return _data[y*_nb_tiles_x + x]->data;
	}

	// get dest_ptr here to make sure the tile exists
	T* dest_ptr = get_tile_unitialized(x, y);

	bool request = __sync_bool_compare_and_swap (&_data[y*_nb_tiles_x + x]->flag, 0, 1);

	// get data from source
	//std::cout << gasnet_mynode() << " remote" << std::endl;
	if (request) {
		//std::cout << gasnet_mynode() << " request startet for " << x << ", " << y << std::endl;
		GASNET_CALL(gasnet_AMRequestShort8(_addr.get_node(), adabs::impl::MATRIX_BASE_GET,
		                                   get_low(_addr.get_ptr()), get_high(_addr.get_ptr()),
		                                   get_low(this), get_high(this),
		                                   get_low(dest_ptr), get_high(dest_ptr),
		                                   x, y
		                                  )
		           )
	}

	//volatile int *reader = &_data[y*_nb_tiles_x + x]->flag;
	//while (*reader != 2){}
	GASNET_BLOCKUNTIL(_data[y*_nb_tiles_x + x]->flag == 2);

	return _data[y*_nb_tiles_x + x]->data;
}

}
mlp_example_f32_numa.c
/******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved.                      *
* This file is part of the LIBXSMM library.                                   *
*                                                                             *
* For information on the license, see the LICENSE file.                       *
* Further information: https://github.com/hfp/libxsmm/                        *
* SPDX-License-Identifier: BSD-3-Clause                                       *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <libxsmm_sync.h>

#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#if defined(_OPENMP)
# include <omp.h>
#endif
#include <numa.h>

#define CHECK_L1

/* include c-based dnn library */
#include "../common/dnn_common.h"

/* Fills buf[0..size) after zeroing it:
 *   initOne != 0 -> all ones,
 *   initPos != 0 -> libxsmm_rng_f64() draws (presumably in [0,1) — confirm
 *                   against the libxsmm RNG docs),
 *   otherwise    -> 0.05 - rng/10, i.e. small values centered near 0. */
LIBXSMM_INLINE void my_init_buf(float* buf, size_t size, int initPos, int initOne)
{
  int i;
  zero_buf(buf, size);
  for (i = 0; i < (int)size; ++i) {
    buf[i] = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0)));
  }
}

/* Bitmask of element-wise ops fused into the FC kernels
 * (BIAS_RELU is the OR of the two single flags). */
typedef enum my_eltwise_fuse {
  MY_ELTWISE_FUSE_NONE = 0,
  MY_ELTWISE_FUSE_BIAS = 1,
  MY_ELTWISE_FUSE_RELU = 2,
  MY_ELTWISE_FUSE_BIAS_RELU = MY_ELTWISE_FUSE_BIAS | MY_ELTWISE_FUSE_RELU
} my_eltwise_fuse;

/* Pass selector; MY_PASS_BWD (6) is BWD_D | BWD_W. */
typedef enum my_pass {
  MY_PASS_FWD   = 1,
  MY_PASS_BWD_D = 2,
  MY_PASS_BWD_W = 4,
  MY_PASS_BWD   = 6
} my_pass;

/* Configuration/handle for the SGD optimizer step. */
typedef struct my_opt_config {
  libxsmm_blasint C;
  libxsmm_blasint K;
  libxsmm_blasint bc;
  libxsmm_blasint bk;
  libxsmm_blasint threads;
  float lr;                     /* learning rate */
  size_t scratch_size;
  libxsmm_barrier* barrier;
} my_opt_config;

/* Configuration/handle for the softmax forward pass. */
typedef struct my_smax_fwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint threads;
  size_t scratch_size;
  libxsmm_barrier* barrier;
} my_smax_fwd_config;

/* Configuration/handle for the softmax backward pass. */
typedef struct my_smax_bwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint threads;
  size_t scratch_size;
  float loss_weight;
  libxsmm_barrier* barrier;
} my_smax_bwd_config;

/* Configuration/handle for the fully-connected forward pass, including the
 * JIT-ed BRGEMM kernels (gemm_fwd accumulates, gemm_fwd2 uses beta = 0). */
typedef struct my_fc_fwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint K;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint bk;
  libxsmm_blasint threads;
  my_eltwise_fuse fuse_type;
  libxsmm_blasint fwd_bf;
  libxsmm_blasint fwd_2d_blocking;
  libxsmm_blasint fwd_row_teams;
  libxsmm_blasint fwd_column_teams;
  size_t scratch_size;
  libxsmm_barrier* barrier;
  libxsmm_smmfunction_reducebatch_strd gemm_fwd;
  libxsmm_smmfunction_reducebatch_strd gemm_fwd2;
} my_fc_fwd_config;

/* Configuration/handle for the fully-connected backward pass
 * (declaration continues on the following source line). */
typedef struct my_fc_bwd_config {
  libxsmm_blasint N;
  libxsmm_blasint C;
  libxsmm_blasint K;
  libxsmm_blasint bn;
  libxsmm_blasint bc;
  libxsmm_blasint bk;
  libxsmm_blasint threads;
  my_eltwise_fuse fuse_type;
  libxsmm_blasint bwd_bf;
  libxsmm_blasint bwd_2d_blocking;
  libxsmm_blasint bwd_row_teams;
  libxsmm_blasint bwd_column_teams;
  libxsmm_blasint upd_bf;
  libxsmm_blasint upd_2d_blocking;
  libxsmm_blasint upd_row_teams;
  libxsmm_blasint upd_column_teams;
  libxsmm_blasint ifm_subtasks;
  libxsmm_blasint
ofm_subtasks; size_t scratch_size; libxsmm_barrier* barrier; libxsmm_smmfunction_reducebatch_strd gemm_bwd; libxsmm_smmfunction_reducebatch_strd gemm_bwd2; libxsmm_smmfunction_reducebatch_strd gemm_upd; libxsmm_smmfunction_reducebatch_strd gemm_upd2; libxsmm_xtransfunction tr_kernel; } my_fc_bwd_config; typedef struct my_numa_thr_cfg { int thr_s; int thr_e; int *blocksOFm_s; int *blocksOFm_e; int *blocksIFm_s; int *blocksIFm_e; int *blocksOFm_tr_s; int *blocksOFm_tr_e; int *blocksIFm_tr_s; int *blocksIFm_tr_e; float **scratch; size_t *layer_size; int **fwd_ofm_to_numa; float *bwd_d_scratch; size_t bwd_d_scratch_size; float *bwd_w_scratch; size_t bwd_w_layer_size; } my_numa_thr_cfg; my_fc_fwd_config setup_my_fc_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) { my_fc_fwd_config res; libxsmm_blasint lda = bk; libxsmm_blasint ldb = bc; libxsmm_blasint ldc = bk; float alpha = 1.0f; float beta = 1.0f; float zerobeta = 0.0f; /* setting up some handle values */ res.N = N; res.C = C; res.K = K; res.bn = bn; res.bc = bc; res.bk = bk; res.threads = threads; res.fuse_type = fuse_type; /* setup parallelization strategy */ if (threads == 16) { res.fwd_bf = 1; res.fwd_2d_blocking = 1; res.fwd_row_teams = 2; res.fwd_column_teams = 8; } else { res.fwd_bf = 1; res.fwd_2d_blocking = 0; res.fwd_row_teams = 1; res.fwd_column_teams = 1; } #if 0 res.fwd_bf = atoi(getenv("FWD_BF")); res.fwd_2d_blocking = atoi(getenv("FWD_2D_BLOCKING")); res.fwd_row_teams = atoi(getenv("FWD_ROW_TEAMS")); res.fwd_column_teams = atoi(getenv("FWD_COLUMN_TEAMS")); #endif /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* TPP creation */ res.gemm_fwd = libxsmm_smmdispatch_reducebatch_strd(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(float), res.bc*res.bn*sizeof(float), &lda, &ldb, &ldc, &alpha, &beta, NULL, NULL); if ( res.gemm_fwd == NULL ) { fprintf( 
stderr, "JIT for BRGEMM TPP gemm_fwd failed. Bailing...!\n"); exit(-1); } res.gemm_fwd2 = libxsmm_smmdispatch_reducebatch_strd(res.bk, res.bn, res.bc, res.bk*res.bc*sizeof(float), res.bc*res.bn*sizeof(float), &lda, &ldb, &ldc, &alpha, &zerobeta, NULL, NULL); if ( res.gemm_fwd2 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_fwd2 failed. Bailing...!\n"); exit(-1); } /* init scratch */ res.scratch_size = 0; return res; } my_fc_bwd_config setup_my_fc_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, my_eltwise_fuse fuse_type) { my_fc_bwd_config res; const libxsmm_trans_descriptor* tr_desc = 0; libxsmm_descriptor_blob blob; libxsmm_blasint lda = bc; libxsmm_blasint ldb = bk; libxsmm_blasint ldc = bc; float alpha = 1.0f; float beta = 1.0f; float zerobeta = 0.0f; int updflags = LIBXSMM_GEMM_FLAGS( 'N', 'T' ); libxsmm_blasint updM; libxsmm_blasint updN; /* setting up some handle values */ res.N = N; res.C = C; res.K = K; res.bn = bn; res.bc = bc; res.bk = bk; res.threads = threads; res.fuse_type = fuse_type; /* setup parallelization strategy */ if (threads == 16) { res.bwd_bf = 1; res.bwd_2d_blocking = 1; res.bwd_row_teams = 2; res.bwd_column_teams = 8; res.upd_bf = 1; res.upd_2d_blocking = 0; res.upd_row_teams = 1; res.upd_column_teams = 1; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } else { res.bwd_bf = 1; res.bwd_2d_blocking = 0; res.bwd_row_teams = 1; res.bwd_column_teams = 1; res.upd_bf = 1; res.upd_2d_blocking = 0; res.upd_row_teams = 1; res.upd_column_teams = 1; res.ifm_subtasks = 1; res.ofm_subtasks = 1; } #if 0 res.bwd_bf = atoi(getenv("BWD_BF")); res.bwd_2d_blocking = atoi(getenv("BWD_2D_BLOCKING")); res.bwd_row_teams = atoi(getenv("BWD_ROW_TEAMS")); res.bwd_column_teams = atoi(getenv("BWD_COLUMN_TEAMS")); res.upd_bf = atoi(getenv("UPD_BF")); res.upd_2d_blocking = atoi(getenv("UPD_2D_BLOCKING")); res.upd_row_teams = atoi(getenv("UPD_ROW_TEAMS")); 
res.upd_column_teams = atoi(getenv("UPD_COLUMN_TEAMS")); res.ifm_subtasks = atoi(getenv("IFM_SUBTASKS")); res.ofm_subtasks = atoi(getenv("OFM_SUBTASKS")); #endif /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* TPP creation */ /* BWD GEMM */ res.gemm_bwd = libxsmm_smmdispatch_reducebatch_strd(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(float), res.bk*res.bn*sizeof(float), &lda, &ldb, &ldc, &alpha, &beta, NULL, NULL); if ( res.gemm_bwd == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd failed. Bailing...!\n"); exit(-1); } res.gemm_bwd2 = libxsmm_smmdispatch_reducebatch_strd(res.bc, res.bn, res.bk, res.bk*res.bc*sizeof(float), res.bk*res.bn*sizeof(float), &lda, &ldb, &ldc, &alpha, &zerobeta, NULL, NULL); if ( res.gemm_bwd2 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_bwd2 failed. Bailing...!\n"); exit(-1); } /* Transpose kernel used for weight transpose in bwd pass */ tr_desc = libxsmm_trans_descriptor_init(&blob, sizeof(float), res.bk, res.bc, res.bc); res.tr_kernel = libxsmm_dispatch_trans(tr_desc); if ( res.tr_kernel == NULL ) { fprintf( stderr, "JIT for transpose TPP tr_kernel failed. Bailing...!\n"); exit(-1); } /* UPD GEMM */ lda = res.bk; ldb = res.bc; ldc = res.bk; updM = res.bk/res.ofm_subtasks; updN = res.bc/res.ifm_subtasks; res.gemm_upd = libxsmm_smmdispatch_reducebatch_strd(updM, updN, res.bn, res.K*res.bn*sizeof(float), res.C*res.bn*sizeof(float), &lda, &ldb, &ldc, &alpha, &beta, &updflags, NULL); if ( res.gemm_upd == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_upd failed. Bailing...!\n"); exit(-1); } res.gemm_upd2 = libxsmm_smmdispatch_reducebatch_strd(updM, updN, res.bn, res.K*res.bn*sizeof(float), res.C*res.bn*sizeof(float), &lda, &ldb, &ldc, &alpha, &zerobeta, &updflags, NULL); if ( res.gemm_upd2 == NULL ) { fprintf( stderr, "JIT for BRGEMM TPP gemm_upd2 failed. 
Bailing...!\n"); exit(-1); } /* init scratch */ res.scratch_size = sizeof(float) * ( (((size_t)res.C + (size_t)res.K) * (size_t)res.N) + ((size_t)res.C * (size_t)res.K) ); return res; } my_opt_config setup_my_opt(libxsmm_blasint C, libxsmm_blasint K, libxsmm_blasint bc, libxsmm_blasint bk, libxsmm_blasint threads, float lr) { my_opt_config res; /* setting up some handle values */ res.C = C; res.K = K; res.bc = bc; res.bk = bk; res.threads = threads; res.lr = lr; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* init scratch */ res.scratch_size = 0; return res; } my_smax_fwd_config setup_my_smax_fwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads) { my_smax_fwd_config res; /* setting up some handle values */ res.C = C; res.N = N; res.bc = bc; res.bn = bn; res.threads = threads; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* init scratch */ res.scratch_size = 0; return res; } my_smax_bwd_config setup_my_smax_bwd(libxsmm_blasint N, libxsmm_blasint C, libxsmm_blasint bn, libxsmm_blasint bc, libxsmm_blasint threads, float loss_weight) { my_smax_bwd_config res; /* setting up some handle values */ res.C = C; res.N = N; res.bc = bc; res.bn = bn; res.threads = threads; res.loss_weight = loss_weight; /* setting up the barrier */ res.barrier = libxsmm_barrier_create(threads, 1); /* init scratch */ res.scratch_size = 0; return res; } void my_fc_fwd_exec( my_fc_fwd_config cfg, const float* in_act_ptr, float* out_act_ptr, const float* bias_ptr, unsigned char* relu_ptr, int start_tid, int my_tid, void* scratch, my_numa_thr_cfg *numa_thr_cfg, int layer) { const libxsmm_blasint nBlocksIFm = cfg.C / cfg.bc; const libxsmm_blasint nBlocksOFm = cfg.K / cfg.bk; const libxsmm_blasint nBlocksMB = cfg.N / cfg.bn; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could be run in parallel */ const libxsmm_blasint 
work = nBlocksOFm * nBlocksMB; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; /* loop variables */ libxsmm_blasint mb1ofm1 = 0, mb1 = 0, ofm1 = 0, ifm1 = 0, mb2 = 0, ofm2 = 0; libxsmm_blasint im_tasks_per_thread = 0, in_tasks_per_thread = 0; libxsmm_blasint my_in_start = 0, my_in_end = 0, my_im_start = 0, my_im_end = 0; libxsmm_blasint my_row_id = 0, my_col_id = 0, row_teams = 0, column_teams = 0; LIBXSMM_VLA_DECL(4, float, output, out_act_ptr, nBlocksOFm, cfg.bn, cfg.bk); LIBXSMM_VLA_DECL(4, const float, input, in_act_ptr, nBlocksIFm, cfg.bn, cfg.bc); LIBXSMM_VLA_DECL(4, const float, filter, numa_thr_cfg->scratch[layer], nBlocksIFm, cfg.bc, cfg.bk); LIBXSMM_VLA_DECL(2, const float, bias, bias_ptr, cfg.bk); LIBXSMM_VLA_DECL(4, unsigned char, relumask, relu_ptr, nBlocksOFm, cfg.bn, cfg.bk); unsigned long long blocks = nBlocksIFm; libxsmm_blasint CB_BLOCKS = nBlocksIFm, BF = 1; LIBXSMM_UNUSED( scratch ); BF = cfg.fwd_bf; CB_BLOCKS = nBlocksIFm/BF; blocks = CB_BLOCKS; row_teams = cfg.fwd_row_teams; column_teams = cfg.fwd_column_teams; my_col_id = ltid % column_teams; my_row_id = ltid / column_teams; im_tasks_per_thread = LIBXSMM_UPDIV(nBlocksMB, row_teams); in_tasks_per_thread = LIBXSMM_UPDIV(nBlocksOFm, column_teams); my_im_start = LIBXSMM_MIN(my_row_id * im_tasks_per_thread, nBlocksMB); my_im_end = LIBXSMM_MIN((my_row_id+1) * im_tasks_per_thread, nBlocksMB); my_in_start = LIBXSMM_MIN(my_col_id * in_tasks_per_thread, nBlocksOFm); my_in_end = LIBXSMM_MIN((my_col_id+1) * in_tasks_per_thread, nBlocksOFm); const libxsmm_blasint ofm_start = numa_thr_cfg->blocksOFm_s[layer]; /* lazy barrier init */ libxsmm_barrier_init(cfg.barrier, ltid); if (cfg.fwd_2d_blocking 
== 1) { if (BF > 1) { for (ifm1 = 0; ifm1 < BF; ++ifm1) { for (ofm1 = my_in_start; ofm1 < my_in_end; ++ofm1) { for (mb1 = my_im_start; mb1 < my_im_end; ++mb1) { /* Initialize output slice */ if ( ifm1 == 0 ) { if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = LIBXSMM_VLA_ACCESS(2, bias, ofm1, ofm2, cfg.bk); } } } else { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (float)0; } } } } /* BRGEMM */ cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(4, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bc, cfg.bk), &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks); /* apply post BRGEMM fusion */ if ( ifm1 == BF-1 ) { if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { float l_cur_out = LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk); LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (unsigned char)(( l_cur_out > (float)0 ) ? 1 : 0); l_cur_out = (l_cur_out > (float)0) ? 
l_cur_out : (float)0; LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out; } } } } } } } } else { for (ofm1 = my_in_start; ofm1 < my_in_end; ++ofm1) { for (mb1 = my_im_start; mb1 < my_im_end; ++mb1) { if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = LIBXSMM_VLA_ACCESS(2, bias, ofm1, ofm2, cfg.bk); } } cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(4, filter, ofm1-ofm_start, 0, 0, 0, nBlocksIFm, cfg.bc, cfg.bk), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks); } else { cfg.gemm_fwd2( &LIBXSMM_VLA_ACCESS(4, filter, ofm1-ofm_start, 0, 0, 0, nBlocksIFm, cfg.bc, cfg.bk), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks); } /* post GEMM fusion */ if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { float l_cur_out = LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk); LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (unsigned char)(( l_cur_out > (float)0 ) ? 1 : 0); l_cur_out = ( l_cur_out > (float)0 ) ? 
l_cur_out : (float)0; LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out; } } } } } } } else { if (BF > 1) { for ( ifm1 = 0; ifm1 < BF; ++ifm1 ) { for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) { mb1 = mb1ofm1%nBlocksMB; ofm1 = mb1ofm1/nBlocksMB; /* Initialize output slice */ if ( ifm1 == 0 ) { if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = LIBXSMM_VLA_ACCESS(2, bias, ofm1, ofm2, cfg.bk); } } } else { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (float)0; } } } } /* BRGEMM */ cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(4, filter, ofm1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bc, cfg.bk), &LIBXSMM_VLA_ACCESS(4, input, mb1, ifm1*CB_BLOCKS, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks); /* post GEMM fusion */ if ( ifm1 == BF-1 ) { if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { float l_cur_out = LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk); LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (unsigned char)(( l_cur_out > (float)0 ) ? 1 : 0); l_cur_out = (l_cur_out > (float)0) ? 
l_cur_out : (float)0; LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out; } } } } } } } else { for ( mb1ofm1 = thr_begin; mb1ofm1 < thr_end; ++mb1ofm1 ) { mb1 = mb1ofm1%nBlocksMB; ofm1 = mb1ofm1/nBlocksMB; if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = LIBXSMM_VLA_ACCESS(2, bias, ofm1, ofm2, cfg.bk); } } cfg.gemm_fwd( &LIBXSMM_VLA_ACCESS(4, filter, ofm1-ofm_start, 0, 0, 0, nBlocksIFm, cfg.bc, cfg.bk), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks); } else { cfg.gemm_fwd2( &LIBXSMM_VLA_ACCESS(4, filter, ofm1-ofm_start, 0, 0, 0, nBlocksIFm, cfg.bc, cfg.bk), &LIBXSMM_VLA_ACCESS(4, input, mb1, 0, 0, 0, nBlocksIFm, cfg.bn, cfg.bc), &LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, 0, 0, nBlocksOFm, cfg.bn, cfg.bk), &blocks); } /* post GEMM fusion */ if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) { for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) { for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) { float l_cur_out = LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk); LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = (unsigned char)(( l_cur_out > (float)0 ) ? 1 : 0); l_cur_out = ( l_cur_out > (float)0 ) ? 
l_cur_out : (float)0;
            /* store the rectified activation (tail of the ReLU fusion in the
             * forward pass, whose head lies on the preceding lines) */
            LIBXSMM_VLA_ACCESS(4, output, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out;
          }
        }
      }
    }
  }
}
libxsmm_barrier_wait(cfg.barrier, ltid);
}

/* my_fc_bwd_d_transpose: gather this NUMA node's slice of the layer-`layer`
 * weight blocks (distributed across the per-node forward scratch buffers,
 * located via ofm_to_node) and write them transposed into the node-local
 * bwd_d_scratch, ready for the backward-by-data GEMMs.  Work is split over
 * this node's threads only; ltid is the thread id local to the node. */
void my_fc_bwd_d_transpose( my_fc_bwd_config cfg, int my_tid, my_numa_thr_cfg **numa_thr_cfg_, int numa_node, int layer, int *ofm_to_node) {
  int max_cfg_nodes = numa_num_configured_nodes();
  my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
  /* Transpose kernel to transpose filters */
  libxsmm_xtransfunction tr_kernel = cfg.tr_kernel;
  /* here we assume that input and output blocking is similar */
  const libxsmm_blasint bk = cfg.bk;
  const libxsmm_blasint bc = cfg.bc;
  const libxsmm_blasint nBlocksIFm = cfg.C / bc;
  const libxsmm_blasint nBlocksOFm = cfg.K / bk;
  /* computing first logical thread (id relative to this node's first thread) */
  const libxsmm_blasint ltid = my_tid - numa_thr_cfg[numa_node].thr_s;
  /* number of IFM blocks assigned to this node (tr_s/tr_e are inclusive) */
  const libxsmm_blasint l_nBlocksIFm = (numa_thr_cfg[numa_node].blocksIFm_tr_e[layer] - numa_thr_cfg[numa_node].blocksIFm_tr_s[layer]) + 1;
  /* number of tasks for transpose that could be run in parallel */
  const libxsmm_blasint transpose_work = l_nBlocksIFm * nBlocksOFm;
  /* compute chunk size
   * NOTE(review): thr_s/thr_e are used as an inclusive range elsewhere in this
   * file, so the thread count would be (thr_e - thr_s) + 1; this divides by
   * (thr_e - thr_s) -- confirm the chunking is intended. */
  int thr = numa_thr_cfg[numa_node].thr_e - numa_thr_cfg[numa_node].thr_s;
  const libxsmm_blasint transpose_chunksize = (transpose_work % thr == 0) ? (transpose_work / thr) : ((transpose_work / thr) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint transpose_thr_begin = (ltid * transpose_chunksize < transpose_work) ? (ltid * transpose_chunksize) : transpose_work;
  const libxsmm_blasint transpose_thr_end = ((ltid + 1) * transpose_chunksize < transpose_work) ?
    ((ltid + 1) * transpose_chunksize) : transpose_work;
  float *filter_tr = numa_thr_cfg[numa_node].bwd_d_scratch;
  /* lazy barrier init */
  libxsmm_barrier_init(cfg.barrier, my_tid);
  /* transpose weight */
  int ifm1ofm1 = 0;
  for (ifm1ofm1 = transpose_thr_begin; ifm1ofm1 < transpose_thr_end; ++ifm1ofm1) {
    const unsigned int ubk = (unsigned int)bk;
    const unsigned int ubc = (unsigned int)bc;
    int ofm1 = ifm1ofm1 / l_nBlocksIFm;
    int ifm1 = ifm1ofm1 % l_nBlocksIFm;
    /* the source block lives on whichever node owns this OFM block forward */
    my_numa_thr_cfg *l_numa_thr_cfg = &numa_thr_cfg[ofm_to_node[ofm1]];
    float *inp = l_numa_thr_cfg->scratch[layer];
    inp = inp + (ofm1 - l_numa_thr_cfg->blocksOFm_s[layer]) * nBlocksIFm * bc * bk + (ifm1 + numa_thr_cfg[numa_node].blocksIFm_tr_s[layer]) * bc * bk;
    float *out = filter_tr + ifm1 * nBlocksOFm * bk * bc + ofm1 * bk * bc;
    tr_kernel(inp, &ubk, out, &ubc);
  }
  libxsmm_barrier_wait(cfg.barrier, my_tid);
}

/* my_fc_bwd_exec: backward pass of a fully-connected layer.  Applies the
 * fused ReLU/bias gradients and then, depending on `pass`, the backward
 * by data (BWD_D) and/or backward by weights (BWD_W) computations. */
void my_fc_bwd_exec( my_fc_bwd_config cfg, float* din_act_ptr, float* dout_act_ptr, float* dwt_ptr, const float* in_act_ptr, float* dbias_ptr, const unsigned char* relu_ptr, my_pass pass, int start_tid, int my_tid, void* scratch, my_numa_thr_cfg *numa_thr_cfg, int layer ) {
  /* here we assume that input and output blocking is similar */
  const libxsmm_blasint bn = cfg.bn;
  const libxsmm_blasint bk = cfg.bk;
  const libxsmm_blasint bc = cfg.bc;
  const libxsmm_blasint nBlocksIFm = cfg.C / bc;
  const libxsmm_blasint nBlocksOFm = cfg.K / bk;
  const libxsmm_blasint nBlocksMB = cfg.N / bn;
  /* computing first logical thread */
  const libxsmm_blasint ltid = my_tid - start_tid;
  /* number of tasks for transpose that could be run in parallel */
  const libxsmm_blasint eltwise_work = nBlocksOFm * nBlocksMB;
  /* compute chunk size */
  const libxsmm_blasint eltwise_chunksize = (eltwise_work % cfg.threads == 0) ? (eltwise_work / cfg.threads) : ((eltwise_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint eltwise_thr_begin = (ltid * eltwise_chunksize < eltwise_work) ?
(ltid * eltwise_chunksize) : eltwise_work;
  const libxsmm_blasint eltwise_thr_end = ((ltid + 1) * eltwise_chunksize < eltwise_work) ? ((ltid + 1) * eltwise_chunksize) : eltwise_work;
  libxsmm_blasint mb1ofm1;
  /* number of tasks for transpose that could be run in parallel */
  const libxsmm_blasint dbias_work = nBlocksOFm;
  /* compute chunk size */
  const libxsmm_blasint dbias_chunksize = (dbias_work % cfg.threads == 0) ? (dbias_work / cfg.threads) : ((dbias_work / cfg.threads) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint dbias_thr_begin = (ltid * dbias_chunksize < dbias_work) ? (ltid * dbias_chunksize) : dbias_work;
  const libxsmm_blasint dbias_thr_end = ((ltid + 1) * dbias_chunksize < dbias_work) ? ((ltid + 1) * dbias_chunksize) : dbias_work;
  /* loop variables */
  libxsmm_blasint ofm1 = 0, mb1 = 0, ofm2 = 0, mb2 = 0;
  /* when ReLU is fused, the masked gradient is materialized into scratch
   * (past the first C*K floats) instead of overwriting dout_act_ptr */
  float *grad_output_ptr = (((cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU) ? ((float*)scratch)+(cfg.C*cfg.K) : dout_act_ptr);
  LIBXSMM_VLA_DECL(4, const float, doutput_orig, dout_act_ptr, nBlocksOFm, bn, bk);
  LIBXSMM_VLA_DECL(4, float, doutput, grad_output_ptr, nBlocksOFm, bn, bk);
  LIBXSMM_VLA_DECL(2, float, dbias, dbias_ptr, cfg.bk);
  LIBXSMM_VLA_DECL(4, const unsigned char, relumask, relu_ptr, nBlocksOFm, cfg.bn, cfg.bk);
  const libxsmm_blasint ifm_start = numa_thr_cfg->blocksIFm_tr_s[layer];
  /* lazy barrier init */
  libxsmm_barrier_init(cfg.barrier, ltid);
  /* ReLU backward: zero the incoming gradient wherever relumask is 0
   * (i.e. where the forward activation was not positive) */
  if ( (cfg.fuse_type & MY_ELTWISE_FUSE_RELU) == MY_ELTWISE_FUSE_RELU ) {
    for ( mb1ofm1 = eltwise_thr_begin; mb1ofm1 < eltwise_thr_end; ++mb1ofm1 ) {
      mb1 = mb1ofm1%nBlocksMB;
      ofm1 = mb1ofm1/nBlocksMB;
      for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
        for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
          float l_cur_out = LIBXSMM_VLA_ACCESS(4, doutput_orig, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk);
          l_cur_out = (LIBXSMM_VLA_ACCESS(4, relumask, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) != 0) ?
            l_cur_out : (float)0;
          LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk) = l_cur_out;
        }
      }
    }
    /* wait for eltwise to finish */
    libxsmm_barrier_wait(cfg.barrier, ltid);
  }
  /* bias gradient: reduce doutput over the whole minibatch per OFM block */
  if ( (cfg.fuse_type & MY_ELTWISE_FUSE_BIAS) == MY_ELTWISE_FUSE_BIAS ) {
    for ( ofm1 = dbias_thr_begin; ofm1 < dbias_thr_end; ++ofm1 ) {
      for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
        LIBXSMM_VLA_ACCESS( 2, dbias, ofm1, ofm2, cfg.bk ) = 0.0f;
      }
      for ( mb1 = 0; mb1 < nBlocksMB; ++mb1 ) {
        for ( mb2 = 0; mb2 < cfg.bn; ++mb2 ) {
          for ( ofm2 = 0; ofm2 < cfg.bk; ++ofm2 ) {
            LIBXSMM_VLA_ACCESS( 2, dbias, ofm1, ofm2, cfg.bk ) += LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1, mb2, ofm2, nBlocksOFm, cfg.bn, cfg.bk);
          }
        }
      }
    }
    /* wait for eltwise to finish */
    libxsmm_barrier_wait(cfg.barrier, ltid);
  }
  /* backward by data: compute dinput from doutput and the transposed filter */
  if ( (pass & MY_PASS_BWD_D) == MY_PASS_BWD_D ) {
    const libxsmm_blasint use_2d_blocking = cfg.bwd_2d_blocking;
    /* number of tasks that could be run in parallel */
    const libxsmm_blasint work = nBlocksIFm * nBlocksMB;
    /* compute chunk size */
    const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1);
    /* compute thr_begin and thr_end */
    const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
    const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ?
((ltid + 1) * chunksize) : work; /* loop variables */ libxsmm_blasint ifm1 = 0, ifm2 = 0, ifm1ofm1 = 0, mb1ifm1 = 0; libxsmm_blasint im_tasks_per_thread = 0, in_tasks_per_thread = 0, my_in_start = 0, my_in_end = 0, my_im_start = 0, my_im_end = 0, my_row_id = 0, my_col_id = 0, row_teams = 0, column_teams = 0; LIBXSMM_VLA_DECL(4, float, dinput, din_act_ptr, nBlocksIFm, bn, bc); LIBXSMM_VLA_DECL(4, float, filter_tr, numa_thr_cfg->bwd_d_scratch, nBlocksOFm, bk, bc); unsigned long long blocks = nBlocksOFm; libxsmm_blasint KB_BLOCKS = nBlocksOFm, BF = 1; BF = cfg.bwd_bf; KB_BLOCKS = nBlocksOFm/BF; blocks = KB_BLOCKS; if (use_2d_blocking == 1) { row_teams = cfg.bwd_row_teams; column_teams = cfg.bwd_column_teams; my_col_id = ltid % column_teams; my_row_id = ltid / column_teams; im_tasks_per_thread = LIBXSMM_UPDIV(nBlocksMB, row_teams); in_tasks_per_thread = LIBXSMM_UPDIV(nBlocksIFm, column_teams); my_im_start = LIBXSMM_MIN(my_row_id * im_tasks_per_thread, nBlocksMB); my_im_end = LIBXSMM_MIN((my_row_id+1) * im_tasks_per_thread, nBlocksMB); my_in_start = LIBXSMM_MIN(my_col_id * in_tasks_per_thread, nBlocksIFm); my_in_end = LIBXSMM_MIN((my_col_id+1) * in_tasks_per_thread, nBlocksIFm); } if (use_2d_blocking == 1) { if (BF > 1) { for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) { for (ifm1 = my_in_start; ifm1 < my_in_end; ++ifm1) { for (mb1 = my_im_start; mb1 < my_im_end; ++mb1) { /* Initialize intermediate f32 tensor */ if ( ofm1 == 0 ) { for ( mb2 = 0; mb2 < bn; ++mb2 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, mb2, ifm2, nBlocksIFm, bn, bc) = (float)0; } } } cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(4, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bk, bc ), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); } } } } else { for (ifm1 = my_in_start; ifm1 < my_in_end; ++ifm1) { for (mb1 = my_im_start; mb1 < my_im_end; ++mb1) { cfg.gemm_bwd2( 
&LIBXSMM_VLA_ACCESS(4, filter_tr, ifm1, 0, 0, 0, nBlocksOFm, bk, bc), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); } } } } else { if (BF > 1) { for ( ofm1 = 0; ofm1 < BF; ++ofm1 ) { for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) { mb1 = mb1ifm1%nBlocksMB; ifm1 = mb1ifm1/nBlocksMB; /* Initialize intermediate f32 tensor */ if ( ofm1 == 0 ) { for ( mb2 = 0; mb2 < bn; ++mb2 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, mb2, ifm2, nBlocksIFm, bn, bc) = (float)0; } } } cfg.gemm_bwd( &LIBXSMM_VLA_ACCESS(4, filter_tr, ifm1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bk, bc ), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, ofm1*KB_BLOCKS, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); } } } else { for ( mb1ifm1 = thr_begin; mb1ifm1 < thr_end; ++mb1ifm1 ) { mb1 = mb1ifm1%nBlocksMB; ifm1 = mb1ifm1/nBlocksMB; cfg.gemm_bwd2( &LIBXSMM_VLA_ACCESS(4, filter_tr, ifm1 - ifm_start, 0, 0, 0, nBlocksOFm, bk, bc ), &LIBXSMM_VLA_ACCESS(4, doutput, mb1, 0, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, dinput, mb1, ifm1, 0, 0, nBlocksIFm, bn, bc), &blocks); } } } libxsmm_barrier_wait(cfg.barrier, ltid); } if ( (pass & MY_PASS_BWD_W) == MY_PASS_BWD_W ) { /* number of tasks that could be run in parallel */ const libxsmm_blasint ofm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ofm_subtasks; const libxsmm_blasint ifm_subtasks = (cfg.upd_2d_blocking == 1) ? 1 : cfg.ifm_subtasks; const libxsmm_blasint bbk = (cfg.upd_2d_blocking == 1) ? bk : bk/ofm_subtasks; const libxsmm_blasint bbc = (cfg.upd_2d_blocking == 1) ? 
bc : bc/ifm_subtasks; const libxsmm_blasint work = nBlocksIFm * ifm_subtasks * nBlocksOFm * ofm_subtasks; const libxsmm_blasint Cck_work = nBlocksIFm * ifm_subtasks * ofm_subtasks; const libxsmm_blasint Cc_work = nBlocksIFm * ifm_subtasks; /* 2D blocking parameters */ libxsmm_blasint use_2d_blocking = cfg.upd_2d_blocking; libxsmm_blasint im_tasks_per_thread = 0, in_tasks_per_thread = 0, my_in_start = 0, my_in_end = 0, my_im_start = 0, my_im_end = 0, my_row_id = 0, my_col_id = 0, row_teams = 0, column_teams = 0; /* compute chunk size */ const libxsmm_blasint chunksize = (work % cfg.threads == 0) ? (work / cfg.threads) : ((work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work; libxsmm_blasint BF = cfg.upd_bf; /* loop variables */ libxsmm_blasint ifm1ofm1 = 0, ifm1 = 0, ifm2 = 0, bfn = 0, ii = 0, jj = 0; /* Batch reduce related variables */ unsigned long long blocks = nBlocksMB/BF; LIBXSMM_VLA_DECL(4, const float, input, in_act_ptr, nBlocksIFm, bn, bc); LIBXSMM_VLA_DECL(4, float, dfilter, dwt_ptr, nBlocksIFm, bc, bk); if (use_2d_blocking == 1) { row_teams = cfg.upd_row_teams; column_teams = cfg.upd_column_teams; my_col_id = ltid % column_teams; my_row_id = ltid / column_teams; im_tasks_per_thread = LIBXSMM_UPDIV(nBlocksIFm, row_teams); in_tasks_per_thread = LIBXSMM_UPDIV(nBlocksOFm, column_teams); my_im_start = LIBXSMM_MIN(my_row_id * im_tasks_per_thread, nBlocksIFm); my_im_end = LIBXSMM_MIN((my_row_id+1) * im_tasks_per_thread, nBlocksIFm); my_in_start = LIBXSMM_MIN(my_col_id * in_tasks_per_thread, nBlocksOFm); my_in_end = LIBXSMM_MIN((my_col_id+1) * in_tasks_per_thread, nBlocksOFm); } if (use_2d_blocking == 1) { if (BF == 1) { for (ofm1 = my_in_start; ofm1 < my_in_end; ++ofm1) { for (ifm1 = my_im_start; ifm1 < my_im_end; ++ifm1) { cfg.gemm_upd2(&LIBXSMM_VLA_ACCESS(4, doutput, 
0, ofm1, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, input, 0, ifm1, 0, 0, nBlocksIFm, bn, bc), &LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk), &blocks); } } } else { for (bfn = 0; bfn < BF; bfn++) { for (ofm1 = my_in_start; ofm1 < my_in_end; ++ofm1) { for (ifm1 = my_im_start; ifm1 < my_im_end; ++ifm1) { /* initialize current work task to zero */ if (bfn == 0) { for (ii = 0; ii<bc; ii++) { for (jj = 0; jj<bk; jj++) { LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, ii, jj, nBlocksIFm, bc, bk) = (float)0; } } } cfg.gemm_upd( &LIBXSMM_VLA_ACCESS(4, doutput, bfn*blocks, ofm1, 0, 0, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, input, bfn*blocks, ifm1, 0, 0, nBlocksIFm, bn, bc), &LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, 0, 0, nBlocksIFm, bc, bk), &blocks); } } } } } else { if (BF == 1) { for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) { ofm1 = ifm1ofm1 / Cck_work; ofm2 = (ifm1ofm1 % Cck_work) / Cc_work; ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks; ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks; cfg.gemm_upd2( &LIBXSMM_VLA_ACCESS(4, doutput, 0, ofm1, 0, ofm2*bbk, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, input, 0, ifm1, 0, ifm2*bbc, nBlocksIFm, bn, bc), &LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks); } } else { for (bfn = 0; bfn < BF; bfn++) { for ( ifm1ofm1 = thr_begin; ifm1ofm1 < thr_end; ++ifm1ofm1 ) { ofm1 = ifm1ofm1 / Cck_work; ofm2 = (ifm1ofm1 % Cck_work) / Cc_work; ifm1 = ((ifm1ofm1 % Cck_work) % Cc_work) / ifm_subtasks; ifm2 = ((ifm1ofm1 % Cck_work) % Cc_work) % ifm_subtasks; /* initialize current work task to zero */ if (bfn == 0) { for (ii = 0; ii<bbc; ii++) { for (jj = 0; jj<bbk; jj++) { LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, ifm2*bbc+ii, ofm2*bbk+jj, nBlocksIFm, bc, bk) = (float)0; } } } cfg.gemm_upd( &LIBXSMM_VLA_ACCESS(4, doutput, bfn*blocks, ofm1, 0, ofm2*bbk, nBlocksOFm, bn, bk), &LIBXSMM_VLA_ACCESS(4, input, bfn*blocks, ifm1, 0, ifm2*bbc, 
nBlocksIFm, bn, bc), &LIBXSMM_VLA_ACCESS(4, dfilter, ofm1, ifm1, ifm2*bbc, ofm2*bbk, nBlocksIFm, bc, bk), &blocks); } } } } libxsmm_barrier_wait(cfg.barrier, ltid); } } void my_opt_exec( my_opt_config cfg, const float* delwt_ptr, int start_tid, int my_tid, my_numa_thr_cfg *numa_thr_cfg, int l, my_fc_fwd_config my_fc_fwd) { const libxsmm_blasint ltid = my_tid - numa_thr_cfg->thr_s; const libxsmm_blasint nBlocksIFm = my_fc_fwd.C / my_fc_fwd.bc; const libxsmm_blasint IFM_shift = my_fc_fwd.bc * my_fc_fwd.bk; const libxsmm_blasint OFM_shift = nBlocksIFm * my_fc_fwd.bc * my_fc_fwd.bk; const libxsmm_blasint work = ((numa_thr_cfg->blocksOFm_e[l] - numa_thr_cfg->blocksOFm_s[l]) + 1) * nBlocksIFm; /* compute chunk size */ int thr = numa_thr_cfg->thr_e - numa_thr_cfg->thr_s; const libxsmm_blasint chunksize = (work % thr == 0) ? (work / thr) : ((work / thr) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work; const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? 
((ltid + 1) * chunksize) : work; libxsmm_barrier_init( cfg.barrier, my_tid ); __m512 vlr = _mm512_set1_ps( cfg.lr ); float *dw_prt = (float*)delwt_ptr + numa_thr_cfg->blocksOFm_s[l] * OFM_shift; int j = 0, i = 0; for (j = thr_begin; j < thr_end; j++) { int ofm = j / nBlocksIFm; int ifm = j % nBlocksIFm; float *out = numa_thr_cfg->scratch[l] + ofm * OFM_shift + ifm * IFM_shift; float *inp = dw_prt + ofm * OFM_shift + ifm * IFM_shift; #pragma unroll(16) for (i = 0; i < IFM_shift; i += 16) _mm512_storeu_ps( out+i, _mm512_sub_ps( _mm512_loadu_ps( out+i ), _mm512_mul_ps( vlr, _mm512_loadu_ps( inp + i ) ) ) ) ; } libxsmm_barrier_wait( cfg.barrier, my_tid ); } void my_smax_fwd_exec( my_smax_fwd_config cfg, const float* in_act_ptr, float* out_act_ptr, const int* label_ptr, float* loss, int start_tid, int my_tid, void* scratch ) { libxsmm_blasint bn = cfg.bn; libxsmm_blasint Bn = cfg.N/cfg.bn; libxsmm_blasint bc = cfg.bc; libxsmm_blasint Bc = cfg.C/cfg.bc; /* loop counters */ libxsmm_blasint i = 0; libxsmm_blasint img1, img2, ifm1, ifm2; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint n_work = Bn * bn; /* compute chunk size */ const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work; const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? 
((ltid + 1) * n_chunksize) : n_work;

  LIBXSMM_VLA_DECL(4, float, output, out_act_ptr, Bc, bn, bc);
  LIBXSMM_VLA_DECL(4, const float, input, in_act_ptr, Bc, bn, bc);
  LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn);

  /* lazy barrier init */
  libxsmm_barrier_init( cfg.barrier, ltid );

  for ( i = n_thr_begin; i < n_thr_end; ++i ) {
    /* NOTE(review): FLT_MIN is the smallest *positive* normal float, not the
     * most negative value, so negative inputs never raise `max`.  The shift
     * still prevents exp() overflow (its only use here), but confirm this is
     * the intended seed. */
    float max = FLT_MIN;
    float sum_of_exp = 0.0f;
    img1 = i/bn;
    img2 = i%bn;
    /* set output to input and set compute max per image */
    for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
      for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
        LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc );
        if ( LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc ) > max ) {
          max = LIBXSMM_VLA_ACCESS( 4, input, img1, ifm1, img2, ifm2, Bc, bn, bc );
        }
      }
    }
    /* sum exp over outputs */
    for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
      for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
        LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = (float)exp( (double)(LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - max) );
        sum_of_exp += LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc );
      }
    }
    /* scale output */
    sum_of_exp = 1.0f/sum_of_exp;
    for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) {
      for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) {
        LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * sum_of_exp;
      }
    }
  }

  libxsmm_barrier_wait( cfg.barrier, ltid );

  /* calculate loss single threaded */
  if ( ltid == 0 ) {
    (*loss) = 0.0f;
    for ( img1 = 0; img1 < Bn; ++img1 ) {
      for ( img2 = 0; img2 <bn; ++img2 ) {
        /* label gives the flat target feature index; map to (block, offset) */
        libxsmm_blasint ifm = (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn );
        libxsmm_blasint ifm1b = ifm/bc;
        libxsmm_blasint ifm2b = ifm%bc;
        /* clamp the probability away from zero before taking the log */
        float val = ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) > FLT_MIN ) ?
LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1b, img2, ifm2b, Bc, bn, bc ) : FLT_MIN; *loss = LIBXSMM_LOGF( val ); } } *loss = ((-1.0f)*(*loss))/cfg.N; } libxsmm_barrier_wait( cfg.barrier, ltid ); } void my_smax_bwd_exec( my_smax_bwd_config cfg, float* delin_act_ptr, const float* out_act_ptr, const int* label_ptr, int start_tid, int my_tid, void* scratch ) { libxsmm_blasint bn = cfg.bn; libxsmm_blasint Bn = cfg.N/cfg.bn; libxsmm_blasint bc = cfg.bc; libxsmm_blasint Bc = cfg.C/cfg.bc; /* loop counters */ libxsmm_blasint i = 0; libxsmm_blasint img1, img2, ifm1, ifm2; float rcp_N = 1.0f/cfg.N; /* computing first logical thread */ const libxsmm_blasint ltid = my_tid - start_tid; /* number of tasks that could run in parallel for the batch */ const libxsmm_blasint n_work = Bn * bn; /* compute chunk size */ const libxsmm_blasint n_chunksize = (n_work % cfg.threads == 0) ? (n_work / cfg.threads) : ((n_work / cfg.threads) + 1); /* compute thr_begin and thr_end */ const libxsmm_blasint n_thr_begin = (ltid * n_chunksize < n_work) ? (ltid * n_chunksize) : n_work; const libxsmm_blasint n_thr_end = ((ltid + 1) * n_chunksize < n_work) ? 
((ltid + 1) * n_chunksize) : n_work; LIBXSMM_VLA_DECL(4, const float, output, out_act_ptr, Bc, bn, bc); LIBXSMM_VLA_DECL(4, float, dinput, delin_act_ptr, Bc, bn, bc); LIBXSMM_VLA_DECL(2, const int, label, label_ptr, bn); /* lazy barrier init */ libxsmm_barrier_init( cfg.barrier, ltid ); for ( i = n_thr_begin; i < n_thr_end; ++i ) { img1 = i/bn; img2 = i%bn; /* set output to input and set compute max per image */ for ( ifm1 = 0; ifm1 < Bc; ++ifm1 ) { for ( ifm2 = 0; ifm2 < bc; ++ifm2 ) { if ( (ifm1*Bc)+ifm2 == (libxsmm_blasint)LIBXSMM_VLA_ACCESS( 2, label, img1, img2, bn ) ) { LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) = ( LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) - 1.0f ) * rcp_N * cfg.loss_weight; } else { LIBXSMM_VLA_ACCESS( 4, dinput, img1, ifm1, img2, ifm2, Bc, bn, bc ) = LIBXSMM_VLA_ACCESS( 4, output, img1, ifm1, img2, ifm2, Bc, bn, bc ) * rcp_N * cfg.loss_weight; } } } } libxsmm_barrier_wait( cfg.barrier, ltid ); } void *numa_alloc_onnode_aligned(size_t size, int numa_node, int alignment_) { #if 0 int alignment = alignment_ - 1; size_t adj_size = sizeof(size_t) + alignment; void *r_ptr = NULL; void *t_ptr = numa_alloc_onnode(size + adj_size, numa_node); if (t_ptr == NULL) return NULL; r_ptr = (void *)(((size_t)t_ptr + adj_size) & ~alignment); *((size_t*)r_ptr - 1) = (size_t)r_ptr - (size_t)t_ptr; return r_ptr; #else return numa_alloc_onnode(size, numa_node); #endif } void numa_free_aligned(void *ptr, size_t size) { #if 0 if (ptr == NULL) return; void *t_ptr = (void*)((size_t*)ptr - *((size_t*)ptr - 1)); numa_free(t_ptr, size); #else numa_free(ptr, size); #endif } int setup_my_numa(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, int n_threads) { int max_nodes = numa_max_node() + 1; int max_cfg_nodes = numa_num_configured_nodes(); int max_cfg_cpus = numa_num_configured_cpus(); int max_task_cpus = numa_num_task_cpus(); my_numa_thr_cfg *numa_thr_cfg = (my_numa_thr_cfg *) malloc(sizeof(my_numa_thr_cfg) * 
max_cfg_nodes);

  printf("NUMA configuration:\n");
  printf("There are %d numa nodes on the system\n", max_nodes);
  printf("There are %d configured numa nodes on the system\n", max_cfg_nodes);
  printf("There are %d configured CPUs on the system\n", max_cfg_cpus);
  printf("There are %d CPUs asigned for the current task\n", max_task_cpus);

  struct bitmask* bmask = numa_bitmask_alloc(max_cfg_cpus);
  int thr_count = 0, i = 0;
  for (i = 0; i < max_cfg_nodes; i++) {
    /* CPUs belonging to node i */
    numa_node_to_cpus(i, bmask);
    /* per-layer bookkeeping arrays for this node */
    numa_thr_cfg[i].scratch = (float**) malloc(sizeof(float*) * num_layers);
    numa_thr_cfg[i].layer_size = (size_t*)malloc(sizeof(size_t)*num_layers);
    numa_thr_cfg[i].blocksOFm_s = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksOFm_e = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksIFm_s = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksIFm_e = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksOFm_tr_s = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksOFm_tr_e = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksIFm_tr_s = (int*)malloc(sizeof(int)*num_layers);
    numa_thr_cfg[i].blocksIFm_tr_e = (int*)malloc(sizeof(int)*num_layers);
    /*
    printf("@@@@@ node %d size %zd cpus ", i, bmask->size);
    size_t j = 0;
    for(j = 0; j < bmask->size; j++)
    printf("%d", numa_bitmask_isbitset(bmask, j));
    printf("\n");
    */
    int num_threads_in_mask = 0;
    int t = 0;
    for (t = 0; t < bmask->size; t++)
      if (numa_bitmask_isbitset(bmask, t))
        num_threads_in_mask++;
    int thr_s = 0, thr_e = 0, node_threads = 0;
    /* first scan: find the first task thread whose CPU sits on this node.
     * NOTE(review): thr_e is never written when the second scan matches no
     * CPU, and node_threads counts every scanned CPU (set or not) -- confirm
     * this matches the intended thread-to-CPU numbering.  The locals
     * thr_s/thr_e declared above are unused. */
    while(thr_count < n_threads && node_threads < num_threads_in_mask) {
      if (numa_bitmask_isbitset(bmask, thr_count)) {
        numa_thr_cfg[i].thr_s = thr_count;
        break;
      }
      thr_count++;
      node_threads++;
    }
    /* second scan: walk the remaining CPUs of this node, recording the last
     * set one as the (inclusive) end thread */
    while(thr_count < n_threads && node_threads < num_threads_in_mask) {
      if (numa_bitmask_isbitset(bmask, thr_count))
        numa_thr_cfg[i].thr_e = thr_count;
      thr_count++;
      node_threads++;
    }
  }
  *numa_thr_cfg_ = numa_thr_cfg;
  return 1;
}

/* setup_my_numa_fwd (signature continues on the following lines): record, per
 * node and per layer, the range of output-feature blocks its threads touch in
 * the forward pass. */
int setup_my_numa_fwd(my_numa_thr_cfg
**numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd) { my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_; int max_cfg_nodes = numa_num_configured_nodes(); int i = 0; for (i = 0; i < max_cfg_nodes; i++) { int n_thr = numa_thr_cfg[i].thr_e - numa_thr_cfg[i].thr_s; int l = 0; for (l = 0; l < num_layers; l++) { const libxsmm_blasint nBlocksOFm = my_fc_fwd[l].K / my_fc_fwd[l].bk; const libxsmm_blasint nBlocksMB = my_fc_fwd[l].N / my_fc_fwd[l].bn; if (my_fc_fwd[l].fwd_bf > 1) { printf("@@@ NUMA ERROR: doesn't support this configuration\n"); return -1; } int thr = 0; if (my_fc_fwd[l].fwd_2d_blocking == 1) { libxsmm_blasint column_teams = my_fc_fwd[l].fwd_column_teams; libxsmm_blasint in_tasks_per_thread = LIBXSMM_UPDIV(nBlocksOFm, column_teams); numa_thr_cfg[i].blocksOFm_s[l] = nBlocksOFm; numa_thr_cfg[i].blocksOFm_e[l] = 0; for (thr = numa_thr_cfg[i].thr_s; thr <= numa_thr_cfg[i].thr_e; thr++) { libxsmm_blasint my_col_id = thr % column_teams; /* ltid */ libxsmm_blasint my_in_start = LIBXSMM_MIN(my_col_id * in_tasks_per_thread, nBlocksOFm); libxsmm_blasint my_in_end = LIBXSMM_MIN((my_col_id+1) * in_tasks_per_thread, nBlocksOFm); numa_thr_cfg[i].blocksOFm_s[l] = (my_in_start < numa_thr_cfg[i].blocksOFm_s[l]) ? my_in_start : numa_thr_cfg[i].blocksOFm_s[l]; numa_thr_cfg[i].blocksOFm_e[l] = (my_in_end > numa_thr_cfg[i].blocksOFm_e[l]) ? my_in_end : numa_thr_cfg[i].blocksOFm_e[l]; } } else { numa_thr_cfg[i].blocksOFm_s[l] = nBlocksOFm; numa_thr_cfg[i].blocksOFm_e[l] = 0; for (thr = numa_thr_cfg[i].thr_s; thr <= numa_thr_cfg[i].thr_e; thr++) { const libxsmm_blasint work = nBlocksOFm * nBlocksMB; const libxsmm_blasint chunksize = (work % my_fc_fwd[l].threads == 0) ? (work / my_fc_fwd[l].threads) : ((work / my_fc_fwd[l].threads) + 1); const libxsmm_blasint thr_begin = (thr * chunksize < work) ? (thr * chunksize) : work; const libxsmm_blasint thr_end = ((thr + 1) * chunksize < work) ? 
((thr + 1) * chunksize) : work; /* completes the thr_end declaration begun in the previous chunk */
        /* Map this thread's flat work range back to an inclusive OFm-block range. */
        int ofm_s = thr_begin / nBlocksMB;
        int ofm_e = (thr_end-1) / nBlocksMB;
        /* Fold into the per-node running min/max OFm-block range for layer l. */
        numa_thr_cfg[i].blocksOFm_s[l] = (ofm_s < numa_thr_cfg[i].blocksOFm_s[l]) ? ofm_s : numa_thr_cfg[i].blocksOFm_s[l];
        numa_thr_cfg[i].blocksOFm_e[l] = (ofm_e > numa_thr_cfg[i].blocksOFm_e[l]) ? ofm_e : numa_thr_cfg[i].blocksOFm_e[l];
      }
#if 0
      printf("numa_thr_cfg[%d].blocksOFm_s[%d] %d numa_thr_cfg[%d].blocksOFm_e[%d] %d\n", i, l, numa_thr_cfg[i].blocksOFm_s[l], i, l, numa_thr_cfg[i].blocksOFm_e[l]);
#endif
    }
  }
}
return 1;
}

/* Build, per layer, a lookup table mapping each OFm block index to the NUMA
 * node whose [blocksOFm_s, blocksOFm_e] range (inclusive) contains it.
 * Allocates fwd_ofm_to_node[l] (freed by free_fwd_ofm_to_node).
 * NOTE(review): the malloc result is not checked for NULL. */
void set_fwd_ofm_to_node(int **fwd_ofm_to_node, my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd) {
  int max_cfg_nodes = numa_num_configured_nodes();
  my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
  int l, ofm, i;
  for (l = 0; l < num_layers; l++) {
    const libxsmm_blasint nBlocksOFm = my_fc_fwd[l].K / my_fc_fwd[l].bk;
    fwd_ofm_to_node[l] = (int*) malloc(sizeof(int) * nBlocksOFm);
    int *l_fwd_ofm_to_node = fwd_ofm_to_node[l];
    for (i = 0; i < max_cfg_nodes; i++) {
      for (ofm = 0; ofm < nBlocksOFm; ofm++) {
        /* Node ranges are inclusive at both ends; a later node overwrites on overlap. */
        if (ofm >= numa_thr_cfg[i].blocksOFm_s[l] && ofm <= numa_thr_cfg[i].blocksOFm_e[l])
          l_fwd_ofm_to_node[ofm] = i;
      }
    }
  }
#if 0
  for (l = 0; l < num_layers; l++) {
    const libxsmm_blasint nBlocksOFm = my_fc_fwd[l].K / my_fc_fwd[l].bk;
    int *l_fwd_ofm_to_node = fwd_ofm_to_node[l];
    for (ofm = 0; ofm < nBlocksOFm; ofm++)
      printf("%d l_fwd_ofm_to_node[%d] %d | %d\n", l, ofm, l_fwd_ofm_to_node[ofm], nBlocksOFm);
  }
#endif
}

/* Release the per-layer OFm-to-node tables built by set_fwd_ofm_to_node.
 * The caller still owns (and frees) the outer fwd_ofm_to_node array. */
void free_fwd_ofm_to_node(int **fwd_ofm_to_node, int num_layers) {
  int l;
  for (l = 0; l < num_layers; l++) {
    free(fwd_ofm_to_node[l]);
  }
}

/* Mirror of setup_my_numa_fwd for the BWD/d pass: for every NUMA node and
 * layer, derive the inclusive range of transposed-IFm blocks
 * [blocksIFm_tr_s, blocksIFm_tr_e] touched by that node's threads, assuming
 * the same flat work decomposition my_fc_bwd_exec uses.
 * Returns 1 on success, -1 for unsupported configurations (bwd blocking
 * factor > 1 or 2D blocking). */
int setup_my_numa_bwd_d(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_bwd_config* my_fc_bwd) {
  my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
  int max_cfg_nodes = numa_num_configured_nodes();
  int i = 0;
  for (i = 0; i < max_cfg_nodes; i++) {
    /* NOTE(review): n_thr is computed but never used in this function. */
    int n_thr = numa_thr_cfg[i].thr_e - numa_thr_cfg[i].thr_s;
    int l = 0;
    for (l = 0; l < num_layers; l++) {
      if (my_fc_bwd[l].bwd_bf > 1) {
        printf("@@@ NUMA ERROR: doesn't support this configuration\n");
        return -1;
      }
      int thr = 0;
      const libxsmm_blasint nBlocksIFm = my_fc_bwd[l].C / my_fc_bwd[l].bc;
      const libxsmm_blasint nBlocksMB = my_fc_bwd[l].N / my_fc_bwd[l].bn;
      if (my_fc_bwd[l].bwd_2d_blocking == 1) {
        printf("@@@ NUMA ERROR: doesn't support this configuration\n");
        return -1;
      } else {
        /* Start with an empty (inverted) range; the loop below widens it. */
        numa_thr_cfg[i].blocksIFm_tr_s[l] = nBlocksIFm;
        numa_thr_cfg[i].blocksIFm_tr_e[l] = 0;
        /* thr_e is inclusive here (<=), matching the FWD setup. */
        for (thr = numa_thr_cfg[i].thr_s; thr <= numa_thr_cfg[i].thr_e; thr++) {
          /* number of tasks that could be run in parallel */
          const libxsmm_blasint work = nBlocksIFm * nBlocksMB;
          /* compute chunk size */
          const libxsmm_blasint chunksize = (work % my_fc_bwd[l].threads == 0) ? (work / my_fc_bwd[l].threads) : ((work / my_fc_bwd[l].threads) + 1);
          /* compute thr_begin and thr_end */
          const libxsmm_blasint thr_begin = (thr * chunksize < work) ? (thr * chunksize) : work;
          const libxsmm_blasint thr_end = ((thr + 1) * chunksize < work) ? ((thr + 1) * chunksize) : work;
          int ifm_s = thr_begin / nBlocksMB;
          int ifm_e = (thr_end-1) / nBlocksMB;
          numa_thr_cfg[i].blocksIFm_tr_s[l] = (ifm_s < numa_thr_cfg[i].blocksIFm_tr_s[l]) ? ifm_s : numa_thr_cfg[i].blocksIFm_tr_s[l];
          numa_thr_cfg[i].blocksIFm_tr_e[l] = (ifm_e > numa_thr_cfg[i].blocksIFm_tr_e[l]) ? ifm_e : numa_thr_cfg[i].blocksIFm_tr_e[l];
        }
#if 0
        printf("numa_thr_cfg[%d].blocksIFm_tr_s[%d] %d numa_thr_cfg[%d].blocksIFm_tr_e[%d] %d\n", i, l, numa_thr_cfg[i].blocksIFm_tr_s[l], i, l, numa_thr_cfg[i].blocksIFm_tr_e[l]);
#endif
      }
    }
  }
  return 1;
}

/* Allocate, per NUMA node and layer, a node-local weight buffer large enough
 * for that node's share of OFm blocks (layer_size[l] bytes, 2 MiB aligned).
 * Returns 1 on success, -1 if a node-local allocation fails. */
int allocate_numa_buffers_fwd(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd) {
  my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
  int max_cfg_nodes = numa_num_configured_nodes();
  int i = 0, j = 0, l = 0; /* NOTE(review): j is unused here */
  for (i = 0; i < max_cfg_nodes; i++) {
    for (l = 0; l < num_layers; l++) {
      const libxsmm_blasint nBlocksIFm = my_fc_fwd[l].C / my_fc_fwd[l].bc;
      const libxsmm_blasint nBlocksOFm = my_fc_fwd[l].K / my_fc_fwd[l].bk;
      /* floats per OFm block: full IFm extent times the bc x bk micro-tile */
      const libxsmm_blasint OFM_shift = nBlocksIFm * my_fc_fwd[l].bc * my_fc_fwd[l].bk;
      int l_nBlocksOFm = (numa_thr_cfg[i].blocksOFm_e[l] - numa_thr_cfg[i].blocksOFm_s[l]) + 1;
      if (l_nBlocksOFm <= 0) continue; /* node owns no blocks of this layer */
      numa_thr_cfg[i].layer_size[l] = sizeof(float) * ((l_nBlocksOFm) * OFM_shift);
      numa_thr_cfg[i].scratch[l] = (float*)numa_alloc_onnode_aligned(numa_thr_cfg[i].layer_size[l], i, 2097152);
      if (numa_thr_cfg[i].scratch[l] == NULL) {
        printf("@@@ NUMA ERROR: cannot allocate on node #%d\n", i);
        return -1;
      }
    }
  }
  return 1;
}

/* Allocate one BWD/d transpose scratch buffer per NUMA node, sized for the
 * largest per-layer transposed-IFm share (buffer is reused across layers).
 * Returns 1 on success, -1 if a node-local allocation fails. */
int allocate_numa_buffers_bwd_d(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_bwd_config* my_fc_bwd) {
  my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
  int max_cfg_nodes = numa_num_configured_nodes();
  int i = 0, j = 0, l = 0; /* NOTE(review): j is unused here */
  for (i = 0; i < max_cfg_nodes; i++) {
    int l_nBlocksIFm = 0; /* running max, in floats, over all layers */
    for (l = 0; l < num_layers; l++) {
      const libxsmm_blasint nBlocksOFm = my_fc_bwd[l].K / my_fc_bwd[l].bk;
      const libxsmm_blasint IFM_shift = nBlocksOFm * my_fc_bwd[l].bc * my_fc_bwd[l].bk;
      if (l_nBlocksIFm <= ((numa_thr_cfg[i].blocksIFm_tr_e[l] - numa_thr_cfg[i].blocksIFm_tr_s[l]) + 1) * IFM_shift)
        l_nBlocksIFm = ((numa_thr_cfg[i].blocksIFm_tr_e[l] - numa_thr_cfg[i].blocksIFm_tr_s[l]) + 1) * IFM_shift;
    }
    numa_thr_cfg[i].bwd_d_scratch_size = sizeof(float) * (l_nBlocksIFm);
    numa_thr_cfg[i].bwd_d_scratch = (float*)numa_alloc_onnode_aligned(numa_thr_cfg[i].bwd_d_scratch_size, i, 2097152);
    if (numa_thr_cfg[i].bwd_d_scratch == NULL) {
      printf("@@@ NUMA ERROR: cannot allocate on node #%d\n", i);
      return -1;
    }
  }
  return 1;
}

/* Inference-path helper: copy each node's share of the original weights
 * fil_libxsmm[l] into its node-local FWD scratch buffer.
 * With COPY_ON_LOCAL_NODES defined, one designated thread per node performs
 * the copy after binding itself to that node; otherwise a collapsed OpenMP
 * loop copies from arbitrary threads. Always returns 1. */
int copy_to_numa_buffers_fwd_inf(my_numa_thr_cfg **numa_thr_cfg_, int num_layers, my_fc_fwd_config* my_fc_fwd, float **fil_libxsmm) {
  my_numa_thr_cfg *numa_thr_cfg = *numa_thr_cfg_;
  int max_cfg_nodes = numa_num_configured_nodes();
  int i,l;
#ifndef COPY_ON_LOCAL_NODES
#pragma omp parallel for collapse(2) private (i,l)
#else
#pragma omp parallel private (i,l)
  {
  int tid = omp_get_thread_num();
#endif
  for (i = 0; i < max_cfg_nodes; i++) {
#ifdef COPY_ON_LOCAL_NODES
    if (tid >= numa_thr_cfg[i].thr_s && tid <= numa_thr_cfg[i].thr_e) {
      numa_run_on_node(i);
    }
    if (tid == numa_thr_cfg[i].thr_s) {
#endif
    for (l = 0; l < num_layers; l++) {
      const libxsmm_blasint nBlocksIFm = my_fc_fwd[l].C / my_fc_fwd[l].bc;
      const libxsmm_blasint BOFM_shift = nBlocksIFm * my_fc_fwd[l].bc * my_fc_fwd[l].bk;
      int l_nBlocksOFm = (numa_thr_cfg[i].blocksOFm_e[l] - numa_thr_cfg[i].blocksOFm_s[l]) + 1;
      int j = 0;
      for (j = 0; j < l_nBlocksOFm ; j++) {
        size_t l_BOFM_shift = j * BOFM_shift;
        float *out = numa_thr_cfg[i].scratch[l] + l_BOFM_shift;
        /* source is offset by the node's first OFm block within the global weights */
        float *inp = fil_libxsmm[l] + numa_thr_cfg[i].blocksOFm_s[l] * BOFM_shift + l_BOFM_shift;
        memcpy(out, inp, sizeof(float) * nBlocksIFm * my_fc_fwd[l].bc * my_fc_fwd[l].bk);
      }
    }
#ifdef COPY_ON_LOCAL_NODES
    }
#endif
  }
#ifdef COPY_ON_LOCAL_NODES
  }
#endif
  return 1;
}

/* Per-thread weight copy between the global weight tensor and this node's
 * FWD scratch buffer for layer l.
 * dir == 0: global -> node-local scratch; dir != 0: scratch -> global.
 * Work (node's OFm blocks x all IFm blocks) is split evenly over the node's
 * threads; ltid is the thread's rank within the node. */
int copy_to_numa_buffers_fwd(my_numa_thr_cfg *numa_thr_cfg, my_fc_fwd_config my_fc_fwd, float *fil_libxsmm, int numa_node, int l, int my_tid, int dir) {
  const libxsmm_blasint ltid = my_tid - numa_thr_cfg->thr_s;
  const libxsmm_blasint nBlocksIFm = my_fc_fwd.C / my_fc_fwd.bc;
  const libxsmm_blasint nBlocksMB = my_fc_fwd.N / my_fc_fwd.bn;
  const libxsmm_blasint IFM_shift = my_fc_fwd.bc * my_fc_fwd.bk;
  const libxsmm_blasint OFM_shift = nBlocksIFm * my_fc_fwd.bc *
my_fc_fwd.bk;
  const libxsmm_blasint work = ((numa_thr_cfg->blocksOFm_e[l] - numa_thr_cfg->blocksOFm_s[l]) + 1) * nBlocksIFm;
  /* compute chunk size */
  /* NOTE(review): elsewhere thr_e is treated as inclusive (loops use <=), so the
     thread count would be thr_e - thr_s + 1; confirm whether this undercounts by
     one (and that thr can never be 0, which would divide by zero). */
  int thr = numa_thr_cfg->thr_e - numa_thr_cfg->thr_s;
  const libxsmm_blasint chunksize = (work % thr == 0) ? (work / thr) : ((work / thr) + 1);
  /* compute thr_begin and thr_end */
  const libxsmm_blasint thr_begin = (ltid * chunksize < work) ? (ltid * chunksize) : work;
  const libxsmm_blasint thr_end = ((ltid + 1) * chunksize < work) ? ((ltid + 1) * chunksize) : work;
  /*libxsmm_barrier_init( my_fc_fwd.barrier, my_tid );*/
  float *inp, *out;
  if (dir) {
    /* scratch -> global weights */
    inp = numa_thr_cfg->scratch[l];
    out = fil_libxsmm + numa_thr_cfg->blocksOFm_s[l] * OFM_shift;
  } else {
    /* global weights -> scratch */
    out = numa_thr_cfg->scratch[l];
    inp = fil_libxsmm + numa_thr_cfg->blocksOFm_s[l] * OFM_shift;
  }
  int j = 0, i = 0;
  for (j = thr_begin; j < thr_end; j++) {
    int ofm = j / nBlocksIFm;
    int ifm = j % nBlocksIFm;
    float *l_out = out + ofm * OFM_shift + ifm * IFM_shift;
    float *l_inp = inp + ofm * OFM_shift + ifm * IFM_shift;
    memcpy(l_out, l_inp, sizeof(float) * IFM_shift);
  }
  /*libxsmm_barrier_wait( my_fc_fwd.barrier, my_tid );*/
  return 1;
}

/* Benchmark driver: builds an MLP from CLI layer sizes, sets up NUMA-aware
 * weight buffers, then times FWD ('F'), unsupported BWD ('B'), or full
 * FWD+BWD+solver ('A') iterations and prints GFLOPS plus per-phase profiles.
 * Usage: iters MB fuse_type type bn bk bc C1 C2 ... CN */
int main(int argc, char* argv[]) {
  float **act_libxsmm, **fil_libxsmm, **delact_libxsmm, **delfil_libxsmm;
  float **bias_libxsmm, **delbias_libxsmm;
  unsigned char **relumask_libxsmm;
  int *label_libxsmm;
  my_eltwise_fuse my_fuse;
  my_fc_fwd_config* my_fc_fwd;
  my_fc_bwd_config* my_fc_bwd;
  my_opt_config* my_opt;
  my_smax_fwd_config my_smax_fwd;
  my_smax_bwd_config my_smax_bwd;
  void* scratch = NULL;
  size_t scratch_size = 0;
  /* some parameters we can overwrite via cli, default is some inner layer of overfeat */
  int iters = 10; /* repetitions of benchmark */
  int MB = 256; /* mini-batch size, "N" */
  int fuse_type = 0; /* 0: nothing fused, 1: relu fused, 2: elementwise fused, 3: relu and elementwise fused */
  char type = 'A'; /* 'A': ALL, 'F': FP, 'B': BP, 'U', WU */
  int bn = 32;
  int bk = 32;
  int bc = 32;
  int *C; /* number of input feature maps, "C" */
  int num_layers = 0;
#if defined(_OPENMP)
  int nThreads = omp_get_max_threads(); /* number of threads */
#else
  int nThreads = 1; /* number of threads */
#endif
  unsigned long long l_start, l_end;
  unsigned long long *fwd_time, *bwd_time, *solver_time; /* per-thread tick accumulators */
  double l_total = 0.0;
  double gflop = 0.0;
  int i, j;
  double fil_size = 0.0;
  double act_size = 0.0;
  float lr = 0.2f;
  float loss = 0;
  float loss_weight = 0.1f;

  libxsmm_matdiff_info norms_fwd, norms_bwd, norms_upd, diff;
  libxsmm_matdiff_clear(&norms_fwd);
  libxsmm_matdiff_clear(&norms_bwd);
  libxsmm_matdiff_clear(&norms_upd);
  libxsmm_matdiff_clear(&diff);

  if (argc > 1 && !strncmp(argv[1], "-h", 3)) {
    printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]);
    return 0;
  }
  libxsmm_rng_set_seed(1);

  /* reading new values from cli */
  i = 1;
  /* 8 fixed args + program name precede the per-layer channel counts */
  num_layers = argc - 9;
  if (argc > i) iters = atoi(argv[i++]);
  if (argc > i) MB = atoi(argv[i++]);
  if (argc > i) fuse_type = atoi(argv[i++]);
  if (argc > i) type = *(argv[i++]);
  if (argc > i) bn = atoi(argv[i++]);
  if (argc > i) bk = atoi(argv[i++]);
  if (argc > i) bc = atoi(argv[i++]);
  /* allocate the number of channles buffer */
  if ( num_layers < 1 ) {
    printf("Usage: %s iters MB fuse_type type bn bk bc C1 C2 ... CN\n", argv[0]);
    return 0;
  }
  C = (int*)malloc((num_layers+2)*sizeof(int));
  for (j = 0 ; i < argc; ++i, ++j ) {
    C[j] = atoi(argv[i]);
  }
  /* handle softmax config */
  C[num_layers+1] = C[num_layers];

  if (type != 'A' && type != 'F' && type != 'B') {
    printf("type needs to be 'A' (All), 'F' (FP only), 'B' (BP only)\n");
    return -1;
  }
  if ( (fuse_type < 0) || (fuse_type > 5) ) {
    printf("fuse type needs to be 0 (None), 1 (Bias), 2 (ReLU), 3 (Sigmoid), 4 (Bias+ReLU), 5 (Bias+Sigmoid)\n");
    return -1;
  }

#if defined(__SSE3__)
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
#endif

  /* print some summary */
  printf("##########################################\n");
  printf("# Setting Up (Common) #\n");
  printf("##########################################\n");
  printf("PARAMS: N:%d\n", MB);
  printf("PARAMS: Layers: %d\n", num_layers);
  printf("PARAMS: ITERS:%d", iters);
  printf(" Threads:%d\n", nThreads);
  for (i = 0; i < num_layers; ++i ) {
    if (i == 0) {
      act_size += (double)(MB*C[i]*sizeof(float))/(1024.0*1024.0);
      printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i, MB, C[i], (double)(MB*C[i]*sizeof(float))/(1024.0*1024.0) );
    }
    act_size += (double)(MB*C[i+1]*sizeof(float))/(1024.0*1024.0);
    fil_size += (double)(C[i]*C[i+1]*sizeof(float))/(1024.0*1024.0);
    printf("SIZE Filter %i (%dx%d): %10.2f MiB\n", i, C[i], C[i+1], (double)(C[i]*C[i+1]*sizeof(float))/(1024.0*1024.0) );
    printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i+1, MB, C[i+1], (double)(MB*C[i+1]*sizeof(float))/(1024.0*1024.0) );
  }
  act_size += (double)(MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0);
  printf("SIZE Activations softmax (%dx%d): %10.2f MiB\n", MB, C[num_layers+1], (double)(MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0) );
  printf("\nTOTAL SIZE Activations: %10.2f MiB\n", act_size );
  printf("TOTAL SIZE Filter: %10.2f MiB\n", fil_size );
  printf("TOTAL SIZE delActivations: %10.2f MiB\n", act_size );
  printf("TOTAL SIZE delFilter: %10.2f MiB\n", fil_size );
  printf("TOTAL SIZE MLP: %10.2f MiB\n", (2.0*fil_size) + (2.0*act_size) );

  /* allocate data */
  /* +2 because of the softwax layer */
  act_libxsmm = (float**)malloc( (num_layers+2)*sizeof(float*) );
  delact_libxsmm = (float**)malloc( (num_layers+1)*sizeof(float*) );
  for ( i = 0 ; i < num_layers+2; ++i ) {
#ifdef ACT_NUMA_INTERLEAVED
    act_libxsmm[i] = (float*)numa_alloc_interleaved( MB*C[i]*sizeof(float));
#else
    act_libxsmm[i] = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152);
#endif
    /* softmax has no incoming gradients */
    if ( i < num_layers+1 ) {
      delact_libxsmm[i] = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152);
    }
  }
  fil_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
  delfil_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
  for ( i = 0 ; i < num_layers; ++i ) {
    fil_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152);
    delfil_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152);
  }
  bias_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
  delbias_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
  for ( i = 0 ; i < num_layers; ++i ) {
    bias_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152);
    delbias_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152);
  }
  relumask_libxsmm = (unsigned char**)malloc( num_layers*sizeof(unsigned char*) );
  for ( i = 0 ; i < num_layers; ++i ) {
    relumask_libxsmm[i] = (unsigned char*)libxsmm_aligned_malloc( MB*C[i+1]*sizeof(unsigned char), 2097152);
  }
  label_libxsmm = (int*)libxsmm_aligned_malloc( MB*sizeof(int), 2097152);

  /* init data */
  for ( i = 0 ; i < num_layers+2; ++i ) {
    my_init_buf( act_libxsmm[i], MB*C[i], 0, 0 );
  }
  for ( i = 0 ; i < num_layers+1; ++i ) {
    my_init_buf( delact_libxsmm[i], MB*C[i], 0, 0 );
  }
  for ( i = 0 ; i < num_layers; ++i ) {
    my_init_buf( fil_libxsmm[i], C[i]*C[i+1], 0, 0 );
  }
  for ( i = 0 ; i < num_layers; ++i ) {
    my_init_buf( delfil_libxsmm[i], C[i]*C[i+1], 0, 0 );
  }
  for ( i = 0 ; i < num_layers; ++i ) {
    my_init_buf( bias_libxsmm[i], C[i+1], 0, 0 );
  }
  for ( i = 0 ; i < num_layers; ++i ) {
    my_init_buf( delbias_libxsmm[i], C[i+1], 0, 0 );
  }
  for ( i = 0 ; i < num_layers; ++i ) {
    zero_buf_uint8( relumask_libxsmm[i], MB*C[i+1] );
  }
  zero_buf_int32( label_libxsmm, MB );

  printf("\n");
  printf("##########################################\n");
  printf("# Setting Up (custom-Storage) #\n");
  printf("##########################################\n");

  /* NOTE(review): fuse_type 3 and 5 pass the validation above but are not
     mapped here, leaving my_fuse uninitialized — confirm intended. */
  if ( fuse_type == 0 ) {
    my_fuse = MY_ELTWISE_FUSE_NONE;
  } else if ( fuse_type == 1 ) {
    my_fuse = MY_ELTWISE_FUSE_BIAS;
  } else if ( fuse_type == 2 ) {
    my_fuse = MY_ELTWISE_FUSE_RELU;
  } else if ( fuse_type == 4 ) {
    my_fuse = MY_ELTWISE_FUSE_BIAS_RELU;
  } else {
    /* cannot happen */
  }

  /* allocating handles */
  my_fc_fwd = (my_fc_fwd_config*) malloc( num_layers*sizeof(my_fc_fwd_config) );
  my_fc_bwd = (my_fc_bwd_config*) malloc( num_layers*sizeof(my_fc_bwd_config) );
  my_opt = (my_opt_config*) malloc( num_layers*sizeof(my_opt_config) );

  /* setting up handles + scratch; block sizes fall back to the full dim
     when it is not evenly divisible */
  for ( i = 0; i < num_layers; ++i ) {
    my_fc_fwd[i] = setup_my_fc_fwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB,
                                   (C[i ] % bc == 0) ? bc : C[i ],
                                   (C[i+1] % bk == 0) ? bk : C[i+1],
                                   nThreads, my_fuse);
    my_fc_bwd[i] = setup_my_fc_bwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB,
                                   (C[i ] % bc == 0) ? bc : C[i ],
                                   (C[i+1] % bk == 0) ? bk : C[i+1],
                                   nThreads, my_fuse);
    my_opt[i] = setup_my_opt( C[i], C[i+1],
                              (C[i ] % bc == 0) ? bc : C[i ],
                              (C[i+1] % bk == 0) ? bk : C[i+1],
                              nThreads, lr );
    /* let's allocate and bind scratch: keep one buffer sized for the largest need */
    if ( my_fc_fwd[i].scratch_size > 0 || my_fc_bwd[i].scratch_size > 0 || my_opt[i].scratch_size > 0 ) {
      size_t alloc_size = LIBXSMM_MAX( LIBXSMM_MAX( my_fc_fwd[i].scratch_size, my_fc_bwd[i].scratch_size), my_opt[i].scratch_size );
      if ( alloc_size > scratch_size ) {
        if ( scratch != NULL ) libxsmm_free( scratch );
        scratch_size = alloc_size;
        scratch = libxsmm_aligned_scratch( scratch_size, 2097152 );
        my_init_buf( (float*)(scratch), (scratch_size)/4, 0, 0 );
      }
    }
  }

  /* softmax+loss is treated as N+1 layer */
  my_smax_fwd = setup_my_smax_fwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB,
                                   (C[num_layers+1] % bk == 0) ? bk : C[num_layers+1],
                                   nThreads );
  my_smax_bwd = setup_my_smax_bwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB,
                                   (C[num_layers+1] % bk == 0) ? bk : C[num_layers+1],
                                   nThreads, loss_weight );
  if ( my_smax_fwd.scratch_size > 0 || my_smax_bwd.scratch_size > 0 ) {
    size_t alloc_size = LIBXSMM_MAX( my_smax_fwd.scratch_size, my_smax_bwd.scratch_size );
    if ( alloc_size > scratch_size ) {
      if ( scratch != NULL ) libxsmm_free( scratch );
      scratch_size = alloc_size;
      scratch = libxsmm_aligned_scratch( scratch_size, 2097152 );
      my_init_buf( (float*)(scratch), (scratch_size)/4, 0, 0 );
    }
  }

  my_numa_thr_cfg *numa_thr_cfg;
  /* Define numa configuration: #numa nodes, #threads on each node */
  setup_my_numa(&numa_thr_cfg, num_layers, nThreads);

  if ( type == 'F') {
    printf("##########################################\n");
    printf("# Performance - FWD (custom-Storage) #\n");
    printf("##########################################\n");
    setup_my_numa_fwd(&numa_thr_cfg, num_layers, my_fc_fwd);
    allocate_numa_buffers_fwd(&numa_thr_cfg, num_layers, my_fc_fwd);
    l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i,j)
#endif
    {
#if defined(_OPENMP)
      const int tid = omp_get_thread_num();
#else
      const int tid = 0;
#endif
      const int numa_node = numa_node_of_cpu(tid);
      /* stage weights into node-local buffers before timing the iterations */
      for ( i = 0; i < num_layers; ++i) {
        copy_to_numa_buffers_fwd(&numa_thr_cfg[numa_node], my_fc_fwd[i], fil_libxsmm[i], numa_node, i, tid, 0);
      }
      for (j = 0; j < iters; ++j) {
        for ( i = 0; i < num_layers; ++i) {
          my_fc_fwd_exec( my_fc_fwd[i], act_libxsmm[i], act_libxsmm[i+1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch, &numa_thr_cfg[numa_node], i);
        }
#ifdef USE_SOFTMAX
        my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss, 0, tid, scratch );
#endif
      }
    }
    l_end = libxsmm_timer_tick();
    l_total = libxsmm_timer_duration(l_start, l_end);
    gflop = 0.0;
    /* 2*M*C*K flops per GEMM layer */
    for ( i = 0; i < num_layers; ++i) {
      gflop += (2.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
    }
    printf("GFLOP = %.5g\n", gflop/(double)iters);
    printf("fp time = %.5g\n", ((double)(l_total/iters)));
    printf("GFLOPS = %.5g\n", gflop/l_total);
    printf("PERFDUMP,FP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
    for ( i = 0; i < num_layers; ++i ) {
      printf("%i,", C[i] );
    }
    printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);
    /* Print some norms on last act for fwd and weights of first layer after all iterations */
    libxsmm_matdiff(&norms_fwd, LIBXSMM_DATATYPE_F32, MB*C[num_layers], 1, act_libxsmm[num_layers], act_libxsmm[num_layers], 0, 0);
    printf("L1 of act[num_layers] : %.25g\n", norms_fwd.l1_ref);
  }

  if (type == 'B') {
    printf("##########################################\n");
    printf("# NOT Supported: Performance - BWD (custom-Storage) #\n");
    printf("##########################################\n");
    exit( -1 );
#if 0
    l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i,j)
#endif
    {
#if defined(_OPENMP)
      const int tid = omp_get_thread_num();
#else
      const int tid = 0;
#endif
      for (j = 0; j < iters; ++j) {
#ifdef USE_SOFTMAX
        my_smax_bwd_exec( my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, 0, tid, scratch );
#endif
        for ( i = num_layers-1; i > 0; --i) {
          my_fc_bwd_exec( my_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i], act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], MY_PASS_BWD, 0, tid, scratch );
          my_opt_exec( my_opt[i], fil_libxsmm[i], delfil_libxsmm[i], 0, tid, scratch );
        }
        my_fc_bwd_exec( my_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0], act_libxsmm[0], delbias_libxsmm[0], relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch );
        my_opt_exec( my_opt[0], fil_libxsmm[0], delfil_libxsmm[0], 0, tid, scratch );
      }
    }
    l_end = libxsmm_timer_tick();
    l_total = libxsmm_timer_duration(l_start, l_end);
    gflop = 0.0;
    for ( i = num_layers-1; i > 0; --i) {
      gflop += (4.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
    }
    gflop += (2.0*(double)MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0);
    printf("GFLOP = %.5g\n", gflop/(double)iters);
    printf("fp time = %.5g\n", ((double)(l_total/iters)));
    printf("GFLOPS = %.5g\n", gflop/l_total);
    printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
    for ( i = 0; i < num_layers; ++i ) {
      printf("%i,", C[i] );
    }
    printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);
#endif
  }

  if (type == 'A') {
    printf("##########################################\n");
    printf("# Performance - FWD-BWD (custom-Storage) #\n");
    printf("##########################################\n");
    /* Timers: */
    fwd_time = (unsigned long long *) malloc(sizeof(unsigned long long) * nThreads);
    bwd_time = (unsigned long long *) malloc(sizeof(unsigned long long) * nThreads);
    solver_time = (unsigned long long *) malloc(sizeof(unsigned long long) * nThreads);
    /* Calculate chunks of weights used on each nume node on FWD based on FWD thread decomposition */
    setup_my_numa_fwd(&numa_thr_cfg, num_layers, my_fc_fwd);
    /* Calculate chunks of weights used on each nume node on BWD/d based on BWD/d thread decomposition */
    setup_my_numa_bwd_d(&numa_thr_cfg, num_layers, my_fc_bwd);
    /* NUMA aware allocations of buffers needed for FWD */
    allocate_numa_buffers_fwd(&numa_thr_cfg, num_layers, my_fc_fwd);
    /* NUMA aware allocations of buffers needed for BWD */
    allocate_numa_buffers_bwd_d(&numa_thr_cfg, num_layers, my_fc_bwd);
    /* Utility needed for transpoisition of weigths on BWD/d: get numa node based on current ofm */
    int **fwd_ofm_to_node = (int**)malloc(sizeof(int*) * num_layers);
    set_fwd_ofm_to_node(fwd_ofm_to_node, &numa_thr_cfg, num_layers, my_fc_fwd);
    l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i,j)
#endif
    {
#if defined(_OPENMP)
      const int tid = omp_get_thread_num();
#else
      const int tid = 0;
#endif
      fwd_time[tid] = 0;
      bwd_time[tid] = 0;
      solver_time[tid] = 0;
      const int numa_node = numa_node_of_cpu(tid);
      for ( i = 0; i < num_layers; ++i) {
        /* Copy original weights to NUMA FWD buffers. Threading decomposition is the same with FWD. */
        copy_to_numa_buffers_fwd(&numa_thr_cfg[numa_node], my_fc_fwd[i], fil_libxsmm[i], numa_node, i, tid, 0);
      }
      for (j = 0; j < iters; ++j) {
        unsigned long long fwd_time_start = libxsmm_timer_tick();
        for ( i = 0; i < num_layers; ++i) {
          /* FWD: Use weights from NUMA FWD buffers */
          my_fc_fwd_exec( my_fc_fwd[i], act_libxsmm[i], act_libxsmm[i+1], bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch, &numa_thr_cfg[numa_node], i );
        }
        fwd_time[tid] += (libxsmm_timer_tick() - fwd_time_start);
#ifdef USE_SOFTMAX
        my_smax_fwd_exec( my_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, &loss, 0, tid, scratch );
        my_smax_bwd_exec( my_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], label_libxsmm, 0, tid, scratch );
#endif
        for ( i = num_layers-1; i > 0; --i) {
          unsigned long long bwd_time_start = libxsmm_timer_tick();
          /* Transpose weights from NUMA FWD buffers to NUMA BWD buffer. Threading decomposition is the same with BWD/d. */
          my_fc_bwd_d_transpose( my_fc_bwd[i], tid , &numa_thr_cfg, numa_node, i, fwd_ofm_to_node[i] );
          /* BWD/d: Use weights from NUMA BWD buffers */
          my_fc_bwd_exec( my_fc_bwd[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i], act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], MY_PASS_BWD, 0, tid, scratch, &numa_thr_cfg[numa_node], i );
          bwd_time[tid] += (libxsmm_timer_tick() - bwd_time_start);
          /* Solver: Update NUMA FWD buffers. Threading decomposition is the same with FWD. */
          unsigned long long solver_time_start = libxsmm_timer_tick();
          my_opt_exec( my_opt[i], delfil_libxsmm[i], 0, tid, &numa_thr_cfg[numa_node], i, my_fc_fwd[i] );
          solver_time[tid] += (libxsmm_timer_tick() - solver_time_start);
        }
        /* BWD/w: todo */
        unsigned long long bwd_time_start = libxsmm_timer_tick();
        my_fc_bwd_exec( my_fc_bwd[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0], act_libxsmm[0], delbias_libxsmm[0], relumask_libxsmm[0], MY_PASS_BWD_W, 0, tid, scratch, &numa_thr_cfg[numa_node], 0 );
        bwd_time[tid] += (libxsmm_timer_tick() - bwd_time_start);
        /* Solver: Update NUMA FWD buffers. Threading decomposition is the same with FWD. */
        unsigned long long solver_time_start = libxsmm_timer_tick();
        my_opt_exec( my_opt[0], delfil_libxsmm[0], 0, tid, &numa_thr_cfg[numa_node], 0, my_fc_fwd[0] );
        solver_time[tid] += (libxsmm_timer_tick() - solver_time_start);
      }
      /* Copy result from NUMA FWD Buffers to original weights. Threading decomposition is the same with FWD. */
      for ( i = 0; i < num_layers; ++i) {
        copy_to_numa_buffers_fwd(&numa_thr_cfg[numa_node], my_fc_fwd[i], fil_libxsmm[i], numa_node, i, tid, 1);
      }
    }
    l_end = libxsmm_timer_tick();
    l_total = libxsmm_timer_duration(l_start, l_end);

    free_fwd_ofm_to_node(fwd_ofm_to_node, num_layers);
    free(fwd_ofm_to_node);

#ifdef CHECK_L1
#if 1
    /* Print some norms on last act for fwd and weights of first layer after all iterations */
    libxsmm_matdiff(&norms_fwd, LIBXSMM_DATATYPE_F32, MB*C[num_layers], 1, act_libxsmm[num_layers], act_libxsmm[num_layers], 0, 0);
    printf("L1 of act[num_layers] : %.25g\n", norms_fwd.l1_ref);
    libxsmm_matdiff_reduce(&diff, &norms_fwd);
    libxsmm_matdiff(&norms_bwd, LIBXSMM_DATATYPE_F32, C[0]*C[1], 1, fil_libxsmm[0], fil_libxsmm[0], 0, 0);
    printf("L1 of wt[0] : %.25g\n", norms_bwd.l1_ref);
    libxsmm_matdiff_reduce(&diff, &norms_bwd);
#else
    {
      int e = 0;
      FILE *fileAct, *fileWt;
      fileAct = fopen("acts.txt","w+");
      if (fileAct != NULL) {
        for (e = 0; e < MB*C[num_layers]; e++) {
          fprintf(fileAct, "%.10g\n", *((float*)act_libxsmm[num_layers] + e));
        }
        fclose(fileAct);
      }
      fileWt = fopen("weights.txt","w+");
      if (fileWt != NULL) {
        for (e = 0; e < C[0]*C[1]; e++) {
          fprintf(fileWt, "%.10g\n", *((float*)fil_libxsmm[0] + e));
        }
        fclose(fileWt);
      }
    }
#endif
#endif

    gflop = 0.0;
    /* 6*M*C*K per hidden layer (FWD + BWD/d + BWD/w), first layer has no BWD/d */
    for ( i = num_layers-1; i > 0; --i) {
      gflop += (6.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)iters) / (1000.0*1000.0*1000.0);
    }
    gflop += (4.0*(double)MB*(double)C[0]*(double)C[1]*(double)iters) / (1000.0*1000.0*1000.0);
    printf("GFLOP = %.5g\n", gflop/(double)iters);
    printf("fp time = %.5g\n", ((double)(l_total/iters)));
    printf("GFLOPS = %.5g\n", gflop/l_total);
    printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
    for ( i = 0; i < num_layers; ++i ) {
      printf("%i,", C[i] );
    }
    printf("%f,%f\n", ((double)(l_total/iters)), gflop/l_total);

    unsigned long long max_fwd_time = 0, max_bwd_time = 0, max_solver_time = 0;
    for (i = 0; i < nThreads; i++) {
      if (max_fwd_time < fwd_time[i]) max_fwd_time = fwd_time[i];
      if (max_bwd_time < bwd_time[i]) max_bwd_time = bwd_time[i];
      if (max_solver_time < solver_time[i]) max_solver_time = solver_time[i];
    }
    /* NOTE(review): %zd is the size_t conversion; these are unsigned long long,
       so %llu would be the matching specifier. fwd_time/bwd_time/solver_time
       are also never freed (process exits immediately after). */
    printf("Profiling: fwd_time = %zd, bwd_time = %zd, solver_time = %zd\n", max_fwd_time, max_bwd_time, max_solver_time);
  }

  /* deallocate data */
  if ( scratch != NULL ) {
    libxsmm_free(scratch);
  }
  for ( i = 0; i < num_layers; ++i ) {
    if ( i == 0 ) {
#ifdef ACT_NUMA_INTERLEAVED
      numa_free(act_libxsmm[i], MB*C[i]*sizeof(float));
#else
      libxsmm_free(act_libxsmm[i]);
#endif
      libxsmm_free(delact_libxsmm[i]);
    }
#ifdef ACT_NUMA_INTERLEAVED
    numa_free(act_libxsmm[i+1], MB*C[i+1]*sizeof(float));
#else
    libxsmm_free(act_libxsmm[i+1]);
#endif
    libxsmm_free(delact_libxsmm[i+1]);
    libxsmm_free(fil_libxsmm[i]);
    libxsmm_free(delfil_libxsmm[i]);
    libxsmm_free(bias_libxsmm[i]);
    libxsmm_free(delbias_libxsmm[i]);
    libxsmm_free(relumask_libxsmm[i]);
  }
#ifdef ACT_NUMA_INTERLEAVED
  numa_free(act_libxsmm[num_layers+1], MB*C[num_layers+1]*sizeof(float));
#else
  libxsmm_free(act_libxsmm[num_layers+1]);
#endif
  libxsmm_free(label_libxsmm);

  for (i = 0; i < numa_num_configured_nodes(); i++) {
    free(numa_thr_cfg[i].blocksOFm_s);
    free(numa_thr_cfg[i].blocksOFm_e);
    free(numa_thr_cfg[i].blocksIFm_tr_s);
    free(numa_thr_cfg[i].blocksIFm_tr_e);
    for (j = 0; j < num_layers; j++) {
      numa_free_aligned(numa_thr_cfg[i].scratch[j], numa_thr_cfg[i].layer_size[j]);
    }
    free(numa_thr_cfg[i].scratch);
    free(numa_thr_cfg[i].layer_size);
    numa_free_aligned(numa_thr_cfg[i].bwd_d_scratch, numa_thr_cfg[i].bwd_d_scratch_size);
  }
  free(numa_thr_cfg);

  free( my_opt );
  free( my_fc_fwd );
  free( my_fc_bwd );

  free( act_libxsmm );
  free( delact_libxsmm );
  free( fil_libxsmm );
  free( delfil_libxsmm );
  free( bias_libxsmm );
  free( delbias_libxsmm );
  free( relumask_libxsmm );

  free( C );

  /* some empty lines at the end */
  printf("\n\n\n");

  return 0;
}
distort.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT % % D D I SS T O O R R T % % D D I SSS T O O RRRR T % % D D I SS T O O R R T % % DDDD IIIII SSSSS T OOO R R T % % % % % % MagickCore Image Distortion Methods % % % % Software Design % % Cristy % % Anthony Thyssen % % June 2007 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distort.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/linked-list.h" #include "MagickCore/list.h" #include "MagickCore/matrix.h" #include "MagickCore/matrix-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/shear.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" /* Numerous internal routines for image distortions. 
static inline void AffineArgsToCoefficients(double *affine)
{
  /*
    Reorder the six user-supplied affine arguments (sx,ry,rx,sy,tx,ty)
    into the internal coefficient layout (c0,c2,c4,c1,c3,c5).
    Slots 0 and 5 keep their positions; only slots 1..4 are permuted.
  */
  const double ry=affine[1];
  const double rx=affine[2];
  const double sy=affine[3];
  const double tx=affine[4];

  affine[1]=rx;  /* previously slot 2 */
  affine[2]=tx;  /* previously slot 4 */
  affine[3]=ry;  /* previously slot 1 */
  affine[4]=sy;  /* previously slot 3 */
}

static inline void CoefficientsToAffineArgs(double *coeff)
{
  /*
    Inverse of AffineArgsToCoefficients: map the internal coefficient order
    c0,c1,c2,c3,c4,c5 back to the external sx,ry,rx,sy,tx,ty ordering.
    Slots 0 and 5 are untouched.
  */
  const double c1=coeff[1];
  const double c2=coeff[2];
  const double c3=coeff[3];
  const double c4=coeff[4];

  coeff[1]=c3;  /* previously slot 3 */
  coeff[2]=c1;  /* previously slot 1 */
  coeff[3]=c4;  /* previously slot 4 */
  coeff[4]=c2;  /* previously slot 2 */
}

static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
  /*
    Invert a forward affine mapping (2x2 linear part plus translation).
    From "Digital Image Warping" by George Wolberg, page 50.
    PerceptibleReciprocal() guards against a (near-)singular determinant.
  */
  double
    det;

  det=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
  inverse[0]=det*coeff[4];
  inverse[3]=det*(-coeff[3]);
  inverse[1]=det*(-coeff[1]);
  inverse[4]=det*coeff[0];
  inverse[2]=det*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
  inverse[5]=det*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}

static void InvertPerspectiveCoefficients(const double *coeff,
  double *inverse)
{
  /*
    Invert an eight-coefficient perspective (projective) mapping.
    From "Digital Image Warping" by George Wolberg, page 53.
    PerceptibleReciprocal() guards against a (near-)singular determinant.
  */
  double
    det;

  det=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
  inverse[0]=det*(coeff[4]-coeff[7]*coeff[5]);
  inverse[1]=det*(coeff[7]*coeff[2]-coeff[1]);
  inverse[2]=det*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
  inverse[3]=det*(coeff[6]*coeff[5]-coeff[3]);
  inverse[4]=det*(coeff[0]-coeff[6]*coeff[2]);
  inverse[5]=det*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
  inverse[6]=det*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
  inverse[7]=det*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}

/*
 * Polynomial Term Defining Functions
 *
 * Order must either be an integer, or 1.5 to produce
 * the 2 number_valuesal polynomial function...
* affine     1   (3)  u = c0 + c1*x + c2*y
 * bilinear   1.5 (4)  u = '' + c3*x*y
 * quadratic  2   (6)  u = '' + c4*x*x + c5*y*y
 * cubic      3   (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
 * quartic    4   (15) u = '' + c10*x^4 + ... + c14*y^4
 * quintic    5   (21) u = '' + c15*x^5 + ... + c20*y^5
 * number in parenthesis minimum number of points needed.
 * Anything beyond quintic, has not been implemented until
 * a more automated way of determining terms is found.

 * Note the slight re-ordering of the terms for a quadratic polynomial
 * which is to allow the use of a bi-linear (order=1.5) polynomial.
 * All the later polynomials are ordered simply from x^N to y^N
 */
static size_t poly_number_terms(double order)
{
 /* Return the number of terms for a 2d polynomial of the given order
    (1, 1.5, 2, 3, 4, or 5); 0 signals an invalid/unsupported order. */
  if ( order < 1 || order > 5 ||
       ( order != floor(order) && (order-1.5) > MagickEpsilon) )
    return 0; /* invalid polynomial order */
  return((size_t) floor((order+1)*(order+2)/2));
}

static double poly_basis_fn(ssize_t n, double x, double y)
{
  /* Return the result for this polynomial term: evaluate the n-th 2d
     polynomial basis monomial at (x,y), ordered as documented above. */
  switch(n) {
    case  0:  return( 1.0 ); /* constant */
    case  1:  return( x );
    case  2:  return( y ); /* affine          order = 1   terms = 3 */
    case  3:  return( x*y ); /* bilinear      order = 1.5 terms = 4 */
    case  4:  return( x*x );
    case  5:  return( y*y ); /* quadratic     order = 2   terms = 6 */
    case  6:  return( x*x*x );
    case  7:  return( x*x*y );
    case  8:  return( x*y*y );
    case  9:  return( y*y*y ); /* cubic       order = 3   terms = 10 */
    case 10:  return( x*x*x*x );
    case 11:  return( x*x*x*y );
    case 12:  return( x*x*y*y );
    case 13:  return( x*y*y*y );
    case 14:  return( y*y*y*y ); /* quartic   order = 4   terms = 15 */
    case 15:  return( x*x*x*x*x );
    case 16:  return( x*x*x*x*y );
    case 17:  return( x*x*x*y*y );
    case 18:  return( x*x*y*y*y );
    case 19:  return( x*y*y*y*y );
    case 20:  return( y*y*y*y*y ); /* quintic order = 5   terms = 21 */
  }
  return( 0 ); /* should never happen */
}

static const char *poly_basis_str(ssize_t n)
{
  /* Return the FX-expression string suffix ("*ii", "*jj", ...) naming the
     n-th basis monomial; mirrors poly_basis_fn with ii==x, jj==y. */
  switch(n) {
    case  0:  return(""); /* constant */
    case  1:  return("*ii");
    case  2:  return("*jj"); /* affine           order = 1   terms = 3 */
    case  3:  return("*ii*jj"); /* bilinear      order = 1.5 terms = 4 */
    case  4:  return("*ii*ii");
    case  5:  return("*jj*jj"); /* quadratic     order = 2   terms = 6 */
    case  6:  return("*ii*ii*ii");
    case  7:  return("*ii*ii*jj");
    case  8:  return("*ii*jj*jj");
    case  9:  return("*jj*jj*jj"); /* cubic      order = 3   terms = 10 */
    case 10:  return("*ii*ii*ii*ii");
    case 11:  return("*ii*ii*ii*jj");
    case 12:  return("*ii*ii*jj*jj");
    case 13:  return("*ii*jj*jj*jj");
    case 14:  return("*jj*jj*jj*jj"); /* quartic order = 4   terms = 15 */
    case 15:  return("*ii*ii*ii*ii*ii");
    case 16:  return("*ii*ii*ii*ii*jj");
    case 17:  return("*ii*ii*ii*jj*jj");
    case 18:  return("*ii*ii*jj*jj*jj");
    case 19:  return("*ii*jj*jj*jj*jj");
    case 20:  return("*jj*jj*jj*jj*jj"); /* quintic order = 5 terms = 21 */
  }
  return( "UNKNOWN" ); /* should never happen */
}

static double poly_basis_dx(ssize_t n, double x, double y)
{
  /* polynomial term for x derivative: d/dx of the n-th basis monomial */
  switch(n) {
    case  0:  return( 0.0 ); /* constant */
    case  1:  return( 1.0 );
    case  2:  return( 0.0 ); /* affine        order = 1   terms = 3 */
    case  3:  return( y ); /* bilinear        order = 1.5 terms = 4 */
    case  4:  return( x );
    case  5:  return( 0.0 ); /* quadratic     order = 2   terms = 6 */
    case  6:  return( x*x );
    case  7:  return( x*y );
    case  8:  return( y*y );
    case  9:  return( 0.0 ); /* cubic         order = 3   terms = 10 */
    case 10:  return( x*x*x );
    case 11:  return( x*x*y );
    case 12:  return( x*y*y );
    case 13:  return( y*y*y );
    case 14:  return( 0.0 ); /* quartic       order = 4   terms = 15 */
    case 15:  return( x*x*x*x );
    case 16:  return( x*x*x*y );
    case 17:  return( x*x*y*y );
    case 18:  return( x*y*y*y );
    case 19:  return( y*y*y*y );
    case 20:  return( 0.0 ); /* quintic       order = 5   terms = 21 */
  }
  /* NOTE(review): the derivative constants are folded into the fitted
     coefficients elsewhere, hence no multiplicative factors here —
     TODO confirm against the callers. */
  return( 0.0 ); /* should never happen */
}

static double poly_basis_dy(ssize_t n, double x, double y)
{
  /* polynomial term for y derivative: d/dy of the n-th basis monomial */
  switch(n) {
    case  0:  return( 0.0 ); /* constant */
    case  1:  return( 0.0 );
    case  2:  return( 1.0 ); /* affine      order = 1   terms = 3 */
    case  3:  return( x ); /* bilinear      order = 1.5 terms = 4 */
    case  4:  return( 0.0 );
    case  5:  return( y ); /* quadratic    order = 2   terms = 6 */
    default:  return( poly_basis_dx(n-1,x,y) ); /* weird but true */
  }
  /* NOTE: the only reason that last is not true for 'quadratic'
     is due to the re-arrangement of terms to allow for 'bilinear'
  */
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A f f i n e T r a n s f o r m I m a g e                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AffineTransformImage() transforms an image as dictated by the affine
%  matrix.  It allocates the memory necessary for the new Image structure and
%  returns a pointer to the new image.
%
%  The format of the AffineTransformImage method is:
%
%      Image *AffineTransformImage(const Image *image,
%        AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o affine_matrix: the affine matrix.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  double
    distort[6];

  Image
    *deskew_image;

  /*
    Affine transform image.
*/ assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(affine_matrix != (AffineMatrix *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); distort[0]=affine_matrix->sx; distort[1]=affine_matrix->rx; distort[2]=affine_matrix->ry; distort[3]=affine_matrix->sy; distort[4]=affine_matrix->tx; distort[5]=affine_matrix->ty; deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort, MagickTrue,exception); return(deskew_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e n e r a t e C o e f f i c i e n t s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GenerateCoefficients() takes user provided input arguments and generates % the coefficients, needed to apply the specific distortion for either % distorting images (generally using control points) or generating a color % gradient from sparsely separated color points. % % The format of the GenerateCoefficients() method is: % % Image *GenerateCoefficients(const Image *image,DistortMethod method, % const size_t number_arguments,const double *arguments, % size_t number_values, ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image to be distorted. % % o method: the method of image distortion/ sparse gradient % % o number_arguments: the number of arguments given. % % o arguments: the arguments for this distortion method. 
% % o number_values: the style and format of given control points, (caller type) % 0: 2 dimensional mapping of control points (Distort) % Format: u,v,x,y where u,v is the 'source' of the % the color to be plotted, for DistortImage() % N: Interpolation of control points with N values (usally r,g,b) % Format: x,y,r,g,b mapping x,y to color values r,g,b % IN future, variable number of values may be given (1 to N) % % o exception: return any errors or warnings in this structure % % Note that the returned array of double values must be freed by the % calling method using RelinquishMagickMemory(). This however may change in % the future to require a more 'method' specific method. % % Because of this this method should not be classed as stable or used % outside other MagickCore library methods. */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } static double *GenerateCoefficients(const Image *image, DistortMethod *method,const size_t number_arguments,const double *arguments, size_t number_values,ExceptionInfo *exception) { double *coeff; register size_t i; size_t number_coeff, /* number of coefficients to return (array size) */ cp_size, /* number floating point numbers per control point */ cp_x,cp_y, /* the x,y indexes for control point */ cp_values; /* index of values for this control point */ /* number_values Number of values given per control point */ if ( number_values == 0 ) { /* Image distortion using control points (or other distortion) That is generate a mapping so that x,y->u,v given u,v,x,y */ number_values = 2; /* special case: two values of u,v */ cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */ cp_x = 2; /* location of x,y in input control values */ cp_y = 3; /* NOTE: cp_values, also used for later 'reverse map distort' tests */ } else { cp_x = 0; /* location of x,y in input control values */ cp_y = 1; cp_values = 2; /* and the 
other values are after x,y */ /* Typically in this case the values are R,G,B color values */ } cp_size = number_values+2; /* each CP defintion involves this many numbers */ /* If not enough control point pairs are found for specific distortions fall back to Affine distortion (allowing 0 to 3 point pairs) */ if ( number_arguments < 4*cp_size && ( *method == BilinearForwardDistortion || *method == BilinearReverseDistortion || *method == PerspectiveDistortion ) ) *method = AffineDistortion; number_coeff=0; switch (*method) { case AffineDistortion: /* also BarycentricColorInterpolate: */ number_coeff=3*number_values; break; case PolynomialDistortion: /* number of coefficents depend on the given polynomal 'order' */ i = poly_number_terms(arguments[0]); number_coeff = 2 + i*number_values; if ( i == 0 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Polynomial", "Invalid order, should be interger 1 to 5, or 1.5"); return((double *) NULL); } if ( number_arguments < 1+i*cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", "Polynomial", (double) i); return((double *) NULL); } break; case BilinearReverseDistortion: number_coeff=4*number_values; break; /* The rest are constants as they are only used for image distorts */ case BilinearForwardDistortion: number_coeff=10; /* 2*4 coeff plus 2 constants */ cp_x = 0; /* Reverse src/dest coords for forward mapping */ cp_y = 1; cp_values = 2; break; #if 0 case QuadraterialDistortion: number_coeff=19; /* BilinearForward + BilinearReverse */ #endif break; case ShepardsDistortion: number_coeff=1; /* The power factor to use */ break; case ArcDistortion: number_coeff=5; break; case ScaleRotateTranslateDistortion: case AffineProjectionDistortion: case Plane2CylinderDistortion: case Cylinder2PlaneDistortion: number_coeff=6; break; case PolarDistortion: case DePolarDistortion: number_coeff=8; break; case 
PerspectiveDistortion: case PerspectiveProjectionDistortion: number_coeff=9; break; case BarrelDistortion: case BarrelInverseDistortion: number_coeff=10; break; default: perror("unknown method given"); /* just fail assertion */ } /* allocate the array of coefficients needed */ coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff)); if (coeff == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "GenerateCoefficients"); return((double *) NULL); } /* zero out coefficients array */ for (i=0; i < number_coeff; i++) coeff[i] = 0.0; switch (*method) { case AffineDistortion: { /* Affine Distortion v = c0*x + c1*y + c2 for each 'value' given Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", "Affine", 1.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* handle special cases of not enough arguments */ if ( number_arguments == cp_size ) { /* Only 1 CP Set Given */ if ( cp_values == 0 ) { /* image distortion - translate the image */ coeff[0] = 1.0; coeff[2] = arguments[0] - arguments[2]; coeff[4] = 1.0; coeff[5] = arguments[1] - arguments[3]; } else { /* sparse gradient - use the values directly */ for (i=0; i<number_values; i++) coeff[i*3+2] = arguments[cp_values+i]; } } else { /* 2 or more points (usally 3) given. Solve a least squares simultaneous equation for coefficients. 
*/ double **matrix, **vectors, terms[3]; MagickBooleanType status; /* create matrix, and a fake vectors matrix */ matrix = AcquireMagickMatrix(3UL,3UL); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); if (matrix == (double **) NULL || vectors == (double **) NULL) { matrix = RelinquishMagickMatrix(matrix, 3UL); vectors = (double **) RelinquishMagickMemory(vectors); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x3 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[i*3]); /* Add given control point pairs for least squares solving */ for (i=0; i < number_arguments; i+=cp_size) { terms[0] = arguments[i+cp_x]; /* x */ terms[1] = arguments[i+cp_y]; /* y */ terms[2] = 1; /* 1 */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),3UL,number_values); } if ( number_arguments == 2*cp_size ) { /* Only two pairs were given, but we need 3 to solve the affine. Fake extra coordinates by rotating p1 around p0 by 90 degrees. 
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0) */ terms[0] = arguments[cp_x] - ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */ terms[1] = arguments[cp_y] + + ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */ terms[2] = 1; /* 1 */ if ( cp_values == 0 ) { /* Image Distortion - rotate the u,v coordients too */ double uv2[2]; uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */ uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */ LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL); } else { /* Sparse Gradient - use values of p0 for linear gradient */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[cp_values]),3UL,number_values); } } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,3UL,number_values); matrix = RelinquishMagickMatrix(matrix, 3UL); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } } return(coeff); } case AffineProjectionDistortion: { /* Arguments: Affine Matrix (forward mapping) Arguments sx, rx, ry, sy, tx, ty Where u = sx*x + ry*y + tx v = rx*x + sy*y + ty Returns coefficients (in there inverse form) ordered as... sx ry tx rx sy ty AffineProjection Distortion Notes... 
+ Will only work with a 2 number_values for Image Distortion + Can not be used for generating a sparse gradient (interpolation) */ double inverse[8]; if (number_arguments != 6) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Needs 6 coeff values'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */ for(i=0; i<6UL; i++ ) inverse[i] = arguments[i]; AffineArgsToCoefficients(inverse); /* map into coefficents */ InvertAffineCoefficients(inverse, coeff); /* invert */ *method = AffineDistortion; return(coeff); } case ScaleRotateTranslateDistortion: { /* Scale, Rotate and Translate Distortion An alternative Affine Distortion Argument options, by number of arguments given: 7: x,y, sx,sy, a, nx,ny 6: x,y, s, a, nx,ny 5: x,y, sx,sy, a 4: x,y, s, a 3: x,y, a 2: s, a 1: a Where actions are (in order of application) x,y 'center' of transforms (default = image center) sx,sy scale image by this amount (default = 1) a angle of rotation (argument required) nx,ny move 'center' here (default = x,y or no movement) And convert to affine mapping coefficients ScaleRotateTranslate Distortion Notes... 
+ Does not use a set of CPs in any normal way + Will only work with a 2 number_valuesal Image Distortion + Cannot be used for generating a sparse gradient (interpolation) */ double cosine, sine, x,y,sx,sy,a,nx,ny; /* set default center, and default scale */ x = nx = (double)(image->columns)/2.0 + (double)image->page.x; y = ny = (double)(image->rows)/2.0 + (double)image->page.y; sx = sy = 1.0; switch ( number_arguments ) { case 0: coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Needs at least 1 argument'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); case 1: a = arguments[0]; break; case 2: sx = sy = arguments[0]; a = arguments[1]; break; default: x = nx = arguments[0]; y = ny = arguments[1]; switch ( number_arguments ) { case 3: a = arguments[2]; break; case 4: sx = sy = arguments[2]; a = arguments[3]; break; case 5: sx = arguments[2]; sy = arguments[3]; a = arguments[4]; break; case 6: sx = sy = arguments[2]; a = arguments[3]; nx = arguments[4]; ny = arguments[5]; break; case 7: sx = arguments[2]; sy = arguments[3]; a = arguments[4]; nx = arguments[5]; ny = arguments[6]; break; default: coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Too Many Arguments (7 or less)'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } break; } /* Trap if sx or sy == 0 -- image is scaled out of existance! 
*/ if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Zero Scale Given'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* Save the given arguments as an affine distortion */ a=DegreesToRadians(a); cosine=cos(a); sine=sin(a); *method = AffineDistortion; coeff[0]=cosine/sx; coeff[1]=sine/sx; coeff[2]=x-nx*coeff[0]-ny*coeff[1]; coeff[3]=(-sine)/sy; coeff[4]=cosine/sy; coeff[5]=y-nx*coeff[3]-ny*coeff[4]; return(coeff); } case PerspectiveDistortion: { /* Perspective Distortion (a ratio of affine distortions) p(x,y) c0*x + c1*y + c2 u = ------ = ------------------ r(x,y) c6*x + c7*y + 1 q(x,y) c3*x + c4*y + c5 v = ------ = ------------------ r(x,y) c6*x + c7*y + 1 c8 = Sign of 'r', or the denominator affine, for the actual image. This determines what part of the distorted image is 'ground' side of the horizon, the other part is 'sky' or invalid. Valid values are +1.0 or -1.0 only. Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... Perspective Distortion Notes... + Can be thought of as ratio of 3 affine transformations + Not separatable: r() or c6 and c7 are used by both equations + All 8 coefficients must be determined simultaniously + Will only work with a 2 number_valuesal Image Distortion + Can not be used for generating a sparse gradient (interpolation) + It is not linear, but is simple to generate an inverse + All lines within an image remain lines. + but distances between points may vary. 
*/ double **matrix, *vectors[1], terms[8]; size_t cp_u = cp_values, cp_v = cp_values+1; MagickBooleanType status; if ( number_arguments%cp_size != 0 || number_arguments < cp_size*4 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* fake 1x8 vectors matrix directly using the coefficients array */ vectors[0] = &(coeff[0]); /* 8x8 least-squares matrix (zeroed) */ matrix = AcquireMagickMatrix(8UL,8UL); if (matrix == (double **) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* Add control points for least squares solving */ for (i=0; i < number_arguments; i+=4) { terms[0]=arguments[i+cp_x]; /* c0*x */ terms[1]=arguments[i+cp_y]; /* c1*y */ terms[2]=1.0; /* c2*1 */ terms[3]=0.0; terms[4]=0.0; terms[5]=0.0; terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */ terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */ LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]), 8UL,1UL); terms[0]=0.0; terms[1]=0.0; terms[2]=0.0; terms[3]=arguments[i+cp_x]; /* c3*x */ terms[4]=arguments[i+cp_y]; /* c4*y */ terms[5]=1.0; /* c5*1 */ terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */ terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */ LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]), 8UL,1UL); } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,8UL,1UL); matrix = RelinquishMagickMatrix(matrix, 8UL); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) 
); return((double *) NULL); } /* Calculate 9'th coefficient! The ground-sky determination. What is sign of the 'ground' in r() denominator affine function? Just use any valid image coordinate (first control point) in destination for determination of what part of view is 'ground'. */ coeff[8] = coeff[6]*arguments[cp_x] + coeff[7]*arguments[cp_y] + 1.0; coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0; return(coeff); } case PerspectiveProjectionDistortion: { /* Arguments: Perspective Coefficents (forward mapping) */ if (number_arguments != 8) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'Needs 8 coefficient values'", CommandOptionToMnemonic(MagickDistortOptions, *method)); return((double *) NULL); } /* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */ InvertPerspectiveCoefficients(arguments, coeff); /* Calculate 9'th coefficient! The ground-sky determination. What is sign of the 'ground' in r() denominator affine function? Just use any valid image cocodinate in destination for determination. For a forward mapped perspective the images 0,0 coord will map to c2,c5 in the distorted image, so set the sign of denominator of that. */ coeff[8] = coeff[6]*arguments[2] + coeff[7]*arguments[5] + 1.0; coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0; *method = PerspectiveDistortion; return(coeff); } case BilinearForwardDistortion: case BilinearReverseDistortion: { /* Bilinear Distortion (Forward mapping) v = c0*x + c1*y + c2*x*y + c3; for each 'value' given This is actually a simple polynomial Distortion! The difference however is when we need to reverse the above equation to generate a BilinearForwardDistortion (see below). Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... 
*/ double **matrix, **vectors, terms[4]; MagickBooleanType status; /* check the number of arguments */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size*4 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* create matrix, and a fake vectors matrix */ matrix = AcquireMagickMatrix(4UL,4UL); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); if (matrix == (double **) NULL || vectors == (double **) NULL) { matrix = RelinquishMagickMatrix(matrix, 4UL); vectors = (double **) RelinquishMagickMemory(vectors); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x4 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[i*4]); /* Add given control point pairs for least squares solving */ for (i=0; i < number_arguments; i+=cp_size) { terms[0] = arguments[i+cp_x]; /* x */ terms[1] = arguments[i+cp_y]; /* y */ terms[2] = terms[0]*terms[1]; /* x*y */ terms[3] = 1; /* 1 */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),4UL,number_values); } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,4UL,number_values); matrix = RelinquishMagickMatrix(matrix, 4UL); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } if ( *method == BilinearForwardDistortion ) { /* Bilinear Forward 
Mapped Distortion The above least-squares solved for coefficents but in the forward direction, due to changes to indexing constants. i = c0*x + c1*y + c2*x*y + c3; j = c4*x + c5*y + c6*x*y + c7; where i,j are in the destination image, NOT the source. Reverse Pixel mapping however needs to use reverse of these functions. It required a full page of algbra to work out the reversed mapping formula, but resolves down to the following... c8 = c0*c5-c1*c4; c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula i = i - c3; j = j - c7; b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0 c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a) r = b*b - c9*(c+c); if ( c9 != 0 ) y = ( -b + sqrt(r) ) / c9; else y = -c/b; x = ( i - c1*y) / ( c1 - c2*y ); NB: if 'r' is negative there is no solution! NB: the sign of the sqrt() should be negative if image becomes flipped or flopped, or crosses over itself. NB: techniqually coefficient c5 is not needed, anymore, but kept for completness. See Anthony Thyssen <A.Thyssen@griffith.edu.au> or Fred Weinhaus <fmw@alink.net> for more details. */ coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4]; coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]); } return(coeff); } #if 0 case QuadrilateralDistortion: { /* Map a Quadrilateral to a unit square using BilinearReverse Then map that unit square back to the final Quadrilateral using BilinearForward. Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... */ /* UNDER CONSTRUCTION */ return(coeff); } #endif case PolynomialDistortion: { /* Polynomial Distortion First two coefficents are used to hole global polynomal information c0 = Order of the polynimial being created c1 = number_of_terms in one polynomial equation Rest of the coefficients map to the equations.... v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ... for each 'value' (number_values of them) given. 
As such total coefficients = 2 + number_terms * number_values Input Arguments are sets of control points... For Distort Images order [u,v, x,y] ... For Sparse Gradients order [x,y, r,g,b] ... Polynomial Distortion Notes... + UNDER DEVELOPMENT -- Do not expect this to remain as is. + Currently polynomial is a reversed mapped distortion. + Order 1.5 is fudged to map into a bilinear distortion. though it is not the same order as that distortion. */ double **matrix, **vectors, *terms; size_t nterms; /* number of polynomial terms per number_values */ register ssize_t j; MagickBooleanType status; /* first two coefficients hold polynomial order information */ coeff[0] = arguments[0]; coeff[1] = (double) poly_number_terms(arguments[0]); nterms = (size_t) coeff[1]; /* create matrix, a fake vectors matrix, and least sqs terms */ matrix = AcquireMagickMatrix(nterms,nterms); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms)); if (matrix == (double **) NULL || vectors == (double **) NULL || terms == (double *) NULL ) { matrix = RelinquishMagickMatrix(matrix, nterms); vectors = (double **) RelinquishMagickMemory(vectors); terms = (double *) RelinquishMagickMemory(terms); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x3 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[2+i*nterms]); /* Add given control point pairs for least squares solving */ for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */ for (j=0; j < (ssize_t) nterms; j++) terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]); LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),nterms,number_values); } terms = (double *) RelinquishMagickMemory(terms); /* 
Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,nterms,number_values); matrix = RelinquishMagickMatrix(matrix, nterms); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } return(coeff); } case ArcDistortion: { /* Arc Distortion Args: arc_width rotate top_edge_radius bottom_edge_radius All but first argument are optional arc_width The angle over which to arc the image side-to-side rotate Angle to rotate image from vertical center top_radius Set top edge of source image at this radius bottom_radius Set bootom edge to this radius (radial scaling) By default, if the radii arguments are nor provided the image radius is calculated so the horizontal center-line is fits the given arc without scaling. The output image size is ALWAYS adjusted to contain the whole image, and an offset is given to position image relative to the 0,0 point of the origin, allowing users to use relative positioning onto larger background (via -flatten). The arguments are converted to these coefficients c0: angle for center of source image c1: angle scale for mapping to source image c2: radius for top of source image c3: radius scale for mapping source image c4: centerline of arc within source image Note the coefficients use a center angle, so asymptotic join is furthest from both sides of the source image. This also means that for arc angles greater than 360 the sides of the image will be trimmed equally. Arc Distortion Notes... 
+ Does not use a set of CPs + Will only work with Image Distortion + Can not be used for generating a sparse gradient (interpolation) */ if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Arc Angle Too Small'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Outer Radius Too Small'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } coeff[0] = -MagickPI2; /* -90, place at top! */ if ( number_arguments >= 1 ) coeff[1] = DegreesToRadians(arguments[0]); else coeff[1] = MagickPI2; /* zero arguments - center is at top */ if ( number_arguments >= 2 ) coeff[0] += DegreesToRadians(arguments[1]); coeff[0] /= Magick2PI; /* normalize radians */ coeff[0] -= MagickRound(coeff[0]); coeff[0] *= Magick2PI; /* de-normalize back to radians */ coeff[3] = (double)image->rows-1; coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0; if ( number_arguments >= 3 ) { if ( number_arguments >= 4 ) coeff[3] = arguments[2] - arguments[3]; else coeff[3] *= arguments[2]/coeff[2]; coeff[2] = arguments[2]; } coeff[4] = ((double)image->columns-1.0)/2.0; return(coeff); } case PolarDistortion: case DePolarDistortion: { /* (De)Polar Distortion (same set of arguments) Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato DePolar can also have the extra arguments of Width, Height Coefficients 0 to 5 is the sanatized version first 6 input args Coefficient 6 is the angle to coord ratio and visa-versa Coefficient 7 is the radius to coord ratio and visa-versa WARNING: It is possible for Radius max<min and/or Angle from>to */ if ( number_arguments == 3 || ( number_arguments > 6 && 
*method == PolarDistortion ) || number_arguments > 8 ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument", "%s : number of arguments", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* Rmax - if 0 calculate appropriate value */ if ( number_arguments >= 1 ) coeff[0] = arguments[0]; else coeff[0] = 0.0; /* Rmin - usally 0 */ coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0; /* Center X,Y */ if ( number_arguments >= 4 ) { coeff[2] = arguments[2]; coeff[3] = arguments[3]; } else { /* center of actual image */ coeff[2] = (double)(image->columns)/2.0+image->page.x; coeff[3] = (double)(image->rows)/2.0+image->page.y; } /* Angle from,to - about polar center 0 is downward */ coeff[4] = -MagickPI; if ( number_arguments >= 5 ) coeff[4] = DegreesToRadians(arguments[4]); coeff[5] = coeff[4]; if ( number_arguments >= 6 ) coeff[5] = DegreesToRadians(arguments[5]); if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon ) coeff[5] += Magick2PI; /* same angle is a full circle */ /* if radius 0 or negative, its a special value... 
*/ if ( coeff[0] < MagickEpsilon ) { /* Use closest edge if radius == 0 */ if ( fabs(coeff[0]) < MagickEpsilon ) { coeff[0]=MagickMin(fabs(coeff[2]-image->page.x), fabs(coeff[3]-image->page.y)); coeff[0]=MagickMin(coeff[0], fabs(coeff[2]-image->page.x-image->columns)); coeff[0]=MagickMin(coeff[0], fabs(coeff[3]-image->page.y-image->rows)); } /* furthest diagonal if radius == -1 */ if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) { double rx,ry; rx = coeff[2]-image->page.x; ry = coeff[3]-image->page.y; coeff[0] = rx*rx+ry*ry; ry = coeff[3]-image->page.y-image->rows; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); rx = coeff[2]-image->page.x-image->columns; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); ry = coeff[3]-image->page.y; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); coeff[0] = sqrt(coeff[0]); } } /* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */ if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon || (coeff[0]-coeff[1]) < MagickEpsilon ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : Invalid Radius", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* converstion ratios */ if ( *method == PolarDistortion ) { coeff[6]=(double) image->columns/(coeff[5]-coeff[4]); coeff[7]=(double) image->rows/(coeff[0]-coeff[1]); } else { /* *method == DePolarDistortion */ coeff[6]=(coeff[5]-coeff[4])/image->columns; coeff[7]=(coeff[0]-coeff[1])/image->rows; } return(coeff); } case Cylinder2PlaneDistortion: case Plane2CylinderDistortion: { /* 3D Cylinder to/from a Tangential Plane Projection between a clinder and flat plain from a point on the center line of the cylinder. The two surfaces coincide in 3D space at the given centers of distortion (perpendicular to projection point) on both images. 
Args: FOV_arc_width Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y FOV (Field Of View) the angular field of view of the distortion, across the width of the image, in degrees. The centers are the points of least distortion in the input and resulting images. These centers are however determined later. Coeff 0 is the FOV angle of view of image width in radians Coeff 1 is calculated radius of cylinder. Coeff 2,3 center of distortion of input image Coefficents 4,5 Center of Distortion of dest (determined later) */ if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : Invalid FOV Angle", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } coeff[0] = DegreesToRadians(arguments[0]); if ( *method == Cylinder2PlaneDistortion ) /* image is curved around cylinder, so FOV angle (in radians) * scales directly to image X coordinate, according to its radius. */ coeff[1] = (double) image->columns/coeff[0]; else /* radius is distance away from an image with this angular FOV */ coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) ); coeff[2] = (double)(image->columns)/2.0+image->page.x; coeff[3] = (double)(image->rows)/2.0+image->page.y; coeff[4] = coeff[2]; coeff[5] = coeff[3]; /* assuming image size is the same */ return(coeff); } case BarrelDistortion: case BarrelInverseDistortion: { /* Barrel Distortion Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd BarrelInv Distortion Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D) Where Rd is the normalized radius from corner to middle of image Input Arguments are one of the following forms (number of arguments)... 
3: A,B,C 4: A,B,C,D 5: A,B,C X,Y 6: A,B,C,D X,Y 8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy 10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y Returns 10 coefficent values, which are de-normalized (pixel scale) Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc */ /* Radius de-normalization scaling factor */ double rscale = 2.0/MagickMin((double) image->columns,(double) image->rows); /* sanity check number of args must = 3,4,5,6,8,10 or error */ if ( (number_arguments < 3) || (number_arguments == 7) || (number_arguments == 9) || (number_arguments > 10) ) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument", "%s : number of arguments", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* A,B,C,D coefficients */ coeff[0] = arguments[0]; coeff[1] = arguments[1]; coeff[2] = arguments[2]; if ((number_arguments == 3) || (number_arguments == 5) ) coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2]; else coeff[3] = arguments[3]; /* de-normalize the coefficients */ coeff[0] *= pow(rscale,3.0); coeff[1] *= rscale*rscale; coeff[2] *= rscale; /* Y coefficients: as given OR same as X coefficients */ if ( number_arguments >= 8 ) { coeff[4] = arguments[4] * pow(rscale,3.0); coeff[5] = arguments[5] * rscale*rscale; coeff[6] = arguments[6] * rscale; coeff[7] = arguments[7]; } else { coeff[4] = coeff[0]; coeff[5] = coeff[1]; coeff[6] = coeff[2]; coeff[7] = coeff[3]; } /* X,Y Center of Distortion (image coodinates) */ if ( number_arguments == 5 ) { coeff[8] = arguments[3]; coeff[9] = arguments[4]; } else if ( number_arguments == 6 ) { coeff[8] = arguments[4]; coeff[9] = arguments[5]; } else if ( number_arguments == 10 ) { coeff[8] = arguments[8]; coeff[9] = arguments[9]; } else { /* center of the image provided (image coodinates) */ coeff[8] = (double)image->columns/2.0 + image->page.x; coeff[9] = (double)image->rows/2.0 + image->page.y; } return(coeff); } case ShepardsDistortion: { /* Shepards Distortion input 
arguments are the coefficents! Just check the number of arguments is valid! Args: u1,v1, x1,y1, ... OR : u1,v1, r1,g1,c1, ... */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'requires CP's (4 numbers each)'", CommandOptionToMnemonic(MagickDistortOptions, *method)); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* User defined weighting power for Shepard's Method */ { const char *artifact=GetImageArtifact(image,"shepards:power"); if ( artifact != (const char *) NULL ) { coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0; if ( coeff[0] < MagickEpsilon ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument","%s", "-define shepards:power" ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } } else coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */ } return(coeff); } default: break; } /* you should never reach this point */ perror("no method handler"); /* just fail assertion */ return((double *) NULL); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i s t o r t R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DistortResizeImage() resize image using the equivalent but slower image % distortion operator. The filter is applied using a EWA cylindrical % resampling. But like resize the final image size is limited to whole pixels % with no effects by virtual-pixels on the result. % % Note that images containing a transparency channel will be twice as slow to % resize as images one without transparency. % % The format of the DistortResizeImage method is: % % Image *DistortResizeImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag  "Distort/Image"

  Image
    *resize_image,
    *tmp_image;

  RectangleInfo
    crop_area;

  double
    distort_args[12];

  VirtualPixelMethod
    vp_save;

  /*
    Distort resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /*
    Do not short-circuit this resize if final image size is unchanged.

    The affine distortion below is driven by two scale pairs:
    args[4] -> args[6] maps the source width to the new width, and
    args[9] -> args[11] maps the source height to the new height
    (all other arguments remain zero).
  */
  (void) memset(distort_args,0,sizeof(distort_args));
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  /* remember the caller's virtual pixel method so it can be restored below */
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tmp_image == (Image *) NULL)
    return((Image *) NULL);
  /* pixels sampled outside the source must not contribute opaque color */
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod,
    exception);
  if (image->alpha_trait == UndefinedPixelTrait)
    {
      /*
        Image has no transparency channel, so we are free to use it.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception),
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel,
        exception);
    }
  else
    {
      /*
        Image has transparency so handle colors and alpha separately.
        Basically we need to separate the Virtual-Pixel alpha in the
        resized image, so that only the actual original image's alpha
        channel is used.

        Distort the alpha channel separately.
      */
      Image
        *resize_alpha;

      (void) SetImageAlphaChannel(tmp_image,ExtractAlphaChannel,exception);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel,exception);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception),
      tmp_image=DestroyImage(tmp_image);
      if (resize_alpha == (Image *) NULL)
        return((Image *) NULL);
      /*
        Distort the actual image containing alpha + VP alpha.
      */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if (tmp_image == (Image *) NULL)
        return((Image *) NULL);
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception),
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /*
        Replace the resized image's alpha with the separately distorted
        alpha.
      */
      (void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception);
      (void) SetImageAlphaChannel(resize_alpha,OffAlphaChannel,exception);
      (void) CompositeImage(resize_image,resize_alpha,CopyAlphaCompositeOp,
        MagickTrue,0,0,exception);
      resize_alpha=DestroyImage(resize_alpha);
    }
  (void) SetImageVirtualPixelMethod(resize_image,vp_save,exception);
  /*
    Clean up the results of the distortion: crop to the exact requested
    size and reset the virtual canvas (page) geometry.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;
  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if (resize_image != (Image *) NULL)
    {
      resize_image->alpha_trait=image->alpha_trait;
      resize_image->compose=image->compose;
      resize_image->page.width=0;
      resize_image->page.height=0;
    }
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D i s t o r t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DistortImage() distorts an image using various distortion methods, by
%  mapping color lookups of the source image to a new destination image
%  usually of the same size as the source image, unless 'bestfit' is set to
%  true.
%
%  If 'bestfit' is enabled, and distortion allows it, the destination image is
%  adjusted to ensure the whole source 'image' will just fit within the final
%  destination image, which will be sized and offset accordingly.  Also in
%  many cases the virtual offset of the source image will be taken into
%  account in the mapping.
%
%  If the '-verbose' control option has been set print to standard error the
%  equivalent '-fx' formula with coefficients for the function, if practical.
%
%  The format of the DistortImage() method is:
%
%      Image *DistortImage(const Image *image,const DistortMethod method,
%        const size_t number_arguments,const double *arguments,
%        MagickBooleanType bestfit, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be distorted.
%
%    o method: the method of image distortion.
%
%        ArcDistortion always ignores the source image offset, and always
%        'bestfit' the destination image with the top left corner offset
%        relative to the polar mapping center.
%
%        Affine, Perspective, and Bilinear, do least squares fitting of the
%        distortion when more than the minimum number of control point pairs
%        are provided.
%
%        Perspective, and Bilinear, fall back to a Affine distortion when less
%        than 4 control point pairs are provided. While Affine distortions
%        let you use any number of control point pairs, that is Zero pairs is
%        a No-Op (viewport only) distortion, one pair is a translation and
%        two pairs of control points do a scale-rotate-translate, without any
%        shearing.
%
%    o number_arguments: the number of arguments given.
%
%    o arguments: an array of floating point arguments for this method.
%
%    o bestfit: Attempt to 'bestfit' the size of the resulting image.
%        This also forces the resulting image to be a 'layered' virtual
%        canvas image.  Can be overridden using the 'distort:viewport'
%        setting.
%
%    o exception: return any errors or warnings in this structure
%
%  Extra Controls from Image meta-data (artifacts)...
%
%    o "verbose"
%        Output to stderr alternatives, internal coefficients, and FX
%        equivalents for the distortion operation (if feasible).
%        This forms an extra check of the distortion method, and allows users
%        access to the internal constants IM calculates for the distortion.
%
%    o "distort:viewport"
%        Directly set the output image canvas area and offset to use for the
%        resulting image, rather than use the original image's canvas, or a
%        calculated 'bestfit' canvas.
%
%    o "distort:scale"
%        Scale the size of the output canvas by this amount to provide a
%        method of Zooming, and for super-sampling the results.
%
%  Other settings that can affect results include
%
%    o 'interpolate' For source image lookups (scale enlargements)
%
%    o 'filter' Set filter to use for area-resampling (scale shrinking).
% Set to 'point' to turn off and use 'interpolate' lookup % instead % */ MagickExport Image *DistortImage(const Image *image, DistortMethod method, const size_t number_arguments,const double *arguments, MagickBooleanType bestfit,ExceptionInfo *exception) { #define DistortImageTag "Distort/Image" double *coeff, output_scaling; Image *distort_image; RectangleInfo geometry; /* geometry of the distorted space viewport */ MagickBooleanType viewport_given; PixelInfo invalid; /* the color to assign when distort result is invalid */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Handle Special Compound Distortions */ if ( method == ResizeDistortion ) { if ( number_arguments != 2 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Resize", "Invalid number of args: 2 only"); return((Image *) NULL); } distort_image=DistortResizeImage(image,(size_t)arguments[0], (size_t)arguments[1], exception); return(distort_image); } /* Convert input arguments (usually as control points for reverse mapping) into mapping coefficients to apply the distortion. Note that some distortions are mapped to other distortions, and as such do not require specific code after this point. */ coeff = GenerateCoefficients(image, &method, number_arguments, arguments, 0, exception); if ( coeff == (double *) NULL ) return((Image *) NULL); /* Determine the size and offset for a 'bestfit' destination. Usally the four corners of the source image is enough. 
*/ /* default output image bounds, when no 'bestfit' is requested */ geometry.width=image->columns; geometry.height=image->rows; geometry.x=0; geometry.y=0; if ( method == ArcDistortion ) { bestfit = MagickTrue; /* always calculate a 'best fit' viewport */ } /* Work out the 'best fit', (required for ArcDistortion) */ if ( bestfit ) { PointInfo s,d,min,max; /* source, dest coords --mapping--> min, max coords */ MagickBooleanType fix_bounds = MagickTrue; /* enlarge bounds for VP handling */ s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */ /* defines to figure out the bounds of the distorted image */ #define InitalBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = max.x = p.x; \ min.y = max.y = p.y; \ } #define ExpandBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = MagickMin(min.x,p.x); \ max.x = MagickMax(max.x,p.x); \ min.y = MagickMin(min.y,p.y); \ max.y = MagickMax(max.y,p.y); \ } switch (method) { case AffineDistortion: { double inverse[6]; InvertAffineCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; InitalBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); break; } case PerspectiveDistortion: { double inverse[8], scale; InvertPerspectiveCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; 
scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); InitalBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); break; } case ArcDistortion: { double a, ca, sa; /* Forward Map Corners */ a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; InitalBounds(d); d.x = (coeff[2]-coeff[3])*ca; d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); d.x = (coeff[2]-coeff[3])*ca; d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); /* Orthogonal points along top of arc */ for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2); a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) { ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); } /* Convert the angle_to_width and radius_to_height to appropriate scaling factors, to allow faster processing in the mapping function. 
*/ coeff[1] = (double) (Magick2PI*image->columns/coeff[1]); coeff[3] = (double)image->rows/coeff[3]; break; } case PolarDistortion: { if (number_arguments < 2) coeff[2] = coeff[3] = 0.0; min.x = coeff[2]-coeff[0]; max.x = coeff[2]+coeff[0]; min.y = coeff[3]-coeff[0]; max.y = coeff[3]+coeff[0]; /* should be about 1.0 if Rmin = 0 */ coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]); break; } case DePolarDistortion: { /* direct calculation as it needs to tile correctly * for reversibility in a DePolar-Polar cycle */ fix_bounds = MagickFalse; geometry.x = geometry.y = 0; geometry.height = (size_t) ceil(coeff[0]-coeff[1]); geometry.width = (size_t) ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5); /* correct scaling factors relative to new size */ coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */ coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */ break; } case Cylinder2PlaneDistortion: { /* direct calculation so center of distortion is either a pixel * center, or pixel edge. 
This allows for reversibility of the * distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) ); geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) ); /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case Plane2CylinderDistortion: { /* direct calculation center is either pixel center, or pixel edge * so as to allow reversibility of the image distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */ geometry.height = (size_t) (2*coeff[3]); /* input image height */ /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case ShepardsDistortion: case BilinearForwardDistortion: case BilinearReverseDistortion: #if 0 case QuadrilateralDistortion: #endif case PolynomialDistortion: case BarrelDistortion: case BarrelInverseDistortion: default: /* no calculated bestfit available for these distortions */ bestfit = MagickFalse; fix_bounds = MagickFalse; break; } /* Set the output image geometry to calculated 'bestfit'. Yes this tends to 'over do' the file image size, ON PURPOSE! Do not do this for DePolar which needs to be exact for virtual tiling. */ if ( fix_bounds ) { geometry.x = (ssize_t) floor(min.x-0.5); geometry.y = (ssize_t) floor(min.y-0.5); geometry.width=(size_t) ceil(max.x-geometry.x+0.5); geometry.height=(size_t) ceil(max.y-geometry.y+0.5); } } /* end bestfit destination image calculations */ /* The user provided a 'viewport' expert option which may overrides some parts of the current output image geometry. This also overrides its default 'bestfit' setting. 
*/ { const char *artifact=GetImageArtifact(image,"distort:viewport"); viewport_given = MagickFalse; if ( artifact != (const char *) NULL ) { MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry); if (flags==NoValue) (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"InvalidSetting","'%s' '%s'", "distort:viewport",artifact); else viewport_given = MagickTrue; } } /* Verbose output */ if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) { register ssize_t i; char image_gen[MagickPathExtent]; const char *lookup; /* Set destination image size and virtual offset */ if ( bestfit || viewport_given ) { (void) FormatLocaleString(image_gen, MagickPathExtent," -size %.20gx%.20g " "-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width, (double) geometry.height,(double) geometry.x,(double) geometry.y); lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }"; } else { image_gen[0] = '\0'; /* no destination to generate */ lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */ } switch (method) { case AffineDistortion: { double *inverse; inverse=(double *) AcquireQuantumMemory(6,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s","DistortImages"); return((Image *) NULL); } InvertAffineCoefficients(coeff, inverse); CoefficientsToAffineArgs(inverse); (void) FormatLocaleFile(stderr, "Affine Projection:\n"); (void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '"); for (i=0; i < 5; i++) (void) FormatLocaleFile(stderr, "%lf,", inverse[i]); (void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]); inverse=(double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) 
FormatLocaleFile(stderr," xx=%+lf*ii %+lf*jj %+lf;\n", coeff[0],coeff[1],coeff[2]); (void) FormatLocaleFile(stderr," yy=%+lf*ii %+lf*jj %+lf;\n", coeff[3],coeff[4],coeff[5]); (void) FormatLocaleFile(stderr," %s' \\\n",lookup); break; } case PerspectiveDistortion: { double *inverse; inverse=(double *) AcquireQuantumMemory(8,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s", "DistortCoefficients"); return((Image *) NULL); } InvertPerspectiveCoefficients(coeff, inverse); (void) FormatLocaleFile(stderr,"Perspective Projection:\n"); (void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '"); for (i=0; i < 4; i++) (void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(), inverse[i]); (void) FormatLocaleFile(stderr, "\n "); for ( ; i < 7; i++) (void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(), inverse[i]); (void) FormatLocaleFile(stderr, "%.*g'\n",GetMagickPrecision(), inverse[7]); inverse=(double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr,"Perspective Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%.1024s",image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr," rr=%+.*g*ii %+.*g*jj + 1;\n", GetMagickPrecision(),coeff[6],GetMagickPrecision(),coeff[7]); (void) FormatLocaleFile(stderr, " xx=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n", GetMagickPrecision(),coeff[0],GetMagickPrecision(),coeff[1], GetMagickPrecision(),coeff[2]); (void) FormatLocaleFile(stderr, " yy=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n", GetMagickPrecision(),coeff[3],GetMagickPrecision(),coeff[4], GetMagickPrecision(),coeff[5]); (void) FormatLocaleFile(stderr," rr%s0 ? %s : blue' \\\n", coeff[8] < 0.0 ? 
"<" : ">", lookup); break; } case BilinearForwardDistortion: { (void) FormatLocaleFile(stderr,"BilinearForward Mapping Equations:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr," i = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[0],coeff[1],coeff[2],coeff[3]); (void) FormatLocaleFile(stderr," j = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[4],coeff[5],coeff[6],coeff[7]); #if 0 /* for debugging */ (void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n", coeff[8], coeff[9]); #endif (void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",0.5-coeff[3],0.5- coeff[7]); (void) FormatLocaleFile(stderr," bb=%lf*ii %+lf*jj %+lf;\n", coeff[6], -coeff[2], coeff[8]); /* Handle Special degenerate (non-quadratic) or trapezoidal case */ if (coeff[9] != 0) { (void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",-2*coeff[9],coeff[4], -coeff[0]); (void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n",coeff[9]); } else (void) FormatLocaleFile(stderr," yy=(%lf*ii%+lf*jj)/bb;\n", -coeff[4],coeff[0]); (void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",-coeff[1],coeff[0], coeff[2]); if ( coeff[9] != 0 ) (void) FormatLocaleFile(stderr," (rt < 0 ) ? 
red : %s'\n", lookup); else (void) FormatLocaleFile(stderr," %s' \\\n", lookup); break; } case BilinearReverseDistortion: { #if 0 (void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n"); (void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n"); (void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n", coeff[3], coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n", coeff[7], coeff[4], coeff[5], coeff[6]); #endif (void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[0],coeff[1], coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[4],coeff[5], coeff[6], coeff[7]); (void) FormatLocaleFile(stderr," %s' \\\n", lookup); break; } case PolynomialDistortion: { size_t nterms = (size_t) coeff[1]; (void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n",coeff[0], (unsigned long) nterms); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx ="); for (i=0; i < (ssize_t) nterms; i++) { if ((i != 0) && (i%4 == 0)) (void) FormatLocaleFile(stderr, "\n "); (void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i], poly_basis_str(i)); } (void) FormatLocaleFile(stderr,";\n yy ="); for (i=0; i < (ssize_t) nterms; i++) { if ((i != 0) && (i%4 == 0)) (void) FormatLocaleFile(stderr,"\n "); (void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i+nterms], poly_basis_str(i)); } (void) FormatLocaleFile(stderr,";\n %s' \\\n", lookup); break; } case ArcDistortion: { (void) FormatLocaleFile(stderr,"Arc Distort, Internal Coefficients:\n"); for (i=0; i < 5; i++) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n",(double) i,coeff[i]); 
(void) FormatLocaleFile(stderr,"Arc Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr," -fx 'ii=i+page.x; jj=j+page.y;\n"); (void) FormatLocaleFile(stderr," xx=(atan2(jj,ii)%+lf)/(2*pi);\n", -coeff[0]); (void) FormatLocaleFile(stderr," xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr," xx=xx*%lf %+lf;\n",coeff[1], coeff[4]); (void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n",coeff[2],coeff[3]); (void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n"); break; } case PolarDistortion: { (void) FormatLocaleFile(stderr,"Polar Distort, Internal Coefficents\n"); for (i=0; i < 8; i++) (void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i, coeff[i]); (void) FormatLocaleFile(stderr,"Polar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",-coeff[2],-coeff[3]); (void) FormatLocaleFile(stderr," xx=(atan2(ii,jj)%+lf)/(2*pi);\n", -(coeff[4]+coeff[5])/2 ); (void) FormatLocaleFile(stderr," xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr," xx=xx*2*pi*%lf + v.w/2;\n", coeff[6] ); (void) FormatLocaleFile(stderr," yy=(hypot(ii,jj)%+lf)*%lf;\n", -coeff[1],coeff[7] ); (void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n"); break; } case DePolarDistortion: { (void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n"); for (i=0; i < 8; i++) (void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i, coeff[i]); (void) FormatLocaleFile(stderr,"DePolar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr," -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6],+coeff[4]); (void) FormatLocaleFile(stderr," rr=(j+.5)*%lf %+lf;\n", coeff[7],+coeff[1]); (void) FormatLocaleFile(stderr," xx=rr*sin(aa) %+lf;\n", coeff[2]); (void) FormatLocaleFile(stderr," yy=rr*cos(aa) %+lf;\n", coeff[3]); (void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n"); 
break; } case Cylinder2PlaneDistortion: { (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]); (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr," aa=atan(ii/%+lf);\n",coeff[1]); (void) FormatLocaleFile(stderr," xx=%lf*aa%+lf;\n", coeff[1],coeff[2]); (void) FormatLocaleFile(stderr," yy=jj*cos(aa)%+lf;\n",coeff[3]); (void) FormatLocaleFile(stderr," %s' \\\n", lookup); break; } case Plane2CylinderDistortion: { (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]); (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr," ii=ii/%+lf;\n",coeff[1]); (void) FormatLocaleFile(stderr," xx=%lf*tan(ii)%+lf;\n",coeff[1], coeff[2] ); (void) FormatLocaleFile(stderr," yy=jj/cos(ii)%+lf;\n",coeff[3]); (void) FormatLocaleFile(stderr," %s' \\\n", lookup); break; } case BarrelDistortion: case BarrelInverseDistortion: { double xc, yc; /* NOTE: This does the barrel roll in pixel coords not image coords The internal distortion must do it in image coordinates, so that is what the center coeff (8,9) is given in. */ xc=((double)image->columns-1.0)/2.0+image->page.x; yc=((double)image->rows-1.0)/2.0+image->page.y; (void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n", method == BarrelDistortion ? 
"" : "Inv"); (void) FormatLocaleFile(stderr, "%s", image_gen); if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 ) (void) FormatLocaleFile(stderr," -fx 'xc=(w-1)/2; yc=(h-1)/2;\n"); else (void) FormatLocaleFile(stderr," -fx 'xc=%lf; yc=%lf;\n",coeff[8]- 0.5,coeff[9]-0.5); (void) FormatLocaleFile(stderr, " ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n"); (void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/",coeff[0],coeff[1],coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/",coeff[4],coeff[5],coeff[6], coeff[7]); (void) FormatLocaleFile(stderr," v.p{fx*ii+xc,fy*jj+yc}' \\\n"); } default: break; } } /* The user provided a 'scale' expert option will scale the output image size, by the factor given allowing for super-sampling of the distorted image space. Any scaling factors must naturally be halved as a result. */ { const char *artifact; artifact=GetImageArtifact(image,"distort:scale"); output_scaling = 1.0; if (artifact != (const char *) NULL) { output_scaling = fabs(StringToDouble(artifact,(char **) NULL)); geometry.width=(size_t) (output_scaling*geometry.width+0.5); geometry.height=(size_t) (output_scaling*geometry.height+0.5); geometry.x=(ssize_t) (output_scaling*geometry.x+0.5); geometry.y=(ssize_t) (output_scaling*geometry.y+0.5); if ( output_scaling < 0.1 ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s", "-set option:distort:scale" ); return((Image *) NULL); } output_scaling = 1/output_scaling; } } #define ScaleFilter(F,A,B,C,D) \ ScaleResampleFilter( (F), \ output_scaling*(A), output_scaling*(B), \ output_scaling*(C), output_scaling*(D) ) /* Initialize the distort image attributes. 
*/ distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue, exception); if (distort_image == (Image *) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); return((Image *) NULL); } /* if image is ColorMapped - change it to DirectClass */ if (SetImageStorageClass(distort_image,DirectClass,exception) == MagickFalse) { coeff=(double *) RelinquishMagickMemory(coeff); distort_image=DestroyImage(distort_image); return((Image *) NULL); } if ((IsPixelInfoGray(&distort_image->background_color) == MagickFalse) && (IsGrayColorspace(distort_image->colorspace) != MagickFalse)) (void) SetImageColorspace(distort_image,sRGBColorspace,exception); if (distort_image->background_color.alpha_trait != UndefinedPixelTrait) distort_image->alpha_trait=BlendPixelTrait; distort_image->page.x=geometry.x; distort_image->page.y=geometry.y; ConformPixelInfo(distort_image,&distort_image->matte_color,&invalid, exception); { /* ----- MAIN CODE ----- Sample the source image to each pixel in the distort image. 
*/ CacheView *distort_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; ResampleFilter **magick_restrict resample_filter; ssize_t j; status=MagickTrue; progress=0; GetPixelInfo(distort_image,&zero); resample_filter=AcquireResampleFilterThreadSet(image, UndefinedVirtualPixelMethod,MagickFalse,exception); distort_view=AcquireAuthenticCacheView(distort_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,distort_image,distort_image->rows,1) #endif for (j=0; j < (ssize_t) distort_image->rows; j++) { const int id = GetOpenMPThreadId(); double validity; /* how mathematically valid is this the mapping */ MagickBooleanType sync; PixelInfo pixel; /* pixel color to assign to distorted image */ PointInfo d, s; /* transform destination image x,y to source image x,y */ register ssize_t i; register Quantum *magick_restrict q; q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; /* Define constant scaling vectors for Affine Distortions Other methods are either variable, or use interpolated lookup */ switch (method) { case AffineDistortion: ScaleFilter( resample_filter[id], coeff[0], coeff[1], coeff[3], coeff[4] ); break; default: break; } /* Initialize default pixel validity * negative: pixel is invalid output 'matte_color' * 0.0 to 1.0: antialiased, mix with resample output * 1.0 or greater: use resampled output. 
*/ validity = 1.0; for (i=0; i < (ssize_t) distort_image->columns; i++) { /* map pixel coordinate to distortion space coordinate */ d.x = (double) (geometry.x+i+0.5)*output_scaling; d.y = (double) (geometry.y+j+0.5)*output_scaling; s = d; /* default is a no-op mapping */ switch (method) { case AffineDistortion: { s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; /* Affine partial derivitives are constant -- set above */ break; } case PerspectiveDistortion: { double p,q,r,abs_r,abs_c6,abs_c7,scale; /* perspective is a ratio of affines */ p=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; q=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; r=coeff[6]*d.x+coeff[7]*d.y+1.0; /* Pixel Validity -- is it a 'sky' or 'ground' pixel */ validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0; /* Determine horizon anti-alias blending */ abs_r = fabs(r)*2; abs_c6 = fabs(coeff[6]); abs_c7 = fabs(coeff[7]); if ( abs_c6 > abs_c7 ) { if ( abs_r < abs_c6*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling); } else if ( abs_r < abs_c7*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling); /* Perspective Sampling Point (if valid) */ if ( validity > 0.0 ) { /* divide by r affine, for perspective scaling */ scale = 1.0/r; s.x = p*scale; s.y = q*scale; /* Perspective Partial Derivatives or Scaling Vectors */ scale *= scale; ScaleFilter( resample_filter[id], (r*coeff[0] - p*coeff[6])*scale, (r*coeff[1] - p*coeff[7])*scale, (r*coeff[3] - q*coeff[6])*scale, (r*coeff[4] - q*coeff[7])*scale ); } break; } case BilinearReverseDistortion: { /* Reversed Mapped is just a simple polynomial */ s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3]; s.y=coeff[4]*d.x+coeff[5]*d.y +coeff[6]*d.x*d.y+coeff[7]; /* Bilinear partial derivitives of scaling vectors */ ScaleFilter( resample_filter[id], coeff[0] + coeff[2]*d.y, coeff[1] + coeff[2]*d.x, coeff[4] + coeff[6]*d.y, coeff[5] + coeff[6]*d.x ); break; } case BilinearForwardDistortion: { /* Forward mapped needs reversed 
polynomial equations * which unfortunatally requires a square root! */ double b,c; d.x -= coeff[3]; d.y -= coeff[7]; b = coeff[6]*d.x - coeff[2]*d.y + coeff[8]; c = coeff[4]*d.x - coeff[0]*d.y; validity = 1.0; /* Handle Special degenerate (non-quadratic) case * Currently without horizon anti-alising */ if ( fabs(coeff[9]) < MagickEpsilon ) s.y = -c/b; else { c = b*b - 2*coeff[9]*c; if ( c < 0.0 ) validity = 0.0; else s.y = ( -b + sqrt(c) )/coeff[9]; } if ( validity > 0.0 ) s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y ); /* NOTE: the sign of the square root should be -ve for parts where the source image becomes 'flipped' or 'mirrored'. FUTURE: Horizon handling FUTURE: Scaling factors or Deritives (how?) */ break; } #if 0 case BilinearDistortion: /* Bilinear mapping of any Quadrilateral to any Quadrilateral */ /* UNDER DEVELOPMENT */ break; #endif case PolynomialDistortion: { /* multi-ordered polynomial */ register ssize_t k; ssize_t nterms=(ssize_t)coeff[1]; PointInfo du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */ s.x=s.y=du.x=du.y=dv.x=dv.y=0.0; for(k=0; k < nterms; k++) { s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k]; du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k]; du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k]; s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms]; dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms]; dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms]; } ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y ); break; } case ArcDistortion: { /* what is the angle and radius in the destination image */ s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI); s.x -= MagickRound(s.x); /* angle */ s.y = hypot(d.x,d.y); /* radius */ /* Arc Distortion Partial Scaling Vectors Are derived by mapping the perpendicular unit vectors dR and dA*R*2PI rather than trying to map dx and dy The results is a very simple orthogonal aligned ellipse. 
*/ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[3] ); /* now scale the angle and radius for source image lookup point */ s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5; s.y = (coeff[2] - s.y) * coeff[3] + image->page.y; break; } case PolarDistortion: { /* 2D Cartesain to Polar View */ d.x -= coeff[2]; d.y -= coeff[3]; s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2; s.x /= Magick2PI; s.x -= MagickRound(s.x); s.x *= Magick2PI; /* angle - relative to centerline */ s.y = hypot(d.x,d.y); /* radius */ /* Polar Scaling vectors are based on mapping dR and dA vectors This results in very simple orthogonal scaling vectors */ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[7] ); /* now finish mapping radius/angle to source x,y coords */ s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x; s.y = (s.y-coeff[1])*coeff[7] + image->page.y; break; } case DePolarDistortion: { /* @D Polar to Carteasain */ /* ignore all destination virtual offsets */ d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4]; d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1]; s.x = d.y*sin(d.x) + coeff[2]; s.y = d.y*cos(d.x) + coeff[3]; /* derivatives are usless - better to use SuperSampling */ break; } case Cylinder2PlaneDistortion: { /* 3D Cylinder to Tangential Plane */ double ax, cx; /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; d.x /= coeff[1]; /* x' = x/r */ ax=atan(d.x); /* aa = atan(x/r) = u/r */ cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */ s.x = coeff[1]*ax; /* u = r*atan(x/r) */ s.y = d.y*cx; /* v = y*cos(u/r) */ /* derivatives... 
(see personnal notes) */ ScaleFilter( resample_filter[id], 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); #if 0 if ( i == 0 && j == 0 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); fflush(stderr); } #endif /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case Plane2CylinderDistortion: { /* 3D Cylinder to Tangential Plane */ /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; /* is pixel valid - horizon of a infinite Virtual-Pixel Plane * (see Anthony Thyssen's personal note) */ validity = (double) (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5; if ( validity > 0.0 ) { double cx,tx; d.x /= coeff[1]; /* x'= x/r */ cx = 1/cos(d.x); /* cx = 1/cos(x/r) */ tx = tan(d.x); /* tx = tan(x/r) */ s.x = coeff[1]*tx; /* u = r * tan(x/r) */ s.y = d.y*cx; /* v = y / cos(x/r) */ /* derivatives... 
(see Anthony Thyssen's personal notes) */ ScaleFilter( resample_filter[id], cx*cx, 0.0, s.y*cx/coeff[1], cx ); #if 0 /*if ( i == 0 && j == 0 )*/ if ( d.x == 0.5 && d.y == 0.5 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n", coeff[1], (double)(d.x * 180.0/MagickPI), validity ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", cx*cx, 0.0, s.y*cx/coeff[1], cx); fflush(stderr); } #endif } /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case BarrelDistortion: case BarrelInverseDistortion: { /* Lens Barrel Distionion Correction */ double r,fx,fy,gx,gy; /* Radial Polynomial Distortion (de-normalized) */ d.x -= coeff[8]; d.y -= coeff[9]; r = sqrt(d.x*d.x+d.y*d.y); if ( r > MagickEpsilon ) { fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3]; fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7]; gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r; gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r; /* adjust functions and scaling for 'inverse' form */ if ( method == BarrelInverseDistortion ) { fx = 1/fx; fy = 1/fy; gx *= -fx*fx; gy *= -fy*fy; } /* Set the source pixel to lookup and EWA derivative vectors */ s.x = d.x*fx + coeff[8]; s.y = d.y*fy + coeff[9]; ScaleFilter( resample_filter[id], gx*d.x*d.x + fx, gx*d.x*d.y, gy*d.x*d.y, gy*d.y*d.y + fy ); } else { /* Special handling to avoid divide by zero when r==0 ** ** The source and destination pixels match in this case ** which was set at the top of the loop using s = d; ** otherwise... 
s.x=coeff[8]; s.y=coeff[9]; */ if ( method == BarrelDistortion ) ScaleFilter( resample_filter[id], coeff[3], 0, 0, coeff[7] ); else /* method == BarrelInverseDistortion */ /* FUTURE, trap for D==0 causing division by zero */ ScaleFilter( resample_filter[id], 1.0/coeff[3], 0, 0, 1.0/coeff[7] ); } break; } case ShepardsDistortion: { /* Shepards Method, or Inverse Weighted Distance for displacement around the destination image control points The input arguments are the coefficents to the function. This is more of a 'displacement' function rather than an absolute distortion function. Note: We can not determine derivatives using shepards method so only a point sample interpolatation can be used. */ size_t i; double denominator; denominator = s.x = s.y = 0; for(i=0; i<number_arguments; i+=4) { double weight = ((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2]) + ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]); weight = pow(weight,coeff[0]); /* shepards power factor */ weight = ( weight < 1.0 ) ? 
1.0 : 1.0/weight; s.x += (arguments[ i ]-arguments[i+2])*weight; s.y += (arguments[i+1]-arguments[i+3])*weight; denominator += weight; } s.x /= denominator; s.y /= denominator; s.x += d.x; /* make it as relative displacement */ s.y += d.y; break; } default: break; /* use the default no-op given above */ } /* map virtual canvas location back to real image coordinate */ if ( bestfit && method != ArcDistortion ) { s.x -= image->page.x; s.y -= image->page.y; } s.x -= 0.5; s.y -= 0.5; if ( validity <= 0.0 ) { /* result of distortion is an invalid pixel - don't resample */ SetPixelViaPixelInfo(distort_image,&invalid,q); } else { /* resample the source image to find its correct color */ (void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel, exception); /* if validity between 0.0 and 1.0 mix result with invalid pixel */ if ( validity < 1.0 ) { /* Do a blend of sample color and invalid pixel */ /* should this be a 'Blend', or an 'Over' compose */ CompositePixelInfoBlend(&pixel,validity,&invalid,(1.0-validity), &pixel); } SetPixelViaPixelInfo(distort_image,&pixel,q); } q+=GetPixelChannels(distort_image); } sync=SyncCacheViewAuthenticPixels(distort_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DistortImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } distort_view=DestroyCacheView(distort_view); resample_filter=DestroyResampleFilterThreadSet(resample_filter); if (status == MagickFalse) distort_image=DestroyImage(distort_image); } /* Arc does not return an offset unless 'bestfit' is in effect And the user has not provided an overriding 'viewport'. 
*/
  /* Arc distortion computes its own placement; zero the page offset unless
     'bestfit' (or an explicit viewport) asked for a positioned result. */
  if ( method == ArcDistortion && !bestfit && !viewport_given ) {
    distort_image->page.x = 0;
    distort_image->page.y = 0;
  }
  coeff=(double *) RelinquishMagickMemory(coeff);
  return(distort_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R o t a t e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotateImage() creates a new image that is a rotated copy of an existing
%  one.  Positive angles rotate counter-clockwise (right-hand rule), while
%  negative angles rotate clockwise.  Rotated images are usually larger than
%  the originals and have 'empty' triangular corners.  X axis.  Empty
%  triangles left over from shearing the image are filled with the background
%  color defined by member 'background_color' of the image.  RotateImage
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  The format of the RotateImage method is:
%
%      Image *RotateImage(const Image *image,const double degrees,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: Specifies the number of degrees to rotate the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *distort_image,
    *rotate_image;

  double
    angle;

  PointInfo
    shear;

  size_t
    rotations;

  /*
    Adjust rotation angle.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Fold the angle into the range (-45,45] while counting the 90-degree
     quarter turns removed; the quarter turns can be applied losslessly. */
  angle=fmod(degrees,360.0);
  while (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /* Shear factors corresponding to the residual (small) rotation angle. */
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  /* If the residual shear is negligible the rotation is an exact multiple
     of 90 degrees: use the lossless integral rotation instead. */
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,rotations,exception));
  /* Otherwise delegate the full-angle rotation to the SRT distortion on a
     clone whose virtual pixels resolve to the background color, so the
     empty corner triangles are filled as documented above. */
  distort_image=CloneImage(image,0,0,MagickTrue,exception);
  if (distort_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod,
    exception);
  rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
  distort_image=DestroyImage(distort_image);
  return(rotate_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S p a r s e C o l o r I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SparseColorImage(), given a set of coordinates, interpolates the colors
%  found at those coordinates, across the whole image, using various methods.
%
%  The format of the SparseColorImage() method is:
%
%      Image *SparseColorImage(const Image *image,
%        const SparseColorMethod method,const size_t number_arguments,
%        const double *arguments,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be filled in.
%
%    o method: the method to fill in the gradient between the control points.
%
%      The methods used for SparseColor() are often similar to methods
%      used for DistortImage(), and even share the same code for
%      determination of the function coefficients, though with more
%      dimensions (or resulting values).
%
%    o number_arguments: the number of arguments given.
%
%    o arguments: array of floating point arguments for this method--
%        x,y,color_values-- with color_values given as normalized values.
%
%    o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
  const SparseColorMethod method,const size_t number_arguments,
  const double *arguments,ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"

  SparseColorMethod
    sparse_method;

  double
    *coeff;

  Image
    *sparse_image;

  size_t
    number_colors;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Determine number of color values needed per control point: one per
     channel the image will actually update (black only for CMYK, alpha
     only when the image has an alpha trait). */
  number_colors=0;
  if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
    number_colors++;
  if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
      (image->colorspace == CMYKColorspace))
    number_colors++;
  if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
      (image->alpha_trait != UndefinedPixelTrait))
    number_colors++;

  /*
    Convert input arguments into mapping coefficients; in this case we are
    mapping (distorting) colors, rather than coordinates.
  */
  {
    DistortMethod
      distort_method;

    /* SparseColorMethod values mirror DistortMethod values; anything past
       the distortion sentinel is treated as Shepards for coefficient
       generation. */
    distort_method=(DistortMethod) method;
    if ( distort_method >= SentinelDistortion )
      distort_method = ShepardsDistortion; /* Pretend to be Shepards */
    coeff = GenerateCoefficients(image, &distort_method, number_arguments,
      arguments, number_colors, exception);
    if ( coeff == (double *) NULL )
      return((Image *) NULL);
    /*
      Note some Distort Methods may fall back to other simpler methods.
      Currently the only fallback of concern is Bilinear to Affine
      (Barycentric), which is also a sparse_color method.  This also ensures
      correct two and one color Barycentric handling.
    */
    sparse_method = (SparseColorMethod) distort_method;
    if ( distort_method == ShepardsDistortion )
      sparse_method = method;  /* return non-distort methods to normal */
    if ( sparse_method == InverseColorInterpolate )
      coeff[0]=0.5;            /* sqrt() the squared distance for inverse */
  }
  /* Verbose output: emit an FX-expression equivalent of the per-channel
     gradient when the "verbose" artifact is set (simple methods only). */
  if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {
    switch (sparse_method) {
      case BarycentricColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
        /* NOTE: comma operator advances x only when the channel printed */
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
            " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
            coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
            " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
            coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
            " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
            coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          (void) FormatLocaleFile(stderr,
            " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
            coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          (void) FormatLocaleFile(stderr,
            " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
            coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        break;
      }
      case BilinearColorInterpolate:
      {
        register ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
            " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
            coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
            " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
            coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
            " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
            coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          (void) FormatLocaleFile(stderr,
            " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
            coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          (void) FormatLocaleFile(stderr,
            " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
            coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        break;
      }
      default:
        /* sparse color method is too complex for FX emulation */
        break;
    }
  }

  /* Generate new image for generated interpolated gradient.
   * ASIDE: Actually we could have just replaced the colors of the original
   * image, but IM Core policy, is if storage class could change then clone
   * the image.
   */
  sparse_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sparse_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sparse_image,DirectClass,exception) == MagickFalse)
    { /* if image is ColorMapped - change it to DirectClass */
      sparse_image=DestroyImage(sparse_image);
      return((Image *) NULL);
    }
  { /* ----- MAIN CODE ----- */
    CacheView
      *sparse_view;

    MagickBooleanType
      status;

    MagickOffsetType
      progress;

    ssize_t
      j;

    status=MagickTrue;
    progress=0;
    sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
    /* Rows are independent; each thread owns a full row of the cache view. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sparse_image,sparse_image->rows,1)
#endif
    for (j=0; j < (ssize_t) sparse_image->rows; j++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel;    /* pixel to assign to distorted image */

      register ssize_t
        i;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(sparse_image,&pixel);
      /* NOTE(review): loop bound uses image->columns; the clone presumably
         has identical dimensions, so this matches sparse_image->columns. */
      for (i=0; i < (ssize_t) image->columns; i++)
      {
        GetPixelInfoPixel(image,q,&pixel);
        switch (sparse_method)
        {
          case BarycentricColorInterpolate:
          {
            /* Affine gradient: channel = a*i + b*j + c; the comma operator
               advances x past each consumed coefficient triple. */
            register ssize_t x=0;
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red     = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green   = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue    = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black   = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha   = coeff[x]*i +coeff[x+1]*j
                  +coeff[x+2], x+=3;
            break;
          }
          case BilinearColorInterpolate:
          {
            /* Bilinear gradient: channel = a*i + b*j + c*i*j + d. */
            register ssize_t x=0;
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red     = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green   = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue    = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black   = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha   = coeff[x]*i + coeff[x+1]*j +
                  coeff[x+2]*i*j + coeff[x+3], x+=4;
            break;
          }
          case InverseColorInterpolate:
          case ShepardsColorInterpolate:
          { /* Inverse (Squared) Distance weights average (IDW) */
            size_t
              k;
            double
              denominator;

            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red=0.0;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green=0.0;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue=0.0;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black=0.0;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha=0.0;
            denominator = 0.0;
            /* arguments[] is packed as x,y,<number_colors values> per
               control point. */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              register ssize_t x=(ssize_t) k+2;
              double weight =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              weight = pow(weight,coeff[0]); /* inverse of power factor */
              /* clamp so a control point landing exactly on this pixel
                 (weight -> 0) cannot divide by zero */
              weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
              if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                pixel.red     += arguments[x++]*weight;
              if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                pixel.green   += arguments[x++]*weight;
              if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                pixel.blue    += arguments[x++]*weight;
              if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                  (image->colorspace == CMYKColorspace))
                pixel.black   += arguments[x++]*weight;
              if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                  (image->alpha_trait != UndefinedPixelTrait))
                pixel.alpha   += arguments[x++]*weight;
              denominator += weight;
            }
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red/=denominator;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green/=denominator;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue/=denominator;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black/=denominator;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha/=denominator;
            break;
          }
          case ManhattanColorInterpolate:
          {
            size_t
              k;
            double
              minimum = MagickMaximumValue;

            /*
              Just use the closest control point you can find!
              (nearest neighbor under the L1 / taxicab metric)
            */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  fabs((double)i-arguments[ k ])
                + fabs((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                  pixel.red=arguments[x++];
                if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                  pixel.green=arguments[x++];
                if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                  pixel.blue=arguments[x++];
                if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  pixel.black=arguments[x++];
                if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->alpha_trait != UndefinedPixelTrait))
                  pixel.alpha=arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
          case VoronoiColorInterpolate:
          default:
          {
            size_t
              k;
            double
              minimum = MagickMaximumValue;

            /*
              Just use the closest control point you can find!
              (nearest neighbor by squared Euclidean distance)
            */
            for (k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                register ssize_t x=(ssize_t) k+2;
                if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                  pixel.red=arguments[x++];
                if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                  pixel.green=arguments[x++];
                if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                  pixel.blue=arguments[x++];
                if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  pixel.black=arguments[x++];
                if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->alpha_trait != UndefinedPixelTrait))
                  pixel.alpha=arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
        }
        /* set the color directly back into the source image; interpolated
           values are normalized, so scale by QuantumRange and clamp. */
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          pixel.red=(MagickRealType) ClampPixel(QuantumRange*pixel.red);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          pixel.green=(MagickRealType) ClampPixel(QuantumRange*pixel.green);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          pixel.blue=(MagickRealType) ClampPixel(QuantumRange*pixel.blue);
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          pixel.black=(MagickRealType) ClampPixel(QuantumRange*pixel.black);
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          pixel.alpha=(MagickRealType) ClampPixel(QuantumRange*pixel.alpha);
        SetPixelViaPixelInfo(sparse_image,&pixel,q);
        q+=GetPixelChannels(sparse_image);
      }
      sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

          proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    sparse_view=DestroyCacheView(sparse_view);
    if (status == MagickFalse)
      sparse_image=DestroyImage(sparse_image);
  }
  coeff = (double *) RelinquishMagickMemory(coeff);
  return(sparse_image);
}
attack_mp.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <math.h> #include <gsl/gsl_matrix.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include <gsl/gsl_blas.h> #include <omp.h> #include "util.h" int main(int argc, char *argv[]) { char* input_fileName1 = argv[1]; char* input_fileName2 = argv[2]; int N_doc_bg = atoi(argv[3]); int N_kw = atoi(argv[4]); int N_obs = atoi(argv[5]); int N_iter = atoi(argv[6]); char* output_fileName = argv[7]; int N_doc = 480000 / 2; int* matrix = (int*) malloc(sizeof(int) * N_obs * N_obs); int* matrix_bg = (int*) malloc(sizeof(int) * N_kw * N_kw); int* matrix_padded = (int*) malloc(sizeof(int) * N_obs * N_obs); int* true_index = (int*) malloc(sizeof(int) * N_kw); int* permutation = (int*) malloc(sizeof(int) * N_obs); gsl_matrix* matrix_obs; for (int round = 0; round < 10; round++) { char input_fileName1_extend[40]; char input_fileName2_extend[40]; sprintf(input_fileName1_extend, "%s%d", input_fileName1, round); sprintf(input_fileName2_extend, "%s%d", input_fileName2, round); // Setup struct timeval tv1,tv2; gettimeofday(&tv1, NULL); read_matrix(&true_index, &matrix_bg, 1.0*N_doc/N_doc_bg, N_kw, input_fileName2_extend); read_matrix(&true_index, &matrix, 1.0, N_obs, input_fileName1_extend); gettimeofday(&tv2, NULL); printf("Reading done: %f.\n", (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec)); fflush(stdout); // compute the maximum frequency of a keyword int freq_max = 0; for (int ii = 0; ii < N_kw; ii++) if (matrix_bg[ii*N_kw+ii] > freq_max) freq_max = matrix_bg[ii*N_kw+ii]; for (int ii = 0; ii < N_obs; ii++) if (matrix[ii*N_obs+ii] > freq_max) freq_max = matrix[ii*N_obs+ii]; printf("Max frequency: %d\n", freq_max); for (int iter = 0; iter < 10; iter++) { printf("Run %d\n", iter); matrix_obs = gsl_matrix_alloc(N_obs, N_obs); gettimeofday(&tv1, NULL); pad_matrix(&matrix_padded, &matrix, N_obs, N_doc, freq_max); gettimeofday(&tv2, NULL); printf("Padding done: 
%f.\n", (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec)); fflush(stdout); gettimeofday(&tv1, NULL); observe_matrix(matrix_obs, &matrix_padded, N_obs); gettimeofday(&tv2, NULL); printf("Observed matrix generated: %f.\n", (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec)); fflush(stdout); // Permute observed matrix randomly and attack gettimeofday(&tv1, NULL); attack(matrix_obs, &matrix_bg, &permutation, N_kw, N_obs, N_doc, freq_max, N_iter); gettimeofday(&tv2, NULL); printf("Main attack done: %d.\n", (tv2.tv_sec - tv1.tv_sec)); fflush(stdout); char output_fileName_full[40]; sprintf(output_fileName_full, "%s%d-%d", output_fileName, round, iter); print_result(output_fileName_full, &permutation, &true_index, N_obs); //sprintf(output_fileName_full, "%s%d-%d-full", output_fileName, round, iter); //print_full_result(output_fileName_full, &permutation, &true_index, N_obs); } } free(matrix); free(matrix_padded); gsl_matrix_free(matrix_obs); return(0); } double log_score(int idx1, int idx2, gsl_matrix* matrix_obs, int** matrix, int** permutation, int N_kw, int N_doc, int freq_max) { if (idx1 == idx2) return(0.0); int idx1_m = (*permutation)[idx1]; int idx2_m = (*permutation)[idx2]; int n1 = 2*freq_max - (*matrix)[idx1_m*N_kw + idx1_m]; int n2 = 2*freq_max - (*matrix)[idx2_m*N_kw + idx2_m]; /* int N_upper = 3 * sqrt((*matrix)[idx1_m*N_kw + idx2_m] * (N_doc - (*matrix)[idx1_m*N_kw + idx2_m]) / (double) N_doc); int N_lower = (*matrix)[idx1_m*N_kw + idx2_m] - N_upper; N_upper = (*matrix)[idx1_m*N_kw + idx2_m] + N_upper; int count_total = (int) gsl_matrix_get(matrix_obs, idx1, idx2); double score = 0; double prob = (*matrix)[idx1_m*N_kw + idx2_m] / (double) N_doc; for (int kk = N_lower; kk < N_upper+1; kk+=20) score += gsl_ran_binomial_pdf(kk, prob, N_doc) * gsl_ran_hypergeometric_pdf(count_total - kk, n1, 2*N_doc - n1, n2); */ double mean = 1.0 * freq_max / N_doc * (n1 + n2); double var = 1.0 * 
(*matrix)[idx1_m*N_kw + idx2_m] / N_doc * (N_doc - (*matrix)[idx1_m*N_kw + idx2_m]); var += 1.0 * n1 / N_doc * freq_max * (2 * N_doc - 2*freq_max) / N_doc; var += 1.0 * n2 / N_doc * freq_max * (2 * N_doc - 2*freq_max) / N_doc; int count_total = (int) gsl_matrix_get(matrix_obs, idx1, idx2); int N_upper = 3 * sqrt((*matrix)[idx1_m*N_kw + idx2_m] * (N_doc - (*matrix)[idx1_m*N_kw + idx2_m]) / (double) N_doc); int N_lower = (*matrix)[idx1_m*N_kw + idx2_m] - N_upper; N_upper = (*matrix)[idx1_m*N_kw + idx2_m] + N_upper; mean += (*matrix)[idx1_m*N_kw + idx2_m]; var += (*matrix)[idx1_m*N_kw + idx2_m] / N_doc * (N_doc - (*matrix)[idx1_m*N_kw + idx2_m]); double score = gsl_ran_gaussian_pdf(count_total - mean, sqrt(var)); if (score == 0) return(-500.0); return(log(score)); } void attack(gsl_matrix* matrix_obs, int** matrix, int** permutation, int N_kw, int N_obs, int N_doc, int freq_max, int N_iter) { // Initialise data structures double* score_matrix = (double*) malloc(sizeof(double) * N_obs * N_obs); double* score_row1 = (double*) malloc(sizeof(double) * N_obs); double* score_row2 = (double*) malloc(sizeof(double) * N_obs); int* permutation_tmp = (int*) malloc(sizeof(int) * N_obs); int* permutation_inv = (int*) malloc(sizeof(int) * N_kw); // Initialise permutations for (int ii = 0; ii < N_obs; ii++) (*permutation)[ii] = ii; for (int ii = 0; ii < N_obs; ii++) permutation_tmp[ii] = ii; for (int ii = 0; ii < N_kw; ii++) permutation_inv[ii] = -1; for (int ii = 0; ii < N_obs; ii++) permutation_inv[permutation_tmp[ii]] = ii; // Initialising RNG const gsl_rng_type * T; gsl_rng * r; gsl_rng_env_setup(); T = gsl_rng_default; r = gsl_rng_alloc (T); struct timeval tv1,tv2; gettimeofday(&tv1, NULL); // Compute initial score #pragma omp parallel for shared(score_matrix, matrix_obs, matrix) for (int ii = 0; ii < N_obs * N_obs; ii++) score_matrix[ii] = log_score((int) (ii / N_obs), ii % N_obs, matrix_obs, matrix, permutation, N_kw, N_doc, freq_max); gettimeofday(&tv2, NULL); 
printf("Initial score computed: %f.\n", (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec)); // Iterations of simulated annealing double temp = (double) N_kw; int N_stuck = 0; for (int iter = 0; iter < N_iter; iter++) { /* Status code */ if (iter % (N_iter / 10) == 0) { gettimeofday(&tv1, NULL); printf("Iteration: %d, %d, %d.\n", iter, N_stuck, (int) (tv1.tv_sec - tv2.tv_sec)); fflush(stdout); gettimeofday(&tv2, NULL); } // used to be 20k if (N_stuck >= 80000) iter = N_iter; /* Main code */ int idx1, idx2; permutation_generation(&idx1, &idx2, &permutation_tmp, permutation, &permutation_inv, N_kw, N_obs); int ii = 0; #pragma omp parallel for shared(score_row1) for (ii = 0; ii < N_obs; ii++) score_row1[ii] = log_score(idx1, ii, matrix_obs, matrix, &permutation_tmp, N_kw, N_doc, freq_max); if (idx2 >= 0) #pragma omp parallel for shared(score_row2) for (ii = 0; ii < N_obs; ii++) score_row2[ii] = log_score(idx2, ii, matrix_obs, matrix, &permutation_tmp, N_kw, N_doc, freq_max); double score_diff = 0; for (int ii = 0; ii < N_obs; ii++) score_diff += score_row1[ii]; for (int ii = 0; ii < N_obs; ii++) score_diff -= score_matrix[idx1*N_obs + ii]; if (idx2 >= 0) { for (int ii = 0; ii < N_obs; ii++) score_diff += score_row2[ii]; for (int ii = 0; ii < N_obs; ii++) score_diff -= score_matrix[idx2*N_obs + ii]; } // compute difference in score, with exponentiation score_diff = score_diff / temp; if (score_diff < -40) score_diff = 0; else if (score_diff > 0) score_diff = 1.01; else score_diff = exp(score_diff); if (gsl_ran_flat(r, 0, 1) < score_diff) { // Update the scores for (int ii = 0; ii < N_obs; ii++) score_matrix[idx1*N_obs + ii] = score_row1[ii]; for (int ii = 0; ii < N_obs; ii++) score_matrix[ii*N_obs + idx1] = score_row1[ii]; if (idx2 >= 0) { for (int ii = 0; ii < N_obs; ii++) score_matrix[idx2*N_obs + ii] = score_row2[ii]; for (int ii = 0; ii < N_obs; ii++) score_matrix[ii*N_obs + idx2] = score_row2[ii]; } // Update the permutation 
permutation_inv[(*permutation)[idx1]] = -1; (*permutation)[idx1] = permutation_tmp[idx1]; permutation_inv[permutation_tmp[idx1]] = idx1; if (idx2 >= 0) { (*permutation)[idx2] = permutation_tmp[idx2]; permutation_inv[permutation_tmp[idx2]] = idx2; } N_stuck = 0; } else { // Update the permutation permutation_tmp[idx1] = (*permutation)[idx1]; if (idx2 >= 0) permutation_tmp[idx2] = (*permutation)[idx2]; N_stuck += 1; } temp *= 0.995; } free(score_matrix); free(score_row1); free(score_row2); gsl_rng_free(r); } void print_result(char* output_fileName, int** permutation, int** true_index, int N_obs) { FILE* fp = fopen(output_fileName, "w"); int count = 0; for (int ii = 0; ii < N_obs; ii++) if ((*permutation)[ii] == (*true_index)[ii]) count++; fprintf(fp, "%d\n", count); fclose(fp); printf("Success: %d/%d.\n", count, N_obs); } void print_full_result(char* output_fileName, int** permutation, int** true_index, int N_obs) { FILE* fp = fopen(output_fileName, "w"); int count = 0; for (int ii = 0; ii < N_obs; ii++) if ((*permutation)[ii] == (*true_index)[ii]) count++; printf("Success: %d/%d.\n", count, N_obs); fprintf(fp, "%d\n", count); for (int ii = 0; ii < N_obs; ii++) fprintf(fp, "%d,%d\n", (*permutation)[ii], (*true_index)[ii]); fclose(fp); }
covariance.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "covariance.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i __attribute__((annotate("scalar(range(0, " PB_XSTR(N) ")) final"))); int j __attribute__((annotate("scalar(range(0, " PB_XSTR(M) ")) final"))); *float_n = 1.2; for (i = 0; i < M; i++) for (j = 0; j < N; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_covariance(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m)) { int i __attribute__((annotate("scalar(range(0, " PB_XSTR(N) ")) final"))); int j __attribute__((annotate("scalar(range(0, " PB_XSTR(M) ")) final"))); int j1 __attribute__((annotate("scalar(range(0, " PB_XSTR(M) ")) final"))); int j2 __attribute__((annotate("scalar(range(0, " PB_XSTR(M) ")) final"))); #pragma scop /* Determine mean of column vectors of input data matrix */ #pragma omp parallel { #pragma omp for private (i) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Center the column vectors. */ #pragma omp for private (j) for (i = 0; i < _PB_N; i++) for (j = 0; j < _PB_M; j++) data[i][j] -= mean[j]; /* Calculate the m * m covariance matrix. */ #pragma omp for private (j2, i) for (j1 = 0; j1 < _PB_M; j1++) for (j2 = j1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += data[i][j1] * data[i][j2]; symmat[j2][j1] = symmat[j1][j2]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE __attribute((annotate("target('float_n') scalar(range(0,8))"))) float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE __attribute((annotate("target('data') scalar(range(0,1000))"))),M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE __attribute((annotate("target('cov') scalar(range(0,1000000000))"))),M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE __attribute((annotate("target('mean') scalar(range(0,1000000))"))),M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_covariance (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean)); /* Stop and print timer. 
*/ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); return 0; }
par_csr_matop_device.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_utilities.h" #include "_hypre_parcsr_mv.h" #include "_hypre_utilities.hpp" #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) HYPRE_Int hypre_ParcsrGetExternalRowsDeviceInit( hypre_ParCSRMatrix *A, HYPRE_Int indices_len, HYPRE_Int *indices, hypre_ParCSRCommPkg *comm_pkg, HYPRE_Int want_data, void **request_ptr) { HYPRE_Int i, j; HYPRE_Int num_sends, num_rows_send, num_nnz_send, num_recvs, num_rows_recv, num_nnz_recv; HYPRE_Int *d_send_i, *send_i, *d_send_map, *d_recv_i, *recv_i; HYPRE_BigInt *d_send_j, *d_recv_j; HYPRE_Int *send_jstarts, *recv_jstarts; HYPRE_Complex *d_send_a = NULL, *d_recv_a = NULL; hypre_ParCSRCommPkg *comm_pkg_j; hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a; /* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */ /* diag part of A */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); /* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */ /* off-diag part of A */ hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /* HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A); */ /* HYPRE_Int first_row = hypre_ParCSRMatrixFirstRowIndex(A); */ HYPRE_Int first_col = hypre_ParCSRMatrixFirstColDiag(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); 
HYPRE_BigInt *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A); MPI_Comm comm = hypre_ParCSRMatrixComm(A); HYPRE_Int num_procs; HYPRE_Int my_id; void **vrequest; hypre_CSRMatrix *A_ext; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); /* number of sends (#procs) */ num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); /* number of rows to send */ num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); /* number of recvs (#procs) */ num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); /* number of rows to recv */ num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs); /* must be true if indices contains proper offd indices */ hypre_assert(indices_len == num_rows_recv); /* send_i/recv_i: * the arrays to send and recv: we first send and recv the row lengths */ d_send_i = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_DEVICE); d_send_map = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_DEVICE); send_i = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST); recv_i = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST); d_recv_i = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_DEVICE); /* fill the send array with row lengths */ hypre_TMemcpy(d_send_map, hypre_ParCSRCommPkgSendMapElmts(comm_pkg), HYPRE_Int, num_rows_send, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_Memset(d_send_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(num_rows_send, d_send_map, A_diag_i, A_offd_i, d_send_i+1); /* send array send_i out: deviceTohost first and MPI (async) * note the shift in recv_i by one */ hypre_TMemcpy(send_i, d_send_i+1, HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i+1); hypreDevice_IntegerInclusiveScan(num_rows_send + 1, d_send_i); /* total number of nnz to send */ hypre_TMemcpy(&num_nnz_send, d_send_i+num_rows_send, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); 
/* prepare data to send out. overlap with the above commmunication */ d_send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_DEVICE); if (want_data) { d_send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_DEVICE); } if (d_col_map_offd_A == NULL) { d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A; } /* job == 2, d_send_i is input that contains row ptrs (length num_rows_send) */ hypreDevice_CopyParCSRRows(num_rows_send, d_send_map, 2, num_procs > 1, first_col, d_col_map_offd_A, A_diag_i, A_diag_j, A_diag_a, A_offd_i, A_offd_j, A_offd_a, d_send_i, d_send_j, d_send_a); /* pointers to each proc in send_j */ send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST); send_jstarts[0] = 0; for (i = 1; i <= num_sends; i++) { send_jstarts[i] = send_jstarts[i-1]; for ( j = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i-1); j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); j++ ) { send_jstarts[i] += send_i[j]; } } hypre_assert(send_jstarts[num_sends] == num_nnz_send); /* finish the above communication: send_i/recv_i */ hypre_ParCSRCommHandleDestroy(comm_handle); /* adjust recv_i to ptrs */ recv_i[0] = 0; for (i = 1; i <= num_rows_recv; i++) { recv_i[i] += recv_i[i-1]; } num_nnz_recv = recv_i[num_rows_recv]; /* allocate device memory for j and a */ d_recv_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_DEVICE); if (want_data) { d_recv_a = hypre_TAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_DEVICE); } recv_jstarts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); recv_jstarts[0] = 0; for (i = 1; i <= num_recvs; i++) { j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i); recv_jstarts[i] = recv_i[j]; } /* ready to send and recv: create a communication package for data */ comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 
1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm (comm_pkg_j) = comm; hypre_ParCSRCommPkgNumSends (comm_pkg_j) = num_sends; hypre_ParCSRCommPkgSendProcs (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg); hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts; hypre_ParCSRCommPkgNumRecvs (comm_pkg_j) = num_recvs; hypre_ParCSRCommPkgRecvProcs (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg); hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts; /* init communication */ /* ja */ comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j, HYPRE_MEMORY_DEVICE, d_send_j, HYPRE_MEMORY_DEVICE, d_recv_j); if (want_data) { /* a */ comm_handle_a = hypre_ParCSRCommHandleCreate_v2(1, comm_pkg_j, HYPRE_MEMORY_DEVICE, d_send_a, HYPRE_MEMORY_DEVICE, d_recv_a); } else { comm_handle_a = NULL; } hypre_TMemcpy(d_recv_i, recv_i, HYPRE_Int, num_rows_recv+1, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); /* create A_ext: on device */ A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv); hypre_CSRMatrixI (A_ext) = d_recv_i; hypre_CSRMatrixBigJ(A_ext) = d_recv_j; hypre_CSRMatrixData(A_ext) = d_recv_a; hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_DEVICE; /* output */ vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST); vrequest[0] = (void *) comm_handle_j; vrequest[1] = (void *) comm_handle_a; vrequest[2] = (void *) A_ext; *request_ptr = (void *) vrequest; /* free */ hypre_TFree(send_i, HYPRE_MEMORY_HOST); hypre_TFree(recv_i, HYPRE_MEMORY_HOST); hypre_TFree(d_send_i, HYPRE_MEMORY_DEVICE); hypre_TFree(d_send_map, HYPRE_MEMORY_DEVICE); hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); return hypre_error_flag; } hypre_CSRMatrix* hypre_ParcsrGetExternalRowsDeviceWait(void *vrequest) { void **request = (void **) vrequest; hypre_ParCSRCommHandle *comm_handle_j = 
(hypre_ParCSRCommHandle *) request[0]; hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1]; hypre_CSRMatrix *A_ext = (hypre_CSRMatrix *) request[2]; HYPRE_BigInt *send_j = comm_handle_j ? (HYPRE_BigInt *) hypre_ParCSRCommHandleSendData(comm_handle_j) : NULL; HYPRE_Complex *send_a = comm_handle_a ? (HYPRE_Complex *) hypre_ParCSRCommHandleSendData(comm_handle_a) : NULL; hypre_ParCSRCommHandleDestroy(comm_handle_j); hypre_ParCSRCommHandleDestroy(comm_handle_a); hypre_TFree(send_j, HYPRE_MEMORY_DEVICE); hypre_TFree(send_a, HYPRE_MEMORY_DEVICE); hypre_TFree(request, HYPRE_MEMORY_HOST); return A_ext; } hypre_CSRMatrix* hypre_MergeDiagAndOffdDevice(hypre_ParCSRMatrix *A) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt glbal_num_cols = hypre_ParCSRMatrixGlobalNumCols(A); HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_BigInt *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A); hypre_CSRMatrix *B; HYPRE_Int B_nrows = local_num_rows; HYPRE_BigInt B_ncols = glbal_num_cols; HYPRE_Int *B_i = hypre_TAlloc(HYPRE_Int, B_nrows + 1, HYPRE_MEMORY_DEVICE); HYPRE_BigInt *B_j; HYPRE_Complex *B_a; HYPRE_Int B_nnz; HYPRE_Int num_procs; hypre_MPI_Comm_size(comm, &num_procs); hypre_Memset(B_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(B_nrows, NULL, A_diag_i, A_offd_i, B_i+1); hypreDevice_IntegerInclusiveScan(B_nrows+1, B_i); /* 
total number of nnz */ hypre_TMemcpy(&B_nnz, B_i+B_nrows, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); B_j = hypre_TAlloc(HYPRE_BigInt, B_nnz, HYPRE_MEMORY_DEVICE); B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE); if (d_col_map_offd_A == NULL) { d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A; } hypreDevice_CopyParCSRRows(B_nrows, NULL, 2, num_procs > 1, first_col, d_col_map_offd_A, A_diag_i, A_diag_j, A_diag_a, A_offd_i, A_offd_j, A_offd_a, B_i, B_j, B_a); /* output */ B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz); hypre_CSRMatrixI (B) = B_i; hypre_CSRMatrixBigJ(B) = B_j; hypre_CSRMatrixData(B) = B_a; hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE; hypre_SyncCudaComputeStream(hypre_handle()); return B; } HYPRE_Int hypre_ExchangeExternalRowsDeviceInit( hypre_CSRMatrix *B_ext, hypre_ParCSRCommPkg *comm_pkg_A, void **request_ptr) { MPI_Comm comm = hypre_ParCSRCommPkgComm(comm_pkg_A); HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A); HYPRE_Int *recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A); HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A); HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg_A); HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg_A); HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A); HYPRE_Int num_elmts_send = send_map_starts[num_sends]; HYPRE_Int num_elmts_recv = recv_vec_starts[num_recvs]; HYPRE_Int *B_ext_i_d = hypre_CSRMatrixI(B_ext); HYPRE_BigInt *B_ext_j_d = hypre_CSRMatrixBigJ(B_ext); HYPRE_Complex *B_ext_a_d = hypre_CSRMatrixData(B_ext); HYPRE_Int B_ext_ncols = hypre_CSRMatrixNumCols(B_ext); HYPRE_Int B_ext_nrows = hypre_CSRMatrixNumRows(B_ext); HYPRE_Int B_ext_nnz = hypre_CSRMatrixNumNonzeros(B_ext); HYPRE_Int 
*B_ext_rownnz_d = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_DEVICE); HYPRE_Int *B_ext_rownnz_h = hypre_TAlloc(HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST); HYPRE_Int *B_ext_i_h = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_HOST); hypre_assert(num_elmts_recv == B_ext_nrows); /* output matrix */ hypre_CSRMatrix *B_int_d; HYPRE_Int B_int_nrows = num_elmts_send; HYPRE_Int B_int_ncols = B_ext_ncols; HYPRE_Int *B_int_i_h = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST); HYPRE_Int *B_int_i_d = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_DEVICE); HYPRE_BigInt *B_int_j_d = NULL; HYPRE_Complex *B_int_a_d = NULL; HYPRE_Int B_int_nnz; hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a; hypre_ParCSRCommPkg *comm_pkg_j; HYPRE_Int *jdata_recv_vec_starts; HYPRE_Int *jdata_send_map_starts; HYPRE_Int i; HYPRE_Int num_procs, my_id; void **vrequest; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); /*-------------------------------------------------------------------------- * B_ext_rownnz contains the number of elements of row j * (to be determined through send_map_elmnts on the receiving end) *--------------------------------------------------------------------------*/ HYPRE_THRUST_CALL(adjacent_difference, B_ext_i_d, B_ext_i_d + B_ext_nrows + 1, B_ext_rownnz_d); hypre_TMemcpy(B_ext_rownnz_h, B_ext_rownnz_d + 1, HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); /*-------------------------------------------------------------------------- * initialize communication: send/recv the row nnz * (note the use of comm_pkg_A, mode 12, as in transpose matvec *--------------------------------------------------------------------------*/ comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz_h, B_int_i_h + 1); jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST); 
jdata_recv_vec_starts[0] = 0; B_ext_i_h[0] = 0; hypre_TMemcpy(B_ext_i_h + 1, B_ext_rownnz_h, HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST); for (i = 1; i <= B_ext_nrows; i++) { B_ext_i_h[i] += B_ext_i_h[i-1]; } hypre_assert(B_ext_i_h[B_ext_nrows] == B_ext_nnz); for (i = 1; i <= num_recvs; i++) { jdata_recv_vec_starts[i] = B_ext_i_h[recv_vec_starts[i]]; } comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgComm(comm_pkg_j) = comm; hypre_ParCSRCommPkgNumSends(comm_pkg_j) = num_recvs; hypre_ParCSRCommPkgNumRecvs(comm_pkg_j) = num_sends; hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs; hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs; hypre_ParCSRCommHandleDestroy(comm_handle); /*-------------------------------------------------------------------------- * compute B_int: row nnz to row ptrs *--------------------------------------------------------------------------*/ B_int_i_h[0] = 0; for (i = 1; i <= B_int_nrows; i++) { B_int_i_h[i] += B_int_i_h[i-1]; } B_int_nnz = B_int_i_h[B_int_nrows]; B_int_j_d = hypre_TAlloc(HYPRE_BigInt, B_int_nnz, HYPRE_MEMORY_DEVICE); B_int_a_d = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_DEVICE); for (i = 0; i <= num_sends; i++) { jdata_send_map_starts[i] = B_int_i_h[send_map_starts[i]]; } /* note the order of send/recv is reversed */ hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts; hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts; /* send/recv CSR rows */ comm_handle_a = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg_j, HYPRE_MEMORY_DEVICE, B_ext_a_d, HYPRE_MEMORY_DEVICE, B_int_a_d ); comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j, HYPRE_MEMORY_DEVICE, B_ext_j_d, HYPRE_MEMORY_DEVICE, B_int_j_d ); hypre_TMemcpy(B_int_i_d, B_int_i_h, HYPRE_Int, B_int_nrows+1, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); /* create CSR: on device */ B_int_d = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz); 
hypre_CSRMatrixI(B_int_d) = B_int_i_d; hypre_CSRMatrixBigJ(B_int_d) = B_int_j_d; hypre_CSRMatrixData(B_int_d) = B_int_a_d; hypre_CSRMatrixMemoryLocation(B_int_d) = HYPRE_MEMORY_DEVICE; /* output */ vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST); vrequest[0] = (void *) comm_handle_j; vrequest[1] = (void *) comm_handle_a; vrequest[2] = (void *) B_int_d; *request_ptr = (void *) vrequest; /* free */ hypre_TFree(B_ext_rownnz_d, HYPRE_MEMORY_DEVICE); hypre_TFree(B_ext_rownnz_h, HYPRE_MEMORY_HOST); hypre_TFree(B_ext_i_h, HYPRE_MEMORY_HOST); hypre_TFree(B_int_i_h, HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST); hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST); return hypre_error_flag; } hypre_CSRMatrix* hypre_ExchangeExternalRowsDeviceWait(void *vrequest) { void **request = (void **) vrequest; hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0]; hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1]; hypre_CSRMatrix *B_int_d = (hypre_CSRMatrix *) request[2]; /* communication done */ hypre_ParCSRCommHandleDestroy(comm_handle_j); hypre_ParCSRCommHandleDestroy(comm_handle_a); hypre_TFree(request, HYPRE_MEMORY_HOST); return B_int_d; } /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */ HYPRE_Int hypre_ParCSRMatrixExtractBExtDeviceInit( hypre_ParCSRMatrix *B, hypre_ParCSRMatrix *A, HYPRE_Int want_data, void **request_ptr) { hypre_assert( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) == hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) ); /* hypre_assert( hypre_GetActualMemLocation( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B))) == 
HYPRE_MEMORY_DEVICE ); */

   /* NOTE(review): this is the tail of hypre_ParCSRMatrixExtractBExtDeviceInit;
      the signature and earlier statements are above this chunk. */
   if (!hypre_ParCSRMatrixCommPkg(A))
   {
      hypre_MatvecCommPkgCreate(A);
   }

   hypre_ParcsrGetExternalRowsDeviceInit(B,
                                         hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)),
                                         hypre_ParCSRMatrixColMapOffd(A),
                                         hypre_ParCSRMatrixCommPkg(A),
                                         want_data,
                                         request_ptr);
   return hypre_error_flag;
}

/* Complete the asynchronous external-row fetch started by
 * hypre_ParCSRMatrixExtractBExtDeviceInit and return the received rows. */
hypre_CSRMatrix*
hypre_ParCSRMatrixExtractBExtDeviceWait(void *request)
{
   return hypre_ParcsrGetExternalRowsDeviceWait(request);
}

/* Blocking convenience wrapper: Init immediately followed by Wait. */
hypre_CSRMatrix*
hypre_ParCSRMatrixExtractBExtDevice( hypre_ParCSRMatrix *B,
                                     hypre_ParCSRMatrix *A,
                                     HYPRE_Int want_data )
{
   void *request;

   hypre_ParCSRMatrixExtractBExtDeviceInit(B, A, want_data, &request);

   return hypre_ParCSRMatrixExtractBExtDeviceWait(request);
}

/* return B = [Adiag, Aoffd] */
#if 1

/* One warp per row: copy the diag part of the row, then the offd part
 * (with offd columns shifted by diag_ncol and optionally remapped through
 * cols_offd_map) into the concatenated output (d_ib, d_jb, d_ab).
 * d_ib must already hold the row pointers of the output matrix. */
__global__ void
hypreCUDAKernel_ConcatDiagAndOffd(HYPRE_Int  nrows,    HYPRE_Int  diag_ncol,
                                  HYPRE_Int *d_diag_i, HYPRE_Int *d_diag_j, HYPRE_Complex *d_diag_a,
                                  HYPRE_Int *d_offd_i, HYPRE_Int *d_offd_j, HYPRE_Complex *d_offd_a,
                                  HYPRE_Int *cols_offd_map,
                                  HYPRE_Int *d_ib,     HYPRE_Int *d_jb,     HYPRE_Complex *d_ab)
{
   const HYPRE_Int row = hypre_cuda_get_grid_warp_id<1,1>();

   if (row >= nrows)
   {
      return;
   }

   /* lane id inside the warp */
   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int i, j, k, p, istart, iend, bstart;

   /* diag part: lanes 0-1 load the row-pointer pair, lane 0 the output offset;
      values are then broadcast to the whole warp via __shfl_sync */
   if (lane_id < 2)
   {
      j = read_only_load(d_diag_i + row + lane_id);
   }
   if (lane_id == 0)
   {
      k = read_only_load(d_ib + row);
   }
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend   = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);
   bstart = __shfl_sync(HYPRE_WARP_FULL_MASK, k, 0);

   p = bstart - istart;
   for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE)
   {
      d_jb[p+i] = read_only_load(d_diag_j + i);
      d_ab[p+i] = read_only_load(d_diag_a + i);
   }

   /* offd part: appended right after this row's diag entries */
   if (lane_id < 2)
   {
      j = read_only_load(d_offd_i + row + lane_id);
   }
   bstart += iend - istart;
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend   = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);

   p = bstart - istart;
   for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE)
   {
      const HYPRE_Int t = read_only_load(d_offd_j + i);
      /* offd columns are placed after the diag columns in B */
      d_jb[p+i] = (cols_offd_map ? read_only_load(&cols_offd_map[t]) : t) + diag_ncol;
      d_ab[p+i] = read_only_load(d_offd_a + i);
   }
}

/* Return the local matrix B = [Adiag, Aoffd] (offd columns appended after
 * diag columns, offd indices kept in local numbering). */
hypre_CSRMatrix*
hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);

   hypre_CSRMatrix *B = hypre_CSRMatrixCreate( hypre_CSRMatrixNumRows(A_diag),
                                               hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd),
                                               hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) );

   hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE);

   /* row counts of B = row counts of Adiag + Aoffd, then scan to row pointers */
   hypreDevice_GetRowNnz(hypre_CSRMatrixNumRows(B), NULL, hypre_CSRMatrixI(A_diag), hypre_CSRMatrixI(A_offd),
                         hypre_CSRMatrixI(B));

   HYPRE_THRUST_CALL( exclusive_scan,
                      hypre_CSRMatrixI(B),
                      hypre_CSRMatrixI(B) + hypre_CSRMatrixNumRows(B) + 1,
                      hypre_CSRMatrixI(B) );

   const dim3 bDim = hypre_GetDefaultCUDABlockDimension();
   const dim3 gDim = hypre_GetDefaultCUDAGridDimension(hypre_CSRMatrixNumRows(A_diag), "warp", bDim);

   HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim,
                      hypre_CSRMatrixNumRows(A_diag),
                      hypre_CSRMatrixNumCols(A_diag),
                      hypre_CSRMatrixI(A_diag),
                      hypre_CSRMatrixJ(A_diag),
                      hypre_CSRMatrixData(A_diag),
                      hypre_CSRMatrixI(A_offd),
                      hypre_CSRMatrixJ(A_offd),
                      hypre_CSRMatrixData(A_offd),
                      NULL,
                      hypre_CSRMatrixI(B),
                      hypre_CSRMatrixJ(B),
                      hypre_CSRMatrixData(B) );

   return B;
}

#else

/* Alternate Thrust-only implementation of hypre_ConcatDiagAndOffdDevice:
 * expand row pointers to row indices, copy both parts, shift offd columns,
 * then stable-sort by row index and compress back to row pointers. */
hypre_CSRMatrix*
hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A)
{
   hypre_CSRMatrix *A_diag     = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int       *A_diag_i   = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j   = hypre_CSRMatrixJ(A_diag);
   HYPRE_Complex   *A_diag_a   = hypre_CSRMatrixData(A_diag);
   HYPRE_Int        A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag);

   hypre_CSRMatrix *A_offd     = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int       *A_offd_i   = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j   = hypre_CSRMatrixJ(A_offd);
   HYPRE_Complex   *A_offd_a   = hypre_CSRMatrixData(A_offd);
   HYPRE_Int        A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd);

   hypre_CSRMatrix *B;
   HYPRE_Int        B_nrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        B_ncols = hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int        B_nnz   = A_diag_nnz + A_offd_nnz;

   HYPRE_Int     *B_ii = hypre_TAlloc(HYPRE_Int,     B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Int     *B_j  = hypre_TAlloc(HYPRE_Int,     B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Complex *B_a  = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE);

   // Adiag
   HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_diag_nnz, A_diag_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)),
                      A_diag_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) );
   hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE);

   // Aoffd
   HYPRE_Int *A_offd_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_offd_nnz, A_offd_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)),
                      A_offd_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz );
   hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE);

   /* shift offd column indices past the diag columns */
   HYPRE_THRUST_CALL( transform,
                      A_offd_j,
                      A_offd_j + A_offd_nnz,
                      thrust::make_constant_iterator(hypre_CSRMatrixNumCols(A_diag)),
                      B_j + A_diag_nnz,
                      thrust::plus<HYPRE_Int>() );

   // B
   HYPRE_THRUST_CALL( stable_sort_by_key,
                      B_ii,
                      B_ii + B_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) );

   HYPRE_Int *B_i = hypreDevice_CsrRowIndicesToPtrs(B_nrows, B_nnz, B_ii);
   hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE);

   B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz);
   hypre_CSRMatrixI(B)    = B_i;
   hypre_CSRMatrixJ(B)    = B_j;
   hypre_CSRMatrixData(B) = B_a;
   hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE;

   return B;
}

#endif

/* return B = [Adiag, Aoffd; E] */
#if 1

/* Stack the local part of A on top of the external rows E.  E's big
 * column indices are split into diag/offd; the merged offd column map is
 * returned through num_cols_offd_ptr / cols_map_offd_ptr. */
HYPRE_Int
hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix  *A,
                                 hypre_CSRMatrix     *E,
                                 hypre_CSRMatrix    **B_ptr,
                                 HYPRE_Int           *num_cols_offd_ptr,
                                 HYPRE_BigInt       **cols_map_offd_ptr)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   hypre_CSRMatrix *E_diag, *E_offd, *B;
   HYPRE_Int       *cols_offd_map, num_cols_offd;
   HYPRE_BigInt    *cols_map_offd;

   /* split E into diag/offd and merge its offd columns with A's;
      cols_offd_map maps A's old offd columns into the merged set */
   hypre_CSRMatrixSplitDevice(E, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A),
                              hypre_CSRMatrixNumCols(A_offd), hypre_ParCSRMatrixDeviceColMapOffd(A),
                              &cols_offd_map, &num_cols_offd, &cols_map_offd, &E_diag, &E_offd);

   B = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumRows(A) + hypre_CSRMatrixNumRows(E),
                             hypre_ParCSRMatrixNumCols(A) + num_cols_offd,
                             hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) + hypre_CSRMatrixNumNonzeros(E));

   hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE);

   /* rows of A first: row nnz then scan to row pointers */
   hypreDevice_GetRowNnz(hypre_ParCSRMatrixNumRows(A), NULL, hypre_CSRMatrixI(A_diag), hypre_CSRMatrixI(A_offd),
                         hypre_CSRMatrixI(B));

   HYPRE_THRUST_CALL( exclusive_scan,
                      hypre_CSRMatrixI(B),
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1,
                      hypre_CSRMatrixI(B) );

   dim3 bDim = hypre_GetDefaultCUDABlockDimension();
   dim3 gDim = hypre_GetDefaultCUDAGridDimension(hypre_ParCSRMatrixNumRows(A), "warp", bDim);

   HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim,
                      hypre_CSRMatrixNumRows(A_diag),
                      hypre_CSRMatrixNumCols(A_diag),
                      hypre_CSRMatrixI(A_diag),
                      hypre_CSRMatrixJ(A_diag),
                      hypre_CSRMatrixData(A_diag),
                      hypre_CSRMatrixI(A_offd),
                      hypre_CSRMatrixJ(A_offd),
                      hypre_CSRMatrixData(A_offd),
                      cols_offd_map,
                      hypre_CSRMatrixI(B),
                      hypre_CSRMatrixJ(B),
                      hypre_CSRMatrixData(B) );

   hypre_TFree(cols_offd_map, HYPRE_MEMORY_DEVICE);

   /* append E's row pointers, offset by the nnz already written for A */
   hypre_TMemcpy(hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, hypre_CSRMatrixI(E) + 1,
                 HYPRE_Int, hypre_CSRMatrixNumRows(E), HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);

   HYPRE_THRUST_CALL( transform,
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1,
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + hypre_CSRMatrixNumRows(E) + 1,
                      thrust::make_constant_iterator(hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd)),
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1,
                      thrust::plus<HYPRE_Int>() );

   gDim = hypre_GetDefaultCUDAGridDimension(hypre_CSRMatrixNumRows(E), "warp", bDim);

   hypre_assert(hypre_CSRMatrixNumCols(E_diag) == hypre_CSRMatrixNumCols(A_diag));

   HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim,
                      hypre_CSRMatrixNumRows(E_diag),
                      hypre_CSRMatrixNumCols(E_diag),
                      hypre_CSRMatrixI(E_diag),
                      hypre_CSRMatrixJ(E_diag),
                      hypre_CSRMatrixData(E_diag),
                      hypre_CSRMatrixI(E_offd),
                      hypre_CSRMatrixJ(E_offd),
                      hypre_CSRMatrixData(E_offd),
                      NULL,
                      hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A),
                      hypre_CSRMatrixJ(B),
                      hypre_CSRMatrixData(B) );

   hypre_CSRMatrixDestroy(E_diag);
   hypre_CSRMatrixDestroy(E_offd);

   *B_ptr = B;
   *num_cols_offd_ptr = num_cols_offd;
   *cols_map_offd_ptr = cols_map_offd;

   return hypre_error_flag;
}

#else

/* Alternate Thrust-only implementation of hypre_ConcatDiagOffdAndExtDevice. */
HYPRE_Int
hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix  *A,
                                 hypre_CSRMatrix     *E,
                                 hypre_CSRMatrix    **B_ptr,
                                 HYPRE_Int           *num_cols_offd_ptr,
                                 HYPRE_BigInt       **cols_map_offd_ptr)
{
   hypre_CSRMatrix *A_diag     = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int        A_nrows    = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        A_ncols    = hypre_CSRMatrixNumCols(A_diag);
   HYPRE_Int       *A_diag_i   = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j   = hypre_CSRMatrixJ(A_diag);
   HYPRE_Complex   *A_diag_a   = hypre_CSRMatrixData(A_diag);
   HYPRE_Int        A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag);

   hypre_CSRMatrix *A_offd     = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int       *A_offd_i   = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j   = hypre_CSRMatrixJ(A_offd);
   HYPRE_Complex   *A_offd_a   = hypre_CSRMatrixData(A_offd);
   HYPRE_Int        A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd);

   HYPRE_BigInt     first_col_A     = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_BigInt     last_col_A      = hypre_ParCSRMatrixLastColDiag(A);
   HYPRE_Int        num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt    *col_map_offd_A  = hypre_ParCSRMatrixDeviceColMapOffd(A);

   HYPRE_Int       *E_i    = hypre_CSRMatrixI(E);
   HYPRE_BigInt    *E_bigj = hypre_CSRMatrixBigJ(E);
   HYPRE_Complex   *E_a    = hypre_CSRMatrixData(E);
   HYPRE_Int        E_nrows = hypre_CSRMatrixNumRows(E);
   HYPRE_Int        E_nnz   = hypre_CSRMatrixNumNonzeros(E);
   HYPRE_Int        E_diag_nnz, E_offd_nnz;

   hypre_CSRMatrix *B;
   HYPRE_Int        B_nnz = A_diag_nnz + A_offd_nnz + E_nnz;

   HYPRE_Int     *B_ii = hypre_TAlloc(HYPRE_Int,     B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Int     *B_j  = hypre_TAlloc(HYPRE_Int,     B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Complex *B_a  = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE);

   // E
   /* pass 0: just count the diag/offd split of E */
   hypre_CSRMatrixSplitDevice_core(0, E_nrows, E_nnz, NULL, E_bigj, NULL, NULL, first_col_A, last_col_A,
                                   num_cols_offd_A, NULL, NULL, NULL, NULL, &E_diag_nnz, NULL, NULL, NULL, NULL,
                                   &E_offd_nnz, NULL, NULL, NULL, NULL);

   HYPRE_Int    *cols_offd_map, num_cols_offd;
   HYPRE_BigInt *cols_map_offd;

   HYPRE_Int *E_ii = hypreDevice_CsrRowPtrsToIndices(E_nrows, E_nnz, E_i);

   /* pass 1: fill E's entries directly behind A's in the B arrays */
   hypre_CSRMatrixSplitDevice_core(1, E_nrows, E_nnz, E_ii, E_bigj, E_a, NULL,
                                   first_col_A, last_col_A, num_cols_offd_A, col_map_offd_A,
                                   &cols_offd_map, &num_cols_offd, &cols_map_offd,
                                   &E_diag_nnz,
                                   B_ii + A_diag_nnz + A_offd_nnz,
                                   B_j  + A_diag_nnz + A_offd_nnz,
                                   B_a  + A_diag_nnz + A_offd_nnz,
                                   NULL,
                                   &E_offd_nnz,
                                   B_ii + A_diag_nnz + A_offd_nnz + E_diag_nnz,
                                   B_j  + A_diag_nnz + A_offd_nnz + E_diag_nnz,
                                   B_a  + A_diag_nnz + A_offd_nnz + E_diag_nnz,
                                   NULL);

   hypre_TFree(E_ii, HYPRE_MEMORY_DEVICE);

   /* E's rows come after A's rows in B */
   HYPRE_THRUST_CALL( transform,
                      B_ii + A_diag_nnz + A_offd_nnz,
                      B_ii + B_nnz,
                      thrust::make_constant_iterator(A_nrows),
                      B_ii + A_diag_nnz + A_offd_nnz,
                      thrust::plus<HYPRE_Int>() );

   // Adiag
   HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(A_nrows, A_diag_nnz, A_diag_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)),
                      A_diag_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) );
   hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE);

   // Aoffd
   HYPRE_Int *A_offd_ii = hypreDevice_CsrRowPtrsToIndices(A_nrows, A_offd_nnz, A_offd_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)),
                      A_offd_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz );
   hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE);

   /* remap A's offd columns into the merged offd set, then shift them
      (and E's offd columns) past the diag columns */
   HYPRE_THRUST_CALL( gather,
                      A_offd_j,
                      A_offd_j + A_offd_nnz,
                      cols_offd_map,
                      B_j + A_diag_nnz);

   hypre_TFree(cols_offd_map, HYPRE_MEMORY_DEVICE);

   HYPRE_THRUST_CALL( transform,
                      B_j + A_diag_nnz,
                      B_j + A_diag_nnz + A_offd_nnz,
                      thrust::make_constant_iterator(A_ncols),
                      B_j + A_diag_nnz,
                      thrust::plus<HYPRE_Int>() );

   HYPRE_THRUST_CALL( transform,
                      B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz,
                      B_j + B_nnz,
                      thrust::make_constant_iterator(A_ncols),
                      B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz,
                      thrust::plus<HYPRE_Int>() );

   // B
   HYPRE_THRUST_CALL( stable_sort_by_key,
                      B_ii,
                      B_ii + B_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) );

   HYPRE_Int *B_i = hypreDevice_CsrRowIndicesToPtrs(A_nrows + E_nrows, B_nnz, B_ii);
   hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE);

   B = hypre_CSRMatrixCreate(A_nrows + E_nrows, A_ncols + num_cols_offd, B_nnz);
   hypre_CSRMatrixI(B)    = B_i;
   hypre_CSRMatrixJ(B)    = B_j;
   hypre_CSRMatrixData(B) = B_a;
   hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE;

   *B_ptr = B;
   *num_cols_offd_ptr = num_cols_offd;
   *cols_map_offd_ptr = cols_map_offd;

   return hypre_error_flag;
}

#endif

/* Device counterpart of hypre_ParCSRMatrixGetRow: return the size (and,
 * on request, global column indices and values) of one locally owned row.
 * Returns -1 if a row is already active or the row is not owned locally. */
HYPRE_Int
hypre_ParCSRMatrixGetRowDevice( hypre_ParCSRMatrix  *mat,
                                HYPRE_BigInt         row,
                                HYPRE_Int           *size,
                                HYPRE_BigInt       **col_ind,
                                HYPRE_Complex      **values )
{
   HYPRE_Int nrows, local_row;
   HYPRE_BigInt row_start, row_end;
   hypre_CSRMatrix *Aa;
   hypre_CSRMatrix *Ba;

   if (!mat)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat);
   Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat);

   if (hypre_ParCSRMatrixGetrowactive(mat))
   {
      return(-1);
   }

   hypre_ParCSRMatrixGetrowactive(mat) = 1;

   row_start = hypre_ParCSRMatrixFirstRowIndex(mat);
   row_end   = hypre_ParCSRMatrixLastRowIndex(mat) + 1;
   nrows     = row_end - row_start;

   if (row < row_start || row >= row_end)
   {
      return(-1);
   }

   local_row = row - row_start;

   /* if buffer is not allocated and some information is requested,
      allocate buffer with the max row_nnz */
   if ( !hypre_ParCSRMatrixRowvalues(mat) && (col_ind || values) )
   {
      HYPRE_Int max_row_nnz;
      HYPRE_Int *row_nnz = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_DEVICE);

      hypreDevice_GetRowNnz(nrows, NULL, hypre_CSRMatrixI(Aa), hypre_CSRMatrixI(Ba), row_nnz);

      hypre_TMemcpy(size, row_nnz + local_row, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);

      max_row_nnz = HYPRE_THRUST_CALL(reduce, row_nnz, row_nnz + nrows, 0, thrust::maximum<HYPRE_Int>());

      /*
      HYPRE_Int *max_row_nnz_d = HYPRE_THRUST_CALL(max_element, row_nnz, row_nnz + nrows);
      hypre_TMemcpy( &max_row_nnz, max_row_nnz_d, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE );
      */

      hypre_TFree(row_nnz, HYPRE_MEMORY_DEVICE);

      hypre_ParCSRMatrixRowvalues(mat)  =
         (HYPRE_Complex *) hypre_TAlloc(HYPRE_Complex, max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat));
      hypre_ParCSRMatrixRowindices(mat) =
         (HYPRE_BigInt *)  hypre_TAlloc(HYPRE_BigInt,  max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat));
   }
   else
   {
      /* only the size of this one row is needed */
      HYPRE_Int *size_d = hypre_TAlloc(HYPRE_Int, 1, HYPRE_MEMORY_DEVICE);
      hypreDevice_GetRowNnz(1, NULL, hypre_CSRMatrixI(Aa) + local_row, hypre_CSRMatrixI(Ba) + local_row, size_d);
      hypre_TMemcpy(size, size_d, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
      hypre_TFree(size_d, HYPRE_MEMORY_DEVICE);
   }

   if (col_ind || values)
   {
      if (hypre_ParCSRMatrixDeviceColMapOffd(mat) == NULL)
      {
         /* mirror the host offd column map on the device on first use */
         hypre_ParCSRMatrixDeviceColMapOffd(mat) =
            hypre_TAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(Ba), HYPRE_MEMORY_DEVICE);

         hypre_TMemcpy( hypre_ParCSRMatrixDeviceColMapOffd(mat),
                        hypre_ParCSRMatrixColMapOffd(mat),
                        HYPRE_BigInt,
                        hypre_CSRMatrixNumCols(Ba),
                        HYPRE_MEMORY_DEVICE,
                        HYPRE_MEMORY_HOST );
      }

      hypreDevice_CopyParCSRRows( 1, NULL, -1, Ba != NULL,
                                  hypre_ParCSRMatrixFirstColDiag(mat),
                                  hypre_ParCSRMatrixDeviceColMapOffd(mat),
                                  hypre_CSRMatrixI(Aa) + local_row,
                                  hypre_CSRMatrixJ(Aa),
                                  hypre_CSRMatrixData(Aa),
                                  hypre_CSRMatrixI(Ba) + local_row,
                                  hypre_CSRMatrixJ(Ba),
                                  hypre_CSRMatrixData(Ba),
                                  NULL,
                                  hypre_ParCSRMatrixRowindices(mat),
                                  hypre_ParCSRMatrixRowvalues(mat) );
   }

   if (col_ind)
   {
      *col_ind = hypre_ParCSRMatrixRowindices(mat);
   }

   if (values)
   {
      *values = hypre_ParCSRMatrixRowvalues(mat);
   }

   hypre_SyncCudaComputeStream(hypre_handle());

   return hypre_error_flag;
}

/* abs == 1, use absolute values
 * option == 0, drop all the entries that are smaller than tol
 * TODO more options
 */
HYPRE_Int
hypre_ParCSRMatrixDropSmallEntriesDevice( hypre_ParCSRMatrix *A,
                                          HYPRE_Complex       tol,
                                          HYPRE_Int           abs,
                                          HYPRE_Int           option)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);

   HYPRE_Int        num_cols_A_offd  = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt    *h_col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_BigInt    *col_map_offd_A   = hypre_ParCSRMatrixDeviceColMapOffd(A);

   if (col_map_offd_A == NULL)
   {
      col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(col_map_offd_A, h_col_map_offd_A, HYPRE_BigInt, num_cols_A_offd,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixDeviceColMapOffd(A) = col_map_offd_A;
   }

   hypre_CSRMatrixDropSmallEntriesDevice(A_diag, tol, abs, option);
   hypre_CSRMatrixDropSmallEntriesDevice(A_offd, tol, abs, option);

   hypre_ParCSRMatrixSetNumNonzeros(A);
   hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A);

   /* squeeze out zero columns of A_offd */
   HYPRE_Int *tmp_j, *tmp_end, num_cols_A_offd_new;
   tmp_j = hypre_TAlloc(HYPRE_Int, hypre_CSRMatrixNumNonzeros(A_offd), HYPRE_MEMORY_DEVICE);
   hypre_TMemcpy(tmp_j, hypre_CSRMatrixJ(A_offd), HYPRE_Int, hypre_CSRMatrixNumNonzeros(A_offd),
                 HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
   HYPRE_THRUST_CALL( sort,
                      tmp_j,
                      tmp_j + hypre_CSRMatrixNumNonzeros(A_offd) );
   tmp_end = HYPRE_THRUST_CALL( unique,
                                tmp_j,
                                tmp_j + hypre_CSRMatrixNumNonzeros(A_offd) );
   num_cols_A_offd_new = tmp_end - tmp_j;

   hypre_assert(num_cols_A_offd_new <= num_cols_A_offd);

   if (num_cols_A_offd_new < num_cols_A_offd)
   {
      hypre_CSRMatrixNumCols(A_offd) = num_cols_A_offd_new;

      HYPRE_Int    *offd_mark          = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
      HYPRE_BigInt *col_map_offd_A_new = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd_new, HYPRE_MEMORY_DEVICE);

      /* offd_mark[old col] = new (compressed) col index */
      HYPRE_THRUST_CALL( scatter,
                         thrust::counting_iterator<HYPRE_Int>(0),
                         thrust::counting_iterator<HYPRE_Int>(num_cols_A_offd_new),
                         tmp_j,
                         offd_mark );
      HYPRE_THRUST_CALL( gather,
                         hypre_CSRMatrixJ(A_offd),
                         hypre_CSRMatrixJ(A_offd) + hypre_CSRMatrixNumNonzeros(A_offd),
                         offd_mark,
                         hypre_CSRMatrixJ(A_offd) );
      HYPRE_THRUST_CALL( gather,
                         tmp_j,
                         tmp_j + num_cols_A_offd_new,
                         col_map_offd_A,
                         col_map_offd_A_new );

      hypre_TFree(offd_mark, HYPRE_MEMORY_DEVICE);
      hypre_TFree(col_map_offd_A, HYPRE_MEMORY_DEVICE);
      hypre_TFree(h_col_map_offd_A, HYPRE_MEMORY_HOST);

      hypre_ParCSRMatrixDeviceColMapOffd(A) = col_map_offd_A_new;
      hypre_ParCSRMatrixColMapOffd(A) = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd_new, HYPRE_MEMORY_HOST);
      hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(A), col_map_offd_A_new, HYPRE_BigInt, num_cols_A_offd_new,
                    HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
   }

   hypre_TFree(tmp_j, HYPRE_MEMORY_DEVICE);

   return hypre_error_flag;
}

#endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)

/*--------------------------------------------------------------------------
 * hypre_ParCSRDiagScale: x = y ./ diag(A)
 * (assumes the diagonal entry is the first entry of each row of A_diag,
 *  which is hypre's stored convention — see A_data[A_i[i]] below)
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRDiagScale( HYPRE_ParCSRMatrix HA,
                       HYPRE_ParVector    Hy,
                       HYPRE_ParVector    Hx )
{
   hypre_ParCSRMatrix *A = (hypre_ParCSRMatrix *) HA;
   hypre_ParVector    *y = (hypre_ParVector *) Hy;
   hypre_ParVector    *x = (hypre_ParVector *) Hx;

   HYPRE_Real *x_data = hypre_VectorData(hypre_ParVectorLocalVector(x));
   HYPRE_Real *y_data = hypre_VectorData(hypre_ParVectorLocalVector(y));
   HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int  *A_i    = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int   local_size = hypre_VectorSize(hypre_ParVectorLocalVector(x));
   HYPRE_Int   ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypreDevice_DiagScaleVector(local_size, A_i, A_data, y_data, 0.0, x_data);
   //hypre_SyncCudaComputeStream(hypre_handle());
#else /* #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
   #pragma omp target teams distribute parallel for private(i) is_device_ptr(x_data,y_data,A_data,A_i)
#elif defined(HYPRE_USING_OPENMP)
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < local_size; i++)
   {
      x_data[i] = y_data[i] / A_data[A_i[i]];
   }
#endif /* #if defined(HYPRE_USING_CUDA) */

   return ierr;
}
vt_comp_pgi.c
/**
 * VampirTrace
 * http://www.tu-dresden.de/zih/vampirtrace
 *
 * Copyright (c) 2005-2008, ZIH, TU Dresden, Federal Republic of Germany
 *
 * Copyright (c) 1998-2005, Forschungszentrum Juelich, Juelich Supercomputing
 *                          Centre, Federal Republic of Germany
 *
 * See the file COPYING in the package base directory for details
 **/

#include "vt_comp.h"
#include "vt_defs.h"
#include "vt_env.h"
#include "vt_memhook.h"
#include "vt_pform.h"
#include "vt_trc.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#if (defined (VT_OMPI) || defined (VT_OMP))
# include <omp.h>
#endif

/* Per-routine profiling record passed by the PGI compiler's -Mprof
 * instrumentation.  The field layout is dictated by the compiler ABI,
 * so fields must not be reordered or removed. */
struct s1
{
  long l1;
  long l2;
  double d1;
  double d2;
  long isseen;        /* non-zero once the region has been registered with VT */
  char *c;
  void *p1;
  long lineno;        /* begin line number of the routine */
  void *p2;
  struct s1 *p3;
  int fid;            /* VampirTrace file id (filled on first entry) */
  int rid;            /* VampirTrace region id (filled on first entry) */
  char *file;         /* source file name */
  char *rout;         /* routine name */
};

static int rou_init = 1;  /* initialization necessary ? */

/*
 *-----------------------------------------------------------------------------
 * called during program initialization
 *-----------------------------------------------------------------------------
 */

void __rouinit()
{
  VT_MEMHOOKS_OFF();

  /* open trace file */
  vt_open();

  if (rou_init)
  {
    rou_init = 0;
  }

  VT_MEMHOOKS_ON();
}

/*
 *-----------------------------------------------------------------------------
 * called at the beginning of each profiled routine
 *-----------------------------------------------------------------------------
 */

void ___rouent2(struct s1 *p)
{
  uint64_t time;

  if (rou_init)
  {
    rou_init = 0;
    __rouinit();
  }

  /* -- if VampirTrace already finalized, return -- */
  if ( !vt_is_alive ) return;

  VT_MEMHOOKS_OFF();

  time = vt_pform_wtime();

  if (!p->isseen)
  {
    char* rname = p->rout;
    char* modpos;

    /* fix opari output file names: strip the inserted ".mod"
       ("foo.mod.f90" -> "foo.f90").  memmove (not strcpy) is required
       here because source and destination overlap; strcpy on overlapping
       buffers is undefined behavior per the C standard. */
    if ( (modpos = strstr(p->file, ".mod.")) != NULL )
    {
      memmove(modpos, modpos+4, strlen(modpos+4)+1);
    }

#if (defined (VT_OMPI) || defined (VT_OMP))
    if (omp_in_parallel())
    {
      /* register region exactly once, even when several threads enter
         the routine concurrently */
#pragma omp critical (vt_comp_pgi_1)
      {
        if (!p->isseen)
        {
          p->fid = vt_def_file(p->file);
          p->rid = vt_def_region(rname, p->fid, p->lineno, VT_NO_LNO,
                                 VT_DEF_GROUP, VT_FUNCTION);
          p->isseen = 1;
        }
      }
    }
    else
    {
      p->fid = vt_def_file(p->file);
      p->rid = vt_def_region(rname, p->fid, p->lineno, VT_NO_LNO,
                             VT_DEF_GROUP, VT_FUNCTION);
      p->isseen = 1;
    }
#else
    p->fid = vt_def_file(p->file);
    p->rid = vt_def_region(rname, p->fid, p->lineno, VT_NO_LNO,
                           VT_DEF_GROUP, VT_FUNCTION);
    p->isseen = 1;
#endif
  }

  /* write enter trace record */
  vt_enter(&time, p->rid);

  VT_MEMHOOKS_ON();
}

/*
 *-----------------------------------------------------------------------------
 * called at the end of each profiled routine
 *-----------------------------------------------------------------------------
 */

void ___rouret2(void)
{
  uint64_t time;

  /* -- if VampirTrace already finalized, return -- */
  if ( !vt_is_alive ) return;

  VT_MEMHOOKS_OFF();

  time = vt_pform_wtime();
  vt_exit(&time);

  VT_MEMHOOKS_ON();
}

/* line-entry hook: intentionally a no-op (line events are not traced) */
void ___linent2(void *l)
{
}
fwi_propagator.c
/* * ============================================================================= * Copyright (c) 2016-2018, Barcelona Supercomputing Center (BSC) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the <organization> nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* ============================================================================= */ #include "fwi/fwi_propagator.h" inline integer IDX (const integer z, const integer x, const integer y, const integer dimmz, const integer dimmx) { return ((y*dimmx)+x)*dimmz + z; }; real stencil_Z ( const integer off, const real* restrict ptr, const real dzi, const integer z, const integer x, const integer y, const integer dimmz, const integer dimmx) { return ((C0 * ( ptr[IDX(z +off,x,y,dimmz,dimmx)] - ptr[IDX(z-1+off,x,y,dimmz,dimmx)]) + C1 * ( ptr[IDX(z+1+off,x,y,dimmz,dimmx)] - ptr[IDX(z-2+off,x,y,dimmz,dimmx)]) + C2 * ( ptr[IDX(z+2+off,x,y,dimmz,dimmx)] - ptr[IDX(z-3+off,x,y,dimmz,dimmx)]) + C3 * ( ptr[IDX(z+3+off,x,y,dimmz,dimmx)] - ptr[IDX(z-4+off,x,y,dimmz,dimmx)])) * dzi ); }; real stencil_X( const integer off, const real* restrict ptr, const real dxi, const integer z, const integer x, const integer y, const integer dimmz, const integer dimmx) { return ((C0 * ( ptr[IDX(z,x +off,y,dimmz,dimmx)] - ptr[IDX(z,x-1+off,y,dimmz,dimmx)]) + C1 * ( ptr[IDX(z,x+1+off,y,dimmz,dimmx)] - ptr[IDX(z,x-2+off,y,dimmz,dimmx)]) + C2 * ( ptr[IDX(z,x+2+off,y,dimmz,dimmx)] - ptr[IDX(z,x-3+off,y,dimmz,dimmx)]) + C3 * ( ptr[IDX(z,x+3+off,y,dimmz,dimmx)] - ptr[IDX(z,x-4+off,y,dimmz,dimmx)])) * dxi ); }; real stencil_Y( const integer off, const real* restrict ptr, const real dyi, const integer z, const integer x, const integer y, const integer dimmz, const integer dimmx) { return ((C0 * ( ptr[IDX(z,x,y +off,dimmz,dimmx)] - ptr[IDX(z,x,y-1+off,dimmz,dimmx)]) + C1 * ( ptr[IDX(z,x,y+1+off,dimmz,dimmx)] - ptr[IDX(z,x,y-2+off,dimmz,dimmx)]) + C2 * ( ptr[IDX(z,x,y+2+off,dimmz,dimmx)] - ptr[IDX(z,x,y-3+off,dimmz,dimmx)]) + C3 * ( ptr[IDX(z,x,y+3+off,dimmz,dimmx)] - ptr[IDX(z,x,y-4+off,dimmz,dimmx)])) * dyi ); }; /* -------------------------------------------------------------------- */ /* KERNELS FOR VELOCITY */ /* -------------------------------------------------------------------- */ inline real rho_BL ( 
const real* restrict rho, const integer z, const integer x, const integer y, const integer dimmz, const integer dimmx) { return (2.0f / (rho[IDX(z,x,y,dimmz,dimmx)] + rho[IDX(z+1,x,y,dimmz,dimmx)])); }; inline real rho_TR ( const real* restrict rho, const integer z, const integer x, const integer y, const integer dimmz, const integer dimmx) { return (2.0f / (rho[IDX(z,x,y,dimmz,dimmx)] + rho[IDX(z,x+1,y,dimmz,dimmx)])); }; inline real rho_BR ( const real* restrict rho, const integer z, const integer x, const integer y, const integer dimmz, const integer dimmx) { return ( 8.0f/ ( rho[IDX(z ,x ,y ,dimmz,dimmx)] + rho[IDX(z+1,x ,y ,dimmz,dimmx)] + rho[IDX(z ,x+1,y ,dimmz,dimmx)] + rho[IDX(z ,x ,y+1,dimmz,dimmx)] + rho[IDX(z ,x+1,y+1,dimmz,dimmx)] + rho[IDX(z+1,x+1,y ,dimmz,dimmx)] + rho[IDX(z+1,x ,y+1,dimmz,dimmx)] + rho[IDX(z+1,x+1,y+1,dimmz,dimmx)]) ); }; inline real rho_TL ( const real* restrict rho, const integer z, const integer x, const integer y, const integer dimmz, const integer dimmx) { return (2.0f / (rho[IDX(z,x,y,dimmz,dimmx)] + rho[IDX(z,x,y+1,dimmz,dimmx)])); }; void compute_component_vcell_TL ( real* restrict vptr, const real* restrict szptr, const real* restrict sxptr, const real* restrict syptr, const real* restrict rho, const real dt, const real dzi, const real dxi, const real dyi, const integer nz0, const integer nzf, const integer nx0, const integer nxf, const integer ny0, const integer nyf, const offset_t _SZ, const offset_t _SX, const offset_t _SY, const integer dimmz, const integer dimmx, const phase_t phase) { #if !defined(USE_CUDA) #if defined(_OPENACC) const integer start = ((nzf-nz0) + 2*HALO) * ((nxf-nx0) + 2*HALO) * (ny0 - HALO); const integer end = ((nzf-nz0) + 2*HALO) * ((nxf-nx0) + 2*HALO) * (nyf + HALO); const integer nelems = end - start; #pragma acc kernels copyin(szptr[start:nelems], sxptr[start:nelems], syptr[start:nelems], rho[start:nelems]) \ copy(vptr[start:nelems]) \ async(phase) wait(H2D) #pragma acc loop independent #elif 
defined(_OPENMP) #pragma omp parallel for #endif /* end _OPENACC */ for(integer y=ny0; y < nyf; y++) { #if defined(_OPENACC) #pragma acc loop independent device_type(nvidia) gang worker(4) #endif for(integer x=nx0; x < nxf; x++) { #if defined(_OPENACC) #pragma acc loop independent device_type(nvidia) gang vector(32) #elif defined(__INTEL_COMPILER) #pragma simd #endif for(integer z=nz0; z < nzf; z++) { const real lrho = rho_TL(rho, z, x, y, dimmz, dimmx); const real stx = stencil_X( _SX, sxptr, dxi, z, x, y, dimmz, dimmx); const real sty = stencil_Y( _SY, syptr, dyi, z, x, y, dimmz, dimmx); const real stz = stencil_Z( _SZ, szptr, dzi, z, x, y, dimmz, dimmx); vptr[IDX(z,x,y,dimmz,dimmx)] += (stx + sty + stz) * dt * lrho; } } } #else /* CUDA KERNELS ENABLED */ void* stream = acc_get_cuda_stream(phase); #pragma acc host_data use_device(szptr, sxptr, syptr, rho, vptr) { compute_component_vcell_TL_cuda(vptr, szptr, sxptr, syptr, rho, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, _SZ, _SX, _SY, dimmz, dimmx, stream); } #endif /* end USE_CUDA */ }; void compute_component_vcell_TR ( real* restrict vptr, const real* restrict szptr, const real* restrict sxptr, const real* restrict syptr, const real* restrict rho, const real dt, const real dzi, const real dxi, const real dyi, const integer nz0, const integer nzf, const integer nx0, const integer nxf, const integer ny0, const integer nyf, const offset_t _SZ, const offset_t _SX, const offset_t _SY, const integer dimmz, const integer dimmx, const phase_t phase) { #if !defined(USE_CUDA) #if defined(_OPENACC) const integer start = ((nzf-nz0) + 2*HALO) * ((nxf-nx0) + 2*HALO) * (ny0 - HALO); const integer end = ((nzf-nz0) + 2*HALO) * ((nxf-nx0) + 2*HALO) * (nyf + HALO); const integer nelems = end - start; #pragma acc kernels copyin(szptr[start:nelems], sxptr[start:nelems], syptr[start:nelems], rho[start:nelems]) \ copy(vptr[start:nelems]) \ async(phase) wait(H2D) #pragma acc loop independent #elif defined(_OPENMP) #pragma omp 
parallel for
#endif /* end pragma _OPENACC */
    for(integer y=ny0; y < nyf; y++)
    {
#if defined(_OPENACC)
        #pragma acc loop independent device_type(nvidia) gang worker(4)
#endif
        for(integer x=nx0; x < nxf; x++)
        {
#if defined(_OPENACC)
            #pragma acc loop independent device_type(nvidia) gang vector(32)
#elif defined(__INTEL_COMPILER)
            #pragma simd
#endif
            /* z is the fastest-varying index in IDX(z,x,y,dimmz,dimmx). */
            for(integer z=nz0; z < nzf; z++)
            {
                const real lrho = rho_TR(rho, z, x, y, dimmz, dimmx);

                const real stx  = stencil_X( _SX, sxptr, dxi, z, x, y, dimmz, dimmx);
                const real sty  = stencil_Y( _SY, syptr, dyi, z, x, y, dimmz, dimmx);
                const real stz  = stencil_Z( _SZ, szptr, dzi, z, x, y, dimmz, dimmx);

                /* Explicit time step: v += dt * lrho * (sum of stress derivatives). */
                vptr[IDX(z,x,y,dimmz,dimmx)] += (stx + sty + stz) * dt * lrho;
            }
        }
    }
#else /* CUDA KERNELS ENABLED */
    /* Delegate to the native CUDA kernel on the ACC stream mapped to this phase. */
    void* stream = acc_get_cuda_stream(phase);

    #pragma acc host_data use_device(szptr, sxptr, syptr, rho, vptr)
    {
        compute_component_vcell_TR_cuda(vptr, szptr, sxptr, syptr, rho, dt, dzi, dxi, dyi,
                                        nz0, nzf, nx0, nxf, ny0, nyf,
                                        _SZ, _SX, _SY, dimmz, dimmx, stream);
    }
#endif /* end USE_CUDA */
};

/*
 * Updates the BR (bottom-right) staggered-grid velocity component:
 *   vptr += dt * lrho * (d/dx sxptr + d/dy syptr + d/dz szptr)
 * where lrho is the BR-averaged inverse density and _SZ/_SX/_SY select
 * forward/backward stencil offsets for each axis.
 */
void compute_component_vcell_BR ( real* restrict vptr,
                            const real* restrict szptr,
                            const real* restrict sxptr,
                            const real* restrict syptr,
                            const real* restrict rho,
                            const real     dt,
                            const real     dzi,
                            const real     dxi,
                            const real     dyi,
                            const integer  nz0, const integer nzf,
                            const integer  nx0, const integer nxf,
                            const integer  ny0, const integer nyf,
                            const offset_t _SZ,
                            const offset_t _SX,
                            const offset_t _SY,
                            const integer  dimmz,
                            const integer  dimmx,
                            const phase_t  phase)
{
#if !defined(USE_CUDA)
#if defined(_OPENACC)
    /* Linear element range, including the HALO planes in y, touched by this phase. */
    const integer start  = ((nzf-nz0) + 2*HALO) * ((nxf-nx0) + 2*HALO) * (ny0 - HALO);
    const integer end    = ((nzf-nz0) + 2*HALO) * ((nxf-nx0) + 2*HALO) * (nyf + HALO);
    const integer nelems = end - start;

    #pragma acc kernels copyin(szptr[start:nelems], sxptr[start:nelems], syptr[start:nelems], rho[start:nelems]) \
                        copy(vptr[start:nelems]) \
                        async(phase) wait(H2D)
    #pragma acc loop independent
#elif defined(_OPENMP)
    #pragma omp parallel for
#endif /* end pragma _OPENACC */
    for(integer y=ny0; y < nyf; y++)
    {
#if defined(_OPENACC)
        #pragma acc loop independent device_type(nvidia) gang worker(4)
#endif
        for(integer x=nx0; x < nxf; x++)
        {
#if defined(_OPENACC)
            #pragma acc loop independent device_type(nvidia) gang vector(32)
#elif defined(__INTEL_COMPILER)
            #pragma simd
#endif
            for(integer z=nz0; z < nzf; z++)
            {
                const real lrho = rho_BR(rho, z, x, y, dimmz, dimmx);

                const real stx  = stencil_X( _SX, sxptr, dxi, z, x, y, dimmz, dimmx );
                const real sty  = stencil_Y( _SY, syptr, dyi, z, x, y, dimmz, dimmx );
                const real stz  = stencil_Z( _SZ, szptr, dzi, z, x, y, dimmz, dimmx );

                vptr[IDX(z,x,y,dimmz,dimmx)] += (stx + sty + stz) * dt * lrho;
            }
        }
    }
#else /* CUDA KERNELS ENABLED */
    void* stream = acc_get_cuda_stream(phase);

    #pragma acc host_data use_device(szptr, sxptr, syptr, rho, vptr)
    {
        compute_component_vcell_BR_cuda(vptr, szptr, sxptr, syptr, rho, dt, dzi, dxi, dyi,
                                        nz0, nzf, nx0, nxf, ny0, nyf,
                                        _SZ, _SX, _SY, dimmz, dimmx, stream);
    }
#endif /* end USE_CUDA */
};

/*
 * Updates the BL (bottom-left) staggered-grid velocity component; identical
 * structure to vcell_BR but with the BL density average (rho_BL).
 */
void compute_component_vcell_BL ( real* restrict vptr,
                            const real* restrict szptr,
                            const real* restrict sxptr,
                            const real* restrict syptr,
                            const real* restrict rho,
                            const real     dt,
                            const real     dzi,
                            const real     dxi,
                            const real     dyi,
                            const integer  nz0, const integer nzf,
                            const integer  nx0, const integer nxf,
                            const integer  ny0, const integer nyf,
                            const offset_t _SZ,
                            const offset_t _SX,
                            const offset_t _SY,
                            const integer  dimmz,
                            const integer  dimmx,
                            const phase_t  phase)
{
#if !defined(USE_CUDA)
#if defined(_OPENACC)
    /* Linear element range, including the HALO planes in y, touched by this phase. */
    const integer start  = ((nzf-nz0) + 2*HALO) * ((nxf-nx0) + 2*HALO) * (ny0 - HALO);
    const integer end    = ((nzf-nz0) + 2*HALO) * ((nxf-nx0) + 2*HALO) * (nyf + HALO);
    const integer nelems = end - start;

    #pragma acc kernels copyin(szptr[start:nelems], sxptr[start:nelems], syptr[start:nelems], rho[start:nelems]) \
                        copy(vptr[start:nelems]) \
                        async(phase) wait(H2D)
    #pragma acc loop independent
#elif defined(_OPENMP)
    #pragma omp parallel for
#endif /* end pragma _OPENACC */
    for(integer y=ny0; y < nyf; y++)
    {
#if defined(_OPENACC)
        #pragma acc loop independent device_type(nvidia) gang worker(4)
#endif
        for(integer x=nx0; x < nxf; x++)
        {
#if defined(_OPENACC)
            #pragma acc loop independent device_type(nvidia) gang vector(32)
#elif defined(__INTEL_COMPILER)
            #pragma simd
#endif
            for(integer z=nz0; z < nzf; z++)
            {
                const real lrho = rho_BL(rho, z, x, y, dimmz, dimmx);

                const real stx  = stencil_X( _SX, sxptr, dxi, z, x, y, dimmz, dimmx);
                const real sty  = stencil_Y( _SY, syptr, dyi, z, x, y, dimmz, dimmx);
                const real stz  = stencil_Z( _SZ, szptr, dzi, z, x, y, dimmz, dimmx);

                vptr[IDX(z,x,y,dimmz,dimmx)] += (stx + sty + stz) * dt * lrho;
            }
        }
    }
#else /* CUDA KERNELS ENABLED */
    void* stream = acc_get_cuda_stream(phase);

    #pragma acc host_data use_device(szptr, sxptr, syptr, rho, vptr)
    {
        compute_component_vcell_BL_cuda(vptr, szptr, sxptr, syptr, rho, dt, dzi, dxi, dyi,
                                        nz0, nzf, nx0, nxf, ny0, nyf,
                                        _SZ, _SX, _SY, dimmz, dimmx, stream);
    }
#endif /* end USE_CUDA */
};

/*
 * Advances all twelve velocity components (w, u, v for each of the four
 * staggered sub-cells TL/TR/BL/BR) by one time step, dispatching to the
 * per-component kernels with the appropriate stress planes and offsets.
 */
void velocity_propagator(v_t           v,
                         s_t           s,
                         coeff_t       coeffs,
                         real*         rho,
                         const real    dt,
                         const real    dzi,
                         const real    dxi,
                         const real    dyi,
                         const integer nz0, const integer nzf,
                         const integer nx0, const integer nxf,
                         const integer ny0, const integer nyf,
                         const integer dimmz,
                         const integer dimmx,
                         const phase_t phase)
{
#if defined(DEBUG)
    fprintf(stderr, "Integration limits of %s are (z "I"-"I",x "I"-"I",y "I"-"I")\n", __FUNCTION__, nz0,nzf,nx0,nxf,ny0,nyf);
#endif

#if defined(__INTEL_COMPILER)
    #pragma forceinline recursive
#endif
    {
        /* W-component updates for the four sub-cells. */
        compute_component_vcell_TL (v.tl.w, s.bl.zz, s.tr.xz, s.tl.yz, rho, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, back_offset, back_offset, forw_offset, dimmz, dimmx, phase);
        compute_component_vcell_TR (v.tr.w, s.br.zz, s.tl.xz, s.tr.yz, rho, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, back_offset, forw_offset, back_offset, dimmz, dimmx, phase);
        compute_component_vcell_BL (v.bl.w, s.tl.zz, s.br.xz, s.bl.yz, rho, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf,
forw_offset, back_offset, back_offset, dimmz, dimmx, phase);
        compute_component_vcell_BR (v.br.w, s.tr.zz, s.bl.xz, s.br.yz, rho, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, forw_offset, forw_offset, forw_offset, dimmz, dimmx, phase);

        /* U-component updates for the four sub-cells. */
        compute_component_vcell_TL (v.tl.u, s.bl.xz, s.tr.xx, s.tl.xy, rho, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, back_offset, back_offset, forw_offset, dimmz, dimmx, phase);
        compute_component_vcell_TR (v.tr.u, s.br.xz, s.tl.xx, s.tr.xy, rho, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, back_offset, forw_offset, back_offset, dimmz, dimmx, phase);
        compute_component_vcell_BL (v.bl.u, s.tl.xz, s.br.xx, s.bl.xy, rho, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, forw_offset, back_offset, back_offset, dimmz, dimmx, phase);
        compute_component_vcell_BR (v.br.u, s.tr.xz, s.bl.xx, s.br.xy, rho, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, forw_offset, forw_offset, forw_offset, dimmz, dimmx, phase);

        /* V-component updates for the four sub-cells. */
        compute_component_vcell_TL (v.tl.v, s.bl.yz, s.tr.xy, s.tl.yy, rho, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, back_offset, back_offset, forw_offset, dimmz, dimmx, phase);
        compute_component_vcell_TR (v.tr.v, s.br.yz, s.tl.xy, s.tr.yy, rho, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, back_offset, forw_offset, back_offset, dimmz, dimmx, phase);
        compute_component_vcell_BL (v.bl.v, s.tl.yz, s.br.xy, s.bl.yy, rho, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, forw_offset, back_offset, back_offset, dimmz, dimmx, phase);
        compute_component_vcell_BR (v.br.v, s.tr.yz, s.bl.xy, s.br.yy, rho, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, forw_offset, forw_offset, forw_offset, dimmz, dimmx, phase);
    }
};

/* ------------------------------------------------------------------------------ */
/*                                                                                */
/*                            STRESS COMPUTATION                                  */
/*                                                                                */
/* ------------------------------------------------------------------------------ */

/*
 * Accumulates one stress component at (z,x,y):
 *   sptr += dt * (c1*u_x + c2*v_y + c3*w_z + c4*(w_y+v_z) + c5*(w_x+u_z) + c6*(v_x+u_y))
 * c1..c6 are the (already cell-averaged) stiffness coefficients for this row
 * of the constitutive tensor; u_*, v_*, w_* are the velocity derivatives.
 */
void stress_update(real* restrict sptr,
                   const real     c1,
                   const real     c2,
                   const real     c3,
                   const real     c4,
                   const real     c5,
                   const real     c6,
                   const integer  z,
                   const integer  x,
                   const integer  y,
                   const real     dt,
                   const real     u_x,
                   const real     u_y,
                   const real     u_z,
                   const real     v_x,
                   const real     v_y,
                   const real     v_z,
                   const real     w_x,
                   const real     w_y,
                   const real     w_z,
                   const integer  dimmz,
                   const integer  dimmx)
{
    real accum  = dt * c1 * u_x;
    accum      += dt * c2 * v_y;
    accum      += dt * c3 * w_z;
    accum      += dt * c4 * (w_y + v_z);
    accum      += dt * c5 * (w_x + u_z);
    accum      += dt * c6 * (v_x + u_y);
    sptr[IDX(z,x,y,dimmz,dimmx)] += accum;
};

/*
 * Advances all six stress components of the four staggered sub-cells by one
 * time step, dispatching to the per-sub-cell kernels with the matching
 * velocity nodes and stencil offsets.
 */
void stress_propagator(s_t           s,
                       v_t           v,
                       coeff_t       coeffs,
                       real*         rho,
                       const real    dt,
                       const real    dzi,
                       const real    dxi,
                       const real    dyi,
                       const integer nz0, const integer nzf,
                       const integer nx0, const integer nxf,
                       const integer ny0, const integer nyf,
                       const integer dimmz,
                       const integer dimmx,
                       const phase_t phase )
{
#if defined(DEBUG)
    fprintf(stderr, "Integration limits of %s are (z "I"-"I",x "I"-"I",y "I"-"I")\n", __FUNCTION__, nz0,nzf,nx0,nxf,ny0,nyf);
#endif

#if defined(__INTEL_COMPILER)
    #pragma forceinline recursive
#endif
    {
        compute_component_scell_BR ( s, v.tr, v.bl, v.br, coeffs, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, forw_offset, back_offset, back_offset, dimmz, dimmx, phase);
        compute_component_scell_BL ( s, v.tl, v.br, v.bl, coeffs, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, forw_offset, back_offset, forw_offset, dimmz, dimmx, phase);
        compute_component_scell_TR ( s, v.br, v.tl, v.tr, coeffs, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, back_offset, forw_offset, forw_offset, dimmz, dimmx, phase);
        compute_component_scell_TL ( s, v.bl, v.tr, v.tl, coeffs, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, back_offset, back_offset, back_offset, dimmz, dimmx, phase);
    }
};

/* Reciprocal of a scaled sum of the 4 cells straddled in z/x.
   NOTE(review): the 2.5f factor is used consistently by BR/BL/TR; confirm it is
   the intended scaling (a plain 4-point average would use 0.25f). */
real cell_coeff_BR ( const real* restrict ptr, const integer z, const integer x, const integer y, const integer dimmz, const integer dimmx)
{
    return ( 1.0f / ( 2.5f *(ptr[IDX(z  , x  ,y,dimmz,dimmx)] +
                             ptr[IDX(z  , x+1,y,dimmz,dimmx)] +
                             ptr[IDX(z+1, x  ,y,dimmz,dimmx)] +
                             ptr[IDX(z+1, x+1,y,dimmz,dimmx)])) );
};

/* TL coefficient: plain reciprocal of the single co-located cell value. */
real
cell_coeff_TL ( const real* restrict ptr, const integer z, const integer x, const integer y, const integer dimmz, const integer dimmx)
{
    return ( 1.0f / (ptr[IDX(z,x,y,dimmz,dimmx)]));
};

/* Same scaled-sum reciprocal as BR but averaging across z/y. */
real cell_coeff_BL ( const real* restrict ptr, const integer z, const integer x, const integer y, const integer dimmz, const integer dimmx)
{
    return ( 1.0f / ( 2.5f *(ptr[IDX(z  ,x,y  ,dimmz,dimmx)] +
                             ptr[IDX(z  ,x,y+1,dimmz,dimmx)] +
                             ptr[IDX(z+1,x,y  ,dimmz,dimmx)] +
                             ptr[IDX(z+1,x,y+1,dimmz,dimmx)])) );
};

/* Same scaled-sum reciprocal as BR but averaging across x/y. */
real cell_coeff_TR ( const real* restrict ptr, const integer z, const integer x, const integer y, const integer dimmz, const integer dimmx)
{
    return ( 1.0f / ( 2.5f *(ptr[IDX(z  , x  , y  ,dimmz,dimmx)] +
                             ptr[IDX(z  , x+1, y  ,dimmz,dimmx)] +
                             ptr[IDX(z  , x  , y+1,dimmz,dimmx)] +
                             ptr[IDX(z  , x+1, y+1,dimmz,dimmx)])));
};

/* Arithmetic mean of the reciprocals of the 4 cells straddled in z/x. */
real cell_coeff_ARTM_BR( const real* restrict ptr, const integer z, const integer x, const integer y, const integer dimmz, const integer dimmx)
{
    return ((1.0f / ptr[IDX(z  ,x  ,y,dimmz,dimmx )]  +
             1.0f / ptr[IDX(z  ,x+1,y,dimmz,dimmx )]  +
             1.0f / ptr[IDX(z+1,x  ,y,dimmz,dimmx )]  +
             1.0f / ptr[IDX(z+1,x+1,y,dimmz,dimmx )]) * 0.25f);
};

/* ARTM TL coefficient: reciprocal of the single co-located cell value. */
real cell_coeff_ARTM_TL( const real* restrict ptr, const integer z, const integer x, const integer y, const integer dimmz, const integer dimmx)
{
    return (1.0f / ptr[IDX(z,x,y,dimmz,dimmx)]);
};

/* Arithmetic mean of the reciprocals of the 4 cells straddled in z/y. */
real cell_coeff_ARTM_BL( const real* restrict ptr, const integer z, const integer x, const integer y, const integer dimmz, const integer dimmx)
{
    return ((1.0f / ptr[IDX(z  ,x,y  ,dimmz,dimmx)]  +
             1.0f / ptr[IDX(z  ,x,y+1,dimmz,dimmx)]  +
             1.0f / ptr[IDX(z+1,x,y  ,dimmz,dimmx)]  +
             1.0f / ptr[IDX(z+1,x,y+1,dimmz,dimmx)]) * 0.25f);
};

/* Arithmetic mean of the reciprocals of the 4 cells straddled in x/y. */
real cell_coeff_ARTM_TR( const real* restrict ptr, const integer z, const integer x, const integer y, const integer dimmz, const integer dimmx)
{
    return ((1.0f / ptr[IDX(z,x  ,y  ,dimmz,dimmx)]  +
             1.0f / ptr[IDX(z,x+1,y  ,dimmz,dimmx)]  +
             1.0f / ptr[IDX(z,x  ,y+1,dimmz,dimmx)]  +
             1.0f / ptr[IDX(z,x+1,y+1,dimmz,dimmx)]) * 0.25f);
};

void
/*
 * Advances the six TR (top-right) stress components using the anisotropic
 * constitutive relation: for each grid point the 21 stiffness coefficients
 * are averaged with the TR / ARTM_TR schemes, the nine velocity derivatives
 * are computed with directional stencils, and each stress plane is updated
 * through stress_update().
 */
compute_component_scell_TR (s_t            s,
                            point_v_t      vnode_z,
                            point_v_t      vnode_x,
                            point_v_t      vnode_y,
                            coeff_t        coeffs,
                            const real     dt,
                            const real     dzi,
                            const real     dxi,
                            const real     dyi,
                            const integer  nz0, const integer nzf,
                            const integer  nx0, const integer nxf,
                            const integer  ny0, const integer nyf,
                            const offset_t _SZ,
                            const offset_t _SX,
                            const offset_t _SY,
                            const integer  dimmz,
                            const integer  dimmx,
                            const phase_t  phase)
{
    /* Output stress planes of the TR sub-cell. */
    real* restrict sxxptr __attribute__ ((aligned (64))) = s.tr.xx;
    real* restrict syyptr __attribute__ ((aligned (64))) = s.tr.yy;
    real* restrict szzptr __attribute__ ((aligned (64))) = s.tr.zz;
    real* restrict syzptr __attribute__ ((aligned (64))) = s.tr.yz;
    real* restrict sxzptr __attribute__ ((aligned (64))) = s.tr.xz;
    real* restrict sxyptr __attribute__ ((aligned (64))) = s.tr.xy;

    /* Velocity nodes feeding the x/y/z derivative stencils. */
    const real* restrict vxu __attribute__ ((aligned (64))) = vnode_x.u;
    const real* restrict vxv __attribute__ ((aligned (64))) = vnode_x.v;
    const real* restrict vxw __attribute__ ((aligned (64))) = vnode_x.w;
    const real* restrict vyu __attribute__ ((aligned (64))) = vnode_y.u;
    const real* restrict vyv __attribute__ ((aligned (64))) = vnode_y.v;
    const real* restrict vyw __attribute__ ((aligned (64))) = vnode_y.w;
    const real* restrict vzu __attribute__ ((aligned (64))) = vnode_z.u;
    const real* restrict vzv __attribute__ ((aligned (64))) = vnode_z.v;
    const real* restrict vzw __attribute__ ((aligned (64))) = vnode_z.w;

    /* Stiffness-tensor coefficient planes (upper triangle of the 6x6 matrix). */
    const real* restrict cc11 = coeffs.c11;
    const real* restrict cc12 = coeffs.c12;
    const real* restrict cc13 = coeffs.c13;
    const real* restrict cc14 = coeffs.c14;
    const real* restrict cc15 = coeffs.c15;
    const real* restrict cc16 = coeffs.c16;
    const real* restrict cc22 = coeffs.c22;
    const real* restrict cc23 = coeffs.c23;
    const real* restrict cc24 = coeffs.c24;
    const real* restrict cc25 = coeffs.c25;
    const real* restrict cc26 = coeffs.c26;
    const real* restrict cc33 = coeffs.c33;
    const real* restrict cc34 = coeffs.c34;
    const real* restrict cc35 = coeffs.c35;
    const real* restrict cc36 = coeffs.c36;
    const real* restrict cc44 = coeffs.c44;
    const real* restrict cc45 = coeffs.c45;
    const real* restrict cc46 = coeffs.c46;
    const real* restrict cc55 = coeffs.c55;
    const real* restrict cc56 = coeffs.c56;
    const real* restrict cc66 = coeffs.c66;

#if !defined(USE_CUDA)
#if defined(_OPENACC)
    /* Linear element range, including the HALO planes in y, touched by this phase. */
    const integer start  = ((nzf-nz0) + 2*HALO) * ((nxf-nx0) + 2*HALO) * (ny0 - HALO);
    const integer end    = ((nzf-nz0) + 2*HALO) * ((nxf-nx0) + 2*HALO) * (nyf + HALO);
    const integer nelems = end - start;

    #pragma acc kernels copy(sxxptr[start:nelems], syyptr[start:nelems], szzptr[start:nelems], syzptr[start:nelems], sxzptr[start:nelems], sxyptr[start:nelems]) \
                        copyin(vxu[start:nelems], vxv[start:nelems], vxw[start:nelems]) \
                        copyin(vyu[start:nelems], vyv[start:nelems], vyw[start:nelems]) \
                        copyin(vzu[start:nelems], vzv[start:nelems], vzw[start:nelems]) \
                        copyin(cc11[start:nelems], cc12[start:nelems], cc13[start:nelems], cc14[start:nelems], cc15[start:nelems], cc16[start:nelems]) \
                        copyin(cc22[start:nelems], cc23[start:nelems], cc24[start:nelems], cc25[start:nelems], cc26[start:nelems]) \
                        copyin(cc33[start:nelems], cc34[start:nelems], cc35[start:nelems], cc36[start:nelems]) \
                        copyin(cc44[start:nelems], cc45[start:nelems], cc46[start:nelems]) \
                        copyin(cc55[start:nelems], cc56[start:nelems]) \
                        copyin(cc66[start:nelems]) \
                        async(phase)
    #pragma acc loop independent
#elif defined(_OPENMP)
    #pragma omp parallel for
#endif /* end pragma _OPENACC */
    for (integer y = ny0; y < nyf; y++)
    {
#if defined(_OPENACC)
        #pragma acc loop independent device_type(nvidia) gang worker(4)
#endif
        for (integer x = nx0; x < nxf; x++)
        {
#if defined(_OPENACC)
            #pragma acc loop independent device_type(nvidia) gang vector(32)
#elif defined(__INTEL_COMPILER)
            #pragma simd
#endif
            for (integer z = nz0; z < nzf; z++ )
            {
                /* Diagonal/shear coefficients use the TR average, the
                   off-diagonal couplings the arithmetic-mean (ARTM) average. */
                const real c11 = cell_coeff_TR      (cc11, z, x, y, dimmz, dimmx);
                const real c12 = cell_coeff_TR      (cc12, z, x, y, dimmz, dimmx);
                const real c13 = cell_coeff_TR      (cc13, z, x, y, dimmz, dimmx);
                const real c14 = cell_coeff_ARTM_TR (cc14, z, x, y, dimmz, dimmx);
                const real c15 = cell_coeff_ARTM_TR (cc15, z, x, y, dimmz, dimmx);
                const real c16 = cell_coeff_ARTM_TR (cc16, z, x, y, dimmz, dimmx);
                const real c22 = cell_coeff_TR      (cc22, z, x, y, dimmz, dimmx);
                const real c23 = cell_coeff_TR      (cc23, z, x, y, dimmz, dimmx);
                const real c24 = cell_coeff_ARTM_TR (cc24, z, x, y, dimmz, dimmx);
                const real c25 = cell_coeff_ARTM_TR (cc25, z, x, y, dimmz, dimmx);
                const real c26 = cell_coeff_ARTM_TR (cc26, z, x, y, dimmz, dimmx);
                const real c33 = cell_coeff_TR      (cc33, z, x, y, dimmz, dimmx);
                const real c34 = cell_coeff_ARTM_TR (cc34, z, x, y, dimmz, dimmx);
                const real c35 = cell_coeff_ARTM_TR (cc35, z, x, y, dimmz, dimmx);
                const real c36 = cell_coeff_ARTM_TR (cc36, z, x, y, dimmz, dimmx);
                const real c44 = cell_coeff_TR      (cc44, z, x, y, dimmz, dimmx);
                const real c45 = cell_coeff_ARTM_TR (cc45, z, x, y, dimmz, dimmx);
                const real c46 = cell_coeff_ARTM_TR (cc46, z, x, y, dimmz, dimmx);
                const real c55 = cell_coeff_TR      (cc55, z, x, y, dimmz, dimmx);
                const real c56 = cell_coeff_ARTM_TR (cc56, z, x, y, dimmz, dimmx);
                const real c66 = cell_coeff_TR      (cc66, z, x, y, dimmz, dimmx);

                /* The nine velocity derivatives at this point. */
                const real u_x = stencil_X (_SX, vxu, dxi, z, x, y, dimmz, dimmx);
                const real v_x = stencil_X (_SX, vxv, dxi, z, x, y, dimmz, dimmx);
                const real w_x = stencil_X (_SX, vxw, dxi, z, x, y, dimmz, dimmx);
                const real u_y = stencil_Y (_SY, vyu, dyi, z, x, y, dimmz, dimmx);
                const real v_y = stencil_Y (_SY, vyv, dyi, z, x, y, dimmz, dimmx);
                const real w_y = stencil_Y (_SY, vyw, dyi, z, x, y, dimmz, dimmx);
                const real u_z = stencil_Z (_SZ, vzu, dzi, z, x, y, dimmz, dimmx);
                const real v_z = stencil_Z (_SZ, vzv, dzi, z, x, y, dimmz, dimmx);
                const real w_z = stencil_Z (_SZ, vzw, dzi, z, x, y, dimmz, dimmx);

                /* One row of the stiffness matrix per stress component. */
                stress_update (sxxptr,c11,c12,c13,c14,c15,c16,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx );
                stress_update (syyptr,c12,c22,c23,c24,c25,c26,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx );
                stress_update
(szzptr,c13,c23,c33,c34,c35,c36,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx ); stress_update (syzptr,c14,c24,c34,c44,c45,c46,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx ); stress_update (sxzptr,c15,c25,c35,c45,c55,c56,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx ); stress_update (sxyptr,c16,c26,c36,c46,c56,c66,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx ); } } } #else /* CUDA KERNELS ENABLED */ void* stream = acc_get_cuda_stream(phase); #pragma acc host_data use_device(sxxptr, syyptr, szzptr, syzptr, sxzptr, sxyptr, vxu, vxv, vxw, vyu, vyv, vyw, vzu, vzv, vzw, cc11, cc12, cc13, cc14, cc15, cc16, cc22, cc23, cc24, cc25, cc26, cc33, cc34, cc35, cc36, cc44, cc45, cc46, cc55, cc56, cc66) { compute_component_scell_TR_cuda( sxxptr, syyptr, szzptr, syzptr, sxzptr, sxyptr, vxu, vxv, vxw, vyu, vyv, vyw, vzu, vzv, vzw, cc11, cc12, cc13, cc14, cc15, cc16, cc22, cc23, cc24, cc25, cc26, cc33, cc34, cc35, cc36, cc44, cc45, cc46, cc55, cc56, cc66, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, _SZ, _SX, _SY, dimmz, dimmx, stream); } #endif /* end USE_CUDA */ }; void compute_component_scell_TL (s_t s, point_v_t vnode_z, point_v_t vnode_x, point_v_t vnode_y, coeff_t coeffs, const real dt, const real dzi, const real dxi, const real dyi, const integer nz0, const integer nzf, const integer nx0, const integer nxf, const integer ny0, const integer nyf, const offset_t _SZ, const offset_t _SX, const offset_t _SY, const integer dimmz, const integer dimmx, const phase_t phase) { real* restrict sxxptr __attribute__ ((aligned (64))) = s.tl.xx; real* restrict syyptr __attribute__ ((aligned (64))) = s.tl.yy; real* restrict szzptr __attribute__ ((aligned (64))) = s.tl.zz; real* restrict syzptr __attribute__ ((aligned (64))) = s.tl.yz; real* restrict sxzptr __attribute__ ((aligned (64))) = s.tl.xz; real* restrict sxyptr __attribute__ ((aligned (64))) = s.tl.xy; const real* restrict vxu __attribute__ ((aligned (64))) = vnode_x.u; const real* 
restrict vxv __attribute__ ((aligned (64))) = vnode_x.v; const real* restrict vxw __attribute__ ((aligned (64))) = vnode_x.w; const real* restrict vyu __attribute__ ((aligned (64))) = vnode_y.u; const real* restrict vyv __attribute__ ((aligned (64))) = vnode_y.v; const real* restrict vyw __attribute__ ((aligned (64))) = vnode_y.w; const real* restrict vzu __attribute__ ((aligned (64))) = vnode_z.u; const real* restrict vzv __attribute__ ((aligned (64))) = vnode_z.v; const real* restrict vzw __attribute__ ((aligned (64))) = vnode_z.w; const real* restrict cc11 = coeffs.c11; const real* restrict cc12 = coeffs.c12; const real* restrict cc13 = coeffs.c13; const real* restrict cc14 = coeffs.c14; const real* restrict cc15 = coeffs.c15; const real* restrict cc16 = coeffs.c16; const real* restrict cc22 = coeffs.c22; const real* restrict cc23 = coeffs.c23; const real* restrict cc24 = coeffs.c24; const real* restrict cc25 = coeffs.c25; const real* restrict cc26 = coeffs.c26; const real* restrict cc33 = coeffs.c33; const real* restrict cc34 = coeffs.c34; const real* restrict cc35 = coeffs.c35; const real* restrict cc36 = coeffs.c36; const real* restrict cc44 = coeffs.c44; const real* restrict cc45 = coeffs.c45; const real* restrict cc46 = coeffs.c46; const real* restrict cc55 = coeffs.c55; const real* restrict cc56 = coeffs.c56; const real* restrict cc66 = coeffs.c66; #if !defined(USE_CUDA) #if defined(_OPENACC) const integer start = ((nzf-nz0) + 2*HALO) * ((nxf-nx0) + 2*HALO) * (ny0 - HALO); const integer end = ((nzf-nz0) + 2*HALO) * ((nxf-nx0) + 2*HALO) * (nyf + HALO); const integer nelems = end - start; #pragma acc kernels copy(sxxptr[start:nelems], syyptr[start:nelems], szzptr[start:nelems], syzptr[start:nelems], sxzptr[start:nelems], sxyptr[start:nelems]) \ copyin(vxu[start:nelems], vxv[start:nelems], vxw[start:nelems]) \ copyin(vyu[start:nelems], vyv[start:nelems], vyw[start:nelems]) \ copyin(vzu[start:nelems], vzv[start:nelems], vzw[start:nelems]) \ 
copyin(cc11[start:nelems], cc12[start:nelems], cc13[start:nelems], cc14[start:nelems], cc15[start:nelems], cc16[start:nelems]) \ copyin(cc22[start:nelems], cc23[start:nelems], cc24[start:nelems], cc25[start:nelems], cc26[start:nelems]) \ copyin(cc33[start:nelems], cc34[start:nelems], cc35[start:nelems], cc36[start:nelems]) \ copyin(cc44[start:nelems], cc45[start:nelems], cc46[start:nelems]) \ copyin(cc55[start:nelems], cc56[start:nelems]) \ copyin(cc66[start:nelems]) \ async(phase) #pragma acc loop independent #elif defined(_OPENMP) #pragma omp parallel for #endif /* end pragma _OPENACC */ for (integer y = ny0; y < nyf; y++) { #if defined(_OPENACC) #pragma acc loop independent device_type(nvidia) gang worker(4) #endif for (integer x = nx0; x < nxf; x++) { #if defined(_OPENACC) #pragma acc loop independent device_type(nvidia) gang vector(32) #elif defined(__INTEL__COMPILER) #pragma simd #endif for (integer z = nz0; z < nzf; z++ ) { const real c11 = cell_coeff_TL (cc11, z, x, y, dimmz, dimmx); const real c12 = cell_coeff_TL (cc12, z, x, y, dimmz, dimmx); const real c13 = cell_coeff_TL (cc13, z, x, y, dimmz, dimmx); const real c14 = cell_coeff_ARTM_TL (cc14, z, x, y, dimmz, dimmx); const real c15 = cell_coeff_ARTM_TL (cc15, z, x, y, dimmz, dimmx); const real c16 = cell_coeff_ARTM_TL (cc16, z, x, y, dimmz, dimmx); const real c22 = cell_coeff_TL (cc22, z, x, y, dimmz, dimmx); const real c23 = cell_coeff_TL (cc23, z, x, y, dimmz, dimmx); const real c24 = cell_coeff_ARTM_TL (cc24, z, x, y, dimmz, dimmx); const real c25 = cell_coeff_ARTM_TL (cc25, z, x, y, dimmz, dimmx); const real c26 = cell_coeff_ARTM_TL (cc26, z, x, y, dimmz, dimmx); const real c33 = cell_coeff_TL (cc33, z, x, y, dimmz, dimmx); const real c34 = cell_coeff_ARTM_TL (cc34, z, x, y, dimmz, dimmx); const real c35 = cell_coeff_ARTM_TL (cc35, z, x, y, dimmz, dimmx); const real c36 = cell_coeff_ARTM_TL (cc36, z, x, y, dimmz, dimmx); const real c44 = cell_coeff_TL (cc44, z, x, y, dimmz, dimmx); const real c45 = 
cell_coeff_ARTM_TL (cc45, z, x, y, dimmz, dimmx); const real c46 = cell_coeff_ARTM_TL (cc46, z, x, y, dimmz, dimmx); const real c55 = cell_coeff_TL (cc55, z, x, y, dimmz, dimmx); const real c56 = cell_coeff_ARTM_TL (cc56, z, x, y, dimmz, dimmx); const real c66 = cell_coeff_TL (cc66, z, x, y, dimmz, dimmx); const real u_x = stencil_X (_SX, vxu, dxi, z, x, y, dimmz, dimmx); const real v_x = stencil_X (_SX, vxv, dxi, z, x, y, dimmz, dimmx); const real w_x = stencil_X (_SX, vxw, dxi, z, x, y, dimmz, dimmx); const real u_y = stencil_Y (_SY, vyu, dyi, z, x, y, dimmz, dimmx); const real v_y = stencil_Y (_SY, vyv, dyi, z, x, y, dimmz, dimmx); const real w_y = stencil_Y (_SY, vyw, dyi, z, x, y, dimmz, dimmx); const real u_z = stencil_Z (_SZ, vzu, dzi, z, x, y, dimmz, dimmx); const real v_z = stencil_Z (_SZ, vzv, dzi, z, x, y, dimmz, dimmx); const real w_z = stencil_Z (_SZ, vzw, dzi, z, x, y, dimmz, dimmx); stress_update (sxxptr,c11,c12,c13,c14,c15,c16,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx ); stress_update (syyptr,c12,c22,c23,c24,c25,c26,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx ); stress_update (szzptr,c13,c23,c33,c34,c35,c36,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx ); stress_update (syzptr,c14,c24,c34,c44,c45,c46,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx ); stress_update (sxzptr,c15,c25,c35,c45,c55,c56,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx ); stress_update (sxyptr,c16,c26,c36,c46,c56,c66,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx ); } } } #else /* CUDA KERNELS ENABLED */ void* stream = acc_get_cuda_stream(phase); #pragma acc host_data use_device(sxxptr, syyptr, szzptr, syzptr, sxzptr, sxyptr, vxu, vxv, vxw, vyu, vyv, vyw, vzu, vzv, vzw, cc11, cc12, cc13, cc14, cc15, cc16, cc22, cc23, cc24, cc25, cc26, cc33, cc34, cc35, cc36, cc44, cc45, cc46, cc55, cc56, cc66) { compute_component_scell_TL_cuda( sxxptr, syyptr, szzptr, syzptr, sxzptr, sxyptr, vxu, vxv, vxw, vyu, vyv, vyw, vzu, 
vzv, vzw, cc11, cc12, cc13, cc14, cc15, cc16, cc22, cc23, cc24, cc25, cc26, cc33, cc34, cc35, cc36, cc44, cc45, cc46, cc55, cc56, cc66, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, _SZ, _SX, _SY, dimmz, dimmx, stream); } #endif /* end USE_CUDA */ }; void compute_component_scell_BR (s_t s, point_v_t vnode_z, point_v_t vnode_x, point_v_t vnode_y, coeff_t coeffs, const real dt, const real dzi, const real dxi, const real dyi, const integer nz0, const integer nzf, const integer nx0, const integer nxf, const integer ny0, const integer nyf, const offset_t _SZ, const offset_t _SX, const offset_t _SY, const integer dimmz, const integer dimmx, const phase_t phase) { real* restrict sxxptr __attribute__ ((aligned (64))) = s.br.xx; real* restrict syyptr __attribute__ ((aligned (64))) = s.br.yy; real* restrict szzptr __attribute__ ((aligned (64))) = s.br.zz; real* restrict syzptr __attribute__ ((aligned (64))) = s.br.yz; real* restrict sxzptr __attribute__ ((aligned (64))) = s.br.xz; real* restrict sxyptr __attribute__ ((aligned (64))) = s.br.xy; const real* restrict vxu __attribute__ ((aligned (64))) = vnode_x.u; const real* restrict vxv __attribute__ ((aligned (64))) = vnode_x.v; const real* restrict vxw __attribute__ ((aligned (64))) = vnode_x.w; const real* restrict vyu __attribute__ ((aligned (64))) = vnode_y.u; const real* restrict vyv __attribute__ ((aligned (64))) = vnode_y.v; const real* restrict vyw __attribute__ ((aligned (64))) = vnode_y.w; const real* restrict vzu __attribute__ ((aligned (64))) = vnode_z.u; const real* restrict vzv __attribute__ ((aligned (64))) = vnode_z.v; const real* restrict vzw __attribute__ ((aligned (64))) = vnode_z.w; const real* restrict cc11 = coeffs.c11; const real* restrict cc12 = coeffs.c12; const real* restrict cc13 = coeffs.c13; const real* restrict cc14 = coeffs.c14; const real* restrict cc15 = coeffs.c15; const real* restrict cc16 = coeffs.c16; const real* restrict cc22 = coeffs.c22; const real* restrict cc23 = coeffs.c23; const 
real* restrict cc24 = coeffs.c24; const real* restrict cc25 = coeffs.c25; const real* restrict cc26 = coeffs.c26; const real* restrict cc33 = coeffs.c33; const real* restrict cc34 = coeffs.c34; const real* restrict cc35 = coeffs.c35; const real* restrict cc36 = coeffs.c36; const real* restrict cc44 = coeffs.c44; const real* restrict cc45 = coeffs.c45; const real* restrict cc46 = coeffs.c46; const real* restrict cc55 = coeffs.c55; const real* restrict cc56 = coeffs.c56; const real* restrict cc66 = coeffs.c66; #if !defined(USE_CUDA) #if defined(_OPENACC) const integer start = ((nzf-nz0) + 2*HALO) * ((nxf-nx0) + 2*HALO) * (ny0 - HALO); const integer end = ((nzf-nz0) + 2*HALO) * ((nxf-nx0) + 2*HALO) * (nyf + HALO); const integer nelems = end - start; #pragma acc kernels copy(sxxptr[start:nelems], syyptr[start:nelems], szzptr[start:nelems], syzptr[start:nelems], sxzptr[start:nelems], sxyptr[start:nelems]) \ copyin(vxu[start:nelems], vxv[start:nelems], vxw[start:nelems]) \ copyin(vyu[start:nelems], vyv[start:nelems], vyw[start:nelems]) \ copyin(vzu[start:nelems], vzv[start:nelems], vzw[start:nelems]) \ copyin(cc11[start:nelems], cc12[start:nelems], cc13[start:nelems], cc14[start:nelems], cc15[start:nelems], cc16[start:nelems]) \ copyin(cc22[start:nelems], cc23[start:nelems], cc24[start:nelems], cc25[start:nelems], cc26[start:nelems]) \ copyin(cc33[start:nelems], cc34[start:nelems], cc35[start:nelems], cc36[start:nelems]) \ copyin(cc44[start:nelems], cc45[start:nelems], cc46[start:nelems]) \ copyin(cc55[start:nelems], cc56[start:nelems]) \ copyin(cc66[start:nelems]) \ async(phase) #pragma acc loop independent #elif defined(_OPENMP) #pragma omp parallel for #endif /* end pragma _OPENACC */ for (integer y = ny0; y < nyf; y++) { #if defined(_OPENACC) #pragma acc loop independent device_type(nvidia) gang worker(4) #endif for (integer x = nx0; x < nxf; x++) { #if defined(_OPENACC) #pragma acc loop independent device_type(nvidia) gang vector(32) #elif defined(__INTEL__COMPILER) 
#pragma simd #endif for (integer z = nz0; z < nzf; z++ ) { const real c11 = cell_coeff_BR (cc11, z, x, y, dimmz, dimmx); const real c12 = cell_coeff_BR (cc12, z, x, y, dimmz, dimmx); const real c13 = cell_coeff_BR (cc13, z, x, y, dimmz, dimmx); const real c22 = cell_coeff_BR (cc22, z, x, y, dimmz, dimmx); const real c23 = cell_coeff_BR (cc23, z, x, y, dimmz, dimmx); const real c33 = cell_coeff_BR (cc33, z, x, y, dimmz, dimmx); const real c44 = cell_coeff_BR (cc44, z, x, y, dimmz, dimmx); const real c55 = cell_coeff_BR (cc55, z, x, y, dimmz, dimmx); const real c66 = cell_coeff_BR (cc66, z, x, y, dimmz, dimmx); const real c14 = cell_coeff_ARTM_BR (cc14, z, x, y, dimmz, dimmx); const real c15 = cell_coeff_ARTM_BR (cc15, z, x, y, dimmz, dimmx); const real c16 = cell_coeff_ARTM_BR (cc16, z, x, y, dimmz, dimmx); const real c24 = cell_coeff_ARTM_BR (cc24, z, x, y, dimmz, dimmx); const real c25 = cell_coeff_ARTM_BR (cc25, z, x, y, dimmz, dimmx); const real c26 = cell_coeff_ARTM_BR (cc26, z, x, y, dimmz, dimmx); const real c34 = cell_coeff_ARTM_BR (cc34, z, x, y, dimmz, dimmx); const real c35 = cell_coeff_ARTM_BR (cc35, z, x, y, dimmz, dimmx); const real c36 = cell_coeff_ARTM_BR (cc36, z, x, y, dimmz, dimmx); const real c45 = cell_coeff_ARTM_BR (cc45, z, x, y, dimmz, dimmx); const real c46 = cell_coeff_ARTM_BR (cc46, z, x, y, dimmz, dimmx); const real c56 = cell_coeff_ARTM_BR (cc56, z, x, y, dimmz, dimmx); const real u_x = stencil_X (_SX, vxu, dxi, z, x, y, dimmz, dimmx); const real v_x = stencil_X (_SX, vxv, dxi, z, x, y, dimmz, dimmx); const real w_x = stencil_X (_SX, vxw, dxi, z, x, y, dimmz, dimmx); const real u_y = stencil_Y (_SY, vyu, dyi, z, x, y, dimmz, dimmx); const real v_y = stencil_Y (_SY, vyv, dyi, z, x, y, dimmz, dimmx); const real w_y = stencil_Y (_SY, vyw, dyi, z, x, y, dimmz, dimmx); const real u_z = stencil_Z (_SZ, vzu, dzi, z, x, y, dimmz, dimmx); const real v_z = stencil_Z (_SZ, vzv, dzi, z, x, y, dimmz, dimmx); const real w_z = stencil_Z (_SZ, vzw, dzi, 
z, x, y, dimmz, dimmx); stress_update (sxxptr,c11,c12,c13,c14,c15,c16,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx ); stress_update (syyptr,c12,c22,c23,c24,c25,c26,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx ); stress_update (szzptr,c13,c23,c33,c34,c35,c36,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx ); stress_update (syzptr,c14,c24,c34,c44,c45,c46,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx ); stress_update (sxzptr,c15,c25,c35,c45,c55,c56,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx ); stress_update (sxyptr,c16,c26,c36,c46,c56,c66,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx ); } } } #else /* CUDA KERNELS ENABLED */ void* stream = acc_get_cuda_stream(phase); #pragma acc host_data use_device(sxxptr, syyptr, szzptr, syzptr, sxzptr, sxyptr, vxu, vxv, vxw, vyu, vyv, vyw, vzu, vzv, vzw, cc11, cc12, cc13, cc14, cc15, cc16, cc22, cc23, cc24, cc25, cc26, cc33, cc34, cc35, cc36, cc44, cc45, cc46, cc55, cc56, cc66) { compute_component_scell_BR_cuda( sxxptr, syyptr, szzptr, syzptr, sxzptr, sxyptr, vxu, vxv, vxw, vyu, vyv, vyw, vzu, vzv, vzw, cc11, cc12, cc13, cc14, cc15, cc16, cc22, cc23, cc24, cc25, cc26, cc33, cc34, cc35, cc36, cc44, cc45, cc46, cc55, cc56, cc66, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, _SZ, _SX, _SY, dimmz, dimmx, stream); } #endif /* end USE_CUDA */ }; void compute_component_scell_BL (s_t s, point_v_t vnode_z, point_v_t vnode_x, point_v_t vnode_y, coeff_t coeffs, const real dt, const real dzi, const real dxi, const real dyi, const integer nz0, const integer nzf, const integer nx0, const integer nxf, const integer ny0, const integer nyf, const offset_t _SZ, const offset_t _SX, const offset_t _SY, const integer dimmz, const integer dimmx, const phase_t phase) { real* restrict sxxptr __attribute__ ((aligned (64))) = s.br.xx; real* restrict syyptr __attribute__ ((aligned (64))) = s.br.yy; real* restrict szzptr __attribute__ ((aligned (64))) = s.br.zz; real* restrict syzptr 
__attribute__ ((aligned (64))) = s.br.yz; real* restrict sxzptr __attribute__ ((aligned (64))) = s.br.xz; real* restrict sxyptr __attribute__ ((aligned (64))) = s.br.xy; const real* restrict vxu __attribute__ ((aligned (64))) = vnode_x.u; const real* restrict vxv __attribute__ ((aligned (64))) = vnode_x.v; const real* restrict vxw __attribute__ ((aligned (64))) = vnode_x.w; const real* restrict vyu __attribute__ ((aligned (64))) = vnode_y.u; const real* restrict vyv __attribute__ ((aligned (64))) = vnode_y.v; const real* restrict vyw __attribute__ ((aligned (64))) = vnode_y.w; const real* restrict vzu __attribute__ ((aligned (64))) = vnode_z.u; const real* restrict vzv __attribute__ ((aligned (64))) = vnode_z.v; const real* restrict vzw __attribute__ ((aligned (64))) = vnode_z.w; const real* restrict cc11 = coeffs.c11; const real* restrict cc12 = coeffs.c12; const real* restrict cc13 = coeffs.c13; const real* restrict cc14 = coeffs.c14; const real* restrict cc15 = coeffs.c15; const real* restrict cc16 = coeffs.c16; const real* restrict cc22 = coeffs.c22; const real* restrict cc23 = coeffs.c23; const real* restrict cc24 = coeffs.c24; const real* restrict cc25 = coeffs.c25; const real* restrict cc26 = coeffs.c26; const real* restrict cc33 = coeffs.c33; const real* restrict cc34 = coeffs.c34; const real* restrict cc35 = coeffs.c35; const real* restrict cc36 = coeffs.c36; const real* restrict cc44 = coeffs.c44; const real* restrict cc45 = coeffs.c45; const real* restrict cc46 = coeffs.c46; const real* restrict cc55 = coeffs.c55; const real* restrict cc56 = coeffs.c56; const real* restrict cc66 = coeffs.c66; #if !defined(USE_CUDA) #if defined(_OPENACC) const integer start = ((nzf-nz0) + 2*HALO) * ((nxf-nx0) + 2*HALO) * (ny0 - HALO); const integer end = ((nzf-nz0) + 2*HALO) * ((nxf-nx0) + 2*HALO) * (nyf + HALO); const integer nelems = end - start; #pragma acc kernels copy(sxxptr[start:nelems], syyptr[start:nelems], szzptr[start:nelems], syzptr[start:nelems], 
sxzptr[start:nelems], sxyptr[start:nelems]) \ copyin(vxu[start:nelems], vxv[start:nelems], vxw[start:nelems]) \ copyin(vyu[start:nelems], vyv[start:nelems], vyw[start:nelems]) \ copyin(vzu[start:nelems], vzv[start:nelems], vzw[start:nelems]) \ copyin(cc11[start:nelems], cc12[start:nelems], cc13[start:nelems], cc14[start:nelems], cc15[start:nelems], cc16[start:nelems]) \ copyin(cc22[start:nelems], cc23[start:nelems], cc24[start:nelems], cc25[start:nelems], cc26[start:nelems]) \ copyin(cc33[start:nelems], cc34[start:nelems], cc35[start:nelems], cc36[start:nelems]) \ copyin(cc44[start:nelems], cc45[start:nelems], cc46[start:nelems]) \ copyin(cc55[start:nelems], cc56[start:nelems]) \ copyin(cc66[start:nelems]) \ async(phase) #pragma acc loop independent #elif defined(_OPENMP) #pragma omp parallel for #endif /* end pragma _OPENACC */ for (integer y = ny0; y < nyf; y++) { #if defined(_OPENACC) #pragma acc loop independent device_type(nvidia) gang worker(4) #endif for (integer x = nx0; x < nxf; x++) { #if defined(_OPENACC) #pragma acc loop independent device_type(nvidia) gang vector(32) #elif defined(__INTEL__COMPILER) #pragma simd #endif for (integer z = nz0; z < nzf; z++ ) { const real c11 = cell_coeff_BL (cc11, z, x, y, dimmz, dimmx); const real c12 = cell_coeff_BL (cc12, z, x, y, dimmz, dimmx); const real c13 = cell_coeff_BL (cc13, z, x, y, dimmz, dimmx); const real c14 = cell_coeff_ARTM_BL (cc14, z, x, y, dimmz, dimmx); const real c15 = cell_coeff_ARTM_BL (cc15, z, x, y, dimmz, dimmx); const real c16 = cell_coeff_ARTM_BL (cc16, z, x, y, dimmz, dimmx); const real c22 = cell_coeff_BL (cc22, z, x, y, dimmz, dimmx); const real c23 = cell_coeff_BL (cc23, z, x, y, dimmz, dimmx); const real c24 = cell_coeff_ARTM_BL (cc24, z, x, y, dimmz, dimmx); const real c25 = cell_coeff_ARTM_BL (cc25, z, x, y, dimmz, dimmx); const real c26 = cell_coeff_ARTM_BL (cc26, z, x, y, dimmz, dimmx); const real c33 = cell_coeff_BL (cc33, z, x, y, dimmz, dimmx); const real c34 = cell_coeff_ARTM_BL 
(cc34, z, x, y, dimmz, dimmx); const real c35 = cell_coeff_ARTM_BL (cc35, z, x, y, dimmz, dimmx); const real c36 = cell_coeff_ARTM_BL (cc36, z, x, y, dimmz, dimmx); const real c44 = cell_coeff_BL (cc44, z, x, y, dimmz, dimmx); const real c45 = cell_coeff_ARTM_BL (cc45, z, x, y, dimmz, dimmx); const real c46 = cell_coeff_ARTM_BL (cc46, z, x, y, dimmz, dimmx); const real c55 = cell_coeff_BL (cc55, z, x, y, dimmz, dimmx); const real c56 = cell_coeff_ARTM_BL (cc56, z, x, y, dimmz, dimmx); const real c66 = cell_coeff_BL (cc66, z, x, y, dimmz, dimmx); const real u_x = stencil_X (_SX, vxu, dxi, z, x, y, dimmz, dimmx); const real v_x = stencil_X (_SX, vxv, dxi, z, x, y, dimmz, dimmx); const real w_x = stencil_X (_SX, vxw, dxi, z, x, y, dimmz, dimmx); const real u_y = stencil_Y (_SY, vyu, dyi, z, x, y, dimmz, dimmx); const real v_y = stencil_Y (_SY, vyv, dyi, z, x, y, dimmz, dimmx); const real w_y = stencil_Y (_SY, vyw, dyi, z, x, y, dimmz, dimmx); const real u_z = stencil_Z (_SZ, vzu, dzi, z, x, y, dimmz, dimmx); const real v_z = stencil_Z (_SZ, vzv, dzi, z, x, y, dimmz, dimmx); const real w_z = stencil_Z (_SZ, vzw, dzi, z, x, y, dimmz, dimmx); stress_update (sxxptr,c11,c12,c13,c14,c15,c16,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx); stress_update (syyptr,c12,c22,c23,c24,c25,c26,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx); stress_update (szzptr,c13,c23,c33,c34,c35,c36,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx); stress_update (syzptr,c14,c24,c34,c44,c45,c46,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx); stress_update (sxzptr,c15,c25,c35,c45,c55,c56,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx); stress_update (sxyptr,c16,c26,c36,c46,c56,c66,z,x,y,dt,u_x,u_y,u_z,v_x,v_y,v_z,w_x,w_y,w_z,dimmz,dimmx); } } } #else /* CUDA KERNELS ENABLED */ void* stream = acc_get_cuda_stream(phase); #pragma acc host_data use_device(sxxptr, syyptr, szzptr, syzptr, sxzptr, sxyptr, vxu, vxv, vxw, vyu, vyv, vyw, vzu, vzv, vzw, cc11, 
cc12, cc13, cc14, cc15, cc16, cc22, cc23, cc24, cc25, cc26, cc33, cc34, cc35, cc36, cc44, cc45, cc46, cc55, cc56, cc66) { compute_component_scell_BL_cuda( sxxptr, syyptr, szzptr, syzptr, sxzptr, sxyptr, vxu, vxv, vxw, vyu, vyv, vyw, vzu, vzv, vzw, cc11, cc12, cc13, cc14, cc15, cc16, cc22, cc23, cc24, cc25, cc26, cc33, cc34, cc35, cc36, cc44, cc45, cc46, cc55, cc56, cc66, dt, dzi, dxi, dyi, nz0, nzf, nx0, nxf, ny0, nyf, _SZ, _SX, _SY, dimmz, dimmx, stream); } #endif /* end USE_CUDA */ };
GJ.c
#include "GJ.h"

/* --------------------------------------------- IMPLEMENTATIONS -------------------------------------------------- */

/*
 * Divides each row owned by this process by its pivot (the diagonal element),
 * turning the diagonal of the owned stripe into 1.
 *
 * Rows are block-distributed: process r owns rows [r*chunk, (r+1)*chunk) with
 * chunk = line(data)/world_size.
 *
 * Fixes over the original: the outer loop now declares 'j' and 'pivot' as
 * private (they were shared across threads — a data race), and the nested
 * '#pragma omp parallel for' on the inner loop was removed (nested parallel
 * regions are disabled by default and only add overhead).
 */
void pivoting (const int world_rank, const int world_size, Data *data)
{
    size_t chunk = 0, limit = 0, i = 0, j = 0;
    float pivot = 0;

    if (NULL != data) {
        chunk = line(data)/world_size;

        /* One-past-the-last row this process is responsible for. */
        limit = (world_rank+1)*chunk;

        #pragma omp parallel for private(j, pivot)
        for (i = world_rank*chunk; i < limit; i++) {
            pivot = matrix(data)[i][i];

            /* A zero pivot is left untouched: the system may turn out to be
               solvable-but-indeterminate. */
            if (0 != pivot) {
                for (j = 0; j < col(data); j++) {
                    matrix(data)[i][j] /= pivot;
                }
            }
        }
    }
}

/*
 * Flattens the 2D matrix into a 1D array (row-major order).
 */
static void matrix_to_vector (Data *data, float *vector)
{
    size_t i = 0, j = 0, k = 0;

    if (NULL != data && NULL != vector) {
        for (; i < line(data); i++) {
            for (j = 0; j < col(data); j++) {
                vector[k++] = matrix(data)[i][j];
            }
        }
    }
}

/*
 * Unflattens a 1D array (row-major order) back into the 2D matrix.
 */
static void vector_to_matrix (Data *data, float *vector)
{
    size_t i = 0, j = 0, k = 0;

    if (NULL != data && NULL != vector) {
        for (; i < line(data); i++) {
            for (j = 0; j < col(data); j++) {
                matrix(data)[i][j] = vector[k++];
            }
        }
    }
}

/*
 * Copies this process's pivoted stripe of rows into the corresponding slots
 * of 'vector', merging it with the stripes already accumulated by earlier
 * processes in the ring.
 */
static void merge_pivoting (const int world_rank, const int world_size, Data *data, float *vector)
{
    size_t chunk = 0, limit = 0, i = 0, j = 0, k = 0;

    if (NULL != data && NULL != vector) {
        chunk = line(data)/world_size;
        limit = (world_rank+1)*chunk;

        /* Flat offset of the first element of this process's stripe. */
        k = (world_rank*chunk)*col(data);
        for (i = world_rank*chunk; i < limit; i++) {
            for (j = 0; j < col(data); j++) {
                vector[k++] = matrix(data)[i][j];
            }
        }
    }
}

/*
 * Merges all pivoted stripes using a ring topology: each process receives the
 * accumulated matrix from its predecessor, splices in its own stripe and
 * forwards the result; the tail materializes the final merged matrix.
 */
void merge_matrix (const int world_rank, const int world_size, Data *data)
{
    size_t size = line(data)*col(data);
    float *vector = malloc(sizeof(float) * size);

    if (NULL != vector) {
        if (is_root(world_rank)) {
            matrix_to_vector(data, vector);
            MPI_Send(vector, (int) size, MPI_FLOAT, world_rank+1, 0, MPI_COMM_WORLD);
        }
        else if (!is_tail(world_rank, world_size)) {
            MPI_Recv(vector, (int) size, MPI_FLOAT, world_rank-1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

            /* Splice this process's stripe into what arrived so far. */
            merge_pivoting(world_rank, world_size, data, vector);
            MPI_Send(vector, (int) size, MPI_FLOAT, world_rank+1, 0, MPI_COMM_WORLD);
        }
        else {
            /* The tail merges its own stripe and keeps the final matrix,
               which is later used to clear the pivot columns. */
            MPI_Recv(vector, (int) size, MPI_FLOAT, world_rank-1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            merge_pivoting(world_rank, world_size, data, vector);
            vector_to_matrix(data, vector);
        }
        free(vector);
    }
}

/*
 * Swaps rows whose pivot is zero with a later row holding a usable entry in
 * that column.  Because a swap may involve any row, it is performed on the
 * root process only and the updated matrix is then sent to every other rank.
 * 'order' records which rows were exchanged so results can be reported in the
 * original order.
 *
 * Fixes over the original: the float payload is transferred as MPI_FLOAT
 * (it was sent/received as MPI_INT), the receiver now unpacks the received
 * buffer with vector_to_matrix (it previously called matrix_to_vector, which
 * discarded the received data), allocations are NULL-checked, and both
 * temporary buffers are freed (they were leaked).
 */
void swapping (const int world_rank, const int world_size, Data *data, int order[])
{
    size_t i = 0, j = 0, k = 0;
    size_t sizeLine = 0, sizeCol = 0, n_elem = 0;
    float *buffer = NULL;
    float *vector = NULL;

    if (NULL == data) {
        return;
    }

    sizeLine = line(data);
    sizeCol = col(data);
    n_elem = sizeLine*sizeCol;
    buffer = malloc(sizeof(float) * sizeCol);
    vector = malloc(sizeof(float) * n_elem);
    if (NULL == buffer || NULL == vector) {
        free(buffer);
        free(vector);
        return;
    }

    if (is_root(world_rank)) {
        for (i = 0; i < sizeLine; i++) {
            if (matrix(data)[i][i] == 0) {
                /* Save row i, then look for a row with a usable entry in
                   column i.  NOTE(review): the '> 0' test skips negative
                   entries, which would also be valid pivots — confirm
                   whether that is intentional. */
                for (j = 0; j < sizeCol; j++) {
                    buffer[j] = matrix(data)[i][j];
                }
                for (j = 0; j < sizeLine; j++) {
                    if (matrix(data)[j][i] > 0) {
                        order[i] = (int) j;
                        order[j] = (int) i;
                        for (k = 0; k < sizeCol; k++) {
                            matrix(data)[i][k] = matrix(data)[j][k];
                        }
                        for (k = 0; k < sizeCol; k++) {
                            matrix(data)[j][k] = buffer[k];
                        }
                        break;
                    }
                }
            }
        }
        matrix_to_vector(data, vector);
        for (i = 0; i < (size_t) world_size; i++) {
            if (i != (size_t) world_rank) {
                MPI_Send(vector, (int) n_elem, MPI_FLOAT, (int) i, 0, MPI_COMM_WORLD);
            }
        }
    }
    else {
        MPI_Recv(vector, (int) n_elem, MPI_FLOAT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

        /* Unpack the swapped matrix received from the root. */
        vector_to_matrix(data, vector);
    }

    free(buffer);
    free(vector);
}

/*
 * Broadcasts the (possibly row-swapped) matrix from the root to all ranks.
 *
 * Fixes over the original: the payload is broadcast as MPI_FLOAT (it was
 * MPI_INT), the allocation is NULL-checked, and 'vector' is freed.
 */
void send_swap (const int world_rank, const int world_size, Data *data)
{
    size_t n_elem = line(data)*col(data);
    float *vector = malloc(sizeof(float) * n_elem);

    if (NULL == vector) {
        return;
    }
    if (is_root(world_rank)) {
        matrix_to_vector(data, vector);
    }
    MPI_Bcast(vector, (int) n_elem, MPI_FLOAT, 0, MPI_COMM_WORLD);
    if (world_rank > 0) {
        vector_to_matrix(data, vector);
    }
    free(vector);
}

/*
 * Given the pivoted matrix, zeroes every pivot's column in all other rows
 * (the elimination step of Gauss-Jordan), then renormalizes each row by its
 * (possibly changed) pivot.
 *
 * Fixes over the original: the debug printf now uses %zu for the size_t
 * index (passing size_t to %d is undefined behaviour), and the final
 * normalization guards against dividing by a zero pivot (NaN/inf).
 */
void clear_columns (Data *data)
{
    size_t i = 0, j = 0, k = 0;
    float pivot = 0, factor = 0;

    if (NULL != data) {
        /* One pivot row at a time, eliminate its column in every other row. */
        for (; i < line(data); i++) {
            pivot = matrix(data)[i][i];
            printf("pivo[%zu]: %f\n", i, matrix(data)[i][i]);

            /* The pivot is zero when a previous elimination zeroed it. */
            if (0 != pivot) {
                for (j = 0; j < line(data); j++) {
                    /* Never eliminate the pivot's own row. */
                    if (i != j) {
                        /* Coefficient that cancels column i of row j when the
                           pivot row is subtracted from it. */
                        factor = matrix(data)[j][i]/pivot;

                        /* 'k' is the loop variable (implicitly private);
                           'factor' and row i are only read here. */
                        #pragma omp parallel for
                        for (k = 0; k < col(data); k++) {
                            matrix(data)[j][k] -= factor*matrix(data)[i][k];
                        }
                    }
                }
            }
        }

        /* Eliminations may have rescaled the pivots; renormalize each row. */
        for (i = 0; i < line(data); i++) {
            pivot = matrix(data)[i][i];
            if (0 != pivot) {
                matrix(data)[i][i] /= pivot;
                matrix(data)[i][col(data)-1] /= pivot;
            }
        }
    }
}

/*
 * Writes the solution column to "resultado.txt" (root only), undoing any row
 * swaps recorded in 'order'.  NOTE(review): column index line(data) assumes
 * an augmented matrix with col(data) == line(data)+1 — confirm in GJ.h.
 *
 * Fix over the original: the fopen() result is checked before use.
 */
void write_result(Data *data, const int world_rank, int *order)
{
    FILE *answer_file;
    size_t i;

    if (NULL != data) {
        if (is_root(world_rank)) {
            answer_file = fopen("resultado.txt", "wb");
            if (NULL == answer_file) {
                return;
            }
            for (i = 0; i < line(data); i++) {
                if (order[i] > -1) {
                    fprintf(answer_file, "%f\n", matrix(data)[order[i]][line(data)]);
                }
                else {
                    fprintf(answer_file, "%f\n", matrix(data)[i][line(data)]);
                }
            }
            fclose(answer_file);
        }
    }
}
convolution_3x3.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 3x3 convolution, stride 1, scalar reference path.
// Assumes the caller has already sized top_blob so that outw = w-2 and
// outh = h-2 (valid padding) -- NOTE(review): implied by the "+2" row
// advances below; confirm against the caller.
// Kernel layout: _kernel holds outch*inch contiguous 3x3 blocks (9 floats each).
// Output rows are computed two at a time to reuse the overlapping input rows
// r1 and r2 between the two accumulators.
static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // One independent output channel per thread.
    #pragma omp parallel for
    for (int p=0; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        // Seed the whole channel with the bias (or 0 when no bias is given);
        // the per-input-channel loops below accumulate on top of it.
        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        for (int q=0; q<inch; q++)
        {
            // Two output row cursors, one output row apart.
            float* outptr = out;
            float* outptr2 = outptr + outw;

            const float* img0 = bottom_blob.channel(q);

            // 3x3 kernel block for (output p, input q).
            const float* kernel0 = kernel + p*inch*9 + q*9;

            // Four consecutive input rows: rows r0..r2 feed output row i,
            // rows r1..r3 feed output row i+1.
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;
            const float* r3 = img0 + w*3;

            // The three kernel rows.
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            int i = 0;

            // Main loop: produce two output rows per iteration.
            for (; i+1 < outh; i+=2)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = 0;
                    float sum2 = 0;

                    // Output row i: rows r0/r1/r2 against kernel rows k0/k1/k2.
                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    // Output row i+1: rows r1/r2/r3 against the same kernel.
                    sum2 += r1[0] * k0[0];
                    sum2 += r1[1] * k0[1];
                    sum2 += r1[2] * k0[2];
                    sum2 += r2[0] * k1[0];
                    sum2 += r2[1] * k1[1];
                    sum2 += r2[2] * k1[2];
                    sum2 += r3[0] * k2[0];
                    sum2 += r3[1] * k2[1];
                    sum2 += r3[2] * k2[2];

                    // Accumulate (channel was pre-filled with the bias and
                    // earlier input channels' contributions).
                    *outptr += sum;
                    *outptr2 += sum2;

                    r0++;
                    r1++;
                    r2++;
                    r3++;
                    outptr++;
                    outptr2++;
                }

                // Skip the 2-column border and step down two input rows.
                r0 += 2 + w;
                r1 += 2 + w;
                r2 += 2 + w;
                r3 += 2 + w;

                // Step both output cursors past the row the other one wrote.
                outptr += outw;
                outptr2 += outw;
            }

            // Tail: remaining single output row when outh is odd.
            for (; i < outh; i++)
            {
                int remain = outw;

                for (; remain>0; remain--)
                {
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;

                    r0++;
                    r1++;
                    r2++;
                    outptr++;
                }

                // Skip the 2-column border to the start of the next input row.
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }
        }
    }
}
image.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % % % MagickCore Image Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/animate.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/compress.h" #include "magick/constitute.h" #include "magick/delegate.h" #include "magick/deprecate.h" #include "magick/display.h" #include "magick/draw.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magic.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/module.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/profile.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/segment.h" #include "magick/semaphore.h" #include "magick/signature-private.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/timer.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" #include "magick/xwindow-private.h" /* Constant declaration. 
*/ const char BackgroundColor[] = "#ffffff", /* white */ BorderColor[] = "#dfdfdf", /* gray */ DefaultTileFrame[] = "15x15+3+3", DefaultTileGeometry[] = "120x120+4+3>", DefaultTileLabel[] = "%f\n%G\n%b", ForegroundColor[] = "#000", /* black */ LoadImageTag[] = "Load/Image", LoadImagesTag[] = "Load/Images", MatteColor[] = "#bdbdbd", /* gray */ PSDensityGeometry[] = "72.0x72.0", PSPageGeometry[] = "612x792", SaveImageTag[] = "Save/Image", SaveImagesTag[] = "Save/Images", TransparentColor[] = "#00000000"; /* transparent black */ const double DefaultResolution = 72.0; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImage() returns a pointer to an image structure initialized to % default values. % % The format of the AcquireImage method is: % % Image *AcquireImage(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % */ MagickExport Image *AcquireImage(const ImageInfo *image_info) { const char *option; Image *image; MagickStatusType flags; /* Allocate image structure. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); image=(Image *) AcquireMagickMemory(sizeof(*image)); if (image == (Image *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(image,0,sizeof(*image)); /* Initialize Image structure. 
*/ (void) CopyMagickString(image->magick,"MIFF",MaxTextExtent); image->storage_class=DirectClass; image->depth=MAGICKCORE_QUANTUM_DEPTH; image->colorspace=sRGBColorspace; image->rendering_intent=PerceptualIntent; image->gamma=1.000f/2.200f; image->chromaticity.red_primary.x=0.6400f; image->chromaticity.red_primary.y=0.3300f; image->chromaticity.red_primary.z=0.0300f; image->chromaticity.green_primary.x=0.3000f; image->chromaticity.green_primary.y=0.6000f; image->chromaticity.green_primary.z=0.1000f; image->chromaticity.blue_primary.x=0.1500f; image->chromaticity.blue_primary.y=0.0600f; image->chromaticity.blue_primary.z=0.7900f; image->chromaticity.white_point.x=0.3127f; image->chromaticity.white_point.y=0.3290f; image->chromaticity.white_point.z=0.3583f; image->interlace=NoInterlace; image->ticks_per_second=UndefinedTicksPerSecond; image->compose=OverCompositeOp; image->blur=1.0; InitializeExceptionInfo(&image->exception); (void) QueryColorDatabase(BackgroundColor,&image->background_color, &image->exception); (void) QueryColorDatabase(BorderColor,&image->border_color,&image->exception); (void) QueryColorDatabase(MatteColor,&image->matte_color,&image->exception); (void) QueryColorDatabase(TransparentColor,&image->transparent_color, &image->exception); GetTimerInfo(&image->timer); image->ping=MagickFalse; image->cache=AcquirePixelCache(0); image->blob=CloneBlobInfo((BlobInfo *) NULL); image->timestamp=time((time_t *) NULL); image->debug=IsEventLogging(); image->reference_count=1; image->semaphore=AllocateSemaphoreInfo(); image->signature=MagickCoreSignature; if (image_info == (ImageInfo *) NULL) return(image); /* Transfer image info. */ SetBlobExempt(image,image_info->file != (FILE *) NULL ? 
MagickTrue : MagickFalse); (void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent); (void) CopyMagickString(image->magick_filename,image_info->filename, MaxTextExtent); (void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent); if (image_info->size != (char *) NULL) { (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info); image->columns=image->extract_info.width; image->rows=image->extract_info.height; image->offset=image->extract_info.x; image->extract_info.x=0; image->extract_info.y=0; } if (image_info->extract != (char *) NULL) { RectangleInfo geometry; flags=ParseAbsoluteGeometry(image_info->extract,&geometry); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { image->extract_info=geometry; Swap(image->columns,image->extract_info.width); Swap(image->rows,image->extract_info.height); } } image->compression=image_info->compression; image->quality=image_info->quality; image->endian=image_info->endian; image->interlace=image_info->interlace; image->units=image_info->units; if (image_info->density != (char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(image_info->density,&geometry_info); image->x_resolution=geometry_info.rho; image->y_resolution=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->y_resolution=image->x_resolution; } if (image_info->page != (char *) NULL) { char *geometry; image->page=image->extract_info; geometry=GetPageGeometry(image_info->page); (void) ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } if (image_info->depth != 0) image->depth=image_info->depth; image->dither=image_info->dither; image->background_color=image_info->background_color; image->border_color=image_info->border_color; image->matte_color=image_info->matte_color; image->transparent_color=image_info->transparent_color; image->ping=image_info->ping; image->progress_monitor=image_info->progress_monitor; image->client_data=image_info->client_data; if (image_info->cache != 
(void *) NULL) ClonePixelCacheMethods(image->cache,image_info->cache); (void) SyncImageSettings(image_info,image); option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if (image->delay > (size_t) floor(geometry_info.rho+0.5)) image->delay=(size_t) floor(geometry_info.rho+0.5); } else if ((flags & LessValue) != 0) { if (image->delay < (size_t) floor(geometry_info.rho+0.5)) image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } else image->delay=(size_t) floor(geometry_info.rho+0.5); if ((flags & SigmaValue) != 0) image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions, MagickFalse,option); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImageInfo() allocates the ImageInfo structure. % % The format of the AcquireImageInfo method is: % % ImageInfo *AcquireImageInfo(void) % */ MagickExport ImageInfo *AcquireImageInfo(void) { ImageInfo *image_info; image_info=(ImageInfo *) AcquireMagickMemory(sizeof(*image_info)); if (image_info == (ImageInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetImageInfo(image_info); return(image_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e N e x t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireNextImage() initializes the next image in a sequence to % default values. The next member of image points to the newly allocated % image. 
%  If there is a memory shortage, next is assigned NULL.
%
%  The format of the AcquireNextImage method is:
%
%      void AcquireNextImage(const ImageInfo *image_info,Image *image)
%
%  A description of each parameter follows:
%
%    o image_info: Many of the image default values are set from this
%      structure.  For example, filename, compression, depth, background color,
%      and others.
%
%    o image: the image.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image)
{
  /*
    Allocate image structure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info);
  if (GetNextImageInList(image) == (Image *) NULL)
    return;
  /*
    The next image inherits this image's filename unless image_info
    supplies one of its own.
  */
  (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
    MaxTextExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(GetNextImageInList(image)->filename,
      image_info->filename,MaxTextExtent);
  /*
    Discard the blob AcquireImage() created and share the parent's blob so
    the whole sequence uses one I/O stream.
  */
  DestroyBlob(GetNextImageInList(image));
  image->next->blob=ReferenceBlob(image->blob);
  image->next->endian=image->endian;
  image->next->scene=image->scene+1;
  image->next->previous=image;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A p p e n d I m a g e s                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AppendImages() takes all images from the current image pointer to the end
%  of the image list and appends them to each other top-to-bottom if the
%  stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now effects how the image is justified in the
%  final image.
%
%  The format of the AppendImages method is:
%
%      Image *AppendImages(const Image *images,const MagickBooleanType stack,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o stack: A value other than 0 stacks the images top-to-bottom.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag  "Append/Image"

  CacheView
    *append_view;

  Image
    *append_image;

  MagickBooleanType
    homogeneous_colorspace,
    matte,
    status;

  MagickOffsetType
    n;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    depth,
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y,
    y_offset;

  /*
    Compute maximum area of appended area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  matte=images->matte;
  number_images=1;
  width=images->columns;
  height=images->rows;
  depth=images->depth;
  homogeneous_colorspace=MagickTrue;
  next=GetNextImageInList(images);
  /*
    Stacked: width is the maximum, heights accumulate.  Side-by-side:
    widths accumulate, height is the maximum.  Also track the deepest
    depth, whether any image has a matte, and whether all colorspaces agree.
  */
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if (next->depth > depth)
      depth=next->depth;
    if (next->colorspace != images->colorspace)
      homogeneous_colorspace=MagickFalse;
    if (next->matte != MagickFalse)
      matte=MagickTrue;
    number_images++;
    if (stack != MagickFalse)
      {
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images.
  */
  append_image=CloneImage(images,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(append_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&append_image->exception);
      append_image=DestroyImage(append_image);
      return((Image *) NULL);
    }
  /*
    Mixed colorspaces are reconciled to sRGB before copying pixels.
  */
  if (homogeneous_colorspace == MagickFalse)
    (void) SetImageColorspace(append_image,sRGBColorspace);
  append_image->depth=depth;
  append_image->matte=matte;
  append_image->page=images->page;
  (void) SetImageBackgroundColor(append_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  next=images;
  append_view=AcquireAuthenticCacheView(append_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    CacheView
      *image_view;

    MagickBooleanType
      proceed;

    /*
      Justify this image inside its slot according to its gravity.
    */
    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(next,next,next->rows,1)
#endif
    for (y=0; y < (ssize_t) next->rows; y++)
    {
      MagickBooleanType
        sync;

      register const IndexPacket
        *magick_restrict indexes;

      register const PixelPacket
        *magick_restrict p;

      register IndexPacket
        *magick_restrict append_indexes;

      register PixelPacket
        *magick_restrict q;

      register ssize_t
        x;

      /* A failure in any row (or thread) short-circuits the rest. */
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        next->columns,1,exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      append_indexes=GetCacheViewAuthenticIndexQueue(append_view);
      for (x=0; x < (ssize_t) next->columns; x++)
      {
        SetPixelRed(q,GetPixelRed(p));
        SetPixelGreen(q,GetPixelGreen(p));
        SetPixelBlue(q,GetPixelBlue(p));
        /* Opaque unless the source image carries a matte channel. */
        SetPixelOpacity(q,OpaqueOpacity);
        if (next->matte != MagickFalse)
          SetPixelOpacity(q,GetPixelOpacity(p));
        /* Black channel is copied only for CMYK-to-CMYK appends. */
        if ((next->colorspace == CMYKColorspace) &&
            (append_image->colorspace == CMYKColorspace))
          SetPixelIndex(append_indexes+x,GetPixelIndex(indexes+x));
        p++;
        q++;
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    /*
      Advance the slot origin for the next image.
    */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) next->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) next->rows;
      }
    proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    next=GetNextImageInList(next);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C a t c h I m a g e E x c e p t i o n                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CatchImageException() returns if no exceptions are found in the image
%  sequence, otherwise it determines the most severe exception and reports
%  it as a warning or error depending on the severity.
%
%  The format of the CatchImageException method is:
%
%      ExceptionType CatchImageException(Image *image)
%
%  A description of each parameter follows:
%
%    o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *exception;

  ExceptionType
    severity;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Collect the sequence's most severe exception into a scratch
    ExceptionInfo, report it, and hand the severity back to the caller.
  */
  exception=AcquireExceptionInfo();
  GetImageException(image,exception);
  CatchException(exception);
  severity=exception->severity;
  exception=DestroyExceptionInfo(exception);
  return(severity);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l i p I m a g e P a t h                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClipImagePath() sets the image clip mask based any clipping path information
%  if it exists.
%
%  The format of the ClipImagePath method is:
%
%      MagickBooleanType ClipImagePath(Image *image,const char *pathname,
%        const MagickBooleanType inside)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o pathname: name of clipping path resource. If name is preceded by #, use
%      clipping path numbered by name.
%
%    o inside: if non-zero, later operations take effect inside clipping path.
%      Otherwise later operations take effect outside clipping path.
%
*/

/* Convenience wrapper: clip against the first (8BIM) clipping path. */
MagickExport MagickBooleanType ClipImage(Image *image)
{
  return(ClipImagePath(image,"#1",MagickTrue));
}

MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
  const MagickBooleanType inside)
{
#define ClipImagePathTag  "ClipPath/Image"

  char
    *property;

  const char
    *value;

  Image
    *clip_mask;

  ImageInfo
    *image_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pathname != NULL);
  /*
    Clipping paths live in the 8BIM profile; look up the named path's
    vector data as an image property.
  */
  property=AcquireString(pathname);
  (void) FormatLocaleString(property,MaxTextExtent,"8BIM:1999,2998:%s",
    pathname);
  value=GetImageProperty(image,property);
  property=DestroyString(property);
  if (value == (const char *) NULL)
    {
      ThrowFileException(&image->exception,OptionError,"NoClipPathDefined",
        image->filename);
      return(MagickFalse);
    }
  /*
    Render the path's vector data into a mask image.
  */
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent);
  (void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent);
  clip_mask=BlobToImage(image_info,value,strlen(value),&image->exception);
  image_info=DestroyImageInfo(image_info);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  if (clip_mask->storage_class == PseudoClass)
    {
      (void) SyncImage(clip_mask);
      if (SetImageStorageClass(clip_mask,DirectClass) == MagickFalse)
        return(MagickFalse);
    }
  /*
    Inverting the mask makes later operations apply outside the path.
  */
  if (inside == MagickFalse)
    (void) NegateImage(clip_mask,MagickFalse);
  (void) FormatLocaleString(clip_mask->magick_filename,MaxTextExtent,
    "8BIM:1999,2998:%s\nPS",pathname);
  (void) SetImageClipMask(image,clip_mask);
  clip_mask=DestroyImage(clip_mask);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImage() copies an image and returns the copy as a new image object.
%
%  If the specified columns and rows is 0, an exact copy of the image is
%  returned, otherwise the pixel data is undefined and must be initialized
%  with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
%  failure, a NULL image is returned and exception describes the reason for
%  the failure.
%
%  The format of the CloneImage method is:
%
%      Image *CloneImage(const Image *image,const size_t columns,
%        const size_t rows,const MagickBooleanType orphan,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the cloned image.
%
%    o rows: the number of rows in the cloned image.
%
%    o detach:  With a value other than 0, the cloned image is detached from
%      its parent I/O stream.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
  const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
  double
    scale;

  Image
    *clone_image;

  size_t
    length;

  /*
    Clone the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((image->columns == 0) || (image->rows == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        CorruptImageError,"NegativeOrZeroImageSize","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    AcquireCriticalMemory() aborts on failure, so clone_image is never NULL
    past this point.  Scalar members are copied field by field; profiles,
    properties and artifacts are deep-cloned.
  */
  clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image));
  (void) ResetMagickMemory(clone_image,0,sizeof(*clone_image));
  clone_image->signature=MagickCoreSignature;
  clone_image->storage_class=image->storage_class;
  clone_image->channels=image->channels;
  clone_image->colorspace=image->colorspace;
  clone_image->matte=image->matte;
  clone_image->columns=image->columns;
  clone_image->rows=image->rows;
  clone_image->dither=image->dither;
  (void) CloneImageProfiles(clone_image,image);
  (void) CloneImageProperties(clone_image,image);
  (void) CloneImageArtifacts(clone_image,image);
  GetTimerInfo(&clone_image->timer);
  InitializeExceptionInfo(&clone_image->exception);
  InheritException(&clone_image->exception,&image->exception);
  if (image->ascii85 != (void *) NULL)
    Ascii85Initialize(clone_image);
  clone_image->magick_columns=image->magick_columns;
  clone_image->magick_rows=image->magick_rows;
  clone_image->type=image->type;
  (void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
    MaxTextExtent);
  (void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent);
  clone_image->progress_monitor=image->progress_monitor;
  clone_image->client_data=image->client_data;
  clone_image->reference_count=1;
  clone_image->next=image->next;
  clone_image->previous=image->previous;
  clone_image->list=NewImageList();
  clone_image->clip_mask=NewImageList();
  clone_image->mask=NewImageList();
  if (detach == MagickFalse)
    clone_image->blob=ReferenceBlob(image->blob);
  else
    {
      /*
        Detached clones are unlinked from the parent list and get a fresh
        blob rather than a reference to the parent's I/O stream.
      */
      clone_image->next=NewImageList();
      clone_image->previous=NewImageList();
      clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
    }
  clone_image->ping=image->ping;
  clone_image->debug=IsEventLogging();
  clone_image->semaphore=AllocateSemaphoreInfo();
  if (image->colormap != (PixelPacket *) NULL)
    {
      /*
        Allocate and copy the image colormap.
      */
      clone_image->colors=image->colors;
      length=(size_t) image->colors;
      clone_image->colormap=(PixelPacket *) AcquireQuantumMemory(length+1,
        sizeof(*clone_image->colormap));
      if (clone_image->colormap == (PixelPacket *) NULL)
        {
          clone_image=DestroyImage(clone_image);
          ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
        }
      (void) CopyMagickMemory(clone_image->colormap,image->colormap,length*
        sizeof(*clone_image->colormap));
    }
  if ((columns == 0) || (rows == 0))
    {
      /*
        Exact copy requested: share the pixel cache by reference and clone
        montage/directory/clip-mask/mask as well.
      */
      if (image->montage != (char *) NULL)
        (void) CloneString(&clone_image->montage,image->montage);
      if (image->directory != (char *) NULL)
        (void) CloneString(&clone_image->directory,image->directory);
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
      clone_image->cache=ReferencePixelCache(image->cache);
      return(clone_image);
    }
  if ((columns == image->columns) && (rows == image->rows))
    {
      /*
        Same geometry: masks still apply, so clone them too.
      */
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
    }
  /*
    Resized clone: scale the page geometry and tile offsets proportionally,
    rounding to the nearest integer.
  */
  scale=1.0;
  if (image->columns != 0)
    scale=(double) columns/(double) image->columns;
  clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
  clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
  clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
  scale=1.0;
  if (image->rows != 0)
    scale=(double) rows/(double) image->rows;
  clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
  clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
  clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
  clone_image->cache=ClonePixelCache(image->cache);
  if (SetImageExtent(clone_image,columns,rows) == MagickFalse)
    {
      InheritException(exception,&clone_image->exception);
      clone_image=DestroyImage(clone_image);
    }
  return(clone_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e I m a g e I n f o                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageInfo() makes a copy of the given image info structure.  If
%  NULL is specified, a new image info structure is created initialized to
%  default values.
%
%  The format of the CloneImageInfo method is:
%
%      ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;

  /*
    AcquireImageInfo() returns a fully defaulted structure; a NULL source
    therefore yields defaults.  Otherwise copy scalars by value and
    deep-clone all owned strings so the clone is independently destroyable.
  */
  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  if (image_info->size != (char *) NULL)
    (void) CloneString(&clone_info->size,image_info->size);
  if (image_info->extract != (char *) NULL)
    (void) CloneString(&clone_info->extract,image_info->extract);
  if (image_info->scenes != (char *) NULL)
    (void) CloneString(&clone_info->scenes,image_info->scenes);
  if (image_info->page != (char *) NULL)
    (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  if (image_info->sampling_factor != (char *) NULL)
    (void) CloneString(&clone_info->sampling_factor,
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,image_info->server_name);
  if (image_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,image_info->font);
  if (image_info->texture != (char *) NULL)
    (void) CloneString(&clone_info->texture,image_info->texture);
  if (image_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->pen=image_info->pen;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->matte_color=image_info->matte_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colors=image_info->colors;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->preview_type=image_info->preview_type;
  clone_info->group=image_info->group;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  if (image_info->view != (char *) NULL)
    (void) CloneString(&clone_info->view,image_info->view);
  if (image_info->authenticate != (char *) NULL)
    (void) CloneString(&clone_info->authenticate,image_info->authenticate);
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  /*
    The pixel cache is shared by reference (reference counted), not copied.
  */
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->virtual_pixel_method=image_info->virtual_pixel_method;
  (void) CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent);
  (void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MaxTextExtent);
  clone_info->subimage=image_info->scene;  /* deprecated */
  clone_info->subrange=image_info->number_scenes;  /* deprecated */
  clone_info->channel=image_info->channel;
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o p y I m a g e P i x e l s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CopyImagePixels() copies pixels from the source image as defined by the
%  geometry the destination image at the specified offset.
%
%  The format of the CopyImagePixels method is:
%
%      MagickBooleanType CopyImagePixels(Image *image,
%        const Image *source_image,const RectangleInfo *geometry,
%        const OffsetInfo *offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the destination image.
%
%    o source_image: the source image.
%
%    o geometry: define the dimensions of the source pixel rectangle.
%
%    o offset: define the offset in the destination image.
%
%    o exception: return the highest severity exception.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
  const Image *source_image,const RectangleInfo *geometry,
  const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"

  CacheView
    *image_view,
    *source_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(source_image != (Image *) NULL);
  assert(geometry != (RectangleInfo *) NULL);
  assert(offset != (OffsetInfo *) NULL);
  /*
    The copied rectangle must lie fully inside the destination; source-side
    out-of-bounds reads are served by the virtual pixel cache view below.
  */
  if ((offset->x < 0) || (offset->y < 0) ||
      ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
      ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
    ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
      image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Copy image pixels.  Rows are independent, so the loop is parallelized;
    each iteration reads one source row (virtual view) and writes one
    destination row (authentic view).
  */
  status=MagickTrue;
  progress=0;
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(source_image,image,geometry->height,1)
#endif
  for (y=0; y < (ssize_t) geometry->height; y++)
  {
    register const IndexPacket
      *magick_restrict source_indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
      geometry->width,1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    source_indexes=GetCacheViewVirtualIndexQueue(source_view);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) geometry->width; x++)
    {
      *q=(*p);
      /*
        CMYK images carry the black channel in the index queue, copied
        separately from the RGBA packet.
      */
      if (image->colorspace == CMYKColorspace)
        indexes[x]=source_indexes[x];
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          NOTE(review): progress is reported against image->rows even though
          the loop runs geometry->height iterations -- looks intentional
          upstream, but confirm before relying on completion callbacks.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CopyImagePixels)
#endif
        proceed=SetImageProgress(image,CopyImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImage() dereferences an image, deallocating memory associated with
%  the image if the reference
%  count becomes zero.
%
%  The format of the DestroyImage method is:
%
%      Image *DestroyImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;

  /*
    Dereference image.  The reference count is decremented under the image
    semaphore; only the thread that drops it to zero performs the teardown.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  destroy=MagickFalse;
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);
  /*
    Destroy image: release pixels, owned sub-images, strings, metadata,
    blob, exception state and finally the semaphore and the structure
    itself.  The signature is inverted to catch use-after-free.
  */
  DestroyImagePixels(image);
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelPacket *) NULL)
    image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info*) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  DestroyBlob(image);
  (void) ClearExceptionInfo(&image->exception,MagickTrue);
  if (image->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&image->semaphore);
  image->signature=(~MagickCoreSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageInfo() deallocates memory associated with an ImageInfo
%  structure.
%
%  The format of the DestroyImageInfo method is:
%
%      ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /*
    Release every owned string member, the image options, the referenced
    pixel cache and the profile, then the structure itself.  The signature
    is inverted to catch use-after-free.
  */
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->view != (char *) NULL)
    image_info->view=DestroyString(image_info->view);
  if (image_info->authenticate != (char *) NULL)
    image_info->authenticate=DestroyString(image_info->authenticate);
  DestroyImageOptions(image_info);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  image_info->signature=(~MagickCoreSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i s a s s o c i a t e I m a g e S t r e a m                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DisassociateImageStream() disassociates the image stream.  It checks if
%  the blob of the specified image is referenced by other images. If the
%  reference count is higher then 1 a new blob is assigned to the specified
%  image.
%
%  The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    The sharing check and reassignment are delegated to DisassociateBlob().
  */
  DisassociateBlob(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e C l i p M a s k                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageClipMask() returns the clip path associated with the image.
%
%  The format of the GetImageClipMask method is:
%
%      Image *GetImageClipMask(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickExport Image *GetImageClipMask(const Image *image, ExceptionInfo *exception) { assert(image != (const Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (image->clip_mask == (Image *) NULL) return((Image *) NULL); return(CloneImage(image->clip_mask,0,0,MagickTrue,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageException() traverses an image sequence and returns any % error more severe than noted by the exception parameter. % % The format of the GetImageException method is: % % void GetImageException(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Specifies a pointer to a list of one or more images. % % o exception: return the highest severity exception. % */ MagickExport void GetImageException(Image *image,ExceptionInfo *exception) { register Image *next; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); for (next=image; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->exception.severity == UndefinedException) continue; if (next->exception.severity > exception->severity) InheritException(exception,&next->exception); next->exception.severity=UndefinedException; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageInfo() initializes image_info to default values. 
%
%  The format of the GetImageInfo method is:
%
%      void GetImageInfo(ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  char
    *synchronize;

  ExceptionInfo
    *exception;

  /*
    File and image dimension members.  The structure is zeroed first, then
    only the non-zero defaults are set explicitly.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  (void) ResetMagickMemory(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /*
    Honor the MAGICK_SYNCHRONIZE environment override; the returned string
    is owned by us and must be destroyed.
  */
  synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (synchronize != (const char *) NULL)
    {
      image_info->synchronize=IsStringTrue(synchronize);
      synchronize=DestroyString(synchronize);
    }
  /*
    Resolve the default colors; a scratch exception absorbs any lookup
    warnings so the caller's state is untouched.
  */
  exception=AcquireExceptionInfo();
  (void) QueryColorDatabase(BackgroundColor,&image_info->background_color,
    exception);
  (void) QueryColorDatabase(BorderColor,&image_info->border_color,exception);
  (void) QueryColorDatabase(MatteColor,&image_info->matte_color,exception);
  (void) QueryColorDatabase(TransparentColor,&image_info->transparent_color,
    exception);
  exception=DestroyExceptionInfo(exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e I n f o F i l e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageInfoFile() returns the image info file member.
%
%  The format of the GetImageInfoFile method is:
%
%      FILE *GetImageInfoFile(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
% */ MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info) { return(image_info->file); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMask() returns the mask associated with the image. % % The format of the GetImageMask method is: % % Image *GetImageMask(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception) { assert(image != (const Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (image->mask == (Image *) NULL) return((Image *) NULL); return(CloneImage(image->mask,0,0,MagickTrue,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannels() returns the number of pixel channels associated with the % specified image. % % The format of the GetChannels method is: % % size_t GetImageChannels(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport size_t GetImageChannels(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); return(image->channels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e R e f e r e n c e C o u n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageReferenceCount() returns the image reference count. 
% % The format of the GetReferenceCount method is: % % ssize_t GetImageReferenceCount(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport ssize_t GetImageReferenceCount(Image *image) { ssize_t reference_count; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); LockSemaphoreInfo(image->semaphore); reference_count=image->reference_count; UnlockSemaphoreInfo(image->semaphore); return(reference_count); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageVirtualPixelMethod() gets the "virtual pixels" method for the % image. A virtual pixel is any pixel access that is outside the boundaries % of the image cache. % % The format of the GetImageVirtualPixelMethod() method is: % % VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); return(GetPixelCacheVirtualMethod(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e r p r e t I m a g e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InterpretImageFilename() interprets embedded characters in an image filename. % The filename length is returned. 
%
%  The format of the InterpretImageFilename method is:
%
%      size_t InterpretImageFilename(const ImageInfo *image_info,
%        Image *image,const char *format,int value,char *filename)
%
%  A description of each parameter follows.
%
%    o image_info: the image info..
%
%    o image: the image.
%
%    o format: A filename describing the format to use to write the numeric
%      argument. Only the first numeric format identifier is replaced.
%
%    o value: Numeric value to substitute into format filename.
%
%    o filename: return the formatted filename in this character buffer.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;

  register const char
    *p;

  size_t
    length;

  canonical=MagickFalse;
  length=0;
  (void) CopyMagickString(filename,format,MaxTextExtent);
  /*
    Scan format for '%' escapes.  NOTE(review): substitution temporarily
    writes a NUL into the buffer behind the const format pointer (the const
    is cast away via q) -- callers must pass a writable string.
  */
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        /* literal "%%": skip the escape */
        p=q+1;
        continue;
      }
    if (*q == '0')
      {
        ssize_t
          value;

        /*
          Consume an optional zero-padded width (e.g. "%03d"); the parsed
          width itself is unused -- FormatLocaleString reads it from the
          format text.  This 'value' intentionally shadows the parameter.
        */
        value=(ssize_t) strtol(q,&q,10);
        (void) value;
      }
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /*
          Numeric scene substitution: NUL-terminate the specifier in place,
          format it with the caller's value, then restore and append the
          remainder of the format string.
        */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatLocaleString(filename+(p-format),(size_t) (MaxTextExtent-
          (p-format)),p,value);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MaxTextExtent];

        const char
          *value;

        register char
          *r;

        register ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option of the form %[filename:...]: extract the bracketed
          pattern, tracking nesting depth of '[' and ']'.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        depth=1;
        r=q+1;
        for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        value=(const char *) NULL;
#if 0
  /* FUTURE: remove this code. -- Anthony  29 April 2012
     Removed as GetMagickProperty() will never match a "filename:" string
     as this is not a 'known' image property.
  */
        if ((image_info != (const ImageInfo *) NULL) &&
            (image != (const Image *) NULL))
          value=GetMagickProperty(image_info,image,pattern);
        else
#endif
        /*
          Resolve the pattern: image property first, then image artifact,
          then image option.
        */
        if (image != (Image *) NULL)
          value=GetImageProperty(image,pattern);
        if ((value == (const char *) NULL) && (image != (Image *) NULL))
          value=GetImageArtifact(image,pattern);
        if ((value == (const char *) NULL) &&
            (image_info != (ImageInfo *) NULL))
          value=GetImageOption(image_info,pattern);
        if (value == (const char *) NULL)
          break;
        /*
          Splice the resolved value into filename; length tracks the net
          shift of later escapes caused by earlier substitutions.
        */
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format-length),value,(size_t)
          (MaxTextExtent-(p-format-length)));
        length+=strlen(pattern)-1;
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  /*
    Collapse any remaining "%%" escapes to a single '%'.
  */
  for (q=filename; *q != '\0'; q++)
    if ((*q == '%') && (*(q+1) == '%'))
      {
        (void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
        canonical=MagickTrue;
      }
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MaxTextExtent);
  return(strlen(filename));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s H i g h D y n a m i c R a n g e I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
%  non-integer or exceeds the bounds of the quantum depth (e.g. for Q16
%  0..65535.
%
%  The format of the IsHighDynamicRangeImage method is:
%
%      MagickBooleanType IsHighDynamicRangeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /*
    Without HDRI support, quantum values are integral and clamped by
    construction, so no image can be high dynamic range.
  */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    status stays MagickTrue while every component is an in-range integer;
    it flips to MagickFalse as soon as any row finds an HDR component.
  */
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        A component is "HDR" when it is negative, exceeds QuantumRange, or
        has a fractional part (differs from its integer cast).
      */
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if ((pixel.red < 0.0) || (pixel.red > QuantumRange) ||
          (pixel.red != (QuantumAny) pixel.red))
        break;
      if ((pixel.green < 0.0) || (pixel.green > QuantumRange) ||
          (pixel.green != (QuantumAny) pixel.green))
        break;
      if ((pixel.blue < 0.0) || (pixel.blue > QuantumRange) ||
          (pixel.blue != (QuantumAny) pixel.blue))
        break;
      if (pixel.matte != MagickFalse)
        {
          if ((pixel.opacity < 0.0) || (pixel.opacity > QuantumRange) ||
              (pixel.opacity != (QuantumAny) pixel.opacity))
            break;
        }
      if (pixel.colorspace == CMYKColorspace)
        {
          if ((pixel.index < 0.0) || (pixel.index > QuantumRange) ||
              (pixel.index != (QuantumAny) pixel.index))
            break;
        }
      p++;
    }
    /*
      An early break above leaves x short of image->columns.
    */
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /*
    Invert: status==MagickTrue means every component was in range, i.e.
    the image is NOT high dynamic range.
  */
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O b j e c t                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageObject() returns MagickTrue if the image sequence contains a valid
%  set of image objects.
%
%  The format of the IsImageObject method is:
%
%      MagickBooleanType IsImageObject(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *p;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  /*
    Every image in the list must carry a live signature.
  */
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
    if (p->signature != MagickCoreSignature)
      return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s T a i n t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsTaintImage() returns MagickTrue any pixel in the image has been altered
%  since it was first constituted.
%
%  The format of the IsTaintImage method is:
%
%      MagickBooleanType IsTaintImage(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickExport MagickBooleanType IsTaintImage(const Image *image) { char magick[MaxTextExtent], filename[MaxTextExtent]; register const Image *p; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); (void) CopyMagickString(magick,image->magick,MaxTextExtent); (void) CopyMagickString(filename,image->filename,MaxTextExtent); for (p=image; p != (Image *) NULL; p=GetNextImageInList(p)) { if (p->taint != MagickFalse) return(MagickTrue); if (LocaleCompare(p->magick,magick) != 0) return(MagickTrue); if (LocaleCompare(p->filename,filename) != 0) return(MagickTrue); } return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModifyImage() ensures that there is only a single reference to the image % to be modified, updating the provided image pointer to point to a clone of % the original image if necessary. % % The format of the ModifyImage method is: % % MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType ModifyImage(Image **image, ExceptionInfo *exception) { Image *clone_image; assert(image != (Image **) NULL); assert(*image != (Image *) NULL); assert((*image)->signature == MagickCoreSignature); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); if (GetImageReferenceCount(*image) <= 1) return(MagickTrue); clone_image=CloneImage(*image,0,0,MagickTrue,exception); LockSemaphoreInfo((*image)->semaphore); (*image)->reference_count--; UnlockSemaphoreInfo((*image)->semaphore); *image=clone_image; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w M a g i c k I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewMagickImage() creates a blank image canvas of the specified size and % background color. % % The format of the NewMagickImage method is: % % Image *NewMagickImage(const ImageInfo *image_info,const size_t width, % const size_t height,const MagickPixelPacket *background) % % A description of each parameter follows: % % o image: the image. % % o width: the image width. % % o height: the image height. % % o background: the image color. 
% */ MagickExport Image *NewMagickImage(const ImageInfo *image_info, const size_t width,const size_t height,const MagickPixelPacket *background) { CacheView *image_view; ExceptionInfo *exception; Image *image; ssize_t y; MagickBooleanType status; assert(image_info != (const ImageInfo *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info->signature == MagickCoreSignature); assert(background != (const MagickPixelPacket *) NULL); image=AcquireImage(image_info); image->columns=width; image->rows=height; image->colorspace=background->colorspace; image->matte=background->matte; image->fuzz=background->fuzz; image->depth=background->depth; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelPacket(image,background,q,indexes+x); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e f e r e n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferenceImage() increments the reference count associated with an image % returning a pointer to the image. 
% % The format of the ReferenceImage method is: % % Image *ReferenceImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *ReferenceImage(Image *image) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); LockSemaphoreInfo(image->semaphore); image->reference_count++; UnlockSemaphoreInfo(image->semaphore); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t I m a g e P a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImagePage() resets the image page canvas and position. % % The format of the ResetImagePage method is: % % MagickBooleanType ResetImagePage(Image *image,const char *page) % % A description of each parameter follows: % % o image: the image. % % o page: the relative page specification. 
% */ MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page) { MagickStatusType flags; RectangleInfo geometry; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); flags=ParseAbsoluteGeometry(page,&geometry); if ((flags & WidthValue) != 0) { if ((flags & HeightValue) == 0) geometry.height=geometry.width; image->page.width=geometry.width; image->page.height=geometry.height; } if ((flags & AspectValue) != 0) { if ((flags & XValue) != 0) image->page.x+=geometry.x; if ((flags & YValue) != 0) image->page.y+=geometry.y; } else { if ((flags & XValue) != 0) { image->page.x=geometry.x; if ((image->page.width == 0) && (geometry.x > 0)) image->page.width=image->columns+geometry.x; } if ((flags & YValue) != 0) { image->page.y=geometry.y; if ((image->page.height == 0) && (geometry.y > 0)) image->page.height=image->rows+geometry.y; } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e B a c k g r o u n d C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageBackgroundColor() initializes the image pixels to the image % background color. The background color is defined by the background_color % member of the image structure. % % The format of the SetImage method is: % % MagickBooleanType SetImageBackgroundColor(Image *image) % % A description of each parameter follows: % % o image: the image. 
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    index;

  MagickBooleanType
    status;

  MagickPixelPacket
    background;

  PixelPacket
    pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /* A non-gray background cannot be stored in a gray colorspace; promote
     the image to RGB first. */
  if ((IsPixelGray(&image->background_color) == MagickFalse) &&
      (IsGrayColorspace(image->colorspace) != MagickFalse))
    (void) TransformImageColorspace(image,RGBColorspace);
  /* A translucent background requires an alpha channel. */
  if ((image->background_color.opacity != OpaqueOpacity) &&
      (image->matte == MagickFalse))
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    Convert the background color once, up front, into a ready-to-store
    PixelPacket (plus CMYK index) so the fill loop is a plain copy.
  */
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
    NULL,&background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  index=0;
  pixel.opacity=OpaqueOpacity;
  SetPixelPacket(image,&background,&pixel,&index);
  /*
    Set image background color.
  */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
      *q++=pixel;
    if (image->colorspace == CMYKColorspace)
      {
        register IndexPacket
          *magick_restrict indexes;

        /* CMYK stores the black channel in the index queue. */
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelIndex(indexes+x,index);
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e C h a n n e l s                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageChannels() sets the number of pixels channels associated with the
%  image.
%
%  The format of the SetImageChannels method is:
%
%      MagickBooleanType SetImageChannels(Image *image,const size_t channels)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channels: The number of pixel channels.
%
*/
MagickExport MagickBooleanType SetImageChannels(Image *image,
  const size_t channels)
{
  image->channels=channels;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e C o l o r                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageColor() set the entire image canvas to the specified color.
% % The format of the SetImageColor method is: % % MagickBooleanType SetImageColor(Image *image, % const MagickPixelPacket *color) % % A description of each parameter follows: % % o image: the image. % % o background: the image color. % */ MagickExport MagickBooleanType SetImageColor(Image *image, const MagickPixelPacket *color) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); assert(color != (const MagickPixelPacket *) NULL); image->colorspace=color->colorspace; image->matte=color->matte; image->fuzz=color->fuzz; image->depth=color->depth; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelPacket(image,color,q,indexes+x); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageStorageClass() sets the image class: DirectClass for true color % images or PseudoClass for colormapped 
images. % % The format of the SetImageStorageClass method is: % % MagickBooleanType SetImageStorageClass(Image *image, % const ClassType storage_class) % % A description of each parameter follows: % % o image: the image. % % o storage_class: The image class. % */ MagickExport MagickBooleanType SetImageStorageClass(Image *image, const ClassType storage_class) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->storage_class=storage_class; return(SyncImagePixelCache(image,&image->exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C l i p M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageClipMask() associates a clip path with the image. The clip path % must be the same dimensions as the image. Set any pixel component of % the clip path to TransparentOpacity to prevent that corresponding image % pixel component from being updated when SyncAuthenticPixels() is applied. % % The format of the SetImageClipMask method is: % % MagickBooleanType SetImageClipMask(Image *image,const Image *clip_mask) % % A description of each parameter follows: % % o image: the image. % % o clip_mask: the image clip path. 
% */ MagickExport MagickBooleanType SetImageClipMask(Image *image, const Image *clip_mask) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (clip_mask != (const Image *) NULL) if ((clip_mask->columns != image->columns) || (clip_mask->rows != image->rows)) ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename); if (image->clip_mask != (Image *) NULL) image->clip_mask=DestroyImage(image->clip_mask); image->clip_mask=NewImageList(); if (clip_mask == (Image *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); image->clip_mask=CloneImage(clip_mask,0,0,MagickTrue,&image->exception); if (image->clip_mask == (Image *) NULL) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageExtent() sets the image size (i.e. columns & rows). % % The format of the SetImageExtent method is: % % MagickBooleanType SetImageExtent(Image *image,const size_t columns, % const size_t rows) % % A description of each parameter follows: % % o image: the image. % % o columns: The image width in pixels. % % o rows: The image height in pixels. 
% */ MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns, const size_t rows) { if ((columns == 0) || (rows == 0)) ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename); image->columns=columns; image->rows=rows; if (image->depth > (8*sizeof(MagickSizeType))) ThrowBinaryException(ImageError,"ImageDepthNotSupported",image->filename); return(SyncImagePixelCache(image,&image->exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfo() initializes the `magick' field of the ImageInfo structure. % It is set to a type of image format based on the prefix or suffix of the % filename. For example, `ps:image' returns PS indicating a Postscript image. % JPEG is returned for this filename: `image.jpg'. The filename prefix has % precendence over the suffix. Use an optional index enclosed in brackets % after a file name to specify a desired scene of a multi-resolution image % format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value % indicates success. % % The format of the SetImageInfo method is: % % MagickBooleanType SetImageInfo(ImageInfo *image_info, % const unsigned int frames,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o frames: the number of images you intend to write. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
  const unsigned int frames,ExceptionInfo *exception)
{
  char
    extension[MaxTextExtent],
    filename[MaxTextExtent],
    magic[MaxTextExtent],
    *q,
    subimage[MaxTextExtent];

  const MagicInfo
    *magic_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  Image
    *image;

  MagickBooleanType
    status;

  register const char
    *p;

  ssize_t
    count;

  unsigned char
    magick[2*MaxTextExtent];

  /*
    Look for 'image.format' in filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  *subimage='\0';
  GetPathComponent(image_info->filename,SubimagePath,subimage);
  if (*subimage != '\0')
    {
      /*
        Look for scene specification (e.g. img0001.pcd[4]).
      */
      if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse)
        {
          /* Not a scene list; a geometry (e.g. [100x100+10+10]) becomes an
             extract specification instead. */
          if (IsGeometry(subimage) != MagickFalse)
            (void) CloneString(&image_info->extract,subimage);
        }
      else
        {
          size_t
            first,
            last;

          /*
            Parse a comma-separated list of scene numbers and ranges
            (e.g. "3,7-9") to find the overall first scene and scene count.
          */
          (void) CloneString(&image_info->scenes,subimage);
          image_info->scene=StringToUnsignedLong(image_info->scenes);
          image_info->number_scenes=image_info->scene;
          p=image_info->scenes;
          for (q=(char *) image_info->scenes; *q != '\0'; p++)
          {
            while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
              p++;
            first=(size_t) strtol(p,&q,10);
            last=first;
            while (isspace((int) ((unsigned char) *q)) != 0)
              q++;
            if (*q == '-')
              last=(size_t) strtol(q+1,&q,10);
            if (first > last)
              Swap(first,last);
            if (first < image_info->scene)
              image_info->scene=first;
            if (last > image_info->number_scenes)
              image_info->number_scenes=last;
            p=q;
          }
          image_info->number_scenes-=image_info->scene-1;
          image_info->subimage=image_info->scene;
          image_info->subrange=image_info->number_scenes;
        }
    }
  *extension='\0';
  if (*image_info->magick == '\0')
    GetPathComponent(image_info->filename,ExtensionPath,extension);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
  /* For compressed files (image.gif.gz etc.) use the inner extension. */
  if (*extension != '\0')
    if ((LocaleCompare(extension,"gz") == 0) ||
        (LocaleCompare(extension,"Z") == 0) ||
        (LocaleCompare(extension,"svgz") == 0) ||
        (LocaleCompare(extension,"wmz") == 0))
      {
        char
          path[MaxTextExtent];

        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
  if (*extension != '\0')
    if (LocaleCompare(extension,"bz2") == 0)
      {
        char
          path[MaxTextExtent];

        (void) CopyMagickString(path,image_info->filename,MaxTextExtent);
        path[strlen(path)-strlen(extension)-1]='\0';
        GetPathComponent(path,ExtensionPath,extension);
      }
#endif
  image_info->affirm=MagickFalse;
  sans_exception=AcquireExceptionInfo();
  if (*extension != '\0')
    {
      MagickFormatType
        format_type;

      register ssize_t
        i;

      static const char
        *format_type_formats[] =
        {
          "AUTOTRACE",
          "BROWSE",
          "DCRAW",
          "EDIT",
          "LAUNCH",
          "MPEG:DECODE",
          "MPEG:ENCODE",
          "PRINT",
          "PS:ALPHA",
          "PS:CMYK",
          "PS:COLOR",
          "PS:GRAY",
          "PS:MONO",
          "SCAN",
          "SHOW",
          "WIN",
          (char *) NULL
        };

      /*
        User specified image format.
      */
      (void) CopyMagickString(magic,extension,MaxTextExtent);
      LocaleUpper(magic);
      /*
        Look for explicit image formats.
      */
      format_type=UndefinedFormatType;
      i=0;
      while ((format_type == UndefinedFormatType) &&
             (format_type_formats[i] != (char *) NULL))
      {
        if ((*magic == *format_type_formats[i]) &&
            (LocaleCompare(magic,format_type_formats[i]) == 0))
          format_type=ExplicitFormatType;
        i++;
      }
      magick_info=GetMagickInfo(magic,sans_exception);
      if ((magick_info != (const MagickInfo *) NULL) &&
          (magick_info->format_type != UndefinedFormatType))
        format_type=magick_info->format_type;
      if (format_type == UndefinedFormatType)
        (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
      else
        if (format_type == ExplicitFormatType)
          {
            image_info->affirm=MagickTrue;
            (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          }
      if (LocaleCompare(magic,"RGB") == 0)
        image_info->affirm=MagickFalse;  /* maybe SGI disguised as RGB */
    }
  /*
    Look for explicit 'format:image' in filename.
  */
  *magic='\0';
  GetPathComponent(image_info->filename,MagickPath,magic);
  if (*magic == '\0')
    {
      (void) CopyMagickString(magic,image_info->magick,MaxTextExtent);
      magick_info=GetMagickInfo(magic,sans_exception);
      GetPathComponent(image_info->filename,CanonicalPath,filename);
      (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
    }
  else
    {
      const DelegateInfo
        *delegate_info;

      /*
        User specified image format.
      */
      LocaleUpper(magic);
      magick_info=GetMagickInfo(magic,sans_exception);
      delegate_info=GetDelegateInfo(magic,"*",sans_exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        delegate_info=GetDelegateInfo("*",magic,sans_exception);
      /* The prefix wins only if it names a known coder or delegate and
         does not conflict with a device/path prefix. */
      if (((magick_info != (const MagickInfo *) NULL) ||
           (delegate_info != (const DelegateInfo *) NULL)) &&
          (IsMagickConflict(magic) == MagickFalse))
        {
          image_info->affirm=MagickTrue;
          (void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
          GetPathComponent(image_info->filename,CanonicalPath,filename);
          (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
        }
    }
  sans_exception=DestroyExceptionInfo(sans_exception);
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetMagickEndianSupport(magick_info) == MagickFalse))
    image_info->endian=UndefinedEndian;
  if ((image_info->adjoin != MagickFalse) && (frames > 1))
    {
      /*
        Test for multiple image support (e.g. image%02d.png).
      */
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        image_info->filename,(int) image_info->scene,filename);
      if ((LocaleCompare(filename,image_info->filename) != 0) &&
          (strchr(filename,'%') == (char *) NULL))
        image_info->adjoin=MagickFalse;
    }
  if ((image_info->adjoin != MagickFalse) && (frames > 0))
    {
      /*
        Some image formats do not support multiple frames per file.
      */
      magick_info=GetMagickInfo(magic,exception);
      if (magick_info != (const MagickInfo *) NULL)
        if (GetMagickAdjoin(magick_info) == MagickFalse)
          image_info->adjoin=MagickFalse;
    }
  if (image_info->affirm != MagickFalse)
    return(MagickTrue);
  if (frames == 0)
    {
      /*
        Determine the image format from the first few bytes of the file.
      */
      image=AcquireImage(image_info);
      (void) CopyMagickString(image->filename,image_info->filename,
        MaxTextExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          image=DestroyImage(image);
          return(MagickFalse);
        }
      if ((IsBlobSeekable(image) == MagickFalse) ||
          (IsBlobExempt(image) != MagickFalse))
        {
          /*
            Copy image to a seekable temporary file.
          */
          *filename='\0';
          status=ImageToFile(image,filename,exception);
          (void) CloseBlob(image);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          SetImageInfoFile(image_info,(FILE *) NULL);
          (void) CopyMagickString(image->filename,filename,MaxTextExtent);
          status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
          if (status == MagickFalse)
            {
              image=DestroyImage(image);
              return(MagickFalse);
            }
          (void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
          image_info->temporary=MagickTrue;
        }
      /* Sniff the leading bytes, then rewind so a subsequent read starts
         from the beginning of the blob. */
      (void) ResetMagickMemory(magick,0,sizeof(magick));
      count=ReadBlob(image,2*MaxTextExtent,magick);
      (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
      (void) CloseBlob(image);
      image=DestroyImage(image);
      /*
        Check magic.xml configuration file.
      */
      sans_exception=AcquireExceptionInfo();
      magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
      if ((magic_info != (const MagicInfo *) NULL) &&
          (GetMagicName(magic_info) != (char *) NULL))
        {
          (void) CopyMagickString(image_info->magick,GetMagicName(magic_info),
            MaxTextExtent);
          magick_info=GetMagickInfo(image_info->magick,sans_exception);
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetMagickEndianSupport(magick_info) == MagickFalse))
            image_info->endian=UndefinedEndian;
          sans_exception=DestroyExceptionInfo(sans_exception);
          return(MagickTrue);
        }
      magick_info=GetMagickInfo(image_info->magick,sans_exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetMagickEndianSupport(magick_info) == MagickFalse))
        image_info->endian=UndefinedEndian;
      sans_exception=DestroyExceptionInfo(sans_exception);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e I n f o B l o b                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageInfoBlob() sets the image info blob member.
%
%  The format of the SetImageInfoBlob method is:
%
%      void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
%        const size_t length)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o blob: the blob.
%
%    o length: the blob length.
% */ MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob, const size_t length) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->blob=(void *) blob; image_info->length=length; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoFile() sets the image info file member. % % The format of the SetImageInfoFile method is: % % void SetImageInfoFile(ImageInfo *image_info,FILE *file) % % A description of each parameter follows: % % o image_info: the image info. % % o file: the file. % */ MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->file=file; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMask() associates a mask with the image. The mask must be the same % dimensions as the image. % % The format of the SetImageMask method is: % % MagickBooleanType SetImageMask(Image *image,const Image *mask) % % A description of each parameter follows: % % o image: the image. % % o mask: the image mask. 
% */ MagickExport MagickBooleanType SetImageMask(Image *image,const Image *mask) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (mask != (const Image *) NULL) if ((mask->columns != image->columns) || (mask->rows != image->rows)) ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename); if (image->mask != (Image *) NULL) image->mask=DestroyImage(image->mask); image->mask=NewImageList(); if (mask == (Image *) NULL) return(MagickTrue); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); image->mask=CloneImage(mask,0,0,MagickTrue,&image->exception); if (image->mask == (Image *) NULL) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e O p a c i t y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageOpacity() sets the opacity levels of the image. % % The format of the SetImageOpacity method is: % % MagickBooleanType SetImageOpacity(Image *image,const Quantum opacity) % % A description of each parameter follows: % % o image: the image. % % o opacity: the level of transparency: 0 is fully opaque and QuantumRange is % fully transparent. 
% */ MagickExport MagickBooleanType SetImageOpacity(Image *image, const Quantum opacity) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); image->matte=MagickTrue; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelOpacity(q,opacity); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageVirtualPixelMethod() sets the "virtual pixels" method for the % image and returns the previous setting. A virtual pixel is any pixel access % that is outside the boundaries of the image cache. % % The format of the SetImageVirtualPixelMethod() method is: % % VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image, % const VirtualPixelMethod virtual_pixel_method) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. 
% */ MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image, const VirtualPixelMethod virtual_pixel_method) { assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); return(SetPixelCacheVirtualMethod(image,virtual_pixel_method)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S m u s h I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SmushImages() takes all images from the current image pointer to the end % of the image list and smushes them to each other top-to-bottom if the % stack parameter is true, otherwise left-to-right. % % The current gravity setting now effects how the image is justified in the % final image. % % The format of the SmushImages method is: % % Image *SmushImages(const Image *images,const MagickBooleanType stack, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o stack: A value other than 0 stacks the images top-to-bottom. % % o offset: minimum distance in pixels between images. % % o exception: return any errors or warnings in this structure. 
% */

/*
  SmushXGap() measures, for a left/right pair of adjacent images in the list,
  how far the two images can be pulled together horizontally without any
  non-transparent pixels colliding.  It scans every row of the smush canvas:
  for each row it counts the run of transparent pixels on the right edge of
  the left image plus the run on the left edge of the right image, and keeps
  the minimum such combined run ("gap") over all rows.  The caller subtracts
  the returned value from the running x offset.

  Returns `gap - offset` (the distance the right image may be shifted left,
  reduced by the requested minimum spacing), or 0 when `images` has no
  predecessor.
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *left_view,
    *right_view;

  const Image
    *left_image,
    *right_image;

  RectangleInfo
    left_geometry,
    right_geometry;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    y;

  size_t
    gap;

  ssize_t
    x;

  /* first image in the list: nothing to its left, no gap to close */
  if (images->previous == (Image *) NULL)
    return(0);
  right_image=images;
  SetGeometry(smush_image,&right_geometry);
  GravityAdjustGeometry(right_image->columns,right_image->rows,
    right_image->gravity,&right_geometry);
  left_image=images->previous;
  SetGeometry(smush_image,&left_geometry);
  GravityAdjustGeometry(left_image->columns,left_image->rows,
    left_image->gravity,&left_geometry);
  /* upper bound: the gap can never exceed the right image's width */
  gap=right_image->columns;
  left_view=AcquireVirtualCacheView(left_image,exception);
  right_view=AcquireVirtualCacheView(right_image,exception);
  for (y=0; y < (ssize_t) smush_image->rows; y++)
  {
    /* count transparent pixels inward from the left image's right edge */
    for (x=(ssize_t) left_image->columns-1; x > 0; x--)
    {
      p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((left_image->columns-x-1) >= gap))
        break;
    }
    i=(ssize_t) left_image->columns-x-1;
    /* count transparent pixels inward from the right image's left edge */
    for (x=0; x < (ssize_t) right_image->columns; x++)
    {
      p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
        exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((x+i) >= (ssize_t) gap))
        break;
    }
    /* keep the tightest (smallest) combined transparent run seen so far */
    if ((x+i) < (ssize_t) gap)
      gap=(size_t) (x+i);
  }
  right_view=DestroyCacheView(right_view);
  left_view=DestroyCacheView(left_view);
  if (y < (ssize_t) smush_image->rows)
    return(offset);
  return((ssize_t) gap-offset);
}

/*
  SmushYGap() is the vertical counterpart of SmushXGap(): for a top/bottom
  pair of adjacent images it scans every column, summing the transparent run
  at the bottom edge of the top image and the top edge of the bottom image,
  and returns the minimum combined run minus the requested offset.
*/
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
  const ssize_t offset,ExceptionInfo *exception)
{
  CacheView
    *bottom_view,
    *top_view;

  const Image
    *bottom_image,
    *top_image;

  RectangleInfo
    bottom_geometry,
    top_geometry;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  size_t
    gap;

  ssize_t
    y;

  /* first image in the list: nothing above it, no gap to close */
  if (images->previous == (Image *) NULL)
    return(0);
  bottom_image=images;
  SetGeometry(smush_image,&bottom_geometry);
  GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
    bottom_image->gravity,&bottom_geometry);
  top_image=images->previous;
  SetGeometry(smush_image,&top_geometry);
  GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
    &top_geometry);
  /* upper bound: the gap can never exceed the bottom image's height */
  gap=bottom_image->rows;
  top_view=AcquireVirtualCacheView(top_image,exception);
  bottom_view=AcquireVirtualCacheView(bottom_image,exception);
  for (x=0; x < (ssize_t) smush_image->columns; x++)
  {
    /* count transparent pixels upward from the top image's bottom edge */
    for (y=(ssize_t) top_image->rows-1; y > 0; y--)
    {
      p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((top_image->rows-y-1) >= gap))
        break;
    }
    i=(ssize_t) top_image->rows-y-1;
    /* count transparent pixels downward from the bottom image's top edge */
    for (y=0; y < (ssize_t) bottom_image->rows; y++)
    {
      p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
        exception);
      if ((p == (const PixelPacket *) NULL) ||
          (GetPixelOpacity(p) != TransparentOpacity) ||
          ((y+i) >= (ssize_t) gap))
        break;
    }
    /* keep the tightest (smallest) combined transparent run seen so far */
    if ((y+i) < (ssize_t) gap)
      gap=(size_t) (y+i);
  }
  bottom_view=DestroyCacheView(bottom_view);
  top_view=DestroyCacheView(top_view);
  if (x < (ssize_t) smush_image->columns)
    return(offset);
  return((ssize_t) gap-offset);
}

/*
  SmushImages() composites the image list onto a single canvas, abutting
  successive images left-to-right (stack == MagickFalse) or top-to-bottom
  (stack != MagickFalse) and overlapping each pair by the transparent gap
  computed by SmushXGap()/SmushYGap().  The returned image is newly
  allocated; NULL is returned on allocation failure or when the composite
  loop reports an error.
*/
MagickExport Image *SmushImages(const Image *images,
  const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag  "Smush/Image"

  CacheView
    *smush_view;

  const Image
    *image;

  Image
    *smush_image;

  MagickBooleanType
    matte,
    proceed,
    status;

  MagickOffsetType
    n;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y_offset;

  /*
    Compute maximum area of smushed area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=images;
  matte=image->matte;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    /* the result carries a matte channel if any input does */
    if (next->matte != MagickFalse)
      matte=MagickTrue;
    number_images++;
    if (stack != MagickFalse)
      {
        /* vertical stacking: widest image wins, heights accumulate */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        if (next->previous != (Image *) NULL)
          height+=offset;
        continue;
      }
    /* horizontal smush: widths accumulate, tallest image wins */
    width+=next->columns;
    if (next->previous != (Image *) NULL)
      width+=offset;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Smush images.
  */
  smush_image=CloneImage(image,width,height,MagickTrue,exception);
  if (smush_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(smush_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&smush_image->exception);
      smush_image=DestroyImage(smush_image);
      return((Image *) NULL);
    }
  smush_image->matte=matte;
  (void) SetImageBackgroundColor(smush_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  smush_view=AcquireVirtualCacheView(smush_image,exception);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    SetGeometry(smush_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      {
        /* pull this image up by the measured transparent overlap */
        x_offset-=geometry.x;
        y_offset-=SmushYGap(smush_image,image,offset,exception);
      }
    else
      {
        /* pull this image left by the measured transparent overlap */
        x_offset-=SmushXGap(smush_image,image,offset,exception);
        y_offset-=geometry.y;
      }
    status=CompositeImage(smush_image,OverCompositeOp,image,x_offset,y_offset);
    proceed=SetImageProgress(image,SmushImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  /* trim the canvas to the extent actually used */
  if (stack == MagickFalse)
    smush_image->columns=(size_t) x_offset;
  else
    smush_image->rows=(size_t) y_offset;
  smush_view=DestroyCacheView(smush_view);
  if (status == MagickFalse)
    smush_image=DestroyImage(smush_image);
  return(smush_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S t r i p I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  StripImage() strips an image of all profiles and comments.
%
%  The format of the StripImage method is:
%
%      MagickBooleanType StripImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType StripImage(Image *image)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  /* tell the PNG encoder to omit ancillary chunks that would leak metadata */
  status=SetImageArtifact(image,"png:exclude-chunk",
    "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   S y n c I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImage() initializes the red, green, and blue intensities of each pixel
%  as defined by the colormap index.
%
%  The format of the SyncImage method is:
%
%      MagickBooleanType SyncImage(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */

/*
  Clamp a colormap index into range.  Out-of-range indices set
  *range_exception so the caller can emit a single warning after the scan,
  and map to colormap entry 0.
*/
static inline IndexPacket PushColormapIndex(Image *image,
  const size_t index,MagickBooleanType *range_exception)
{
  if (index < image->colors)
    return((IndexPacket) index);
  *range_exception=MagickTrue;
  return((IndexPacket) 0);
}

/*
  SyncImage() re-derives every pixel's RGB (and opacity, when no matte
  channel is active) from its colormap index.  Only meaningful for
  PseudoClass images; ping images are a no-op.  Row processing is
  parallelized under OpenMP; the image's taint flag is restored afterwards
  so the sync itself does not mark the image as modified.
*/
MagickExport MagickBooleanType SyncImage(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    range_exception,
    status,
    taint;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (image->ping != MagickFalse)
    return(MagickTrue);
  if (image->storage_class != PseudoClass)
    return(MagickFalse);
  assert(image->colormap != (PixelPacket *) NULL);
  range_exception=MagickFalse;
  status=MagickTrue;
  /* remember taint so syncing pixels does not flag the image as modified */
  taint=image->taint;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(range_exception,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    IndexPacket
      index;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=PushColormapIndex(image,(size_t) GetPixelIndex(indexes+x),
        &range_exception);
      /* NOTE(review): no semicolon after SetPixelRgb — presumably it expands
         to a brace block; confirm against pixel-accessor.h before editing */
      if (image->matte == MagickFalse)
        SetPixelRgb(q,image->colormap+(ssize_t) index)
      else
        SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  image->taint=taint;
  /* one warning for the whole image rather than per bad pixel */
  if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e S e t t i n g s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageSettings() syncs image_info options into per-image attributes.
%
%  The format of the SyncImageSettings method is:
%
%      MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
%        Image *image)
%      MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
%        Image *image)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images)
{
  Image
    *image;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  image=images;
  /* apply the settings to every image in the list */
  for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
    (void) SyncImageSettings(image_info,image);
  /* "page" is consumed here; drop it so it is not re-applied downstream */
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}

MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
  Image *image)
{
  char
    property[MaxTextExtent];

  const char
    *option,
    *value;

  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  ResolutionType
    units;

  /*
    Sync image options.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  option=GetImageOption(image_info,"background");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->background_color,
      &image->exception);
  option=GetImageOption(image_info,"bias");
  if (option != (const char *) NULL)
    image->bias=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"black-point-compensation");
  if (option != (const char *) NULL)
    image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
      MagickBooleanOptions,MagickFalse,option);
  option=GetImageOption(image_info,"blue-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.blue_primary.x=geometry_info.rho;
      image->chromaticity.blue_primary.y=geometry_info.sigma;
      /* a single value applies to both coordinates */
      if ((flags & SigmaValue) == 0)
        image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
    }
  option=GetImageOption(image_info,"bordercolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->border_color,&image->exception);
  option=GetImageOption(image_info,"colors");
  if (option != (const char *) NULL)
    image->colors=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"compose");
  if (option != (const char *) NULL)
    image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"compress");
  if (option != (const char *) NULL)
    image->compression=(CompressionType) ParseCommandOption(
      MagickCompressOptions,MagickFalse,option);
  option=GetImageOption(image_info,"debug");
  if (option != (const char *) NULL)
    image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"density");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /*
        Set image density.
      */
      flags=ParseGeometry(option,&geometry_info);
      image->x_resolution=geometry_info.rho;
      image->y_resolution=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->y_resolution=image->x_resolution;
    }
  option=GetImageOption(image_info,"depth");
  if (option != (const char *) NULL)
    image->depth=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"endian");
  if (option != (const char *) NULL)
    image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"filter");
  if (option != (const char *) NULL)
    image->filter=(FilterTypes) ParseCommandOption(MagickFilterOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"fuzz");
  if (option != (const char *) NULL)
    image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
  option=GetImageOption(image_info,"gravity");
  if (option != (const char *) NULL)
    image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"green-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.green_primary.x=geometry_info.rho;
      image->chromaticity.green_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
    }
  option=GetImageOption(image_info,"intensity");
  if (option != (const char *) NULL)
    image->intensity=(PixelIntensityMethod) ParseCommandOption(
      MagickPixelIntensityOptions,MagickFalse,option);
  option=GetImageOption(image_info,"intent");
  if (option != (const char *) NULL)
    image->rendering_intent=(RenderingIntent) ParseCommandOption(
      MagickIntentOptions,MagickFalse,option);
  option=GetImageOption(image_info,"interlace");
  if (option != (const char *) NULL)
    image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"interpolate");
  if (option != (const char *) NULL)
    image->interpolate=(InterpolatePixelMethod) ParseCommandOption(
      MagickInterpolateOptions,MagickFalse,option);
  option=GetImageOption(image_info,"loop");
  if (option != (const char *) NULL)
    image->iterations=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"mattecolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->matte_color,&image->exception);
  option=GetImageOption(image_info,"orient");
  if (option != (const char *) NULL)
    image->orientation=(OrientationType) ParseCommandOption(
      MagickOrientationOptions,MagickFalse,option);
  option=GetImageOption(image_info,"page");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"quality");
  if (option != (const char *) NULL)
    image->quality=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"red-primary");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.red_primary.x=geometry_info.rho;
      image->chromaticity.red_primary.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
    }
  /* an explicit image_info quality overrides the "quality" option */
  if (image_info->quality != UndefinedCompressionQuality)
    image->quality=image_info->quality;
  option=GetImageOption(image_info,"scene");
  if (option != (const char *) NULL)
    image->scene=StringToUnsignedLong(option);
  option=GetImageOption(image_info,"taint");
  if (option != (const char *) NULL)
    image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
      MagickFalse,option);
  option=GetImageOption(image_info,"tile-offset");
  if (option != (const char *) NULL)
    {
      char
        *geometry;

      geometry=GetPageGeometry(option);
      flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
      geometry=DestroyString(geometry);
    }
  option=GetImageOption(image_info,"transparent-color");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&image->transparent_color,
      &image->exception);
  option=GetImageOption(image_info,"type");
  if (option != (const char *) NULL)
    image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
      option);
  option=GetImageOption(image_info,"units");
  if (option != (const char *) NULL)
    units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
      MagickFalse,option);
  else
    units = image_info->units;
  if (units != UndefinedResolution)
    {
      /* convert stored resolution when the unit system changes */
      if (image->units != units)
        switch (image->units)
        {
          case PixelsPerInchResolution:
          {
            if (units == PixelsPerCentimeterResolution)
              {
                image->x_resolution/=2.54;
                image->y_resolution/=2.54;
              }
            break;
          }
          case PixelsPerCentimeterResolution:
          {
            if (units == PixelsPerInchResolution)
              {
                /* round to two decimal places */
                image->x_resolution=(double) ((size_t) (100.0*2.54*
                  image->x_resolution+0.5))/100.0;
                image->y_resolution=(double) ((size_t) (100.0*2.54*
                  image->y_resolution+0.5))/100.0;
              }
            break;
          }
          default:
            break;
        }
      image->units=units;
    }
  option=GetImageOption(image_info,"white-point");
  if (option != (const char *) NULL)
    {
      flags=ParseGeometry(option,&geometry_info);
      image->chromaticity.white_point.x=geometry_info.rho;
      image->chromaticity.white_point.y=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->chromaticity.white_point.y=image->chromaticity.white_point.x;
    }
  /* mirror every remaining defined option into an image artifact */
  ResetImageOptionIterator(image_info);
  for (option=GetNextImageOption(image_info); option != (const char *) NULL; )
  {
    value=GetImageOption(image_info,option);
    if (value != (const char *) NULL)
      {
        (void) FormatLocaleString(property,MaxTextExtent,"%s",option);
        (void) SetImageArtifact(image,property,value);
      }
    option=GetNextImageOption(image_info);
  }
  return(MagickTrue);
}
ex2.c
#include <stdio.h>
#include <omp.h>

#define NUM_THREADS 4

/* number of midpoint-rule rectangles used to approximate the integral */
static long num_steps = 10000000;
double step;

/*
 * Estimate pi by integrating 4/(1+x^2) over [0,1] with the midpoint rule,
 * using the classic SPMD OpenMP pattern: each thread accumulates a private
 * partial sum over a cyclic slice of the iteration space, then adds it
 * atomically into the shared total.
 *
 * Fixes vs. the original:
 *  - `sum` was read before ever being initialized (undefined behavior);
 *    it now starts at 0.0.
 *  - the loop initializer `i = id = 0.0` zeroed the thread id, so every
 *    thread summed the same slice starting at 0; it is now `i = id`.
 */
int main()
{
    double pi, sum = 0.0;   /* shared accumulator: must start at zero */
    int ts_num;

    step = 1.0/(double)num_steps;

    /* request the number of threads to use */
    omp_set_num_threads(NUM_THREADS);

#pragma omp parallel
    {
        int i, ts_nums, id;
        double x;
        double l_sum = 0.0; /* per-thread partial sum */

        /* get current thread ID */
        id = omp_get_thread_num();
        /* get the number of threads actually granted */
        ts_nums = omp_get_num_threads();
        /* only the first thread records the actual team size */
        if (id == 0)
            ts_num = ts_nums;

        /* cyclic distribution: thread `id` handles i = id, id+T, id+2T, ... */
        for (i = id; i < num_steps; i += ts_nums)
        {
            x = (i+0.5)*step;
            l_sum += 4.0/(1.0 + x*x);
        }

        /* add this thread's contribution to the global sum */
#pragma omp atomic
        sum += l_sum;
    } /* end of OMP PARALLEL */

    pi = sum*step;
    printf("pi is %f\n", pi);
    return 0;
}
pr66714.c
/* { dg-do "compile" } */
/* { dg-additional-options "--param ggc-min-expand=0" } */
/* { dg-additional-options "--param ggc-min-heapsize=0" } */
/* { dg-additional-options "-g" } */
/* Minimized from target-2.c.  */

/* GCC regression test (PR66714): compile-only check that a variable-length
   array referenced inside an OpenMP target/parallel-for region survives
   gimplification with debug info and aggressive garbage collection enabled.
   The code is never executed, so reading the uninitialized VLA is
   intentional — do not "fix" it.  */
void
fn3 (int x)
{
  double b[3 * x];
  int i;

#pragma omp target
#pragma omp parallel for
  for (i = 0; i < x; i++)
    b[i] += 1;
}
test.c
#include <stdlib.h> #include <stdio.h> #include "omp.h" #include "../utilities/check.h" #include "../utilities/utilities.h" #define N 10 int main() { double a[N], a_h[N]; double b[N], c[N]; double d[N], d_h[N]; int fail = 0; check_offloading(); long cpuExec = 0; #pragma omp target map(tofrom: cpuExec) { cpuExec = omp_is_initial_device(); } // taskloop is only implemented on the gpu if (!cpuExec) { // Test: basic with shared for(int i = 0 ; i < N ; i++) { a[i] = a_h[i] = 0; b[i] = i; c[i] = i-7; d[i] = d_h[i] = i+12; } #pragma omp target map(tofrom:a) map(to:b,c) { #pragma omp parallel #pragma omp single #pragma omp taskgroup #pragma omp taskloop shared(a) for(int i = 0 ; i < N; i++) { d[i] += b[i] + c[i]; } // handle dependency between two taskloop using taskgroup // as tasks are immediately executed, no need for further // logic to synchronize #pragma omp taskgroup #pragma omp taskloop shared(a) for(int i = 0 ; i < N; i++) { a[i] += d[i]; } } for(int i = 0 ; i < N; i++) { d_h[i] += b[i] + c[i]; a_h[i] += d_h[i]; } for(int i = 0 ; i < N; i++) if (a[i] != a_h[i]) { printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]); fail = 1; } if (fail) printf("Failed\n"); else printf("Succeeded\n"); } else // if !cpuExec DUMP_SUCCESS(1); return 0; }
dds.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"

/*
  Definitions
*/
/* DDS_HEADER.dwFlags (DDSD_*): which header fields contain valid data */
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000

/* DDS_PIXELFORMAT.dwFlags (DDPF_*): how pixel data is encoded */
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000

/* FourCC codes: little-endian ASCII "DXT1"/"DXT3"/"DXT5" */
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844

/* dwCaps: surface complexity/usage flags */
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000

/* dwCaps2: cubemap faces and volume-texture flags */
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000

/* fallback for toolchains whose <stdint.h> lacks SIZE_MAX */
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif

/*
  Structure declarations.
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColourLookup { DDSSourceBlock sources[2]; } DDSSingleColourLookup; typedef MagickBooleanType DDSDecoder(Image *, DDSInfo *, ExceptionInfo *); static const DDSSingleColourLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 } } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 
4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 29, 0 } } }, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { { { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } 
}, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } } }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { 
{ { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 }, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 }, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } }, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { 
{ 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 }, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 }, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } }, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 
/*
  Tail of DDSLookup_5_4 (continued from above) followed by the full
  DDSLookup_6_4 table: for each 8-bit channel value, two candidate
  (start, end, error) endpoint pairs for encoding a single-colour DXT block.
  The tables are consumed through DDS_LOOKUP (R,G,B -> 5-, 6-, 5-bit tables)
  by ComputeEndPoints below.
*/
31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColourLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, { 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 
0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } }, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { { { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } 
}, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } }, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } 
}, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, { { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } }, { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } 
}, { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } }, { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } }, { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } }, { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } }, { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } }, { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } }, { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } }, { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } }, { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } }, { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } }, { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } }, { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } }, { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } }, { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } }, { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } }, { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } }, { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } }, { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } }, { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } } }; static const DDSSingleColourLookup* DDS_LOOKUP[] = { DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4 }; /* Macros */ #define C565_r(x) (((x) & 0xF800) >> 11) #define C565_g(x) (((x) & 0x07E0) >> 5) #define C565_b(x) ((x) & 0x001F) #define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2)) #define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4)) #define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2)) #define DIV2(x) ((x) > 1 ? 
((x) >> 1) : 1) #define FixRange(min, max, steps) \ if (min > max) \ min = max; \ if (max - min < steps) \ max = MagickMin(min + steps, 255); \ if (max - min < steps) \ min = MagickMax(0, max - steps) #define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z) #define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \ = value #define VectorInit3(vector, value) vector.x = vector.y = vector.z = value #define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \ g && mask.b_bitmask == b && mask.alpha_bitmask == a) /* Forward declarations */ /* Forward declarations */ static MagickBooleanType ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3, DDSVector4 *, DDSVector4 *, unsigned char *, size_t), ReadDDSInfo(Image *,DDSInfo *), ReadDXT1(Image *,DDSInfo *,ExceptionInfo *), ReadDXT3(Image *,DDSInfo *,ExceptionInfo *), ReadDXT5(Image *,DDSInfo *,ExceptionInfo *), ReadUncompressedRGB(Image *,DDSInfo *,ExceptionInfo *), ReadUncompressedRGBA(Image *,DDSInfo *,ExceptionInfo *), SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *), WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *), WriteMipmaps(Image *,const size_t,const size_t,const size_t, const MagickBooleanType,const MagickBooleanType,ExceptionInfo *); static void RemapIndices(const ssize_t *,const unsigned char *,unsigned char *), WriteDDSInfo(Image *,const size_t,const size_t,const size_t), WriteFourCC(Image *,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType, const MagickBooleanType,ExceptionInfo *), WriteIndices(Image *,const DDSVector3,const DDSVector3,unsigned char *), WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *), WriteUncompressed(Image *,ExceptionInfo *); static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { 
  /*
    Componentwise vector helpers for the DXT colour fitter.  Each takes its
    DDSVector3/DDSVector4 operands by value and writes the result through
    `destination` (or clamps/truncates in place for the pointer variants).
  */
destination->x = left.x + right.x; destination->y = left.y + right.y; destination->z = left.z + right.z; destination->w = left.w + right.w; } static inline void VectorClamp(DDSVector4 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); value->w = MagickMin(1.0f,MagickMax(0.0f,value->w)); } static inline void VectorClamp3(DDSVector3 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); } static inline void VectorCopy43(const DDSVector4 source, DDSVector3 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; } static inline void VectorCopy44(const DDSVector4 source, DDSVector4 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; destination->w = source.w; } static inline void VectorNegativeMultiplySubtract(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = c.x - (a.x * b.x); destination->y = c.y - (a.y * b.y); destination->z = c.z - (a.z * b.z); destination->w = c.w - (a.w * b.w); } static inline void VectorMultiply(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; destination->w = left.w * right.w; } static inline void VectorMultiply3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; } static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; destination->w = (a.w * b.w) + c.w; 
  /*
    The remaining helpers below follow the same componentwise pattern;
    VectorTruncate/VectorTruncate3 round each component toward zero
    (floor for positive values, ceil for negative ones).
  */
} static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b, const DDSVector3 c, DDSVector3 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; } static inline void VectorReciprocal(const DDSVector4 value, DDSVector4 *destination) { destination->x = 1.0f / value.x; destination->y = 1.0f / value.y; destination->z = 1.0f / value.z; destination->w = 1.0f / value.w; } static inline void VectorSubtract(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; destination->w = left.w - right.w; } static inline void VectorSubtract3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; } static inline void VectorTruncate(DDSVector4 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z); value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w); } static inline void VectorTruncate3(DDSVector3 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? 
/*
  --- DXT colour-block compression helpers ---
  CalculateColors expands the two RGB565 endpoints c0/c1 into the 4-entry
  block palette; when alpha is honoured and c0 <= c1 the block is in 3-colour
  mode: index 3 becomes black with a[3] = 255 (transparency marker --
  NOTE(review): confirm callers interpret a == 255 as fully transparent).
  CompressAlpha quantises 16 alpha samples onto an 8-code ramp (codes 6/7
  pinned to 0/255; for steps == 7 the interpolation loop overwrites them) and
  returns the accumulated squared error; alphas[i] == -1 is skipped and gets
  index 0.
*/
floor(value->z) : ceil(value->z); } static void CalculateColors(unsigned short c0, unsigned short c1, DDSColors *c, MagickBooleanType ignoreAlpha) { c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0; c->r[0] = (unsigned char) C565_red(c0); c->g[0] = (unsigned char) C565_green(c0); c->b[0] = (unsigned char) C565_blue(c0); c->r[1] = (unsigned char) C565_red(c1); c->g[1] = (unsigned char) C565_green(c1); c->b[1] = (unsigned char) C565_blue(c1); if (ignoreAlpha != MagickFalse || c0 > c1) { c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3); c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3); c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3); c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3); c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3); c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3); } else { c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2); c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2); c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2); c->r[3] = c->g[3] = c->b[3] = 0; c->a[3] = 255; } } static size_t CompressAlpha(const size_t min, const size_t max, const size_t steps, const ssize_t *alphas, unsigned char* indices) { unsigned char codes[8]; register ssize_t i; size_t error, index, j, least, value; codes[0] = (unsigned char) min; codes[1] = (unsigned char) max; codes[6] = 0; codes[7] = 255; for (i=1; i < (ssize_t) steps; i++) codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps); error = 0; for (i=0; i<16; i++) { if (alphas[i] == -1) { indices[i] = 0; continue; } value = alphas[i]; least = SIZE_MAX; index = 0; for (j=0; j<8; j++) { size_t dist; dist = value - (size_t)codes[j]; dist *= dist; if (dist < least) { least = dist; index = j; } } indices[i] = (unsigned char)index; error += least; } return error; } static void CompressClusterFit(const size_t count, const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3* end, unsigned char *indices) { 
  /*
    Cluster fit: least-squares search over contiguous partitions of the points
    (ordered along the principal axis, up to 8 re-orderings) into the four
    palette codes.  The OpenMP threads race on the unsynchronised bestError
    read below, but a candidate is only recorded after re-checking bestError
    inside the critical section, so the final result is consistent.
  */
DDSVector3 axis; DDSVector4 grid, gridrcp, half, onethird_onethird2, pointsWeights[16], two, twonineths, twothirds_twothirds2, xSumwSum; float bestError = 1e+37f; size_t bestIteration = 0, besti = 0, bestj = 0, bestk = 0, iterationIndex; ssize_t i; unsigned char *o, order[128], unordered[16]; VectorInit(half,0.5f); VectorInit(two,2.0f); VectorInit(onethird_onethird2,1.0f/3.0f); onethird_onethird2.w = 1.0f/9.0f; VectorInit(twothirds_twothirds2,2.0f/3.0f); twothirds_twothirds2.w = 4.0f/9.0f; VectorInit(twonineths,2.0f/9.0f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; grid.w = 0.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; gridrcp.w = 0.0f; xSumwSum.x = 0.0f; xSumwSum.y = 0.0f; xSumwSum.z = 0.0f; xSumwSum.w = 0.0f; ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0); for (iterationIndex = 0;;) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,1) \ num_threads(GetMagickResourceLimit(ThreadResource)) #endif for (i=0; i < (ssize_t) count; i++) { DDSVector4 part0, part1, part2; size_t ii, j, k, kmin; VectorInit(part0,0.0f); for(ii=0; ii < (size_t) i; ii++) VectorAdd(pointsWeights[ii],part0,&part0); VectorInit(part1,0.0f); for (j=(size_t) i;;) { if (j == 0) { VectorCopy44(pointsWeights[0],&part2); kmin = 1; } else { VectorInit(part2,0.0f); kmin = j; } for (k=kmin;;) { DDSVector4 a, alpha2_sum, alphax_sum, alphabeta_sum, b, beta2_sum, betax_sum, e1, e2, factor, part3; float error; VectorSubtract(xSumwSum,part2,&part3); VectorSubtract(part3,part1,&part3); VectorSubtract(part3,part0,&part3); VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum); VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum); VectorInit(alpha2_sum,alphax_sum.w); VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum); VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum); VectorInit(beta2_sum,betax_sum.w); VectorAdd(part1,part2,&alphabeta_sum); 
      /*
        Solve the 2x2 least-squares system for endpoints a and b, snap both to
        the RGB565 grid, and accumulate the metric-weighted squared error.
      */
VectorInit(alphabeta_sum,alphabeta_sum.w); VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum); VectorMultiply(alpha2_sum,beta2_sum,&factor); VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor, &factor); VectorReciprocal(factor,&factor); VectorMultiply(alphax_sum,beta2_sum,&a); VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a); VectorMultiply(a,factor,&a); VectorMultiply(betax_sum,alpha2_sum,&b); VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b); VectorMultiply(b,factor,&b); VectorClamp(&a); VectorMultiplyAdd(grid,a,half,&a); VectorTruncate(&a); VectorMultiply(a,gridrcp,&a); VectorClamp(&b); VectorMultiplyAdd(grid,b,half,&b); VectorTruncate(&b); VectorMultiply(b,gridrcp,&b); VectorMultiply(b,b,&e1); VectorMultiply(e1,beta2_sum,&e1); VectorMultiply(a,a,&e2); VectorMultiplyAdd(e2,alpha2_sum,e1,&e1); VectorMultiply(a,b,&e2); VectorMultiply(e2,alphabeta_sum,&e2); VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2); VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2); VectorMultiplyAdd(two,e2,e1,&e2); VectorMultiply(e2,metric,&e2); error = e2.x + e2.y + e2.z; if (error < bestError) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (DDS_CompressClusterFit) #endif { if (error < bestError) { VectorCopy43(a,start); VectorCopy43(b,end); bestError = error; besti = i; bestj = j; bestk = k; bestIteration = iterationIndex; } } } if (k == count) break; VectorAdd(pointsWeights[k],part2,&part2); k++; } if (j == count) break; VectorAdd(pointsWeights[j],part1,&part1); j++; } } if (bestIteration != iterationIndex) break; iterationIndex++; if (iterationIndex == 8) break; VectorSubtract3(*end,*start,&axis); if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order, iterationIndex) == MagickFalse) break; } o = order + (16*bestIteration); for (i=0; i < (ssize_t) besti; i++) unordered[o[i]] = 0; for (i=besti; i < (ssize_t) bestj; i++) unordered[o[i]] = 2; for (i=bestj; i < (ssize_t) bestk; i++) unordered[o[i]] = 3; for 
(i=bestk; i < (ssize_t) count; i++) unordered[o[i]] = 1; RemapIndices(map,unordered,indices); } static void CompressRangeFit(const size_t count, const DDSVector4* points, const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end, unsigned char *indices) { float d, bestDist, max, min, val; DDSVector3 codes[4], grid, gridrcp, half, dist; register ssize_t i; size_t bestj, j; unsigned char closest[16]; VectorInit3(half,0.5f); grid.x = 31.0f; grid.y = 63.0f; grid.z = 31.0f; gridrcp.x = 1.0f/31.0f; gridrcp.y = 1.0f/63.0f; gridrcp.z = 1.0f/31.0f; if (count > 0) { VectorCopy43(points[0],start); VectorCopy43(points[0],end); min = max = Dot(points[0],principle); for (i=1; i < (ssize_t) count; i++) { val = Dot(points[i],principle); if (val < min) { VectorCopy43(points[i],start); min = val; } else if (val > max) { VectorCopy43(points[i],end); max = val; } } } VectorClamp3(start); VectorMultiplyAdd3(grid,*start,half,start); VectorTruncate3(start); VectorMultiply3(*start,gridrcp,start); VectorClamp3(end); VectorMultiplyAdd3(grid,*end,half,end); VectorTruncate3(end); VectorMultiply3(*end,gridrcp,end); codes[0] = *start; codes[1] = *end; codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f)); codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f)); codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f)); codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f)); codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f)); codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f)); for (i=0; i < (ssize_t) count; i++) { bestDist = 1e+37f; bestj = 0; for (j=0; j < 4; j++) { dist.x = (points[i].x - codes[j].x) * metric.x; dist.y = (points[i].y - codes[j].y) * metric.y; dist.z = (points[i].z - codes[j].z) * metric.z; d = Dot(dist,dist); if (d < bestDist) { bestDist = d; bestj = j; } } closest[i] = (unsigned char) bestj; } RemapIndices(map, closest, indices); } static void ComputeEndPoints(const 
/*
  ComputeEndPoints: per-channel table lookup (via the DDS_LOOKUP tables)
  choosing the endpoint pair with the lowest summed squared error for a
  single-colour block.  ComputePrincipleComponent: 8 rounds of power
  iteration on the symmetric 3x3 covariance matrix, normalising by the
  largest component each round.  ComputeWeightedCovariance: weighted
  covariance about the weighted centroid.  ConstructOrdering: insertion-sorts
  the points by their projection on `axis` and returns MagickFalse when the
  resulting ordering repeats one from an earlier iteration.
*/
DDSSingleColourLookup *lookup[], const unsigned char *color, DDSVector3 *start, DDSVector3 *end, unsigned char *index) { register ssize_t i; size_t c, maxError = SIZE_MAX; for (i=0; i < 2; i++) { const DDSSourceBlock* sources[3]; size_t error = 0; for (c=0; c < 3; c++) { sources[c] = &lookup[c][color[c]].sources[i]; error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error); } if (error > maxError) continue; start->x = (float) sources[0]->start / 31.0f; start->y = (float) sources[1]->start / 63.0f; start->z = (float) sources[2]->start / 31.0f; end->x = (float) sources[0]->end / 31.0f; end->y = (float) sources[1]->end / 63.0f; end->z = (float) sources[2]->end / 31.0f; *index = (unsigned char) (2*i); maxError = error; } } static void ComputePrincipleComponent(const float *covariance, DDSVector3 *principle) { DDSVector4 row0, row1, row2, v; register ssize_t i; row0.x = covariance[0]; row0.y = covariance[1]; row0.z = covariance[2]; row0.w = 0.0f; row1.x = covariance[1]; row1.y = covariance[3]; row1.z = covariance[4]; row1.w = 0.0f; row2.x = covariance[2]; row2.y = covariance[4]; row2.z = covariance[5]; row2.w = 0.0f; VectorInit(v,1.0f); for (i=0; i < 8; i++) { DDSVector4 w; float a; w.x = row0.x * v.x; w.y = row0.y * v.x; w.z = row0.z * v.x; w.w = row0.w * v.x; w.x = (row1.x * v.y) + w.x; w.y = (row1.y * v.y) + w.y; w.z = (row1.z * v.y) + w.z; w.w = (row1.w * v.y) + w.w; w.x = (row2.x * v.z) + w.x; w.y = (row2.y * v.z) + w.y; w.z = (row2.z * v.z) + w.z; w.w = (row2.w * v.z) + w.w; a = 1.0f / MagickMax(w.x,MagickMax(w.y,w.z)); v.x = w.x * a; v.y = w.y * a; v.z = w.z * a; v.w = w.w * a; } VectorCopy43(v,principle); } static void ComputeWeightedCovariance(const size_t count, const DDSVector4 *points, float *covariance) { DDSVector3 centroid; float total; size_t i; total = 0.0f; VectorInit3(centroid,0.0f); for (i=0; i < count; i++) { total += points[i].w; centroid.x += (points[i].x * points[i].w); centroid.y += (points[i].y * points[i].w); centroid.z += 
(points[i].z * points[i].w); } if( total > 1.192092896e-07F) { centroid.x /= total; centroid.y /= total; centroid.z /= total; } for (i=0; i < 6; i++) covariance[i] = 0.0f; for (i = 0; i < count; i++) { DDSVector3 a, b; a.x = points[i].x - centroid.x; a.y = points[i].y - centroid.y; a.z = points[i].z - centroid.z; b.x = points[i].w * a.x; b.y = points[i].w * a.y; b.z = points[i].w * a.z; covariance[0] += a.x*b.x; covariance[1] += a.x*b.y; covariance[2] += a.x*b.z; covariance[3] += a.y*b.y; covariance[4] += a.y*b.z; covariance[5] += a.z*b.z; } } static MagickBooleanType ConstructOrdering(const size_t count, const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights, DDSVector4 *xSumwSum, unsigned char *order, size_t iteration) { float dps[16], f; register ssize_t i; size_t j; unsigned char c, *o, *p; o = order + (16*iteration); for (i=0; i < (ssize_t) count; i++) { dps[i] = Dot(points[i],axis); o[i] = (unsigned char)i; } for (i=0; i < (ssize_t) count; i++) { for (j=i; j > 0 && dps[j] < dps[j - 1]; j--) { f = dps[j]; dps[j] = dps[j - 1]; dps[j - 1] = f; c = o[j]; o[j] = o[j - 1]; o[j - 1] = c; } } for (i=0; i < (ssize_t) iteration; i++) { MagickBooleanType same; p = order + (16*i); same = MagickTrue; for (j=0; j < count; j++) { if (o[j] != p[j]) { same = MagickFalse; break; } } if (same != MagickFalse) return MagickFalse; } xSumwSum->x = 0; xSumwSum->y = 0; xSumwSum->z = 0; xSumwSum->w = 0; for (i=0; i < (ssize_t) count; i++) { DDSVector4 v; j = (size_t) o[i]; v.x = points[j].w * points[j].x; v.y = points[j].w * points[j].y; v.z = points[j].w * points[j].z; v.w = points[j].w * 1.0f; VectorCopy44(v,&pointsWeights[i]); VectorAdd(*xSumwSum,v,xSumwSum); } return MagickTrue; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s D D S % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsDDS() returns MagickTrue if the image format type, identified by the % magick 
string, is DDS. % % The format of the IsDDS method is: % % MagickBooleanType IsDDS(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((char *) magick,"DDS ", 4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadDDSImage() reads a DirectDraw Surface image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadDDSImage method is: % % Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: The image info. % % o exception: return any errors or warnings in this structure. % */ static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType status, cubemap = MagickFalse, volume = MagickFalse; CompressionType compression; DDSInfo dds_info; DDSDecoder *decoder; PixelTrait alpha_trait; size_t n, num_images; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Initialize image structure. */ if (ReadDDSInfo(image, &dds_info) != MagickTrue) { ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP) cubemap = MagickTrue; if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0) volume = MagickTrue; (void) SeekBlob(image, 128, SEEK_SET); /* Determine pixel format */ if (dds_info.pixelformat.flags & DDPF_RGB) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { alpha_trait = BlendPixelTrait; decoder = ReadUncompressedRGBA; } else { alpha_trait = UndefinedPixelTrait; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_LUMINANCE) { compression = NoCompression; if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS) { /* Not sure how to handle this */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } else { alpha_trait = UndefinedPixelTrait; decoder = ReadUncompressedRGB; } } else if (dds_info.pixelformat.flags & DDPF_FOURCC) { switch (dds_info.pixelformat.fourcc) { case FOURCC_DXT1: { alpha_trait = UndefinedPixelTrait; compression = DXT1Compression; decoder = ReadDXT1; break; } case FOURCC_DXT3: { alpha_trait = BlendPixelTrait; compression = DXT3Compression; decoder = ReadDXT3; break; } case FOURCC_DXT5: { alpha_trait = BlendPixelTrait; compression = DXT5Compression; decoder = ReadDXT5; break; } default: { /* Unknown FOURCC */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } } } 
else { /* Neither compressed nor uncompressed... thus unsupported */ ThrowReaderException(CorruptImageError, "ImageTypeNotSupported"); } num_images = 1; if (cubemap) { /* Determine number of faces defined in the cubemap */ num_images = 0; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++; if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++; } if (volume) num_images = dds_info.depth; for (n = 0; n < num_images; n++) { if (n != 0) { /* Start a new image */ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) return(DestroyImageList(image)); image=SyncNextImageInList(image); } image->alpha_trait=alpha_trait; image->compression = compression; image->columns = dds_info.width; image->rows = dds_info.height; image->storage_class = DirectClass; image->endian = LSBEndian; image->depth = 8; if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); if ((decoder)(image, &dds_info, exception) != MagickTrue) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } } (void) CloseBlob(image); return(GetFirstImageInList(image)); } static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info) { size_t hdr_size, required; /* Seek to start of header */ (void) SeekBlob(image, 4, SEEK_SET); /* Check header field */ hdr_size = ReadBlobLSBLong(image); if (hdr_size != 124) return MagickFalse; /* Fill in DDS info struct */ dds_info->flags = ReadBlobLSBLong(image); /* Check required flags */ required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); if ((dds_info->flags & 
required) != required)
    return MagickFalse;
  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 44, SEEK_CUR);   /* reserved region of 11 DWORDs */
  /*
    Read pixel format structure
  */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;
  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);
  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */
  return MagickTrue;
}

/*
  Decode DXT1 (BC1) blocks: each 8-byte block holds two RGB565 endpoints
  (c0,c1) and 32 bits of 2-bit selector codes for a 4x4 pixel tile.
*/
static MagickBooleanType ReadDXT1(Image *image, DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  DDSColors
    colors;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  size_t
    bits;

  ssize_t
    j,
    y;

  unsigned char
    code;

  unsigned short
    c0,
    c1;

  for (y = 0; y < (ssize_t) dds_info->height; y += 4)
  {
    for (x = 0; x < (ssize_t) dds_info->width; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
        MagickMin(4, dds_info->height - y),exception);
      if (q == (Quantum *) NULL)
        return MagickFalse;
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      CalculateColors(c0, c1, &colors, MagickFalse);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          /* Clip the tile at the right/bottom image edges. */
          if ((x + i) < (ssize_t) dds_info->width &&
              (y + j) < (ssize_t) dds_info->height)
            {
              code = (unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              SetPixelAlpha(image,ScaleCharToQuantum(colors.a[code]),q);
              if (colors.a[code] &&
                  (image->alpha_trait == UndefinedPixelTrait))
                image->alpha_trait=BlendPixelTrait;  /* Correct matte */
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return MagickFalse;
    }
  }
  /* DXT1 blocks are 8 bytes each; skip any trailing mipmap levels. */
  return(SkipDXTMipmaps(image,dds_info,8,exception));
}

/*
  Decode DXT3 (BC2) blocks: 8 bytes of explicit 4-bit alpha values followed
  by an 8-byte DXT color block per 4x4 tile.
*/
static MagickBooleanType ReadDXT3(Image *image, DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  DDSColors
    colors;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    alpha;

  size_t
    a0,
    a1,
    bits,
    code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  for (y = 0; y < (ssize_t) dds_info->height; y += 4)
  {
    for (x = 0; x < (ssize_t) dds_info->width; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
        MagickMin(4, dds_info->height - y),exception);
      if (q == (Quantum *) NULL)
        return MagickFalse;
      /* Read alpha values (8 bytes) */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      CalculateColors(c0, c1, &colors, MagickTrue);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) dds_info->width &&
              (y + j) < (ssize_t) dds_info->height)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /*
                Extract alpha value: multiply 0..15 by 17 to get range 0..255
              */
              if (j < 2)
                alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
              else
                alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return MagickFalse;
    }
  }
  /* DXT3 blocks are 16 bytes each (8 alpha + 8 color). */
  return(SkipDXTMipmaps(image,dds_info,16,exception));
}

/*
  Decode DXT5 (BC3) blocks: two 8-bit alpha endpoints plus 48 bits of 3-bit
  alpha selector codes, followed by an 8-byte DXT color block per 4x4 tile.
*/
static MagickBooleanType ReadDXT5(Image *image, DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  DDSColors
    colors;

  MagickSizeType
    alpha_bits;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  for (y = 0; y < (ssize_t) dds_info->height; y += 4)
  {
    for (x = 0; x < (ssize_t) dds_info->width; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
        MagickMin(4, dds_info->height - y),exception);
      if (q == (Quantum *) NULL)
        return MagickFalse;
      /* Read alpha values (8 bytes) */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);
      /* 48 selector bits are stored as a 32-bit LSB long + 16-bit short. */
      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image)
        << 32);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      CalculateColors(c0, c1, &colors, MagickTrue);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) dds_info->width &&
              (y + j) < (ssize_t) dds_info->height)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /*
                Extract alpha value: codes 0/1 select the endpoints; the rest
                interpolate (7-step ramp when a0 > a1, else 5-step ramp with
                explicit 0 and 255 at codes 6 and 7).
              */
              alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
              if (alpha_code == 0)
                alpha = a0;
              else if (alpha_code == 1)
                alpha = a1;
              else if (a0 > a1)
                alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
              else if (alpha_code == 6)
                alpha = 0;
              else if (alpha_code == 7)
                alpha = 255;
              else
                alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return MagickFalse;
    }
  }
  /* DXT5 blocks are 16 bytes each (8 alpha + 8 color). */
  return(SkipDXTMipmaps(image,dds_info,16,exception));
}

/*
  Decode uncompressed pixel data without an alpha channel: 8-bit gray,
  16-bit RGB565, or 24/32-bit BGR(X).
*/
static MagickBooleanType ReadUncompressedRGB(Image *image, DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  register Quantum
    *q;

  ssize_t
    x,
    y;

  unsigned short
    color;

  if (dds_info->pixelformat.rgb_bitcount == 8)
    (void) SetImageType(image,GrayscaleType,exception);
  else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
    dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
    /* Only the RGB565 mask layout is supported for 16-bit data. */
    ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
      image->filename);
  for (y = 0; y < (ssize_t) dds_info->height; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);
    if (q == (Quantum *) NULL)
      return MagickFalse;
    for (x = 0; x < (ssize_t) dds_info->width; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 8)
        SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q);
      else if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          /* Expand RGB565 fields to 0..255 via shift-and-rescale. */
          color=ReadBlobShort(image);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
        }
      else
        {
          /* 24/32-bit data is stored B, G, R (+ ignored filler byte). */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          if (dds_info->pixelformat.rgb_bitcount == 32)
            (void) ReadBlobByte(image);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return MagickFalse;
  }
  /* NOTE(review): pixel_size 3 is also used for 8/16/32-bit data here —
     mipmap skipping may land on the wrong offset for those; confirm. */
  return(SkipRGBMipmaps(image,dds_info,3,exception));
}

/*
  Decode uncompressed pixel data with an alpha channel: several 16-bit
  packings (1555, gray+alpha, 4444) or 32-bit BGRA.
*/
static MagickBooleanType ReadUncompressedRGBA(Image *image, DDSInfo *dds_info,
  ExceptionInfo *exception)
{
  register Quantum
    *q;

  ssize_t
    alphaBits,
    x,
    y;

  unsigned short
    color;
alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      /* Identify the 16-bit packing from the channel bitmasks. */
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;      /* ARGB1555 */
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          alphaBits=2;    /* 8-bit gray + 8-bit alpha */
          (void) SetImageType(image,GrayscaleAlphaType,exception);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;      /* ARGB4444 */
      else
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }
  for (y = 0; y < (ssize_t) dds_info->height; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);
    if (q == (Quantum *) NULL)
      return MagickFalse;
    for (x = 0; x < (ssize_t) dds_info->width; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          color=ReadBlobShort(image);
          if (alphaBits == 1)
            {
              /* 1-bit alpha: fully opaque or fully transparent. */
              SetPixelAlpha(image,(color & (1 << 15)) ? QuantumRange : 0,q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
            }
          else if (alphaBits == 2)
            {
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (color >> 8)),q);
              SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q);
            }
          else
            {
              /* 4444: rescale each 4-bit field to 0..255. */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (((color >> 12)/15.0)*255)),q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q);
            }
        }
      else
        {
          /* 32-bit data is stored B, G, R, A. */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return MagickFalse;
  }
  return(SkipRGBMipmaps(image,dds_info,4,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   R e g i s t e r D D S I m a g e                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterDDSImage() adds attributes for the DDS image format to
%  the list of supported formats.  The attributes include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterDDSImage method is:
%
%      RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
  MagickInfo
    *entry;

  /* Register the same reader/writer under three aliases: DDS, DXT1, DXT5. */
  entry = AcquireMagickInfo("DDS","DDS","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry = AcquireMagickInfo("DDS","DXT1","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry = AcquireMagickInfo("DDS","DXT5","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

static void RemapIndices(const ssize_t *map,
const unsigned char *source, unsigned char *target)
{
  register ssize_t
    i;

  /* Map per-pixel indices through `map`; -1 marks an unused slot and is
     remapped to index 3. */
  for (i = 0; i < 16; i++)
  {
    if (map[i] == -1)
      target[i] = 3;
    else
      target[i] = source[map[i]];
  }
}

/*
  Skip the mipmap images for compressed (DXTn) dds files
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
  int texel_size,ExceptionInfo *exception)
{
  MagickOffsetType
    offset;

  register ssize_t
    i;

  size_t
    h,
    w;

  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      w = DIV2(dds_info->width);
      h = DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* Each level holds ceil(w/4)*ceil(h/4) blocks of texel_size bytes. */
        offset = (MagickOffsetType) ((w + 3) / 4) * ((h + 3) / 4) * texel_size;
        if (SeekBlob(image, offset, SEEK_CUR) < 0)
          break;
        w = DIV2(w);
        h = DIV2(h);
      }
    }
  return(MagickTrue);
}

/*
  Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  MagickOffsetType
    offset;

  register ssize_t
    i;

  size_t
    h,
    w;

  /*
    Only skip mipmaps for textures and cube maps
  */
  /* NOTE(review): this reports CorruptImageError on EOF while
     SkipDXTMipmaps uses CorruptImageWarning — confirm which is intended. */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      w = DIV2(dds_info->width);
      h = DIV2(dds_info->height);
      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        offset = (MagickOffsetType) w * h * pixel_size;
        if (SeekBlob(image, offset, SEEK_CUR) < 0)
          break;
        w = DIV2(w);
        h = DIV2(h);
      }
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   U n r e g i s t e r D D S I m a g e                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterDDSImage() removes format registrations made by the
%  DDS module from the list of supported formats.
%
%  The format of the UnregisterDDSImage method is:
%
%      UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
  (void) UnregisterMagickInfo("DDS");
  (void) UnregisterMagickInfo("DXT1");
  (void) UnregisterMagickInfo("DXT5");
}

/*
  Emit a DXT5 alpha block: pick the better of the 5-interpolant and
  7-interpolant encodings by compression error, then pack the two endpoint
  bytes plus 16 3-bit indices (6 bytes).
*/
static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5,
  size_t max5, size_t min7, size_t max7)
{
  register ssize_t
    i;

  size_t
    err5,
    err7,
    j;

  unsigned char
    indices5[16],
    indices7[16];

  FixRange(min5,max5,5);
  err5 = CompressAlpha(min5,max5,5,alphas,indices5);
  FixRange(min7,max7,7);
  err7 = CompressAlpha(min7,max7,7,alphas,indices7);
  if (err7 < err5)
    {
      /* Translate 7-interpolant indices into 5-interpolant index space. */
      for (i=0; i < 16; i++)
      {
        unsigned char
          index;

        index = indices7[i];
        if( index == 0 )
          indices5[i] = 1;
        else if (index == 1)
          indices5[i] = 0;
        else
          indices5[i] = 9 - index;
      }
      min5 = max7;
      max5 = min7;
    }
  (void) WriteBlobByte(image,(unsigned char) min5);
  (void) WriteBlobByte(image,(unsigned char) max5);
  /* Pack 16 x 3-bit indices into two 24-bit little-endian groups. */
  for(i=0; i < 2; i++)
  {
    size_t
      value = 0;

    for (j=0; j < 8; j++)
    {
      size_t
        index = (size_t) indices5[j + i*8];

      value |= ( index << 3*j );
    }
    for (j=0; j < 3; j++)
    {
      size_t
        byte = (value >> 8*j) & 0xff;

      (void) WriteBlobByte(image,(unsigned char) byte);
    }
  }
}

/*
  Compress one 4x4 tile of unique color points to a DXT color block using
  either range fit or (slower, higher quality) cluster fit.
*/
static void WriteCompressed(Image *image, const size_t count,
  DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit)
{
  float
    covariance[16];

  DDSVector3
    end,
    principle,
    start;

  DDSVector4
    metric;

  unsigned char
    indices[16];

  VectorInit(metric,1.0f);
  VectorInit3(start,0.0f);
  VectorInit3(end,0.0f);
  ComputeWeightedCovariance(count,points,covariance);
  ComputePrincipleComponent(covariance,&principle);
  if ((clusterFit == MagickFalse) || (count == 0))
    CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
  else
    CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);
  WriteIndices(image,start,end,indices);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   W r i t e D D S I m a g e                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format.
%
%  The format of the WriteBMPImage method is:
%
%      MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image:  The image.
%
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
  Image *image, ExceptionInfo *exception)
{
  const char
    *option;

  size_t
    compression,
    columns,
    maxMipmaps,
    mipmaps,
    pixelFormat,
    rows;

  MagickBooleanType
    clusterFit,
    status,
    weightByAlpha;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /* Default to DXT5; fall back to DXT1 when there is no alpha or the user
     asked for it explicitly. */
  pixelFormat=DDPF_FOURCC;
  compression=FOURCC_DXT5;
  if (image->alpha_trait == UndefinedPixelTrait)
    compression=FOURCC_DXT1;
  if (LocaleCompare(image_info->magick,"dxt1") == 0)
    compression=FOURCC_DXT1;
  option=GetImageOption(image_info,"dds:compression");
  if (option != (char *) NULL)
    {
      if (LocaleCompare(option,"dxt1") == 0)
        compression=FOURCC_DXT1;
      if (LocaleCompare(option,"none") == 0)
        pixelFormat=DDPF_RGB;
    }
  clusterFit=MagickFalse;
  weightByAlpha=MagickFalse;
  if (pixelFormat == DDPF_FOURCC)
    {
      option=GetImageOption(image_info,"dds:cluster-fit");
      if
(IsStringTrue(option) != MagickFalse)
        {
          clusterFit=MagickTrue;
          /* weight-by-alpha only makes sense when alpha is encoded (DXT5). */
          if (compression != FOURCC_DXT1)
            {
              option=GetImageOption(image_info,"dds:weight-by-alpha");
              if (IsStringTrue(option) != MagickFalse)
                weightByAlpha=MagickTrue;
            }
        }
    }
  maxMipmaps=SIZE_MAX;
  mipmaps=0;
  /* Only power-of-two dimensions get a mipmap chain. */
  if ((image->columns & (image->columns - 1)) == 0 &&
      (image->rows & (image->rows - 1)) == 0)
    {
      option=GetImageOption(image_info,"dds:mipmaps");
      if (option != (char *) NULL)
        maxMipmaps=StringToUnsignedLong(option);
      if (maxMipmaps != 0)
        {
          columns=image->columns;
          rows=image->rows;
          while (columns != 1 && rows != 1 && mipmaps != maxMipmaps)
          {
            columns=DIV2(columns);
            rows=DIV2(rows);
            mipmaps++;
          }
        }
    }
  WriteDDSInfo(image,pixelFormat,compression,mipmaps);
  WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
    exception);
  if (mipmaps > 0 && WriteMipmaps(image,pixelFormat,compression,mipmaps,
      clusterFit,weightByAlpha,exception) == MagickFalse)
    return(MagickFalse);
  (void) CloseBlob(image);
  return(MagickTrue);
}

/*
  Write the 128-byte DDS header (magic + DDS_HEADER + DDS_PIXELFORMAT) in
  little-endian order for the chosen pixel format and mipmap count.
*/
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
  const size_t compression, const size_t mipmaps)
{
  char
    software[MagickPathExtent];

  register ssize_t
    i;

  unsigned int
    format,
    caps,
    flags;

  flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
    DDSD_PIXELFORMAT);
  caps=(unsigned int) DDSCAPS_TEXTURE;
  format=(unsigned int) pixelFormat;
  if (format == DDPF_FOURCC)
    flags=flags | DDSD_LINEARSIZE;
  else
    flags=flags | DDSD_PITCH;
  if (mipmaps > 0)
    {
      flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
      caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
    }
  if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait)
    format=format | DDPF_ALPHAPIXELS;
  (void) WriteBlob(image,4,(unsigned char *) "DDS ");
  (void) WriteBlobLSBLong(image,124);
  (void) WriteBlobLSBLong(image,flags);
  (void) WriteBlobLSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobLSBLong(image,(unsigned int) image->columns);
  if (pixelFormat == DDPF_FOURCC)
    {
      /* Compressed DDS requires linear compressed size of first image */
      if (compression == FOURCC_DXT1)
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
      else /* DXT5 */
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
    }
  else
    {
      /* Uncompressed DDS requires byte pitch of first image */
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4));
      else
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3));
    }
  (void) WriteBlobLSBLong(image,0x00);                       /* depth */
  (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);   /* incl. base */
  (void) ResetMagickMemory(software,0,sizeof(software));
  (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent);
  (void) WriteBlob(image,44,(unsigned char *) software);     /* reserved1 */
  (void) WriteBlobLSBLong(image,32);                         /* pf size */
  (void) WriteBlobLSBLong(image,format);
  if (pixelFormat == DDPF_FOURCC)
    {
      (void) WriteBlobLSBLong(image,(unsigned int) compression);
      for(i=0;i < 5;i++)  // bitcount / masks
        (void) WriteBlobLSBLong(image,0x00);
    }
  else
    {
      (void) WriteBlobLSBLong(image,0x00);
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          (void) WriteBlobLSBLong(image,32);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0xff000000);
        }
      else
        {
          (void) WriteBlobLSBLong(image,24);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0x00);
        }
    }
  (void) WriteBlobLSBLong(image,caps);
  for(i=0;i < 4;i++)  // ddscaps2 + reserved region
    (void) WriteBlobLSBLong(image,0x00);
}

/*
  Encode the image as DXT1 or DXT5 blocks: for each 4x4 tile collect the
  unique colors (accumulating weights for duplicates), then emit an alpha
  block (DXT5 only) and a color block.
*/
static void WriteFourCC(Image *image, const size_t compression,
  const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
  ExceptionInfo *exception)
{
  register ssize_t
    x;

  ssize_t
    i,
    y,
    bx,
    by;

  register const Quantum
    *p;

  for (y=0; y < (ssize_t) image->rows; y+=4)
  {
    for (x=0; x < (ssize_t)
      image->columns; x+=4)
    {
      MagickBooleanType
        match;

      DDSVector4
        point,
        points[16];

      size_t
        count = 0,
        max5 = 0,
        max7 = 0,
        min5 = 255,
        min7 = 255,
        columns = 4,
        rows = 4;

      ssize_t
        alphas[16],
        map[16];

      unsigned char
        alpha;

      /* Clip the tile at the right/bottom image edges. */
      if (x + columns >= image->columns)
        columns = image->columns - x;
      if (y + rows >= image->rows)
        rows = image->rows - y;
      p=GetVirtualPixels(image,x,y,columns,rows,exception);
      if (p == (const Quantum *) NULL)
        break;
      for (i=0; i<16; i++)
      {
        map[i] = -1;
        alphas[i] = -1;
      }
      for (by=0; by < (ssize_t) rows; by++)
      {
        for (bx=0; bx < (ssize_t) columns; bx++)
        {
          if (compression == FOURCC_DXT5)
            alpha = ScaleQuantumToChar(GetPixelAlpha(image,p));
          else
            alpha = 255;
          if (compression == FOURCC_DXT5)
            {
              /* Track endpoint candidates for both alpha encodings. */
              if (alpha < min7)
                min7 = alpha;
              if (alpha > max7)
                max7 = alpha;
              if (alpha != 0 && alpha < min5)
                min5 = alpha;
              if (alpha != 255 && alpha > max5)
                max5 = alpha;
            }
          alphas[4*by + bx] = (size_t)alpha;
          /* Normalize the color to [0,1]^3; w carries the fit weight. */
          point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f;
          point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f;
          point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f;
          point.w = weightByAlpha ?
(float)(alpha + 1) / 256.0f : 1.0f; p+=GetPixelChannels(image); match = MagickFalse; for (i=0; i < (ssize_t) count; i++) { if ((points[i].x == point.x) && (points[i].y == point.y) && (points[i].z == point.z) && (alpha >= 128 || compression == FOURCC_DXT5)) { points[i].w += point.w; map[4*by + bx] = i; match = MagickTrue; break; } } if (match != MagickFalse) continue; points[count].x = point.x; points[count].y = point.y; points[count].z = point.z; points[count].w = point.w; map[4*by + bx] = count; count++; } } for (i=0; i < (ssize_t) count; i++) points[i].w = sqrt(points[i].w); if (compression == FOURCC_DXT5) WriteAlphas(image,alphas,min5,max5,min7,max7); if (count == 1) WriteSingleColorFit(image,points,map); else WriteCompressed(image,count,points,map,clusterFit); } } } static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); } static inline size_t ClampToLimit(const float value, const size_t limit) { size_t result = (int) (value + 0.5f); if (result < 0.0f) return(0); if (result > limit) return(limit); return result; } static inline size_t ColorTo565(const DDSVector3 point) { size_t r = ClampToLimit(31.0f*point.x,31); size_t g = ClampToLimit(63.0f*point.y,63); size_t b = ClampToLimit(31.0f*point.z,31); return (r << 11) | (g << 5) | b; } static void WriteIndices(Image *image, const DDSVector3 start, const DDSVector3 end, unsigned char *indices) { register ssize_t i; size_t a, b; unsigned char remapped[16]; const unsigned char *ind; a = ColorTo565(start); b = ColorTo565(end); for (i=0; i<16; i++) { if( a < b ) remapped[i] = (indices[i] ^ 0x1) & 0x3; else if( a == b ) remapped[i] = 0; else remapped[i] = indices[i]; } if( a < b ) Swap(a,b); (void) WriteBlobByte(image,(unsigned char) (a & 0xff)); 
(void) WriteBlobByte(image,(unsigned char) (a >> 8)); (void) WriteBlobByte(image,(unsigned char) (b & 0xff)); (void) WriteBlobByte(image,(unsigned char) (b >> 8)); for (i=0; i<4; i++) { ind = remapped + 4*i; (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) | (ind[3] << 6)); } } static MagickBooleanType WriteMipmaps(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps, const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { Image* resize_image; register ssize_t i; size_t columns, rows; columns = image->columns; rows = image->rows; for (i=0; i< (ssize_t) mipmaps; i++) { resize_image = ResizeImage(image,columns/2,rows/2,TriangleFilter, exception); if (resize_image == (Image *) NULL) return(MagickFalse); DestroyBlob(resize_image); resize_image->blob=ReferenceBlob(image->blob); WriteImageData(resize_image,pixelFormat,compression,weightByAlpha, clusterFit,exception); resize_image=DestroyImage(resize_image); columns = DIV2(columns); rows = DIV2(rows); } return(MagickTrue); } static void WriteSingleColorFit(Image *image, const DDSVector4 *points, const ssize_t *map) { DDSVector3 start, end; register ssize_t i; unsigned char color[3], index, indexes[16], indices[16]; color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255); color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255); color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255); index=0; ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index); for (i=0; i< 16; i++) indexes[i]=index; RemapIndices(map,indexes,indices); WriteIndices(image,start,end,indices); } static void WriteUncompressed(Image *image, ExceptionInfo *exception) { register const Quantum *p; register ssize_t x; ssize_t y; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) 
WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,p))); if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,p))); p+=GetPixelChannels(image); } } }
hip_runtime.h
/*
 * This file is part of hipCPU, a HIP implementation based on OpenMP
 *
 * Copyright (c) 2018,2019 Aksel Alpay
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef HIPCPU_RUNTIME_H
#define HIPCPU_RUNTIME_H

#define __HIPCPU__

// On the CPU backend the HIP function-space qualifiers carry no meaning,
// so they expand to nothing (or a plain C++ equivalent).
#ifndef __global__
#define __global__
#endif

#ifndef __device__
#define __device__
#endif

#ifndef __host__
#define __host__
#endif

#ifndef __constant__
#define __constant__ const
#endif

// NOTE(review): mapping __shared__ to `static` gives one copy per kernel
// function, not per block — confirm this matches the runtime's block model.
#ifndef __shared__
#define __shared__ static
#endif

#include <cstddef>
#include <climits>
#include <cstring>
#include <limits>
#include <memory>
#include <cmath>
#include <stdexcept>

#include "detail/runtime.hpp"

using hipcpu::dim3;

#define HIP_KERNEL_NAME(...) __VA_ARGS__

typedef int hipLaunchParm;

// Shorthand for the process-wide runtime singleton.
#define _hipcpu_runtime (hipcpu::runtime::get())

// Use a macro instead of a function with variadic template arguments
// to avoid different properties of kernel template argument deduction
// based on kernel arguments compared to AMDs implementation
#define hipLaunchKernelGGL(f, grid, block, shared_mem, stream, ...) \
  _hipcpu_runtime.submit_kernel(grid, block, shared_mem, stream, \
  [=](){ \
    f(__VA_ARGS__); \
  })

// Legacy launch API: forwards to hipLaunchKernelGGL with a dummy
// hipLaunchParm (the 0) prepended to the kernel arguments.
#define hipLaunchKernel(f, grid, block, shared_mem, stream, ...) \
  hipLaunchKernelGGL(f, grid, block, shared_mem, stream, 0, __VA_ARGS__)

#define hipLaunchTask(f, stream, ...) \
  _hipcpu_runtime.submit_operation([=](){\
    f(__VA_ARGS__); \
  }, stream)

#define hipLaunchSequentialKernel(f, stream, scratch_mem, ...) \
  _hipcpu_runtime.submit_unparallelized_kernel(scratch_mem, stream, \
  [=](){ \
    f(__VA_ARGS__); \
  })

// TODO
#define hipLaunchKernelNoBarrier(f, grid, block, stream, ...)

#define HIP_DYNAMIC_SHARED_MEMORY _hipcpu_runtime.dev().get_dynamic_shared_memory()

// TODO This dev() may be different if changed during kernel execution?
// This is not a problem at the moment since we only treat the case of
// one effective host device for now.

// Thread/block/grid index macros: these query the runtime's current
// execution context instead of hardware registers.
#define hipThreadIdx_x (_hipcpu_runtime.dev().get_block().get_thread_id().x)
#define hipThreadIdx_y (_hipcpu_runtime.dev().get_block().get_thread_id().y)
#define hipThreadIdx_z (_hipcpu_runtime.dev().get_block().get_thread_id().z)

#define hipBlockIdx_x (_hipcpu_runtime.dev().get_grid().get_block_id().x)
#define hipBlockIdx_y (_hipcpu_runtime.dev().get_grid().get_block_id().y)
#define hipBlockIdx_z (_hipcpu_runtime.dev().get_grid().get_block_id().z)

#define hipBlockDim_x (_hipcpu_runtime.dev().get_block().get_block_dim().x)
#define hipBlockDim_y (_hipcpu_runtime.dev().get_block().get_block_dim().y)
#define hipBlockDim_z (_hipcpu_runtime.dev().get_block().get_block_dim().z)

#define hipGridDim_x (_hipcpu_runtime.dev().get_grid().get_grid_dim().x)
#define hipGridDim_y (_hipcpu_runtime.dev().get_grid().get_grid_dim().y)
#define hipGridDim_z (_hipcpu_runtime.dev().get_grid().get_grid_dim().z)

#define HIP_SYMBOL(X) X

typedef enum hipMemcpyKind {
  hipMemcpyHostToHost,
  hipMemcpyHostToDevice,
  hipMemcpyDeviceToHost,
  hipMemcpyDeviceToDevice,
  hipMemcpyDefault
} hipMemcpyKind;

/* Textures are unimplemented

// hipTextureAddressMode
#define hipTextureAddressMode 0
#define hipAddressModeWrap 0
#define hipAddressModeClamp 0
#define hipAddressModeMirror 0
#define hipAddressModeBorder 0

// hipTextureFilterMode
#define hipTextureFilterMode 0
#define hipFilterModePoint 0
#define hipFilterModeLinear 0

// hipTextureReadMode
enum hipTextureReadMode {};
#define hipReadModeElementType 0
#define hipReadModeNormalizedFloat 0

template<class T, int dim, hipTextureReadMode readMode>
struct texture {};

typedef enum hipChannelFormatKind {
  hipChannelFormatKindSigned = 0,
  hipChannelFormatKindUnsigned = 1,
  hipChannelFormatKindFloat = 2,
  hipChannelFormatKindNone = 3
} hipChannelFormatKind;

#define hipSurfaceBoundaryMode 0
#define hipBoundaryModeZero 0
#define hipBoundaryModeTrap 0
#define hipBoundaryModeClamp 0

// hipResourceType
#define hipResourceType 0
#define hipResourceTypeArray 0
#define hipResourceTypeMipmappedArray 0
#define hipResourceTypeLinear 0
#define hipResourceTypePitch2D 0
*/

// Event/host-alloc flag constants; most are accepted but ignored by the
// CPU backend.
#define hipEventDefault hipEvent_t()
#define hipEventBlockingSync 0
#define hipEventDisableTiming 0
#define hipEventInterprocess 0
#define hipEventReleaseToDevice 0
#define hipEventReleaseToSystem 0
#define hipHostMallocDefault 0x0
#define hipHostMallocPortable 0x1
#define hipHostMallocMapped 0x2
#define hipHostMallocWriteCombined 0x4
#define hipHostMallocCoherent 0x40000000
#define hipHostMallocNonCoherent 0x80000000
#define hipHostRegisterPortable 0
#define hipHostRegisterMapped 0

// Opaque HIP handle types collapse to plain ints/pointers on the CPU
// backend; only their identity matters here.
typedef int hipEvent_t;
typedef int hipStream_t;
typedef int hipIpcEventHandle_t;
typedef int hipIpcMemHandle_t;
typedef int hipLimit_t;
typedef int hipFuncCache_t;
typedef int hipCtx_t;
typedef int hipSharedMemConfig;
typedef int hipFuncCache;
typedef int hipJitOption;
typedef int hipDevice_t;
typedef int hipModule_t;
typedef int hipFunction_t;
typedef void* hipDeviceptr_t;
typedef int hipArray;
typedef int* hipArray_const_t;
typedef int hipFuncAttributes;
// NOTE(review): hipCtx_t is typedef'd twice (identical, so legal C++).
typedef int hipCtx_t;
typedef int hipTextureObject_t;
typedef int hipSurfaceObject_t;
typedef int hipResourceDesc;
typedef int hipTextureDesc;
typedef int hipResourceViewDesc;
typedef int textureReference;

enum hipError_t {
  hipSuccess,
  hipErrorInvalidContext,
  hipErrorInvalidKernelFile,
  hipErrorMemoryAllocation,
  hipErrorInitializationError,
  hipErrorLaunchFailure,
  hipErrorLaunchOutOfResources,
  hipErrorInvalidDevice,
  hipErrorInvalidValue,
  hipErrorInvalidDevicePointer,
  hipErrorInvalidMemcpyDirection,
  hipErrorUnknown,
  hipErrorInvalidResourceHandle,
  hipErrorNotReady,
  hipErrorNoDevice,
  hipErrorPeerAccessAlreadyEnabled,
  hipErrorPeerAccessNotEnabled,
  hipErrorRuntimeMemory,
  hipErrorRuntimeOther,
  hipErrorHostMemoryAlreadyRegistered,
  hipErrorHostMemoryNotRegistered,
  hipErrorMapBufferObjectFailed,
  hipErrorTbd
};

typedef void* hipPitchedPtr;
//struct hipExtent {};
//struct hipChannelFormatDesc {};

struct hipDeviceArch_t { unsigned
hasGlobalInt32Atomics : 1; unsigned hasGlobalFloatAtomicExch : 1; unsigned hasSharedInt32Atomics : 1; unsigned hasSharedFloatAtomicExch : 1; unsigned hasFloatAtomicAdd : 1; // 64-bit Atomics unsigned hasGlobalInt64Atomics : 1; unsigned hasSharedInt64Atomics : 1; // Doubles unsigned hasDoubles : 1; // Warp cross-lane operations unsigned hasWarpVote : 1; unsigned hasWarpBallot : 1; unsigned hasWarpShuffle : 1; unsigned hasFunnelShift : 1; // Sync unsigned hasThreadFenceSystem : 1; unsigned hasSyncThreadsExt : 1; // Misc unsigned hasSurfaceFuncs : 1; unsigned has3dGrid : 1; unsigned hasDynamicParallelism : 1; }; struct hipDeviceProp_t { char name[256]; size_t totalGlobalMem; size_t sharedMemPerBlock; int regsPerBlock; int warpSize; int maxThreadsPerBlock; int maxThreadsDim[3]; int maxGridSize[3]; int clockRate; int memoryClockRate; int memoryBusWidth; size_t totalConstMem; int major; int minor; int multiProcessorCount; int l2CacheSize; int maxThreadsPerMultiProcessor; int computeMode; int clockInstructionRate; hipDeviceArch_t arch; int concurrentKernels; int pciBusID; int pciDeviceID; size_t maxSharedMemoryPerMultiProcessor; int isMultiGpuBoard; int canMapHostMemory; int gcnArch; }; struct hipMemcpy3DParms {}; enum hipDeviceAttribute_t { hipDeviceAttributeMaxThreadsPerBlock, hipDeviceAttributeMaxBlockDimX, hipDeviceAttributeMaxBlockDimY, hipDeviceAttributeMaxBlockDimZ, hipDeviceAttributeMaxGridDimX, hipDeviceAttributeMaxGridDimY, hipDeviceAttributeMaxGridDimZ, hipDeviceAttributeMaxSharedMemoryPerBlock, hipDeviceAttributeTotalConstantMemory, hipDeviceAttributeWarpSize, hipDeviceAttributeMaxRegistersPerBlock, hipDeviceAttributeClockRate, hipDeviceAttributeMemoryClockRate, hipDeviceAttributeMemoryBusWidth, hipDeviceAttributeMultiprocessorCount, hipDeviceAttributeComputeMode, hipDeviceAttributeL2CacheSize, hipDeviceAttributeMaxThreadsPerMultiProcessor, hipDeviceAttributeComputeCapabilityMajor, hipDeviceAttributeComputeCapabilityMinor, hipDeviceAttributeConcurrentKernels, 
hipDeviceAttributePciBusId, hipDeviceAttributePciDeviceId, hipDeviceAttributeMaxSharedMemoryPerMultiprocessor, hipDeviceAttributeIsMultiGpuBoard, hipDeviceAttributeIntegrated, }; struct hipPointerAttribute_t { hipDevice_t device; hipDeviceptr_t devicePointer; void* hostPointer; bool isManaged; int allocationFlags; }; #define hipStreamDefault 0 #define hipStreamNonBlocking 0 #define hipSharedMemBankSizeDefault 0 #define hipSharedMemBankSizeFourByte 0 #define hipSharedMemBankSizeEightByte 0 typedef void(*hipStreamCallback_t)(hipStream_t, hipError_t, void*); /* hipError_t hipDeviceReset(); hipError_t hipGetLastError(); hipError_t hipPeekAtLastError(); */ inline hipError_t hipMalloc(void** ptr, size_t size) { *ptr = hipcpu::detail::aligned_malloc(hipcpu::detail::default_alignment, size); if(*ptr == nullptr) return hipErrorMemoryAllocation; return hipSuccess; } //hipError_t hipMallocPitch(void** ptr, size_t* pitch, size_t width, size_t height); //hipError_t hipMalloc3D(hipPitchedPtr* pitchedDevPtr, hipExtent extent); inline hipError_t hipFree(void* ptr) { hipcpu::detail::aligned_free(ptr); return hipSuccess; } inline hipError_t hipMallocHost(void** ptr, size_t size) { return hipMalloc(ptr, size); } #define hipMemAttachGlobal 0 #define hipMemAttachHost 1 template<class T> inline hipError_t hipMallocManaged(T** ptr, size_t size, unsigned flags = hipMemAttachGlobal) { return hipMalloc(reinterpret_cast<void**>(ptr), size); } inline hipError_t hipHostAlloc(void** ptr, size_t size, unsigned int flags) { return hipMalloc(ptr, size); } inline hipError_t hipHostMalloc(void** ptr, size_t size, unsigned int flags) { return hipMalloc(ptr, size); } /* hipError_t hipMallocArray(hipArray** array, const hipChannelFormatDesc* desc, size_t width, size_t height, unsigned int flags); hipError_t hipMalloc3DArray(hipArray** array, const struct hipChannelFormatDesc* desc, struct hipExtent extent, unsigned int flags); hipError_t hipFreeArray(hipArray* array); hipError_t 
hipHostGetDevicePointer(void** devPtr, void* hostPtr, unsigned int flags); hipError_t hipHostGetFlags(unsigned int* flagsPtr, void* hostPtr); hipError_t hipHostRegister(void* ptr, size_t size, unsigned int flags); hipError_t hipHostUnregister(void* ptr);*/ inline hipError_t hipFreeHost(void* ptr) { return hipFree(ptr); } inline hipError_t hipHostFree(void* ptr) { return hipFree(ptr); } inline hipError_t hipSetDevice(int device) { if(device != 0) return hipErrorInvalidDevice; _hipcpu_runtime.set_device(device); return hipSuccess; } //hipError_t hipChooseDevice(int* device, const hipDeviceProp_t* prop); inline hipError_t hipStreamCreate(hipStream_t* stream) { *stream = _hipcpu_runtime.create_blocking_stream(); return hipSuccess; } //TODO Make sure semantics are correct for all allowed values of flags inline hipError_t hipStreamCreateWithFlags(hipStream_t* stream, unsigned int flags) { if(flags == hipStreamDefault) return hipStreamCreate(stream); else if (flags == hipStreamNonBlocking) { *stream = _hipcpu_runtime.create_async_stream(); return hipSuccess; } return hipErrorInvalidValue; } inline hipError_t hipStreamSynchronize(hipStream_t stream) { _hipcpu_runtime.streams().get(stream)->wait(); return hipSuccess; } inline hipError_t hipStreamDestroy(hipStream_t stream) { _hipcpu_runtime.destroy_stream(stream); return hipSuccess; } //TODO Make sure semantics are correct for all allowed values of flags inline hipError_t hipStreamWaitEvent(hipStream_t stream, hipEvent_t event, unsigned int flags) { std::shared_ptr<hipcpu::event> evt = _hipcpu_runtime.events().get_shared(event); _hipcpu_runtime.submit_operation([evt](){ // TODO store error code evt->wait(); }, stream); return hipSuccess; } inline hipError_t hipStreamQuery(hipStream_t stream) { hipcpu::stream* s = _hipcpu_runtime.streams().get(stream); if(s->is_idle()) return hipSuccess; return hipErrorNotReady; } //TODO Make sure semantics are correct for all allowed values of flags inline hipError_t 
hipStreamAddCallback(hipStream_t stream, hipStreamCallback_t callback, void *userData, unsigned int flags) { _hipcpu_runtime.submit_operation([stream, callback, userData](){ // TODO guarantee correct error propagation callback(stream, hipSuccess, userData); }, stream); return hipSuccess; } inline hipError_t hipMemcpyAsync(void* dst, const void* src, size_t sizeBytes, hipMemcpyKind copyKind, hipStream_t stream = 0) { if(!_hipcpu_runtime.streams().is_valid(stream)) return hipErrorInvalidValue; _hipcpu_runtime.submit_operation([=](){ memcpy(dst, src, sizeBytes); }, stream); return hipSuccess; } inline hipError_t hipMemcpy(void* dst, const void* src, size_t sizeBytes, hipMemcpyKind copyKind) { hipMemcpyAsync(dst, src, sizeBytes, copyKind, 0); _hipcpu_runtime.streams().get(0)->wait(); return hipSuccess; } inline hipError_t hipMemcpyHtoD(hipDeviceptr_t dst, void* src, size_t size) { return hipMemcpy(dst, src, size, hipMemcpyHostToDevice); } inline hipError_t hipMemcpyDtoH(void* dst, hipDeviceptr_t src, size_t size) { return hipMemcpy(dst, src, size, hipMemcpyDeviceToHost); } inline hipError_t hipMemcpyDtoD(hipDeviceptr_t dst, hipDeviceptr_t src, size_t size) { return hipMemcpy(dst, src, size, hipMemcpyDeviceToDevice); } inline hipError_t hipMemcpyHtoDAsync(hipDeviceptr_t dst, void* src, size_t size, hipStream_t stream) { return hipMemcpyAsync(dst, src, size, hipMemcpyHostToDevice, stream); } inline hipError_t hipMemcpyDtoHAsync(void* dst, hipDeviceptr_t src, size_t size, hipStream_t stream) { return hipMemcpyAsync(dst, src, size, hipMemcpyDeviceToHost, stream); } inline hipError_t hipMemcpyDtoDAsync(hipDeviceptr_t dst, hipDeviceptr_t src, size_t size, hipStream_t stream) { return hipMemcpyAsync(dst, src, size, hipMemcpyDeviceToDevice, stream); } inline hipError_t hipMemcpyToSymbolAsync(const void* symbol, const void* src, size_t sizeBytes, size_t offset, hipMemcpyKind copyType, hipStream_t stream = 0) { char* base_ptr = static_cast<char*>(const_cast<void*>(symbol)); 
void* ptr = static_cast<void*>(base_ptr + offset); return hipMemcpyAsync(ptr, src, sizeBytes, copyType, stream); } inline hipError_t hipMemcpyFromSymbolAsync(void* dst, const void* symbolName, size_t sizeBytes, size_t offset, hipMemcpyKind kind, hipStream_t stream = 0) { const void* ptr = static_cast<const void*>(static_cast<const char*>(symbolName)+offset); return hipMemcpyAsync(dst, ptr, sizeBytes, kind, stream); } inline hipError_t hipMemcpyToSymbol(const void* symbol, const void* src, size_t sizeBytes, size_t offset = 0, hipMemcpyKind copyType = hipMemcpyHostToDevice) { hipError_t err = hipMemcpyToSymbolAsync(symbol, src, sizeBytes, offset, copyType, 0); if(err != hipSuccess) return err; _hipcpu_runtime.streams().get(0)->wait(); return err; } inline hipError_t hipMemcpyFromSymbol(void *dst, const void *symbolName, size_t sizeBytes, size_t offset = 0, hipMemcpyKind kind = hipMemcpyDeviceToHost) { hipError_t err = hipMemcpyFromSymbolAsync(dst, symbolName, sizeBytes, offset, kind, 0); if(err != hipSuccess) return err; _hipcpu_runtime.streams().get(0)->wait(); return err; } hipError_t hipMemcpy3D(const struct hipMemcpy3DParms *p); inline hipError_t hipMemcpy2DAsync(void* dst, size_t dpitch, const void* src, size_t spitch, size_t width, size_t height, hipMemcpyKind kind, hipStream_t stream) { if(!_hipcpu_runtime.streams().is_valid(stream)) return hipErrorInvalidValue; _hipcpu_runtime.submit_operation([=](){ for(size_t row = 0; row < height; ++row) { void* row_dst_begin = reinterpret_cast<char*>(dst) + row * dpitch; const void* row_src_begin = reinterpret_cast<const char*>(src) + row * spitch; memcpy(row_dst_begin, row_src_begin, width); } }, stream); return hipSuccess; } inline hipError_t hipMemcpy2D(void* dst, size_t dpitch, const void* src, size_t spitch, size_t width, size_t height, hipMemcpyKind kind) { hipError_t err = hipMemcpy2DAsync(dst, dpitch, src, spitch, width, height, kind, 0); if(err != hipSuccess) return err; _hipcpu_runtime.streams().get(0)->wait(); 
return err; } hipError_t hipMemcpy2DToArray(hipArray* dst, size_t wOffset, size_t hOffset, const void* src, size_t spitch, size_t width, size_t height, hipMemcpyKind kind); hipError_t hipMemcpyToArray(hipArray* dst, size_t wOffset, size_t hOffset, const void* src, size_t count, hipMemcpyKind kind); hipError_t hipMemcpyFromArray(void* dst, hipArray_const_t srcArray, size_t wOffset, size_t hOffset, size_t count, hipMemcpyKind kind); hipError_t hipMemcpyAtoH(void* dst, hipArray* srcArray, size_t srcOffset, size_t count); hipError_t hipMemcpyHtoA(hipArray* dstArray, size_t dstOffset, const void* srcHost, size_t count); inline hipError_t hipDeviceSynchronize() { _hipcpu_runtime.streams().for_each([](hipcpu::stream* s){ s->wait(); }); return hipSuccess; } hipError_t hipDeviceGetCacheConfig(hipFuncCache_t* pCacheConfig); const char* hipGetErrorString(hipError_t error); const char* hipGetErrorName(hipError_t error); inline hipError_t hipGetDeviceCount(int* count) { *count = 1; return hipSuccess; } inline hipError_t hipGetDevice(int* device) { *device = 0; return hipSuccess; } /* hipError_t hipIpcCloseMemHandle(void* devPtr); hipError_t hipIpcGetEventHandle(hipIpcEventHandle_t* handle, hipEvent_t event); hipError_t hipIpcGetMemHandle(hipIpcMemHandle_t* handle, void* devPtr); hipError_t hipIpcOpenEventHandle(hipEvent_t* event, hipIpcEventHandle_t handle); hipError_t hipIpcOpenMemHandle(void** devPtr, hipIpcMemHandle_t handle, unsigned int flags); */ inline hipError_t hipMemsetAsync(void* devPtr, int value, size_t count, hipStream_t stream = 0) { if(!_hipcpu_runtime.streams().is_valid(stream)) return hipErrorInvalidValue; _hipcpu_runtime.submit_operation([=](){ memset(devPtr, value, count); }, stream); return hipSuccess; } inline hipError_t hipMemset(void* devPtr, int value, size_t count) { hipError_t err = hipMemsetAsync(devPtr, value, count, 0); if(err != hipSuccess) return err; _hipcpu_runtime.streams().get(0)->wait(); return hipSuccess; } inline hipError_t 
hipMemsetD8(hipDeviceptr_t dest, unsigned char value, size_t sizeBytes) { return hipMemset(dest, value, sizeBytes); } /* hipError_t hipMemset2D(void* dst, size_t pitch, int value, size_t width, size_t height); hipError_t hipMemset2DAsync(void* dst, size_t pitch, int value, size_t width, size_t height, hipStream_t stream = 0); hipError_t hipMemset3D(hipPitchedPtr pitchedDevPtr, int value, hipExtent extent ); hipError_t hipMemset3DAsync(hipPitchedPtr pitchedDevPtr, int value, hipExtent extent, hipStream_t stream = 0); */ inline hipError_t hipGetDeviceProperties(hipDeviceProp_t* p_prop, int device) { if(device != 0) return hipErrorInvalidDevice; std::string device_name = "hipCPU OpenMP host device"; int max_dim = std::numeric_limits<int>::max(); strncpy(p_prop->name, device_name.c_str(), 256); // TODO: Find available memory p_prop->totalGlobalMem = std::numeric_limits<size_t>::max(); p_prop->sharedMemPerBlock = _hipcpu_runtime.dev().get_max_shared_memory(); p_prop->regsPerBlock = std::numeric_limits<int>::max(); p_prop->warpSize = 1; p_prop->maxThreadsPerBlock = _hipcpu_runtime.dev().get_max_threads(); p_prop->maxGridSize[0] = max_dim; p_prop->maxGridSize[1] = max_dim; p_prop->maxGridSize[2] = max_dim; p_prop->maxGridSize[0] = max_dim; p_prop->maxGridSize[1] = max_dim; p_prop->maxGridSize[2] = max_dim; // TODO: Find actual value p_prop->clockRate = 1; p_prop->memoryClockRate = 1; p_prop->memoryBusWidth = 1; p_prop->totalConstMem = std::numeric_limits<std::size_t>::max(); p_prop->major = 1; p_prop->minor = 0; p_prop->multiProcessorCount = _hipcpu_runtime.dev().get_num_compute_units(); // TODO: Find actual value p_prop->l2CacheSize = std::numeric_limits<int>::max(); p_prop->maxThreadsPerMultiProcessor = p_prop->maxThreadsPerBlock; p_prop->computeMode = 0; p_prop->clockInstructionRate = p_prop->clockRate; hipDeviceArch_t arch; arch.hasGlobalInt32Atomics = 1; arch.hasGlobalFloatAtomicExch = 1; arch.hasSharedInt32Atomics = 1; arch.hasSharedFloatAtomicExch = 1; 
arch.hasFloatAtomicAdd = 1; arch.hasGlobalInt64Atomics = 1; arch.hasSharedInt64Atomics = 1; arch.hasDoubles = 1; arch.hasWarpVote = 0; arch.hasWarpBallot = 0; arch.hasWarpShuffle = 0; arch.hasFunnelShift = 0; arch.hasThreadFenceSystem = 1; arch.hasSyncThreadsExt = 1; arch.hasSurfaceFuncs = 0; arch.has3dGrid = 1; arch.hasDynamicParallelism = 0; p_prop->arch = arch; p_prop->concurrentKernels = 1; p_prop->pciBusID = 0; p_prop->pciDeviceID = 0; p_prop->maxSharedMemoryPerMultiProcessor = p_prop->sharedMemPerBlock; p_prop->isMultiGpuBoard = 0; p_prop->canMapHostMemory = 1; p_prop->gcnArch = 0; return hipSuccess; } hipError_t hipDeviceGetAttribute(int* pi, hipDeviceAttribute_t attr, int device); hipError_t hipOccupancyMaxActiveBlocksPerMultiprocessor(int* numBlocks, const void* func, int blockSize, size_t dynamicSMemSize); hipError_t hipPointerGetAttributes(hipPointerAttribute_t* attributes, void* ptr); hipError_t hipMemGetInfo(size_t* free, size_t* total); inline hipError_t hipEventCreate(hipEvent_t* event) { *event = _hipcpu_runtime.create_event(); return hipSuccess; } inline hipError_t hipEventRecord(hipEvent_t event, hipStream_t stream = 0) { if(!_hipcpu_runtime.events().is_valid(event) || !_hipcpu_runtime.streams().is_valid(stream)) return hipErrorInvalidValue; std::shared_ptr<hipcpu::event> evt = _hipcpu_runtime.events().get_shared(event); _hipcpu_runtime.submit_operation([evt](){ evt->mark_as_finished(); }, stream); return hipSuccess; } inline hipError_t hipEventSynchronize(hipEvent_t event) { if(!_hipcpu_runtime.events().is_valid(event)) return hipErrorInvalidValue; hipcpu::event* evt = _hipcpu_runtime.events().get(event); evt->wait(); if(evt->is_complete()) return hipSuccess; return hipErrorUnknown; } hipError_t hipEventElapsedTime(float* ms, hipEvent_t start, hipEvent_t stop); inline hipError_t hipEventDestroy(hipEvent_t event) { if(!_hipcpu_runtime.events().is_valid(event)) return hipErrorInvalidValue; _hipcpu_runtime.destroy_event(event); return hipSuccess; } 
hipError_t hipDriverGetVersion(int* driverVersion);

// Reports a sentinel version; there is no real HIP runtime underneath.
inline hipError_t hipRuntimeGetVersion(int* runtimeVersion)
{
  *runtimeVersion = 99999;
  return hipSuccess;
}

// Declared but not defined in this header — presumably unimplemented
// (only one host device exists, so peer access is meaningless). TODO confirm
// no definitions exist elsewhere in the project.
hipError_t hipDeviceCanAccessPeer(int* canAccessPeer, int device, int peerDevice);
hipError_t hipDeviceDisablePeerAccess(int peerDevice);
hipError_t hipDeviceEnablePeerAccess(int peerDevice, unsigned int flags);
hipError_t hipCtxDisablePeerAccess(hipCtx_t peerCtx);
hipError_t hipCtxEnablePeerAccess(hipCtx_t peerCtx, unsigned int flags);
hipError_t hipDevicePrimaryCtxGetState(hipDevice_t dev, unsigned int* flags, int* active);
hipError_t hipDevicePrimaryCtxRelease(hipDevice_t dev);
hipError_t hipDevicePrimaryCtxRetain(hipCtx_t* pctx, hipDevice_t dev);
hipError_t hipDevicePrimaryCtxReset(hipDevice_t dev);
hipError_t hipDevicePrimaryCtxSetFlags(hipDevice_t dev, unsigned int flags);
hipError_t hipMemGetAddressRange(hipDeviceptr_t* pbase, size_t* psize, hipDeviceptr_t dptr);
hipError_t hipMemcpyPeer(void* dst, int dstDevice, const void* src, int srcDevice, size_t count);
hipError_t hipMemcpyPeerAsync(void* dst, int dstDevice, const void* src, int srcDevice, size_t count, hipStream_t stream = 0);

// Profile APIs:
hipError_t hipProfilerStart();
hipError_t hipProfilerStop();

hipError_t hipSetDeviceFlags(unsigned int flags);
hipError_t hipEventCreateWithFlags(hipEvent_t* event, unsigned int flags);

// Non-blocking completion check for an event created by hipEventCreate.
// Returns hipErrorNotReady while the recording stream has not yet reached it.
inline hipError_t hipEventQuery(hipEvent_t event)
{
  if(!_hipcpu_runtime.events().is_valid(event))
    return hipErrorInvalidValue;

  bool is_ready = _hipcpu_runtime.events().get(event)->is_complete();

  if(!is_ready)
    return hipErrorNotReady;
  return hipSuccess;
}

/*
hipError_t hipCtxCreate(hipCtx_t* ctx, unsigned int flags, hipDevice_t device);
hipError_t hipCtxDestroy(hipCtx_t ctx);
hipError_t hipCtxPopCurrent(hipCtx_t* ctx);
hipError_t hipCtxPushCurrent(hipCtx_t ctx);
hipError_t hipCtxSetCurrent(hipCtx_t ctx);
hipError_t hipCtxGetCurrent(hipCtx_t* ctx);
hipError_t hipCtxGetDevice(hipDevice_t* device);
hipError_t hipCtxGetApiVersion(hipCtx_t ctx, int* apiVersion);
hipError_t hipCtxGetCacheConfig(hipFuncCache* cacheConfig);
hipError_t hipCtxSetCacheConfig(hipFuncCache cacheConfig);
hipError_t hipCtxSetSharedMemConfig(hipSharedMemConfig config);
hipError_t hipCtxGetSharedMemConfig(hipSharedMemConfig* pConfig);
hipError_t hipCtxSynchronize(void);
hipError_t hipCtxGetFlags(unsigned int* flags);
hipError_t hipCtxDetach(hipCtx_t ctx);
hipError_t hipDeviceGet(hipDevice_t* device, int ordinal);
hipError_t hipDeviceComputeCapability(int* major, int* minor, hipDevice_t device);
hipError_t hipDeviceGetName(char* name, int len, hipDevice_t device);
hipError_t hipDeviceGetPCIBusId(char* pciBusId, int len, hipDevice_t device);
hipError_t hipDeviceGetByPCIBusId(int* device, const char* pciBusId);
hipError_t hipDeviceGetSharedMemConfig(hipSharedMemConfig* config);
hipError_t hipDeviceSetSharedMemConfig(hipSharedMemConfig config);
hipError_t hipDeviceGetLimit(size_t* pValue, hipLimit_t limit);
hipError_t hipDeviceTotalMem(size_t* bytes, hipDevice_t device);
hipError_t hipModuleLoad(hipModule_t* module, const char* fname);
hipError_t hipModuleUnload(hipModule_t hmod);
hipError_t hipModuleGetFunction(hipFunction_t* function, hipModule_t module, const char* kname);
hipError_t hipFuncGetAttributes(hipFuncAttributes* attr, const void* func);
hipError_t hipModuleGetGlobal(hipDeviceptr_t* dptr, size_t* bytes, hipModule_t hmod, const char* name);
hipError_t hipModuleLoadData(hipModule_t* module, const void* image);
hipError_t hipModuleLoadDataEx(hipModule_t* module, const void* image, unsigned int numOptions, hipJitOption* options, void** optionValues);
hipError_t hipModuleLaunchKernel(hipFunction_t f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, hipStream_t stream, void** kernelParams, void** extra);
hipError_t hipFuncSetCacheConfig(const void* func, hipFuncCache_t cacheConfig);
*/

template <class T>
hipError_t hipOccupancyMaxPotentialBlockSize(int* minGridSize, int* blockSize, T func,
                                             size_t dynamicSMemSize = 0, int blockSizeLimit = 0,
                                             unsigned int flags = 0);

/*
template <class T, int dim, enum hipTextureReadMode readMode>
hipError_t hipBindTexture(size_t* offset, const struct texture<T, dim, readMode>& tex, const void* devPtr, size_t size = UINT_MAX);
template <class T, int dim, enum hipTextureReadMode readMode>
hipError_t hipBindTexture(size_t* offset, struct texture<T, dim, readMode>& tex, const void* devPtr, const struct hipChannelFormatDesc& desc, size_t size = UINT_MAX);
template <class T, int dim, enum hipTextureReadMode readMode>
hipError_t hipUnbindTexture(struct texture<T, dim, readMode>* tex);
hipError_t hipBindTexture(size_t* offset, textureReference* tex, const void* devPtr, const hipChannelFormatDesc* desc, size_t size = UINT_MAX);
template <class T, int dim, enum hipTextureReadMode readMode>
hipError_t hipBindTextureToArray(struct texture<T, dim, readMode>& tex, hipArray_const_t array, const struct hipChannelFormatDesc& desc);
template <class T, int dim, enum hipTextureReadMode readMode>
hipError_t hipBindTextureToArray(struct texture<T, dim, readMode> *tex, hipArray_const_t array, const struct hipChannelFormatDesc* desc);
template <class T, int dim, enum hipTextureReadMode readMode>
hipError_t hipBindTextureToArray(struct texture<T, dim, readMode>& tex, hipArray_const_t array);
template <class T>
hipChannelFormatDesc hipCreateChannelDesc();
hipChannelFormatDesc hipCreateChannelDesc(int x, int y, int z, int w, hipChannelFormatKind f);
hipError_t hipCreateTextureObject(hipTextureObject_t* pTexObject, const hipResourceDesc* pResDesc, const hipTextureDesc* pTexDesc, const hipResourceViewDesc* pResViewDesc);
hipError_t hipDestroyTextureObject(hipTextureObject_t textureObject);
hipError_t hipCreateSurfaceObject(hipSurfaceObject_t* pSurfObject, const hipResourceDesc* pResDesc);
hipError_t hipDestroySurfaceObject(hipSurfaceObject_t surfaceObject);
hipError_t hipGetTextureObjectResourceDesc(hipResourceDesc* pResDesc, hipTextureObject_t textureObject);
hipError_t hipGetTextureAlignmentOffset(size_t* offset, const textureReference* texref);
hipError_t hipGetChannelDesc(hipChannelFormatDesc* desc, hipArray_const_t array);
*/

// Generators for the HIP/CUDA short-vector types (float4, int2, ...):
// plain structs with x/y/z/w members and no constructors or operators.
#define HIPCPU_MAKE_VECTOR1(T, name) \
struct name {\
  T x; \
};

#define HIPCPU_MAKE_VECTOR2(T, name) \
struct name {\
  T x; \
  T y; \
};

#define HIPCPU_MAKE_VECTOR3(T, name) \
struct name {\
  T x; \
  T y; \
  T z; \
};

#define HIPCPU_MAKE_VECTOR4(T, name) \
struct name {\
  T x; \
  T y; \
  T z; \
  T w; \
};

#define HIPCPU_MAKE_VECTOR_TYPE(T, prefix) \
  HIPCPU_MAKE_VECTOR1(T, prefix##1) \
  HIPCPU_MAKE_VECTOR2(T, prefix##2) \
  HIPCPU_MAKE_VECTOR3(T, prefix##3) \
  HIPCPU_MAKE_VECTOR4(T, prefix##4)

HIPCPU_MAKE_VECTOR_TYPE(signed char, char)
HIPCPU_MAKE_VECTOR_TYPE(unsigned char, uchar)
HIPCPU_MAKE_VECTOR_TYPE(short, short)
HIPCPU_MAKE_VECTOR_TYPE(unsigned short, ushort)
HIPCPU_MAKE_VECTOR_TYPE(int, int)
HIPCPU_MAKE_VECTOR_TYPE(unsigned, uint)
HIPCPU_MAKE_VECTOR_TYPE(long, long)
HIPCPU_MAKE_VECTOR_TYPE(unsigned long, ulong)
HIPCPU_MAKE_VECTOR_TYPE(long long, longlong)
HIPCPU_MAKE_VECTOR_TYPE(unsigned long long, ulonglong)
HIPCPU_MAKE_VECTOR_TYPE(float, float)
HIPCPU_MAKE_VECTOR_TYPE(double, double)

// Block-wide barrier, implemented as an OpenMP barrier across the threads
// executing the current block. Usual OpenMP barrier rules apply: all threads
// of the team must reach it.
__device__
inline void __syncthreads()
{
  #pragma omp barrier
}

// Rounding-mode device intrinsics. On the host all _rd/_rn/_ru/_rz variants
// map to the same plain IEEE arithmetic in the current FP environment; the
// suffix-implied rounding mode is NOT honored.
__device__
inline float __fadd_rd(float x, float y)
{ return x+y; }

__device__
inline float __fadd_rn(float x, float y)
{ return x+y; }

__device__
inline float __fadd_ru(float x, float y)
{ return x+y; }

__device__
inline float __fadd_rz(float x, float y)
{ return x+y; }

__device__
inline float __fdiv_rd(float x, float y)
{ return x/y; }

__device__
inline float __fdiv_rn(float x, float y)
{ return x/y; }

__device__
inline float __fdiv_ru(float x, float y)
{ return x/y; }

__device__
inline float __fdiv_rz(float x, float y)
{ return x/y; }

// "Fast" division is exact division here.
__device__
inline float __fdividef(float x, float y)
{ return x/y; }

__device__
inline float __fmaf_rd(float x, float y, float z)
{ return std::fma(x,y,z); }

__device__
inline float __fmaf_rn(float x, float y, float z)
{ return std::fma(x,y,z); }

__device__
inline float __fmaf_ru(float x, float y, float z)
{ return std::fma(x,y,z); }

__device__
inline float __fmaf_rz(float x, float y, float z)
{ return std::fma(x,y,z); }

__device__
inline float __fmul_rd(float x, float y)
{ return x*y; }

__device__
inline float __fmul_rn(float x, float y)
{ return x*y; }

__device__
inline float __fmul_ru(float x, float y)
{ return x*y; }

__device__
inline float __fmul_rz(float x, float y)
{ return x*y; }

__device__
inline float __frcp_rd(float x)
{ return 1.f/x; }

__device__
inline float __frcp_rn(float x)
{ return 1.f/x; }

__device__
inline float __frcp_ru(float x)
{ return 1.f/x; }

__device__
inline float __frcp_rz(float x)
{ return 1.f/x; }

__device__
inline float __frsqrt_rn(float x)
{ return 1.f/std::sqrt(x); }

__device__
inline float __fsqrt_rd(float x)
{ return std::sqrt(x); }

__device__
inline float __fsqrt_rn(float x)
{ return std::sqrt(x); }

__device__
inline float __fsqrt_ru(float x)
{ return std::sqrt(x); }

__device__
inline float __fsqrt_rz(float x)
{ return std::sqrt(x); }

__device__
inline float __fsub_rd(float x, float y)
{ return x-y; }

__device__
inline float __fsub_rn(float x, float y)
{ return x-y; }

__device__
inline float __fsub_ru(float x, float y)
{ return x-y; }

__device__
inline float __fsub_rz(float x, float y)
{ return x-y; }

// Double-precision counterparts; same caveat about ignored rounding modes.
__device__
inline double __dadd_rd(double x, double y)
{ return x+y; }

__device__
inline double __dadd_rn(double x, double y)
{ return x+y; }

__device__
inline double __dadd_ru(double x, double y)
{ return x+y; }

__device__
inline double __dadd_rz(double x, double y)
{ return x+y; }

__device__
inline double __ddiv_rd(double x, double y)
{ return x/y; }

__device__
inline double __ddiv_rn(double x, double y)
{ return x/y; }

__device__
inline double __ddiv_ru(double x, double y)
{ return x/y; }

__device__
inline double __ddiv_rz(double x, double y)
{ return x/y; }

__device__
inline double __dmul_rd(double x, double y)
{ return x*y; }

__device__
inline double __dmul_rn(double x, double y)
{ return x*y; }

__device__
inline double __dmul_ru(double x, double y)
{ return x*y; }

__device__
inline double __dmul_rz(double x, double y)
{ return x*y; }

__device__
inline double __drcp_rd(double x)
{ return 1./x; }

__device__
inline double __drcp_rn(double x)
{ return 1./x; }

__device__
inline double __drcp_ru(double x)
{ return 1./x; }

__device__
inline double __drcp_rz(double x)
{ return 1./x; }

__device__
inline double __dsqrt_rd(double x)
{ return std::sqrt(x); }

__device__
inline double __dsqrt_rn(double x)
{ return std::sqrt(x); }

__device__
inline double __dsqrt_ru(double x)
{ return std::sqrt(x); }

__device__
inline double __dsqrt_rz(double x)
{ return std::sqrt(x); }

__device__
inline double __dsub_rd(double x, double y)
{ return x - y; }

__device__
inline double __dsub_rn(double x, double y)
{ return x - y; }

__device__
inline double __dsub_ru(double x, double y)
{ return x - y; }

__device__
inline double __dsub_rz(double x, double y)
{ return x - y; }

__device__
inline double __fma_rd(double x, double y, double z)
{ return std::fma(x,y,z); }

__device__
inline double __fma_rn(double x, double y, double z)
{ return std::fma(x,y,z); }

__device__
inline double __fma_ru(double x, double y, double z)
{ return std::fma(x,y,z); }

__device__
inline double __fma_rz(double x, double y, double z)
{ return std::fma(x,y,z); }

#endif // HIPCPU_RUNTIME_H
StochasticMaxPooling.h
// -------------------------------------------------------------------------- // Binary Brain -- binary neural net framework // // Copyright (C) 2018 by Ryuji Fuchikami // https://github.com/ryuz // ryuji.fuchikami@nifty.com // -------------------------------------------------------------------------- #pragma once #include <vector> #include <random> #include "bb/MaxPooling.h" namespace bb { // MaxPoolingクラス template <typename FT = float, typename BT = float> class StochasticMaxPooling : public MaxPooling<FT, BT> { using _super = MaxPooling<FT, BT>; public: static inline std::string ModelName(void) { return "StochasticMaxPooling"; } static inline std::string ObjectName(void){ return ModelName() + "_" + DataType<FT>::Name() + "_" + DataType<BT>::Name(); } std::string GetModelName(void) const override { return ModelName(); } std::string GetObjectName(void) const override { return ObjectName(); } protected: bool m_host_only = false; index_t m_filter_h_size; index_t m_filter_w_size; index_t m_input_w_size; index_t m_input_h_size; index_t m_input_c_size; index_t m_output_w_size; index_t m_output_h_size; index_t m_output_c_size; indices_t m_input_shape; indices_t m_output_shape; FrameBuffer m_x_buf; protected: StochasticMaxPooling(index_t filter_h_size, index_t filter_w_size) { m_filter_h_size = filter_h_size; m_filter_w_size = filter_w_size; } /** * @brief コマンド処理 * @detail コマンド処理 * @param args コマンド */ void CommandProc(std::vector<std::string> args) { // HostOnlyモード設定 if (args.size() == 2 && args[0] == "host_only") { m_host_only = EvalBool(args[1]); } } public: ~StochasticMaxPooling() {} static std::shared_ptr<StochasticMaxPooling> Create(index_t filter_h_size=1, index_t filter_w_size=1) { auto self = std::shared_ptr<StochasticMaxPooling>(new StochasticMaxPooling(filter_h_size, filter_w_size)); return self; } index_t GetFilterHeight(void) const override { return m_filter_h_size; } index_t GetFilterWidth(void) const override { return m_filter_w_size; } /** * @brief 入力形状設定 
* @detail 入力形状を設定する * 内部変数を初期化し、以降、GetOutputShape()で値取得可能となることとする * 同一形状を指定しても内部変数は初期化されるものとする * @param shape 1フレームのノードを構成するshape * @return 出力形状を返す */ indices_t SetInputShape(indices_t shape) { // 設定済みなら何もしない if ( shape == this->GetInputShape() ) { return this->GetOutputShape(); } BB_ASSERT(shape.size() == 3); m_input_c_size = shape[0]; m_input_h_size = shape[1]; m_input_w_size = shape[2]; m_output_w_size = (m_input_w_size + m_filter_w_size - 1) / m_filter_w_size; m_output_h_size = (m_input_h_size + m_filter_h_size - 1) / m_filter_h_size; m_output_c_size = m_input_c_size; m_input_shape = shape; m_output_shape = indices_t({m_output_c_size, m_output_h_size, m_output_w_size}); return m_output_shape; } /** * @brief 入力形状取得 * @detail 入力形状を取得する * @return 入力形状を返す */ indices_t GetInputShape(void) const { return m_input_shape; } /** * @brief 出力形状取得 * @detail 出力形状を取得する * @return 出力形状を返す */ indices_t GetOutputShape(void) const { return m_output_shape; } protected: /* inline void* GetInputPtr(NeuralNetBuffer<T>& buf, int c, int y, int x) { return buf.Lock((c*m_input_h_size + y)*m_input_w_size + x); } inline void* GetOutputPtr(NeuralNetBuffer<T>& buf, int c, int y, int x) { return buf.Lock((c*m_output_h_size + y)*m_output_w_size + x); } */ inline index_t GetInputNode(index_t c, index_t y, index_t x) { return (c * m_input_h_size + y) * m_input_w_size + x; } inline index_t GetOutputNode(index_t c, index_t y, index_t x) { return (c * m_output_h_size + y) * m_output_w_size + x; } public: FrameBuffer Forward(FrameBuffer x_buf, bool train = true) { BB_ASSERT(x_buf.GetType() == DataType<FT>::type); // SetInputShpaeされていなければ初回に設定 if (x_buf.GetShape() != m_input_shape) { SetInputShape(x_buf.GetShape()); } // backwardの為に保存 if ( train ) { m_x_buf = x_buf; } // 出力を設定 FrameBuffer y_buf(x_buf.GetFrameSize(), m_output_shape, DataType<FT>::type); #ifdef BB_WITH_CUDA // CUDA版 if ( DataType<FT>::type == BB_TYPE_FP32 && !m_host_only && m_filter_h_size == 2 && m_filter_w_size == 2 && 
x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) { auto ptr_x = x_buf.LockDeviceMemoryConst(); auto ptr_y = y_buf.LockDeviceMemory(true); bbcu_fp32_StochasticMaxPooling2x2_Forward ( (float const *)ptr_x.GetAddr(), (float* )ptr_y.GetAddr(), (int )m_input_w_size, (int )m_input_h_size, (int )m_output_w_size, (int )m_output_h_size, (int )m_output_c_size, (int )y_buf.GetFrameSize(), (int )(y_buf.GetFrameStride() / sizeof(float)) ); return y_buf; } #endif // 汎用版実装 { auto x_ptr = x_buf.LockConst<FT>(); auto y_ptr = y_buf.Lock<FT>(true); auto frame_size = x_buf.GetFrameSize(); #pragma omp parallel for for (index_t c = 0; c < m_input_c_size; ++c) { for (index_t y = 0; y < m_output_h_size; ++y) { for (index_t x = 0; x < m_output_w_size; ++x) { for (index_t frame = 0; frame < frame_size; ++frame) { // OR演算を実施(反転してANDを取って、出力反転) BT out_sig = (BT)1.0; for (index_t fy = 0; fy < m_filter_h_size; ++fy) { index_t iy = y*m_filter_h_size + fy; if ( iy < m_input_h_size ) { for (index_t fx = 0; fx < m_filter_w_size; ++fx) { index_t ix = x*m_filter_w_size + fx; if ( ix < m_input_w_size ) { FT in_sig = x_ptr.Get(frame, {c, iy, ix}); out_sig *= ((BT)1.0 - in_sig); } } } } y_ptr.Set(frame, {c, y, x}, (FT)((BT)1.0 - out_sig)); } } } } return y_buf; } } FrameBuffer Backward(FrameBuffer dy_buf) { if (dy_buf.Empty()) { return FrameBuffer(); } BB_ASSERT(dy_buf.GetType() == DataType<BT>::type); BB_ASSERT(dy_buf.GetShape().size() == 3); FrameBuffer dx_buf(dy_buf.GetFrameSize(), m_input_shape, DataType<BT>::type); FrameBuffer x_buf = m_x_buf; m_x_buf = FrameBuffer(); #ifdef BB_WITH_CUDA if ( DataType<BT>::type == BB_TYPE_FP32 && DataType<FT>::type == BB_TYPE_FP32 && !m_host_only && m_filter_h_size == 2 && m_filter_w_size == 2 && x_buf.IsDeviceAvailable() && dy_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) { // CUDA版 auto ptr_x = x_buf.LockDeviceMemoryConst(); auto ptr_dy = dy_buf.LockDeviceMemoryConst(); auto ptr_dx = 
dx_buf.LockDeviceMemory(true); bbcu_fp32_StochasticMaxPooling2x2_Backward ( (float const *)ptr_x.GetAddr(), (float const *)ptr_dy.GetAddr(), (float *)ptr_dx.GetAddr(), (int )m_input_w_size, (int )m_input_h_size, (int )m_output_w_size, (int )m_output_h_size, (int )m_output_c_size, (int )dy_buf.GetFrameSize(), (int )(dy_buf.GetFrameStride() / sizeof(float)) ); return dx_buf; } #endif // 汎用版実装 if ( m_filter_h_size == 2 && m_filter_w_size == 2 ) { auto x_ptr = x_buf.LockConst<FT>(); auto dy_ptr = dy_buf.LockConst<BT>(); auto dx_ptr = dx_buf.Lock<BT>(true); auto frame_size = x_buf.GetFrameSize(); #pragma omp parallel for for (index_t c = 0; c < m_input_c_size; ++c) { for (index_t y = 0; y < m_output_h_size; ++y) { for (index_t x = 0; x < m_output_w_size; ++x) { for (index_t frame = 0; frame < frame_size; ++frame) { BT in_sig[2][2] = {{0, 0}, {0, 0}}; for (index_t fy = 0; fy < 2; ++fy) { index_t iy = y*2 + fy; if ( iy < m_input_h_size ) { for (index_t fx = 0; fx < 2; ++fx) { index_t ix = x*2 + fx; if ( ix < m_input_w_size ) { in_sig[fy][fx] = (BT)x_ptr.Get(frame, c, iy, ix); } } } } BT out_grad = dy_ptr.Get(frame, c, y, x); BT in_grad[2][2]; in_grad[0][0] = (BT)(out_grad * (1.0 - in_sig[0][1]) * (1.0 - in_sig[1][0]) * (1.0 - in_sig[1][1])); in_grad[0][1] = (BT)(out_grad * (1.0 - in_sig[0][0]) * (1.0 - in_sig[1][0]) * (1.0 - in_sig[1][1])); in_grad[1][0] = (BT)(out_grad * (1.0 - in_sig[0][0]) * (1.0 - in_sig[0][1]) * (1.0 - in_sig[1][1])); in_grad[1][1] = (BT)(out_grad * (1.0 - in_sig[0][0]) * (1.0 - in_sig[0][1]) * (1.0 - in_sig[1][0])); for (index_t fy = 0; fy < 2; ++fy) { index_t iy = y*2 + fy; if ( iy < m_input_h_size ) { for (index_t fx = 0; fx < 2; ++fx) { index_t ix = x*2 + fx; if ( ix < m_input_w_size ) { dx_ptr.Set(frame, c, iy, ix, in_grad[fy][fx]); } } } } } } } } return dx_buf; } // 汎用版実装(未着手:下記は MaxPooling のまま) BB_ASSERT(0); #if 0 { auto x_ptr = x_buf.LockConst<FT>(); auto y_ptr = m_y_buf.LockConst<FT>(); auto dy_ptr = dy_buf.LockConst<BT>(); auto dx_ptr = 
dx_buf.Lock<BT>(true); auto frame_size = m_x_buf.GetFrameSize(); #pragma omp parallel for for (index_t c = 0; c < m_input_c_size; ++c) { for (index_t y = 0; y < m_output_h_size; ++y) { for (index_t x = 0; x < m_output_w_size; ++x) { for (index_t frame = 0; frame < frame_size; ++frame) { FT out_sig = y_ptr.Get(frame, c, y, x); BT grad = dy_ptr.Get(frame, c, y, x); for (index_t fy = 0; fy < m_filter_h_size; ++fy) { index_t iy = y*m_filter_h_size + fy; if ( iy < m_input_h_size ) { for (index_t fx = 0; fx < m_filter_w_size; ++fx) { index_t ix = x*m_filter_w_size + fx; if ( ix < m_input_w_size ) { FT in_sig = x_ptr.Get(frame, c, iy, ix); dx_ptr.Set(frame, c, iy, ix, (in_sig == out_sig) ? grad : (BT)0); } } } } } } } } return dx_buf; } #endif } // シリアライズ protected: void DumpObjectData(std::ostream &os) const override { // バージョン std::int64_t ver = 1; bb::SaveValue(os, ver); // 親クラス _super::DumpObjectData(os); // メンバ bb::SaveValue(os, m_host_only); bb::SaveValue(os, m_filter_h_size); bb::SaveValue(os, m_filter_w_size); bb::SaveValue(os, m_input_w_size); bb::SaveValue(os, m_input_h_size); bb::SaveValue(os, m_input_c_size); bb::SaveValue(os, m_output_w_size); bb::SaveValue(os, m_output_h_size); bb::SaveValue(os, m_output_c_size); } void LoadObjectData(std::istream &is) override { // バージョン std::int64_t ver; bb::LoadValue(is, ver); BB_ASSERT(ver == 1); // 親クラス _super::LoadObjectData(is); // メンバ bb::LoadValue(is, m_host_only); bb::LoadValue(is, m_filter_h_size); bb::LoadValue(is, m_filter_w_size); bb::LoadValue(is, m_input_c_size); bb::LoadValue(is, m_input_h_size); bb::LoadValue(is, m_input_w_size); bb::LoadValue(is, m_output_c_size); bb::LoadValue(is, m_output_h_size); bb::LoadValue(is, m_output_w_size); // 再構築 m_input_shape = indices_t({m_input_c_size, m_input_h_size, m_input_w_size}); m_output_shape = indices_t({m_output_c_size, m_output_h_size, m_output_w_size}); } }; }
loops_course.c
#include <stdio.h>
#include <math.h>
#include <malloc.h>
#include <string.h>

#define N 729
#define reps 1000
#include <omp.h>

/*
 * OpenMP loop-scheduling coursework: two triangular/irregular loops are run
 * repeatedly under a hand-written affinity scheduler with work stealing.
 * Each thread starts with a contiguous 1/nthreads slice of [0,N) and, when
 * its slice is exhausted, steals from the most-loaded thread (tracked in an
 * ordered "top list" protected by a single lock).
 */

double a[N][N], b[N][N], c[N];
int jmax[N];

void init1(void);
void init2(void);
void runloop(int);
void loop1chunk(int, int);
void loop2chunk(int, int);
void valid1(void);
void valid2(void);

int main(int argc, char *argv[]) {
  double start1,start2,end1,end2;
  int r;

  /* benchmark loop 1 */
  init1();
  start1 = omp_get_wtime();
  for (r=0; r<reps; r++){
    runloop(1);
  }
  end1  = omp_get_wtime();
  valid1();
  printf("Total time for %d reps of loop 1 = %f\n",reps, (float)(end1-start1));

  /* benchmark loop 2 */
  init2();
  start2 = omp_get_wtime();
  for (r=0; r<reps; r++){
    runloop(2);
  }
  end2  = omp_get_wtime();
  valid2();
  printf("Total time for %d reps of loop 2 = %f\n",reps, (float)(end2-start2));
}

/* Initialise a to zero and b to a dense pattern for loop 1. */
void init1(void){
  int i,j;

  for (i=0; i<N; i++){
    for (j=0; j<N; j++){
      a[i][j] = 0.0;
      b[i][j] = 3.142*(i+j);
    }
  }
}

/* Initialise jmax (irregular inner-loop bounds: N for some rows, 1 for the
 * rest), c to zero and b for loop 2. */
void init2(void){
  int i,j, expr;

  for (i=0; i<N; i++){
    expr =  i%( 3*(i/30) + 1);
    if ( expr == 0) {
      jmax[i] = N;
    }
    else {
      jmax[i] = 1;
    }
    c[i] = 0.0;
  }

  for (i=0; i<N; i++){
    for (j=0; j<N; j++){
      b[i][j] = (double) (i*j+1) / (double) (N*N);
    }
  }
}

/**
 *******************The definition of new structure*****************************
 **/
// One entry of the ordered top list: a thread id and how many iterations
// remain in that thread's local chunk. The list is kept sorted by
// avail_size, largest first; thread_num == -1 marks an unused slot.
typedef struct _TopList
{
  int thread_num;
  int avail_size;
} TopList ;

/**
 *******************The declaration of new functions**************************
 **/
/**
 * @brief Update the available size of a thread in the top list, reordering
 *        the list so it stays sorted by avail_size (descending)
 * @param plist      pointer to the top list
 * @param list_len   length of the top list
 * @param t_num      thread number to update
 * @param avail_size new available size for that thread
 * @return void
 **/
void updatetoplist(TopList *plist , int list_len , int t_num , int avail_size);

/**
 * @brief Take a piece of work from a specific thread's local chunk
 * NOTE(review): this prototype is named `gettrunk` but the function defined
 * below is `getchunk` — the declaration is never defined and calling it
 * would fail at link time. Only `getchunk` is actually used.
 * @param pavail  two-dimensional array holding each thread's [lo,hi) chunk
 * @param plist   pointer to the top list
 * @param t_num   thread number to take work from
 * @param t_size  total number of threads
 * @param plow    out: start iteration of the piece taken
 * @param phigh   out: end iteration of the piece taken
 * @return 1 on success, 0 if the chunk is empty
 **/
int gettrunk(int (*pavail)[2] , TopList *plist , int t_num , int t_size , int *plow , int *phigh);

/**
 * @brief Hand out the next piece of work for a thread: first from its own
 *        chunk, then (when empty) stolen from the most-loaded thread
 * @param pavail  two-dimensional array holding each thread's [lo,hi) chunk
 * @param plock   lock serialising all chunk/top-list updates
 * @param plist   pointer to the top list
 * @param t_num   requesting thread number
 * @param t_size  total number of threads
 * @param plow    out: start iteration of the piece obtained
 * @param phigh   out: end iteration of the piece obtained
 * @return 1 on success, 0 when no work remains anywhere
 **/
int dispatchwork(int (*pavail)[2] , omp_lock_t *plock , TopList *plist , \
                 int t_num , int t_size , int *plow , int *phigh);

/**
 *******************The modification of runloops function*****************************
 **/
void runloop(int loopid)  {
  // Shared values among threads
  int (*pavail)[2];       // per-thread [lo,hi) bounds of remaining local work
  TopList *plist;         // top list of threads ordered by remaining work
  omp_lock_t top_lock;    // serialises all work-assignment actions

#pragma omp parallel default(none) shared(loopid,pavail,plist,top_lock)
  {
    int myid  = omp_get_thread_num();
    int nthreads = omp_get_num_threads();

    // printf("INIT:T_ID-%d TOTAL-%d\n",myid,nthreads);
    // int ipt = (int) ceil((double)N/(double)nthreads);
    // int lo = myid*ipt;
    // int hi = (myid+1)*ipt;
    // if (hi > N) hi = N;

    // One thread performs the shared initialisation (implicit barrier at the
    // end of single makes it visible to all threads before they proceed).
#pragma omp single
    {
      // Space for each thread's chunk boundaries
      pavail = (int(*)[2])malloc( sizeof(int)*2*nthreads );
      int block_size = N /nthreads ;
      // Initial contiguous chunk per thread; the last chunk absorbs the
      // remainder when nthreads does not divide N.
      for (int i = 0; i < nthreads; i++)
      {
        pavail[i][0] = i * block_size;
        pavail[i][1] = (i+1) * block_size;
        if ( N - (i+1) * block_size < block_size )
          pavail[i][1] = N;
      }
      // Initialise the top list (all slots empty, marked by -1 bytes) and
      // the lock.
      // NOTE(review): top_lock is never destroyed with omp_destroy_lock().
      omp_init_lock(&top_lock) ;
      plist = (TopList *)malloc(sizeof(TopList)*nthreads);
      memset(plist,-1,sizeof(TopList)*nthreads);
    }

    int result = 1;   // NOTE(review): unused
    int lo, hi;
    // Repeatedly fetch a piece of work (local first, then stolen) until
    // dispatchwork reports that no work remains anywhere.
    while ( dispatchwork(pavail,&top_lock,plist,myid,nthreads,&lo,&hi) )
    {
      switch (loopid)
      {
        case 1: loop1chunk(lo,hi); break;
        case 2: loop2chunk(lo,hi); break;
      }
    }
  }

  free(plist);
  free(pavail);
}

/* Loop 1: triangular update, row i touches columns (i,N). */
void loop1chunk(int lo, int hi) {
  int i,j;

  for (i=lo; i<hi; i++){
    for (j=N-1; j>i; j--){
      a[i][j] += cos(b[i][j]);
    }
  }
}

/* Loop 2: highly irregular work per row, governed by jmax[i]. */
void loop2chunk(int lo, int hi) {
  int i,j,k;
  double rN2;

  rN2 = 1.0 / (double) (N*N);

  for (i=lo; i<hi; i++){
    for (j=0; j < jmax[i]; j++){
      for (k=0; k<j; k++){
        c[i] += (k+1) * log (b[i][j]) * rN2;
      }
    }
  }
}

/* Checksum of a for loop 1. */
void valid1(void) {
  int i,j;
  double suma;

  suma= 0.0;
  for (i=0; i<N; i++){
    for (j=0; j<N; j++){
      suma += a[i][j];
    }
  }
  printf("Loop 1 check: Sum of a is %lf\n", suma);
}

/* Checksum of c for loop 2. */
void valid2(void) {
  int i;
  double sumc;

  sumc= 0.0;
  for (i=0; i<N; i++){
    sumc += c[i];
  }
  printf("Loop 2 check: Sum of c is %f\n", sumc);
}

/**
 *******************The definition of new functions********************************
 **/
// Update a thread's available size in the top list while keeping the list
// sorted by avail_size, largest first. Two phases: bubble the updated entry
// down from the front until its old slot is found (iter_mode 0), then bubble
// it up if it now exceeds earlier entries (iter_mode 1). Must be called with
// the top lock held.
void updatetoplist(TopList *plist , int list_len , int t_num , int avail_size)
{
  int origin_num = t_num;
  TopList tp_swap,tp_cur;
  tp_cur.thread_num = t_num;
  tp_cur.avail_size = avail_size;
  int iter_mode = 0;
  for (int i=0; i<list_len; i++)
  {
    if (iter_mode == 0)
    {
      if (plist[i].thread_num == -1)
      {
        // reached the empty tail: insert here
        plist[i] = tp_cur;
        break;
      }
      else if (plist[i].thread_num == origin_num)
      {
        // found the entry's old slot: overwrite, then fix ordering upward
        plist[i] = tp_cur;
        iter_mode = 1;
      }
      else if (plist[i].avail_size < tp_cur.avail_size )
      {
        // shift smaller entries down to make room
        tp_swap  = plist[i] ;
        plist[i] = tp_cur ;
        tp_cur   = tp_swap ;
      }
    }
    else if (iter_mode == 1)
    {
      if (plist[i].thread_num == -1)
        break;
      else if (plist[i].avail_size > plist[i-1].avail_size)
      {
        tp_swap    = plist[i] ;
        plist[i]   = plist[i-1];
        plist[i-1] = tp_swap;
      }
      else
        break;
    }
  }
}

// Take one piece (1/t_size of what remains, at least one iteration) from
// thread t_num's local chunk. Returns 1 and sets [*plow,*phigh) on success,
// 0 if the chunk is empty. Must be called with the top lock held.
int getchunk(int (*pavail)[2] , TopList *plist , int t_num , int t_size , int *plow , int *phigh)
{
  int get_size = 0;
  int avail_size = pavail[t_num][1] - pavail[t_num][0];
  if ( avail_size > 0 )
  {
    *plow = pavail[t_num][0];
    get_size = avail_size / t_size;
    if (get_size == 0 )
      get_size = 1;
    *phigh = *plow + get_size;
    pavail[t_num][0] += get_size;
    avail_size = pavail[t_num][1] - pavail[t_num][0];
    updatetoplist(plist,t_size,t_num,avail_size);
    return 1;
  }
  else
    return 0;
}

// Get the next piece of work for thread t_num: from its own chunk if any
// remains, otherwise stolen from the thread at the head of the top list
// (the one with most remaining work). The whole operation is serialised by
// the lock. Returns 0 only when no work remains anywhere.
int dispatchwork(int (*pavail)[2] , omp_lock_t *plock , TopList *plist , \
                 int t_num , int t_size , int *plow , int *phigh)
{
  int result = 1;
  omp_set_lock(plock);
  if (getchunk(pavail,plist,t_num,t_size,plow,phigh) == 0)
  {
    if (plist[0].avail_size > 0)
    {
      if (getchunk(pavail,plist,plist[0].thread_num,t_size,plow,phigh) == 0)
        result = 0;
    }
    else
      result = 0;
  }
  omp_unset_lock(plock);
  return result ;
}
merge_tasks.c
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <omp.h> /* OpenMP Parallel Mergesort - Tasking * * @author: ANDREW VAILLANCOURT * 2019 */ void merge(int a[], int size, int temp[]); void insertion_sort(int a[], int size); void mergesort_serial(int a[], int size, int temp[], int thresh); void mergesort_parallel_omp(int a[], int size, int temp[], int threads, int thresh); void run_omp(int a[], int size, int temp[], int threads, int thresh); int main(int argc, char *argv[]) { if (argc != 4) { printf("Usage: %s array_size threshold num_threads\n", argv[0]); return 1; } int size = atoi(argv[1]); // Array size int thresh = atoi(argv[2]); // point at which sort switches to insertion int threads = atoi(argv[3]); // Requested number of threads double start, end; // Check nested parallelism availability omp_set_nested(1); if (omp_get_nested() != 1) { puts("Warning: Nested parallelism desired but unavailable"); } // Check processors and threads int processors = omp_get_num_procs(); // Available processors if (threads > processors) { printf("Warning: %d threads requested, will run_omp on %d processors available\n",threads, processors); omp_set_num_threads(threads); } int max_threads = omp_get_max_threads(); // Max available threads if (threads > max_threads) // Requested threads are more than max available { printf("Error: Cannot use %d threads, only %d threads available\n", threads, max_threads); return 1; } // Array allocation int *a = malloc(sizeof(int) * size); int *temp = malloc(sizeof(int) * size); if (a == NULL || temp == NULL) { printf("Error: Could not allocate array of size %d\n", size); return 1; } // array initialization int i; srand(314159); for (i = 0; i < size; i++) { a[i] = rand() % size; } // run sort and get time start = omp_get_wtime(); run_omp(a, size, temp, threads, thresh); end = omp_get_wtime(); printf("%.4f\n", end - start); // check sorted for (i = 1; i < size; i++) { if (!(a[i - 1] <= a[i])) { printf("Error: final array not sorted 
=> a[%d]=%d > a[%d]=%d\n", i - 1, a[i - 1], i, a[i]); return 1; } } return 0; } void run_omp(int a[], int size, int temp[], int threads, int thresh) { omp_set_nested(1); // Enable nested parallelism, if available mergesort_parallel_omp(a, size, temp, threads, thresh); } // OpenMP merge sort with given number of threads void mergesort_parallel_omp(int a[], int size, int temp[], int threads, int thresh) { if (threads == 1) { mergesort_serial(a, size, temp, thresh); } else if (threads > 1) { #pragma omp parallel { #pragma omp single nowait { #pragma omp task { mergesort_parallel_omp(a, size / 2, temp, threads / 2, thresh); } #pragma omp task { mergesort_parallel_omp(a + size / 2, size - size / 2, temp + size / 2, threads - threads / 2, thresh); } #pragma omp taskwait { merge(a, size, temp); } } } } else { printf("Error: %d threads\n", threads); return; } } // only called if num_threads = 1 void mergesort_serial(int a[], int size, int temp[], int thresh) { // Switch to insertion sort for small arrays if (size <= thresh) { insertion_sort(a, size); return; } mergesort_serial(a, size / 2, temp, thresh); mergesort_serial(a + size / 2, size - size / 2, temp, thresh); merge(a, size, temp); } void merge(int a[], int size, int temp[]) { int i1 = 0; int i2 = size / 2; int tempi = 0; while (i1 < size / 2 && i2 < size) { if (a[i1] < a[i2]) { temp[tempi] = a[i1]; i1++; } else { temp[tempi] = a[i2]; i2++; } tempi++; } while (i1 < size / 2) { temp[tempi] = a[i1]; i1++; tempi++; } while (i2 < size) { temp[tempi] = a[i2]; i2++; tempi++; } // Copy sorted temp array into main array, a memcpy(a, temp, size * sizeof(int)); } void insertion_sort(int a[], int size) { int i; for (i = 0; i < size; i++) { int j, v = a[i]; for (j = i - 1; j >= 0; j--) { if (a[j] <= v) break; a[j + 1] = a[j]; } a[j + 1] = v; } }
Example_collapse.3.c
/*
 * @@name:       collapse.3c
 * @@type:       C
 * @@compilable: yes
 * @@linkable:   no
 * @@expect:     success
 * @@version:    omp_3.0
 */
#include <omp.h>
#include <stdio.h>

/* External worker routine; deliberately left undefined (the example is
 * marked "linkable: no"). */
void work(int a, int j, int k);

/* OpenMP specification example: a collapse(2) worksharing loop over the
 * 3x2 (k,j) iteration space, distributed over 2 threads with
 * schedule(static,3), where the ordered construct forces the printf lines
 * to appear in sequential iteration order. */
void sub()
{
   /* NOTE(review): `a` is never initialized before being passed to work();
    * it carries an indeterminate value (kept as in the original example). */
   int j, k, a;

   #pragma omp parallel num_threads(2)
   {
      #pragma omp for collapse(2) ordered private(j,k) schedule(static,3)
      for (k=1; k<=3; k++)
         for (j=1; j<=2; j++)
         {
            #pragma omp ordered
            printf("%d %d %d\n", omp_get_thread_num(), k, j);
            /* end ordered */
            work(a,j,k);
         }
   }
}
rose_v1_regression01.c
/*
 * Contributed by Jeff Keasler
 *
 * Liao 2/10/2010
 *
 * ROSE autoPar regression input: the #pragma omp annotations below are the
 * tool-generated output under test — their exact clause lists (private /
 * firstprivate) are the point of the test and must not be "cleaned up".
 */
#include <omp.h>
typedef double real8;

/* For each of the len elements: build an 8-wide temporary from three rows
 * of c scaled by d, then accumulate an 8x8 outer-product-style update into
 * a. NOTE(review): the inner "parallel for" is nested inside another
 * parallel for — a nested parallel region as emitted by the tool. */
void foo(real8 *a,real8 *b,real8 *c,real8 *d,int len)
{
  int icol;
  int jrow;
  int l;

  for (l = 0; l <= len - 1; l += 1) {
    int l8 = l * 8;
    real8 e = d[l * 3 + 0];
    real8 f = d[l * 3 + 1];
    real8 g = d[l * 3 + 2];
    real8 h = b[l];
    real8 tmp[8];

#pragma omp parallel for private (icol) firstprivate (e,f,g)
    for (icol = 0; icol <= 7; icol += 1) {
      tmp[icol] = e * c[(icol + l8) * 4 + 1] + f * c[(icol + l8) * 4 + 2] + g * c[(icol + l8) * 4 + 3];
    }

#pragma omp parallel for private (icol,jrow) firstprivate (l8,h)
    for (jrow = 0; jrow <= 7; jrow += 1) {
      real8 hj1 = h * c[(jrow + l8) * 4];

#pragma omp parallel for private (icol) firstprivate (hj1)
      for (icol = 0; icol <= 7; icol += 1) {
        a[icol + (jrow + l8) * 8] += hj1 * tmp[icol];
      }
    }
  }
}
1.c
#include <math.h> #include <omp.h> #include <stdio.h> // 1 thread = 47.3 ms ± 2.0 ms // 2 thread = 66.5 ms ± 6.3 ms // 4 thread = 39.5 ms ± 2.2 ms // 8 thread = 41.8 ms ± 2.0 ms int main() { int result[1] = {0}; #pragma omp parallel for schedule(dynamic, 1) for (int i = 0; i < 1000000; i++) { result[0] += sin(i); } }
update_ops_named_Y.c
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif

#ifdef _USE_SIMD
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#endif

/*
 * Pauli-Y gate on one qubit of a state vector of dimension dim:
 * for each pair (|..0..>, |..1..>) on the target qubit,
 *   new_b0 = -i * old_b1,  new_b1 = i * old_b0
 * i.e. the matrix [[0,-i],[i,0]].
 *
 * NOTE(review): the *_unroll and *_simd variants called below have no
 * forward declarations (only the legacy names are declared, and those are
 * commented out) — in C this relies on definition order / implicit
 * declaration; confirm the build does not warn.
 */

//void Y_gate_old_single(UINT target_qubit_index, CTYPE *state, ITYPE dim);
//void Y_gate_old_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim);
//void Y_gate_single(UINT target_qubit_index, CTYPE *state, ITYPE dim);
//void Y_gate_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim);

/* Dispatcher: picks the SIMD variant when compiled with _USE_SIMD, and the
 * OpenMP-parallel variant when the state is large enough (dim >= 2^13) for
 * the threading overhead to pay off. */
void Y_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    //Y_gate_old_single(target_qubit_index, state, dim);
    //Y_gate_old_parallel(target_qubit_index, state, dim);
    //Y_gate_single(target_qubit_index, state, dim);
    //Y_gate_single_simd(target_qubit_index, state, dim);
    //Y_gate_single_unroll(target_qubit_index, state, dim);
    //Y_gate_parallel(target_qubit_index, state, dim);
    //return;

#ifdef _USE_SIMD
#ifdef _OPENMP
    UINT threshold = 13;
    if (dim < (((ITYPE)1) << threshold)) {
        Y_gate_single_simd(target_qubit_index, state, dim);
    }
    else {
        Y_gate_parallel_simd(target_qubit_index, state, dim);
    }
#else
    Y_gate_single_simd(target_qubit_index, state, dim);
#endif
#else
#ifdef _OPENMP
    UINT threshold = 13;
    if (dim < (((ITYPE)1) << threshold)) {
        Y_gate_single_unroll(target_qubit_index, state, dim);
    }
    else {
        Y_gate_parallel_unroll(target_qubit_index, state, dim);
    }
#else
    Y_gate_single_unroll(target_qubit_index, state, dim);
#endif
#endif
}

/* Scalar, single-threaded variant, manually unrolled by 2.
 * When the target is qubit 0 the amplitude pairs are adjacent, so iterate
 * over adjacent pairs directly; otherwise basis_index_0 is built by
 * splitting state_index at the target bit (mask_low/mask_high) and the
 * partner index differs only in that bit, and two consecutive pairs are
 * handled per iteration. */
void Y_gate_single_unroll(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 2;
    const ITYPE mask = (1ULL << target_qubit_index);
    const ITYPE mask_low = mask - 1;
    const ITYPE mask_high = ~mask_low;
    ITYPE state_index = 0;
    const CTYPE imag = 1.i;
    if (target_qubit_index == 0) {
        ITYPE basis_index;
        for (basis_index = 0; basis_index < dim; basis_index += 2) {
            CTYPE temp0 = state[basis_index];
            state[basis_index] = -imag * state[basis_index + 1];
            state[basis_index + 1] = imag * temp0;
        }
    }
    else {
        for (state_index = 0; state_index < loop_dim; state_index += 2) {
            ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
            ITYPE basis_index_1 = basis_index_0 + mask;
            CTYPE temp0 = state[basis_index_0];
            CTYPE temp1 = state[basis_index_0+1];
            state[basis_index_0] = -imag * state[basis_index_1];
            state[basis_index_0+1] = -imag * state[basis_index_1+1];
            state[basis_index_1] = imag * temp0;
            state[basis_index_1+1] = imag * temp1;
        }
    }
}

#ifdef _OPENMP
/* Same as Y_gate_single_unroll with the outer loops parallelized; each
 * iteration touches a disjoint pair of amplitudes, so no synchronization
 * is needed. */
void Y_gate_parallel_unroll(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 2;
    const ITYPE mask = (1ULL << target_qubit_index);
    const ITYPE mask_low = mask - 1;
    const ITYPE mask_high = ~mask_low;
    ITYPE state_index = 0;
    const CTYPE imag = 1.i;
    if (target_qubit_index == 0) {
        ITYPE basis_index;
#pragma omp parallel for
        for (basis_index = 0; basis_index < dim; basis_index += 2) {
            CTYPE temp0 = state[basis_index];
            state[basis_index] = -imag * state[basis_index + 1];
            state[basis_index + 1] = imag * temp0;
        }
    }
    else {
#pragma omp parallel for
        for (state_index = 0; state_index < loop_dim; state_index += 2) {
            ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
            ITYPE basis_index_1 = basis_index_0 + mask;
            CTYPE temp0 = state[basis_index_0];
            CTYPE temp1 = state[basis_index_0 + 1];
            state[basis_index_0] = -imag * state[basis_index_1];
            state[basis_index_0 + 1] = -imag * state[basis_index_1 + 1];
            state[basis_index_1] = imag * temp0;
            state[basis_index_1 + 1] = imag * temp1;
        }
    }
}
#endif

#ifdef _USE_SIMD
/* AVX2 variant. Each __m256d holds two CTYPE values as (re0,im0,re1,im1).
 * Multiplying by +/-i is done as: swap re/im within each complex
 * (_mm256_permute_pd with imm 5) then flip signs with the minus_* masks.
 * Writing data0 to ptr1 and data1 to ptr0 performs the pair swap.
 * For target qubit 0 the pair lives inside one register, so a full 4-lane
 * reversal (_mm256_permute4x64_pd imm 27) plus minus_half does swap and
 * +/-i in one go. */
void Y_gate_single_simd(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 2;
    const ITYPE mask = (1ULL << target_qubit_index);
    const ITYPE mask_low = mask - 1;
    const ITYPE mask_high = ~mask_low;
    ITYPE state_index = 0;
    //const CTYPE imag = 1.i;
    __m256d minus_even = _mm256_set_pd(1, -1, 1, -1);
    __m256d minus_odd = _mm256_set_pd(-1, 1, -1, 1);
    __m256d minus_half = _mm256_set_pd(1, -1, -1, 1);
    if (target_qubit_index == 0) {
        ITYPE basis_index = 0;
        for (basis_index = 0; basis_index < dim; basis_index += 2) {
            double* ptr0 = (double*)(state + basis_index);
            __m256d data0 = _mm256_loadu_pd(ptr0);
            data0 = _mm256_permute4x64_pd(data0, 27); // (3210) -> (0123) : 16+4*2+3=27
            data0 = _mm256_mul_pd(data0, minus_half);
            _mm256_storeu_pd(ptr0, data0);
        }
    }
    else {
        for (state_index = 0; state_index < loop_dim; state_index += 2) {
            ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
            ITYPE basis_index_1 = basis_index_0 + mask;
            double* ptr0 = (double*)(state + basis_index_0);
            double* ptr1 = (double*)(state + basis_index_1);
            __m256d data0 = _mm256_loadu_pd(ptr0);
            __m256d data1 = _mm256_loadu_pd(ptr1);
            data0 = _mm256_permute_pd(data0, 5); // (3210) -> (2301) : 4+1
            data1 = _mm256_permute_pd(data1, 5);
            data0 = _mm256_mul_pd(data0, minus_even);
            data1 = _mm256_mul_pd(data1, minus_odd);
            _mm256_storeu_pd(ptr1, data0);
            _mm256_storeu_pd(ptr0, data1);
        }
    }
}

#ifdef _OPENMP
/* AVX2 + OpenMP variant; identical lane arithmetic to Y_gate_single_simd
 * with the loops parallelized (disjoint pairs per iteration). */
void Y_gate_parallel_simd(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 2;
    const ITYPE mask = (1ULL << target_qubit_index);
    const ITYPE mask_low = mask - 1;
    const ITYPE mask_high = ~mask_low;
    ITYPE state_index = 0;
    //const CTYPE imag = 1.i;
    __m256d minus_even = _mm256_set_pd(1, -1, 1, -1);
    __m256d minus_odd = _mm256_set_pd(-1, 1, -1, 1);
    __m256d minus_half = _mm256_set_pd(1, -1, -1, 1);
    if (target_qubit_index == 0) {
        ITYPE basis_index = 0;
#pragma omp parallel for
        for (basis_index = 0; basis_index < dim; basis_index += 2) {
            double* ptr0 = (double*)(state + basis_index);
            __m256d data0 = _mm256_loadu_pd(ptr0);
            data0 = _mm256_permute4x64_pd(data0, 27); // (3210) -> (0123) : 16+4*2+3=27
            data0 = _mm256_mul_pd(data0, minus_half);
            _mm256_storeu_pd(ptr0, data0);
        }
    }
    else {
#pragma omp parallel for
        for (state_index = 0; state_index < loop_dim; state_index += 2) {
            ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
            ITYPE basis_index_1 = basis_index_0 + mask;
            double* ptr0 = (double*)(state + basis_index_0);
            double* ptr1 = (double*)(state + basis_index_1);
            __m256d data0 = _mm256_loadu_pd(ptr0);
            __m256d data1 = _mm256_loadu_pd(ptr1);
            data0 = _mm256_permute_pd(data0, 5); // (3210) -> (2301) : 4+1
            data1 = _mm256_permute_pd(data1, 5);
            data0 = _mm256_mul_pd(data0, minus_even);
            data1 = _mm256_mul_pd(data1, minus_odd);
            _mm256_storeu_pd(ptr1, data0);
            _mm256_storeu_pd(ptr0, data1);
        }
    }
}
#endif
#endif

/* Legacy implementations, kept disabled for reference.
 * NOTE(review): Y_gate_parallel and Y_gate_single below assign
 * state[basis_index_0] twice and never write state[basis_index_1] —
 * they were buggy when retired.

#ifdef _OPENMP
void Y_gate_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 2;
    const ITYPE mask = (1ULL << target_qubit_index);
    const ITYPE mask_low = mask - 1;
    const ITYPE mask_high = ~mask_low;
    ITYPE state_index = 0;
    const CTYPE imag = 1.i;
#pragma omp parallel for
    for (state_index = 0; state_index < loop_dim; ++state_index) {
        ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
        ITYPE basis_index_1 = basis_index_0 + mask;
        CTYPE temp = state[basis_index_0];
        state[basis_index_0] = -imag * state[basis_index_1];
        state[basis_index_0] = imag * temp;
    }
}
#endif

void Y_gate_old_single(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 2;
    const ITYPE mask = (1ULL << target_qubit_index);
    ITYPE state_index;
    for (state_index = 0; state_index < loop_dim; ++state_index) {
        ITYPE basis_index_0 = insert_zero_to_basis_index(state_index, mask, target_qubit_index);
        ITYPE basis_index_1 = basis_index_0 ^ mask;
        CTYPE cval_0 = state[basis_index_0];
        CTYPE cval_1 = state[basis_index_1];
        state[basis_index_0] = -cval_1 * 1.i;
        state[basis_index_1] = cval_0 * 1.i;
    }
}

void Y_gate_old_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 2;
    const ITYPE mask = (1ULL << target_qubit_index);
    ITYPE state_index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (state_index = 0; state_index < loop_dim; ++state_index) {
        ITYPE basis_index_0 = insert_zero_to_basis_index(state_index, mask, target_qubit_index);
        ITYPE basis_index_1 = basis_index_0 ^ mask;
        CTYPE cval_0 = state[basis_index_0];
        CTYPE cval_1 = state[basis_index_1];
        state[basis_index_0] = -cval_1 * 1.i;
        state[basis_index_1] = cval_0 * 1.i;
    }
}

void Y_gate_single(UINT target_qubit_index, CTYPE *state, ITYPE dim) {
    const ITYPE loop_dim = dim / 2;
    const ITYPE mask = (1ULL << target_qubit_index);
    const ITYPE mask_low = mask - 1;
    const ITYPE mask_high = ~mask_low;
    ITYPE state_index = 0;
    const CTYPE imag = 1.i;
    for (state_index = 0; state_index < loop_dim; ++state_index) {
        ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1);
        ITYPE basis_index_1 = basis_index_0 + mask;
        CTYPE temp = state[basis_index_0];
        state[basis_index_0] = - imag * state[basis_index_1];
        state[basis_index_0] = imag * temp;
    }
}
*/
dotProduct.h
#pragma once
#include <vector>
#include <algorithm>
#include <memory>
#include <omp.h>
#include "_cuda.h"
#include "ceilDiv.h"
#include "sum.h"

using std::vector;
using std::unique_ptr;
using std::max;




// Finds sum of element-by-element product of 2 vectors (sequential CPU).
// x, y: input arrays of length N. Returns sum(x[i] * y[i]).
template <class T>
T dotProduct(T *x, T *y, int N) {
  T a = T();
  for (int i=0; i<N; i++)
    a += x[i] * y[i];
  return a;
}

// Convenience overload for equally-sized vectors.
template <class T>
T dotProduct(vector<T>& x, vector<T>& y) {
  return dotProduct(x.data(), y.data(), (int) x.size());
}


// OpenMP variant: same contract as dotProduct(), loop parallelized
// with a sum reduction (deterministic result only up to FP reassociation).
template <class T>
T dotProductOmp(T *x, T *y, int N) {
  T a = T();
  #pragma omp parallel for reduction (+:a)
  for (int i=0; i<N; i++)
    a += x[i] * y[i];
  return a;
}

template <class T>
T dotProductOmp(vector<T>& x, vector<T>& y) {
  return dotProductOmp(x.data(), y.data(), (int) x.size());
}


// Grid-stride partial product: this thread starts at index i and
// steps by DI, so correctness is independent of the launch config.
template <class T>
__device__ T dotProductKernelLoop(T *x, T *y, int N, int i, int DI) {
  T a = T();
  for (; i<N; i+=DI)
    a += x[i] * y[i];
  return a;
}

// Each block reduces its threads' partial products in shared memory
// (sumKernelReduce) and writes one partial sum to a[blockIdx.x];
// the host finishes the reduction. Requires blockDim.x <= _THREADS.
template <class T>
__global__ void dotProductKernel(T *a, T *x, T *y, int N) {
  DEFINE(t, b, B, G);
  __shared__ T cache[_THREADS];

  cache[t] = dotProductKernelLoop(x, y, N, B*b+t, G*B);
  sumKernelReduce(cache, B, t);
  if (t == 0) a[b] = cache[0];
}

// Host wrapper: copies x, y to the device, launches one reduction
// kernel, and sums the per-block partials on the host.
template <class T>
T dotProductCuda(T *x, T *y, int N) {
  int threads = _THREADS;
  int blocks  = min(ceilDiv(N, threads), _BLOCKS);
  size_t X1 = N * sizeof(T);        // bytes for each input array
  size_t A1 = blocks * sizeof(T);   // bytes for the per-block partials
  // FIX: was `unique_ptr<T> a(new T[A1])` -- an array owned by the
  // scalar deleter (undefined behaviour on destruction) and sized in
  // bytes rather than elements. Use the array specialization and the
  // element count.
  unique_ptr<T[]> a(new T[blocks]);

  T *xD, *yD, *aD;
  TRY( cudaMalloc(&xD, X1) );
  TRY( cudaMalloc(&yD, X1) );
  TRY( cudaMalloc(&aD, A1) );
  TRY( cudaMemcpy(xD, x, X1, cudaMemcpyHostToDevice) );
  TRY( cudaMemcpy(yD, y, X1, cudaMemcpyHostToDevice) );

  dotProductKernel<<<blocks, threads>>>(aD, xD, yD, N);
  TRY( cudaGetLastError() );  // surface launch-configuration errors
  // Blocking copy below also synchronizes with the kernel.
  TRY( cudaMemcpy(a.get(), aD, A1, cudaMemcpyDeviceToHost) );

  TRY( cudaFree(yD) );
  TRY( cudaFree(xD) );
  TRY( cudaFree(aD) );
  return sum(a.get(), blocks);
}

template <class T>
T dotProductCuda(vector<T>& x, vector<T>& y) {
  return dotProductCuda(x.data(), y.data(), (int) x.size());
}
dim3.h
#pragma once #include <stdint.h> #include <sstream> #include <stk/math/int3.h> #ifdef STK_USE_CUDA #include <vector_types.h> #else // Otherwise defined in vector_types.h for CUDA struct dim3 { dim3(uint32_t vx = 1, uint32_t vy = 1, uint32_t vz = 1) : x(vx), y(vy), z(vz) {} uint32_t x; uint32_t y; uint32_t z; }; #endif inline bool operator==(const dim3& l, const dim3& r) { return (l.x == r.x && l.y == r.y && l.z == r.z); } inline bool operator!=(const dim3& l, const dim3& r) { return !operator==(l, r); } inline std::ostream& operator<<(std::ostream& s, const dim3& v) { s << '(' << v.x << ' ' << v.y << ' ' << v.z << ')'; return s; } /** Iterator to simplify iterating through a region. * * Given a dim3, it holds a int3 pointing to the current position. Advancing * the iterator pushes the pointer to the next position until reaching * {0, 0, dim3.z}, which is considered the end. * * Example: * The preferred way is to use C++11 range-based for loops: * * stk::Volume vol; * for (int3 p : vol.size()) { * vol(p) = 0: * } * * To use the iterators with OpenMP you'll have to manually set up the loop, and also * remember to use 'it < end()', rather than the typical 'it != end()'. * * Note: Windows only supports OpenMP 2.0, meaning there's no support for iterators. 
* * stk::Volume vol; * #pragma omp parallel for * for (auto it = begin(vol.size()); it < end(vol.size()); ++it) { * vol(*it) = 0; * } * * */ struct Dim3Iterator { dim3 _dim; int3 _p; Dim3Iterator(dim3 dim, int3 p) : _dim(dim), _p(p) {} inline const int3& operator*() { return _p; } inline const int3* operator->() { return &_p; } inline void operator++() { _p.x += 1; if (_p.x == (int)_dim.x) { _p.x = 0; _p.y += 1; if (_p.y == (int)_dim.y) { _p.y = 0; _p.z += 1; } } } inline bool operator!=(const Dim3Iterator& other) { return index() != other.index(); } // Required by OpenMP inline bool operator<(const Dim3Iterator& other) { return index() < other.index(); } // Required by OpenMP inline void operator+=(size_t n) { for (size_t i = 0; i < n; ++i) ++(*this); } inline size_t index() const { return _p.x + _p.y * _dim.x + _p.z * _dim.x * _dim.y; } }; // Required by OpenMP inline size_t operator-(const Dim3Iterator& a, const Dim3Iterator& b) { return a.index() - b.index(); } // Retrieves the begin iterator for Dim3Iterator. inline Dim3Iterator begin(const dim3& d) { return Dim3Iterator(d, int3{0, 0, 0}); } // Retrieves the end iterator for Dim3Iterator. // p{0,0,d.z} is defined as the end. inline Dim3Iterator end(const dim3& d) { return Dim3Iterator(d, int3{0, 0, int(d.z)}); } namespace stk { // Check whether the given point is inside the given range inline bool is_inside(const dim3& dims, const int3& p) { return (p.x >= 0 && p.x < int(dims.x) && p.y >= 0 && p.y < int(dims.y) && p.z >= 0 && p.z < int(dims.z)); } }
cvAdvDiff_bnd_omp.c
/* ----------------------------------------------------------------- * Programmer(s): Daniel Reynolds and Ting Yan @ SMU * Based on cvAdvDiff_bnd.c and parallelized with OpenMP * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2021, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Example problem: * * The following is a simple example problem with a banded Jacobian, * solved using CVODE. * The problem is the semi-discrete form of the advection-diffusion * equation in 2-D: * du/dt = d^2 u / dx^2 + .5 du/dx + d^2 u / dy^2 * on the rectangle 0 <= x <= 2, 0 <= y <= 1, and the time * interval 0 <= t <= 1. Homogeneous Dirichlet boundary conditions * are posed, and the initial condition is * u(x,y,t=0) = x(2-x)y(1-y)exp(5xy). * The PDE is discretized on a uniform MX+2 by MY+2 grid with * central differencing, and with boundary values eliminated, * leaving an ODE system of size NEQ = MX*MY. * This program solves the problem with the BDF method, Newton * iteration with the SUNBAND linear solver, and a user-supplied * Jacobian routine. * It uses scalar relative and absolute tolerances. * Output is printed at t = .1, .2, ..., 1. * Run statistics (optional outputs) are printed at the end. * * Optionally, we can set the number of threads from environment * variable or command line. 
To check the current value for number * of threads from environment: * % echo $OMP_NUM_THREADS * * Execution: * * To use the default value or the number of threads from the * environment value, run without arguments: * % ./cvAdvDiff_bnd_omp * The environment variable can be over-ridden with a command line * argument specifying the number of threads to use, e.g: * % ./cvAdvDiff_bnd_omp 5 * ----------------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <math.h> /* Header files with a description of contents */ #include <cvode/cvode.h> /* prototypes for CVODE fcts., consts. */ #include <nvector/nvector_openmp.h> /* serial N_Vector types, fcts., macros */ #include <sunmatrix/sunmatrix_band.h> /* access to band SUNMatrix */ #include <sunlinsol/sunlinsol_band.h> /* access to band SUNLinearSolver */ #include <sundials/sundials_types.h> /* definition of type realtype */ #ifdef _OPENMP #include <omp.h> #endif /* Problem Constants */ #define XMAX RCONST(2.0) /* domain boundaries */ #define YMAX RCONST(1.0) #define MX 10 /* mesh dimensions */ #define MY 5 #define NEQ MX*MY /* number of equations */ #define ATOL RCONST(1.0e-5) /* scalar absolute tolerance */ #define T0 RCONST(0.0) /* initial time */ #define T1 RCONST(0.1) /* first output time */ #define DTOUT RCONST(0.1) /* output time increment */ #define NOUT 10 /* number of output times */ #define ZERO RCONST(0.0) #define HALF RCONST(0.5) #define ONE RCONST(1.0) #define TWO RCONST(2.0) #define FIVE RCONST(5.0) /* User-defined vector access macro IJth */ /* IJth is defined in order to isolate the translation from the mathematical 2-dimensional structure of the dependent variable vector to the underlying 1-dimensional storage. IJth(vdata,i,j) references the element in the vdata array for u at mesh point (i,j), where 1 <= i <= MX, 1 <= j <= MY. The vdata array is obtained via the macro call vdata = NV_DATA_S(v), where v is an N_Vector. 
The variables are ordered by the y index j, then by the x index i. */ #define IJth(vdata,i,j) (vdata[(j-1) + (i-1)*MY]) /* Type : UserData (contains grid constants) */ typedef struct { realtype dx, dy, hdcoef, hacoef, vdcoef; int nthreads; } *UserData; /* Private Helper Functions */ static void SetIC(N_Vector u, UserData data); static void PrintHeader(realtype reltol, realtype abstol, realtype umax); static void PrintOutput(realtype t, realtype umax, long int nst); static void PrintFinalStats(void *cvode_mem); /* Private function to check function return values */ static int check_retval(void *returnvalue, const char *funcname, int opt); /* Functions Called by the Solver */ static int f(realtype t, N_Vector u, N_Vector udot, void *user_data); static int Jac(realtype t, N_Vector u, N_Vector fu, SUNMatrix J, void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3); /* *------------------------------- * Main Program *------------------------------- */ int main(int argc, char *argv[]) { SUNContext sunctx; realtype dx, dy, reltol, abstol, t, tout, umax; N_Vector u; UserData data; SUNMatrix A; SUNLinearSolver LS; void *cvode_mem; int iout, retval; long int nst; int num_threads; u = NULL; data = NULL; A = NULL; LS = NULL; cvode_mem = NULL; /* Create the SUNDIALS context */ retval = SUNContext_Create(NULL, &sunctx); if(check_retval(&retval, "SUNContext_Create", 1)) return(1); /* Set the number of threads to use */ num_threads = 1; /* default value */ #ifdef _OPENMP num_threads = omp_get_max_threads(); /* Overwrite with OMP_NUM_THREADS environment variable */ #endif if (argc > 1) /* overwrite with command line value, if supplied */ num_threads = (int) strtol(argv[1], NULL, 0); /* Create an OpenMP vector */ u = N_VNew_OpenMP(NEQ, num_threads, sunctx); /* Allocate u vector */ if(check_retval((void*)u, "N_VNew_OpenMP", 0)) return(1); reltol = ZERO; /* Set the tolerances */ abstol = ATOL; data = (UserData) malloc(sizeof *data); /* Allocate data memory */ 
if(check_retval((void *)data, "malloc", 2)) return(1); dx = data->dx = XMAX/(MX+1); /* Set grid coefficients in data */ dy = data->dy = YMAX/(MY+1); data->hdcoef = ONE/(dx*dx); data->hacoef = HALF/(TWO*dx); data->vdcoef = ONE/(dy*dy); data->nthreads = num_threads; SetIC(u, data); /* Initialize u vector */ /* Call CVodeCreate to create the solver memory and specify the * Backward Differentiation Formula */ cvode_mem = CVodeCreate(CV_BDF, sunctx); if(check_retval((void *)cvode_mem, "CVodeCreate", 0)) return(1); /* Call CVodeInit to initialize the integrator memory and specify the * user's right hand side function in u'=f(t,u), the inital time T0, and * the initial dependent variable vector u. */ retval = CVodeInit(cvode_mem, f, T0, u); if(check_retval(&retval, "CVodeInit", 1)) return(1); /* Call CVodeSStolerances to specify the scalar relative tolerance * and scalar absolute tolerance */ retval = CVodeSStolerances(cvode_mem, reltol, abstol); if (check_retval(&retval, "CVodeSStolerances", 1)) return(1); /* Set the pointer to user-defined data */ retval = CVodeSetUserData(cvode_mem, data); if(check_retval(&retval, "CVodeSetUserData", 1)) return(1); /* Create banded SUNMatrix for use in linear solves -- since this will be factored, set the storage bandwidth to be the sum of upper and lower bandwidths */ A = SUNBandMatrix(NEQ, MY, MY, sunctx); if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1); /* Create banded SUNLinearSolver object for use by CVode */ LS = SUNLinSol_Band(u, A, sunctx); if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1); /* Call CVodeSetLinearSolver to attach the matrix and linear solver to CVode */ retval = CVodeSetLinearSolver(cvode_mem, LS, A); if(check_retval(&retval, "CVodeSetLinearSolver", 1)) return(1); /* Set the user-supplied Jacobian routine Jac */ retval = CVodeSetJacFn(cvode_mem, Jac); if(check_retval(&retval, "CVodeSetJacFn", 1)) return(1); /* In loop over output points: call CVode, print results, test for errors */ umax = 
N_VMaxNorm(u); PrintHeader(reltol, abstol, umax); for(iout=1, tout=T1; iout <= NOUT; iout++, tout += DTOUT) { retval = CVode(cvode_mem, tout, u, &t, CV_NORMAL); if(check_retval(&retval, "CVode", 1)) break; umax = N_VMaxNorm(u); retval = CVodeGetNumSteps(cvode_mem, &nst); check_retval(&retval, "CVodeGetNumSteps", 1); PrintOutput(t, umax, nst); } PrintFinalStats(cvode_mem); /* Print some final statistics */ printf("num_threads = %i\n\n", num_threads); N_VDestroy(u); /* Free the u vector */ CVodeFree(&cvode_mem); /* Free the integrator memory */ SUNLinSolFree(LS); /* Free the linear solver memory */ SUNMatDestroy(A); /* Free the matrix memory */ free(data); /* Free the user data */ SUNContext_Free(&sunctx); return(0); } /* *------------------------------- * Functions called by the solver *------------------------------- */ /* f routine. Compute f(t,u). */ static int f(realtype t, N_Vector u,N_Vector udot, void *user_data) { realtype uij, udn, uup, ult, urt, hordc, horac, verdc, hdiff, hadv, vdiff; realtype *udata, *dudata; sunindextype i, j; UserData data; i = j = 0; udata = NV_DATA_OMP(u); dudata = NV_DATA_OMP(udot); /* Extract needed constants from data */ data = (UserData) user_data; hordc = data->hdcoef; horac = data->hacoef; verdc = data->vdcoef; /* Loop over all grid points. */ #pragma omp parallel for default(shared) private(j, i, uij, udn, uup, ult, urt, hdiff, hadv, vdiff) num_threads(data->nthreads) for (j=1; j <= MY; j++) { for (i=1; i <= MX; i++) { /* Extract u at x_i, y_j and four neighboring points */ uij = IJth(udata, i, j); udn = (j == 1) ? ZERO : IJth(udata, i, j-1); uup = (j == MY) ? ZERO : IJth(udata, i, j+1); ult = (i == 1) ? ZERO : IJth(udata, i-1, j); urt = (i == MX) ? ZERO : IJth(udata, i+1, j); /* Set diffusion and advection terms and load into udot */ hdiff = hordc*(ult - TWO*uij + urt); hadv = horac*(urt - ult); vdiff = verdc*(uup - TWO*uij + udn); IJth(dudata, i, j) = hdiff + hadv + vdiff; } } return(0); } /* Jacobian routine. 
Compute J(t,u). */ static int Jac(realtype t, N_Vector u, N_Vector fu, SUNMatrix J, void *user_data, N_Vector tmp1, N_Vector tmp2, N_Vector tmp3) { sunindextype i, j, k; realtype *kthCol, hordc, horac, verdc; UserData data; /* The components of f = udot that depend on u(i,j) are f(i,j), f(i-1,j), f(i+1,j), f(i,j-1), f(i,j+1), with df(i,j)/du(i,j) = -2 (1/dx^2 + 1/dy^2) df(i-1,j)/du(i,j) = 1/dx^2 + .25/dx (if i > 1) df(i+1,j)/du(i,j) = 1/dx^2 - .25/dx (if i < MX) df(i,j-1)/du(i,j) = 1/dy^2 (if j > 1) df(i,j+1)/du(i,j) = 1/dy^2 (if j < MY) */ i = j = 0; data = (UserData) user_data; hordc = data->hdcoef; horac = data->hacoef; verdc = data->vdcoef; #pragma omp parallel for collapse(2) default(shared) private(i, j, k, kthCol) num_threads(data->nthreads) for (j=1; j <= MY; j++) { for (i=1; i <= MX; i++) { k = j-1 + (i-1)*MY; kthCol = SUNBandMatrix_Column(J,k); /* set the kth column of J */ SM_COLUMN_ELEMENT_B(kthCol,k,k) = -TWO*(verdc+hordc); if (i != 1) SM_COLUMN_ELEMENT_B(kthCol,k-MY,k) = hordc + horac; if (i != MX) SM_COLUMN_ELEMENT_B(kthCol,k+MY,k) = hordc - horac; if (j != 1) SM_COLUMN_ELEMENT_B(kthCol,k-1,k) = verdc; if (j != MY) SM_COLUMN_ELEMENT_B(kthCol,k+1,k) = verdc; } } return(0); } /* *------------------------------- * Private helper functions *------------------------------- */ /* Set initial conditions in u vector */ static void SetIC(N_Vector u, UserData data) { sunindextype i, j; realtype x, y, dx, dy; realtype *udata; i = j = 0; /* Extract needed constants from data */ dx = data->dx; dy = data->dy; /* Set pointer to data array in vector u. 
*/ udata = NV_DATA_OMP(u); /* Load initial profile into u vector */ #pragma omp parallel for default(shared) private(j, i, y, x) for (j=1; j <= MY; j++) { y = j*dy; for (i=1; i <= MX; i++) { x = i*dx; IJth(udata,i,j) = x*(XMAX - x)*y*(YMAX - y)*exp(FIVE*x*y); } } } /* Print first lines of output (problem description) */ static void PrintHeader(realtype reltol, realtype abstol, realtype umax) { printf("\n2-D Advection-Diffusion Equation\n"); printf("Mesh dimensions = %d X %d\n", MX, MY); printf("Total system size = %d\n", NEQ); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("Tolerance parameters: reltol = %Lg abstol = %Lg\n\n", reltol, abstol); printf("At t = %Lg max.norm(u) =%14.6Le \n", T0, umax); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol); printf("At t = %g max.norm(u) =%14.6e \n", T0, umax); #else printf("Tolerance parameters: reltol = %g abstol = %g\n\n", reltol, abstol); printf("At t = %g max.norm(u) =%14.6e \n", T0, umax); #endif return; } /* Print current value */ static void PrintOutput(realtype t, realtype umax, long int nst) { #if defined(SUNDIALS_EXTENDED_PRECISION) printf("At t = %4.2Lf max.norm(u) =%14.6Le nst = %4ld\n", t, umax, nst); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst); #else printf("At t = %4.2f max.norm(u) =%14.6e nst = %4ld\n", t, umax, nst); #endif return; } /* Get and print some final statistics */ static void PrintFinalStats(void *cvode_mem) { int retval; long int nst, nfe, nsetups, netf, nni, ncfn, nje, nfeLS; retval = CVodeGetNumSteps(cvode_mem, &nst); check_retval(&retval, "CVodeGetNumSteps", 1); retval = CVodeGetNumRhsEvals(cvode_mem, &nfe); check_retval(&retval, "CVodeGetNumRhsEvals", 1); retval = CVodeGetNumLinSolvSetups(cvode_mem, &nsetups); check_retval(&retval, "CVodeGetNumLinSolvSetups", 1); retval = CVodeGetNumErrTestFails(cvode_mem, &netf); check_retval(&retval, 
"CVodeGetNumErrTestFails", 1); retval = CVodeGetNumNonlinSolvIters(cvode_mem, &nni); check_retval(&retval, "CVodeGetNumNonlinSolvIters", 1); retval = CVodeGetNumNonlinSolvConvFails(cvode_mem, &ncfn); check_retval(&retval, "CVodeGetNumNonlinSolvConvFails", 1); retval = CVodeGetNumJacEvals(cvode_mem, &nje); check_retval(&retval, "CVodeGetNumJacEvals", 1); retval = CVodeGetNumLinRhsEvals(cvode_mem, &nfeLS); check_retval(&retval, "CVodeGetNumLinRhsEvals", 1); printf("\nFinal Statistics:\n"); printf("nst = %-6ld nfe = %-6ld nsetups = %-6ld nfeLS = %-6ld nje = %ld\n", nst, nfe, nsetups, nfeLS, nje); printf("nni = %-6ld ncfn = %-6ld netf = %ld\n", nni, ncfn, netf); return; } /* Check function return value... opt == 0 means SUNDIALS function allocates memory so check if returned NULL pointer opt == 1 means SUNDIALS function returns an integer value so check if retval < 0 opt == 2 means function allocates memory so check if returned NULL pointer */ static int check_retval(void *returnvalue, const char *funcname, int opt) { int *retval; /* Check if SUNDIALS function returned NULL pointer - no memory allocated */ if (opt == 0 && returnvalue == NULL) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } /* Check if retval < 0 */ else if (opt == 1) { retval = (int *) returnvalue; if (*retval < 0) { fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n", funcname, *retval); return(1); }} /* Check if function returned NULL pointer - no memory allocated */ else if (opt == 2 && returnvalue == NULL) { fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n", funcname); return(1); } return(0); }
mg.pmc.c
/*--------------------------------------------------------------------

  NAS Parallel Benchmarks 3.0 structured OpenMP C versions - MG

  This benchmark is an OpenMP C version of the NPB MG code.

  The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran
  versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is
  performed by the UVSQ.

  Permission to use, copy, distribute and modify this software for any
  purpose with or without fee is hereby granted.
  This software is provided "as is" without express or implied warranty.

  Information on OpenMP activities at RWCP is available at:

           http://pdplab.trc.rwcp.or.jp/pdperf/Omni/

  Information on NAS Parallel Benchmarks 2.3 is available at:

           http://www.nas.nasa.gov/NAS/NPB/

--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
  Authors: E. Barszcz
           P. Frederickson
           A. Woo
           M. Yarrow
  OpenMP C version: S. Satoh
  3.0 structure translation: F. Conti

  This port runs the benchmark inside the Nautilus kernel as a shell
  command ("nas-mg"), optionally sampling a hardware performance
  counter (PMC) around the timed section.
--------------------------------------------------------------------*/

#include "../common/npb-C.h"
#include "globals.h"
#include "../math/nas_math.h"

#include <nautilus/nautilus.h>
#include <nautilus/shell.h>
#include <nautilus/pmc.h>
#include <nautilus/mm.h>

/* parameters */
#define T_BENCH 1
#define T_INIT 2

/* global variables */
/* common /grid/ */
static int is1, is2, is3, ie1, ie2, ie3;

/* functions prototypes */
static void setup(int *n1, int *n2, int *n3, int lt);
static void mg3P(double ****u, double ***v, double ****r, double a[4],
                 double c[4], int n1, int n2, int n3, int k);
static void psinv( double ***r, double ***u, int n1, int n2, int n3,
                   double c[4], int k);
static void resid( double ***u, double ***v, double ***r,
                   int n1, int n2, int n3, double a[4], int k );
static void rprj3( double ***r, int m1k, int m2k, int m3k,
                   double ***s, int m1j, int m2j, int m3j, int k );
static void interp( double ***z, int mm1, int mm2, int mm3,
                    double ***u, int n1, int n2, int n3, int k );
static void norm2u3(double ***r, int n1, int n2, int n3,
                    double *rnm2, double *rnmu, int nx, int ny, int nz);
static void rep_nrm(double ***u, int n1, int n2, int n3, char *title, int kk);
static void comm3(double ***u, int n1, int n2, int n3, int kk);
static void zran3(double ***z, int n1, int n2, int n3, int nx, int ny, int k);
static void showall(double ***z, int n1, int n2, int n3);
static double power( double a, int n );
static void bubble( double ten[M][2], int j1[M][2], int j2[M][2],
                    int j3[M][2], int m, int ind );
static void zero3(double ***z, int n1, int n2, int n3);
static void nonzero(double ***z, int n1, int n2, int n3);

/* Bump-allocator state: __m is the current allocation cursor inside one
   big mmap'd region, __o remembers the region base so _free() can release
   everything at once. Individual _malloc()s are never freed separately. */
static void * __m=0;
static void * __o=0;

/* Round x up to the next multiple of a (a must be a power of two). */
#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
//#define _malloc(n) ({ if (!__m) { __m = malloc(1UL<<33);__o=__m; if(!__m){printf("no __m\n"); }} void *__r = __m; unsigned long long __n = ALIGN(n, 16); __m+=__n; __r; })
#define N_PAGES (1024*1024*2UL*512UL)
/* Lazily maps one huge anonymous region on first use, then hands out
   16-byte-aligned slices. NOTE(review): no bounds check against N_PAGES --
   presumably the region is sized generously for the default class; confirm. */
#define _malloc(n) ({ if (!__m) { __m = mmap(0, N_PAGES, PROT_READ | PROT_WRITE,MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, 0, 0);__o=__m; if(!__m){printf("no __m\n"); }} void *__r = __m; unsigned long long __n = ALIGN(n, 16); __m+=__n; __r; })
//#define _malloc(n) malloc(n)
/* Releases the whole bump region in one call. */
#define _free() free(__o)

/*--------------------------------------------------------------------
      program mg
c-------------------------------------------------------------------*/

int program_MG(char *_buf, void* _priv);
int program_MG_profile(char *_, void *__);

/* Shell registration: "nas-mg [event#]" dispatches to program_MG_profile. */
static struct shell_cmd_impl nas_mg_impl = {
  .cmd = "nas-mg",
  .help_str = "NAS parallel benchmark MG",
  .handler = program_MG_profile,
};
nk_register_shell_cmd(nas_mg_impl);

/* Wrapper that brackets the benchmark with Nautilus instrumentation
   when profiling is compiled in; always returns 0. */
int program_MG_profile(char *_, void *__){
#ifdef NAUT_CONFIG_PROFILE
  nk_instrument_clear();
  nk_instrument_start();
#endif
  program_MG(_,__);
#ifdef NAUT_CONFIG_PROFILE
  nk_instrument_end();
  nk_instrument_query();
#endif
  return 0;
}

/* Benchmark driver: parses an optional PMC event number from the command
   buffer, sets up the MG problem, runs nit V-cycles, verifies the L2
   norm against the class reference value and prints results.
   NOTE(review): declared int but falls off the end without a return
   statement; callers ignore the value. */
int program_MG(char * _buf, void *_priv)
{
  /*-------------------------------------------------------------------------
  c k is the current level. It is passed down through subroutine args
  c and is NOT global. it is the current iteration
  c------------------------------------------------------------------------*/

  int k, it;
  double t, tinit, mflops;
  int nthreads = 1;

  int enable_pmc = 1;
  int choice = 0;
  /* PMC counting only when an event index was supplied on the command line. */
  if(sscanf(_buf,"nas-mg %d", &choice)!=1){
    enable_pmc = 0;
  }

  /*-------------------------------------------------------------------------
  c These arrays are in common because they are quite large
  c and probably shouldn't be allocated on the stack. They
  c are always passed as subroutine args.
  c------------------------------------------------------------------------*/
  double ****u, ***v, ****r;
  double a[4], c[4];

  double rnm2, rnmu;
  double epsilon = 1.0e-8;
  int n1, n2, n3, nit;
  double verify_value;
  boolean verified;

  int i, j, l;
  // FILE *fp;

  timer_clear(T_BENCH);
  timer_clear(T_INIT);

  timer_start(T_INIT);

  /*----------------------------------------------------------------------
  c Read in and broadcast input data
  c---------------------------------------------------------------------*/

  printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
         " - MG Benchmark\n\n");

  /* File-based configuration is disabled in-kernel; compiled defaults
     are used instead. */
  /*  fp = fopen("mg.input", "r"); */
  /*  if (fp != NULL) { */
  /*      printf(" Reading from input file mg.input\n"); */
  /*      fscanf(fp, "%d", &lt); */
  /*      while(fgetc(fp) != '\n'); */
  /*      fscanf(fp, "%d%d%d", &nx[lt], &ny[lt], &nz[lt]); */
  /*      while(fgetc(fp) != '\n'); */
  /*      fscanf(fp, "%d", &nit); */
  /*      while(fgetc(fp) != '\n'); */
  /*      for (i = 0; i <= 7; i++) { */
  /*          fscanf(fp, "%d", &debug_vec[i]); */
  /*      } */
  /*      fclose(fp); */
  /*  } else { */
  /*      printf(" No input file. Using compiled defaults\n"); */
  lt = LT_DEFAULT;
  nit = NIT_DEFAULT;
  nx[lt] = NX_DEFAULT;
  ny[lt] = NY_DEFAULT;
  nz[lt] = NZ_DEFAULT;
  for (i = 0; i <= 7; i++) {
    debug_vec[i] = DEBUG_DEFAULT;
  }
  //  }

  /* Classify the problem (S/W/A/B/C) from grid size and iteration count. */
  if ( (nx[lt] != ny[lt]) || (nx[lt] != nz[lt]) ) {
    Class = 'U';
  } else if( nx[lt] == 32 && nit == 4 ) {
    Class = 'S';
  } else if( nx[lt] == 64 && nit == 40 ) {
    Class = 'W';
  } else if( nx[lt] == 256 && nit == 20 ) {
    Class = 'B';
  } else if( nx[lt] == 512 && nit == 20 ) {
    Class = 'C';
  } else if( nx[lt] == 256 && nit == 4 ) {
    Class = 'A';
  } else {
    Class = 'U';
  }

  /*--------------------------------------------------------------------
  c Use these for debug info:
  c---------------------------------------------------------------------
  c     debug_vec(0) = 1 !=> report all norms
  c     debug_vec(1) = 1 !=> some setup information
  c     debug_vec(1) = 2 !=> more setup information
  c     debug_vec(2) = k => at level k or below, show result of resid
  c     debug_vec(3) = k => at level k or below, show result of psinv
  c     debug_vec(4) = k => at level k or below, show result of rprj
  c     debug_vec(5) = k => at level k or below, show result of interp
  c     debug_vec(6) = 1 => (unused)
  c     debug_vec(7) = 1 => (unused)
  c-------------------------------------------------------------------*/

  /* Residual stencil coefficients. */
  a[0] = -8.0/3.0;
  a[1] =  0.0;
  a[2] =  1.0/6.0;
  a[3] =  1.0/12.0;

  if (Class == 'A' || Class == 'S' || Class =='W') {
    /*--------------------------------------------------------------------
    c     Coefficients for the S(a) smoother
    c-------------------------------------------------------------------*/
    c[0] =  -3.0/8.0;
    c[1] =  1.0/32.0;
    c[2] =  -1.0/64.0;
    c[3] =  0.0;
  } else {
    /*--------------------------------------------------------------------
    c     Coefficients for the S(b) smoother
    c-------------------------------------------------------------------*/
    c[0] =  -3.0/17.0;
    c[1] =  1.0/33.0;
    c[2] =  -1.0/61.0;
    c[3] =  0.0;
  }

  lb = 1;

  setup(&n1,&n2,&n3,lt);

  /* Allocate the hierarchy of 3-D arrays: u and r at every level 1..lt,
     v only at the finest level lt. Grid sizes per level come from m1/m2/m3. */
  u = (double ****)malloc((lt+1)*sizeof(double ***));
  for (l = lt; l >=1; l--) {
    u[l] = (double ***)malloc(m3[l]*sizeof(double **));
    for (k = 0; k < m3[l]; k++) {
      u[l][k] = (double **)malloc(m2[l]*sizeof(double *));
      for (j = 0; j < m2[l]; j++) {
        u[l][k][j] = (double *)malloc(m1[l]*sizeof(double));
      }
    }
  }

  v = (double ***)malloc(m3[lt]*sizeof(double **));
  for (k = 0; k < m3[lt]; k++) {
    v[k] = (double **)malloc(m2[lt]*sizeof(double *));
    for (j = 0; j < m2[lt]; j++) {
      v[k][j] = (double *)malloc(m1[lt]*sizeof(double));
    }
  }

  r = (double ****)malloc((lt+1)*sizeof(double ***));
  for (l = lt; l >=1; l--) {
    r[l] = (double ***)malloc(m3[l]*sizeof(double **));
    for (k = 0; k < m3[l]; k++) {
      r[l][k] = (double **)malloc(m2[l]*sizeof(double *));
      for (j = 0; j < m2[l]; j++) {
        r[l][k][j] = (double *)malloc(m1[l]*sizeof(double));
      }
    }
  }

  zero3(u[lt],n1,n2,n3);
  zran3(v,n1,n2,n3,nx[lt],ny[lt],lt);

  norm2u3(v,n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]);

  /*  printf("\n norms of random v are\n");
      printf(" %4d%19.12e%19.12e\n", 0, rnm2, rnmu);
      printf(" about to evaluate resid, k= %d\n", lt);*/

  printf(" Size: %3dx%3dx%3d (class %1c)\n", nx[lt], ny[lt], nz[lt], Class);
  printf(" Iterations: %3d\n", nit);

  resid(u[lt],v,r[lt],n1,n2,n3,a,lt);
  norm2u3(r[lt],n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]);

  /*c---------------------------------------------------------------------
  c One iteration for startup
  c---------------------------------------------------------------------*/
  mg3P(u,v,r,a,c,n1,n2,n3,lt);
  resid(u[lt],v,r[lt],n1,n2,n3,a,lt);

  /* Re-initialize so the warm-up iteration does not influence the
     timed/verified run. */
  setup(&n1,&n2,&n3,lt);
  zero3(u[lt],n1,n2,n3);
  zran3(v,n1,n2,n3,nx[lt],ny[lt],lt);

  timer_stop(T_INIT);

  //PMC start
  perf_event_t *perf = nk_pmc_create(choice);
  long start_cnt = 0;
  if(enable_pmc){
    nk_pmc_start(perf);
    start_cnt = nk_pmc_read(perf);
  }
  //PMC

  timer_start(T_BENCH);
  resid(u[lt],v,r[lt],n1,n2,n3,a,lt);
  norm2u3(r[lt],n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]);

  /* Timed section: nit full multigrid V-cycles plus residual updates. */
  for ( it = 1; it <= nit; it++) {
    mg3P(u,v,r,a,c,n1,n2,n3,lt);
    resid(u[lt],v,r[lt],n1,n2,n3,a,lt);
  }

  norm2u3(r[lt],n1,n2,n3,&rnm2,&rnmu,nx[lt],ny[lt],nz[lt]);

  /* Record the actual OpenMP team size for the results report. */
#pragma omp parallel
  {
#if defined(_OPENMP)
#pragma omp master
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
  } /* end parallel */

  timer_stop(T_BENCH);

  //PMC END
  long stop_cnt = 0;
  if(enable_pmc){
    stop_cnt = nk_pmc_read(perf);
    nk_pmc_stop(perf);
    nk_pmc_destroy(perf);
  }
  //PMC

  t = timer_read(T_BENCH);
  tinit = timer_read(T_INIT);

  verified = FALSE;
  verify_value = 0.0;

  printf(" Initialization time: %15.3f seconds\n", tinit);
  printf(" Benchmark completed\n");

  /* Compare the final residual L2 norm against the published reference
     value for the detected class. */
  if (Class != 'U') {
    if (Class == 'S') {
      verify_value = 0.530770700573e-04;
    } else if (Class == 'W') {
      verify_value = 0.250391406439e-17;  /* 40 iterations*/
      /*  0.183103168997d-044 iterations*/
    } else if (Class == 'A') {
      verify_value = 0.2433365309e-5;
    } else if (Class == 'B') {
      verify_value = 0.180056440132e-5;
    } else if (Class == 'C') {
      verify_value = 0.570674826298e-06;
    }

    if ( fabs( rnm2 - verify_value ) <= epsilon ) {
      verified = TRUE;
      printf(" VERIFICATION SUCCESSFUL\n");
      printf(" L2 Norm is %20.12e\n", rnm2);
      printf(" Error is   %20.12e\n", rnm2 - verify_value);
    } else {
      verified = FALSE;
      printf(" VERIFICATION FAILED\n");
      printf(" L2 Norm is             %20.12e\n", rnm2);
      printf(" The correct L2 Norm is %20.12e\n", verify_value);
    }
  } else {
    verified = FALSE;
    printf(" Problem size unknown\n");
    printf(" NO VERIFICATION PERFORMED\n");
  }

  /* 58 flops per grid point per iteration (standard MG operation count). */
  if ( t != 0.0 ) {
    int nn = nx[lt]*ny[lt]*nz[lt];
    mflops = 58.*nit*nn*1.0e-6 / t;
  } else {
    mflops = 0.0;
  }

  c_print_results("MG", Class, nx[lt], ny[lt], nz[lt],
                  nit, nthreads, t, mflops, "          floating point",
                  verified, NPBVERSION, COMPILETIME,
                  CS1, CS2, CS3, CS4, CS5, CS6, CS7);

  //PMC print
  /* NOTE(review): only 7 of the 10 event-name slots are populated and
     `choice` is not range-checked against the table size. */
  if(enable_pmc){
    char intel_event[10][128] = {
      "Unhalted Core Cycles",
      "Instructions Retired",
      "Unhalted Reference Cycles",
      "LLC References",
      "LLC Misses",
      "Branch Instructions Retired",
      "Branch Misses Retired",
    };
    printf("%s : %ld \n",intel_event[choice],(stop_cnt-start_cnt));
  }

  _free();
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

static void setup(int
*n1, int *n2, int *n3, int lt) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int k; for ( k = lt-1; k >= 1; k--) { nx[k] = nx[k+1]/2; ny[k] = ny[k+1]/2; nz[k] = nz[k+1]/2; } for (k = 1; k <= lt; k++) { m1[k] = nx[k]+2; m2[k] = nz[k]+2; m3[k] = ny[k]+2; } is1 = 1; ie1 = nx[lt]; *n1 = nx[lt]+2; is2 = 1; ie2 = ny[lt]; *n2 = ny[lt]+2; is3 = 1; ie3 = nz[lt]; *n3 = nz[lt]+2; if (debug_vec[1] >= 1 ) { printf(" in setup, \n"); printf(" lt nx ny nz n1 n2 n3 is1 is2 is3 ie1 ie2 ie3\n"); printf("%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d%4d\n", lt,nx[lt],ny[lt],nz[lt],*n1,*n2,*n3,is1,is2,is3,ie1,ie2,ie3); } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void mg3P(double ****u, double ***v, double ****r, double a[4], double c[4], int n1, int n2, int n3, int k) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c multigrid V-cycle routine c-------------------------------------------------------------------*/ int j; /*-------------------------------------------------------------------- c down cycle. 
c restrict the residual from the find grid to the coarse c-------------------------------------------------------------------*/ for (k = lt; k >= lb+1; k--) { j = k-1; rprj3(r[k], m1[k], m2[k], m3[k], r[j], m1[j], m2[j], m3[j], k); } k = lb; /*-------------------------------------------------------------------- c compute an approximate solution on the coarsest grid c-------------------------------------------------------------------*/ zero3(u[k], m1[k], m2[k], m3[k]); psinv(r[k], u[k], m1[k], m2[k], m3[k], c, k); for (k = lb+1; k <= lt-1; k++) { j = k-1; /*-------------------------------------------------------------------- c prolongate from level k-1 to k c-------------------------------------------------------------------*/ zero3(u[k], m1[k], m2[k], m3[k]); interp(u[j], m1[j], m2[j], m3[j], u[k], m1[k], m2[k], m3[k], k); /*-------------------------------------------------------------------- c compute residual for level k c-------------------------------------------------------------------*/ resid(u[k], r[k], r[k], m1[k], m2[k], m3[k], a, k); /*-------------------------------------------------------------------- c apply smoother c-------------------------------------------------------------------*/ psinv(r[k], u[k], m1[k], m2[k], m3[k], c, k); } j = lt - 1; k = lt; interp(u[j], m1[j], m2[j], m3[j], u[lt], n1, n2, n3, k); resid(u[lt], v, r[lt], n1, n2, n3, a, k); psinv(r[lt], u[lt], n1, n2, n3, c, k); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void psinv( double ***r, double ***u, int n1, int n2, int n3, double c[4], int k) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c psinv applies an approximate inverse as smoother: u = u + Cr c c This implementation costs 15A + 4M per result, where c A 
and M denote the costs of Addition and Multiplication. c Presuming coefficient c(3) is zero (the NPB assumes this, c but it is thus not a general case), 2A + 1M may be eliminated, c resulting in 13A + 3M. c Note that this vectorizes, and is also fine for cache c based machines. c-------------------------------------------------------------------*/ int i3, i2, i1; double r1[M], r2[M]; #pragma omp parallel for default(shared) private(i1,i2,i3,r1,r2) for (i3 = 1; i3 < n3-1; i3++) { for (i2 = 1; i2 < n2-1; i2++) { for (i1 = 0; i1 < n1; i1++) { r1[i1] = r[i3][i2-1][i1] + r[i3][i2+1][i1] + r[i3-1][i2][i1] + r[i3+1][i2][i1]; r2[i1] = r[i3-1][i2-1][i1] + r[i3-1][i2+1][i1] + r[i3+1][i2-1][i1] + r[i3+1][i2+1][i1]; } for (i1 = 1; i1 < n1-1; i1++) { u[i3][i2][i1] = u[i3][i2][i1] + c[0] * r[i3][i2][i1] + c[1] * ( r[i3][i2][i1-1] + r[i3][i2][i1+1] + r1[i1] ) + c[2] * ( r2[i1] + r1[i1-1] + r1[i1+1] ); /*-------------------------------------------------------------------- c Assume c(3) = 0 (Enable line below if c(3) not= 0) c--------------------------------------------------------------------- c > + c(3) * ( r2(i1-1) + r2(i1+1) ) c-------------------------------------------------------------------*/ } } } /*-------------------------------------------------------------------- c exchange boundary points c-------------------------------------------------------------------*/ comm3(u,n1,n2,n3,k); if (debug_vec[0] >= 1 ) { rep_nrm(u,n1,n2,n3," psinv",k); } if ( debug_vec[3] >= k ) { showall(u,n1,n2,n3); } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void resid( double ***u, double ***v, double ***r, int n1, int n2, int n3, double a[4], int k ) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c resid computes the 
residual: r = v - Au c c This implementation costs 15A + 4M per result, where c A and M denote the costs of Addition (or Subtraction) and c Multiplication, respectively. c Presuming coefficient a(1) is zero (the NPB assumes this, c but it is thus not a general case), 3A + 1M may be eliminated, c resulting in 12A + 3M. c Note that this vectorizes, and is also fine for cache c based machines. c-------------------------------------------------------------------*/ int i3, i2, i1; double u1[M], u2[M]; #pragma omp parallel for default(shared) private(i1,i2,i3,u1,u2) for (i3 = 1; i3 < n3-1; i3++) { for (i2 = 1; i2 < n2-1; i2++) { for (i1 = 0; i1 < n1; i1++) { u1[i1] = u[i3][i2-1][i1] + u[i3][i2+1][i1] + u[i3-1][i2][i1] + u[i3+1][i2][i1]; u2[i1] = u[i3-1][i2-1][i1] + u[i3-1][i2+1][i1] + u[i3+1][i2-1][i1] + u[i3+1][i2+1][i1]; } for (i1 = 1; i1 < n1-1; i1++) { r[i3][i2][i1] = v[i3][i2][i1] - a[0] * u[i3][i2][i1] /*-------------------------------------------------------------------- c Assume a(1) = 0 (Enable 2 lines below if a(1) not= 0) c--------------------------------------------------------------------- c > - a(1) * ( u(i1-1,i2,i3) + u(i1+1,i2,i3) c > + u1(i1) ) c-------------------------------------------------------------------*/ - a[2] * ( u2[i1] + u1[i1-1] + u1[i1+1] ) - a[3] * ( u2[i1-1] + u2[i1+1] ); } } } /*-------------------------------------------------------------------- c exchange boundary data c--------------------------------------------------------------------*/ comm3(r,n1,n2,n3,k); if (debug_vec[0] >= 1 ) { rep_nrm(r,n1,n2,n3," resid",k); } if ( debug_vec[2] >= k ) { showall(r,n1,n2,n3); } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void rprj3( double ***r, int m1k, int m2k, int m3k, double ***s, int m1j, int m2j, int m3j, int k ) { /*-------------------------------------------------------------------- 
c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c rprj3 projects onto the next coarser grid, c using a trilinear Finite Element projection: s = r' = P r c c This implementation costs 20A + 4M per result, where c A and M denote the costs of Addition and Multiplication. c Note that this vectorizes, and is also fine for cache c based machines. c-------------------------------------------------------------------*/ int j3, j2, j1, i3, i2, i1, d1, d2, d3; double x1[M], y1[M], x2, y2; if (m1k == 3) { d1 = 2; } else { d1 = 1; } if (m2k == 3) { d2 = 2; } else { d2 = 1; } if (m3k == 3) { d3 = 2; } else { d3 = 1; } #pragma omp parallel for default(shared) private(j1,j2,j3,i1,i2,i3,x1,y1,x2,y2) for (j3 = 1; j3 < m3j-1; j3++) { i3 = 2*j3-d3; /*C i3 = 2*j3-1*/ for (j2 = 1; j2 < m2j-1; j2++) { i2 = 2*j2-d2; /*C i2 = 2*j2-1*/ for (j1 = 1; j1 < m1j; j1++) { i1 = 2*j1-d1; /*C i1 = 2*j1-1*/ x1[i1] = r[i3+1][i2][i1] + r[i3+1][i2+2][i1] + r[i3][i2+1][i1] + r[i3+2][i2+1][i1]; y1[i1] = r[i3][i2][i1] + r[i3+2][i2][i1] + r[i3][i2+2][i1] + r[i3+2][i2+2][i1]; } for (j1 = 1; j1 < m1j-1; j1++) { i1 = 2*j1-d1; /*C i1 = 2*j1-1*/ y2 = r[i3][i2][i1+1] + r[i3+2][i2][i1+1] + r[i3][i2+2][i1+1] + r[i3+2][i2+2][i1+1]; x2 = r[i3+1][i2][i1+1] + r[i3+1][i2+2][i1+1] + r[i3][i2+1][i1+1] + r[i3+2][i2+1][i1+1]; s[j3][j2][j1] = 0.5 * r[i3+1][i2+1][i1+1] + 0.25 * ( r[i3+1][i2+1][i1] + r[i3+1][i2+1][i1+2] + x2) + 0.125 * ( x1[i1] + x1[i1+2] + y2) + 0.0625 * ( y1[i1] + y1[i1+2] ); } } } comm3(s,m1j,m2j,m3j,k-1); if (debug_vec[0] >= 1 ) { rep_nrm(s,m1j,m2j,m3j," rprj3",k-1); } if (debug_vec[4] >= k ) { showall(s,m1j,m2j,m3j); } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void interp( double ***z, int mm1, int mm2, int mm3, double ***u, int n1, int n2, int n3, int k ) { 
/*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c interp adds the trilinear interpolation of the correction c from the coarser grid to the current approximation: u = u + Qu' c c Observe that this implementation costs 16A + 4M, where c A and M denote the costs of Addition and Multiplication. c Note that this vectorizes, and is also fine for cache c based machines. Vector machines may get slightly better c performance however, with 8 separate "do i1" loops, rather than 4. c-------------------------------------------------------------------*/ int i3, i2, i1, d1, d2, d3, t1, t2, t3; /* c note that m = 1037 in globals.h but for this only need to be c 535 to handle up to 1024^3 c integer m c parameter( m=535 ) */ double z1[M], z2[M], z3[M]; if ( n1 != 3 && n2 != 3 && n3 != 3 ) { #pragma omp parallel for default(shared) private(i1,i2,i3,z1,z2,z3) for (i3 = 0; i3 < mm3-1; i3++) { for (i2 = 0; i2 < mm2-1; i2++) { for (i1 = 0; i1 < mm1; i1++) { z1[i1] = z[i3][i2+1][i1] + z[i3][i2][i1]; z2[i1] = z[i3+1][i2][i1] + z[i3][i2][i1]; z3[i1] = z[i3+1][i2+1][i1] + z[i3+1][i2][i1] + z1[i1]; } for (i1 = 0; i1 < mm1-1; i1++) { u[2*i3][2*i2][2*i1] = u[2*i3][2*i2][2*i1] +z[i3][i2][i1]; u[2*i3][2*i2][2*i1+1] = u[2*i3][2*i2][2*i1+1] +0.5*(z[i3][i2][i1+1]+z[i3][i2][i1]); } for (i1 = 0; i1 < mm1-1; i1++) { u[2*i3][2*i2+1][2*i1] = u[2*i3][2*i2+1][2*i1] +0.5 * z1[i1]; u[2*i3][2*i2+1][2*i1+1] = u[2*i3][2*i2+1][2*i1+1] +0.25*( z1[i1] + z1[i1+1] ); } for (i1 = 0; i1 < mm1-1; i1++) { u[2*i3+1][2*i2][2*i1] = u[2*i3+1][2*i2][2*i1] +0.5 * z2[i1]; u[2*i3+1][2*i2][2*i1+1] = u[2*i3+1][2*i2][2*i1+1] +0.25*( z2[i1] + z2[i1+1] ); } for (i1 = 0; i1 < mm1-1; i1++) { u[2*i3+1][2*i2+1][2*i1] = u[2*i3+1][2*i2+1][2*i1] +0.25* z3[i1]; u[2*i3+1][2*i2+1][2*i1+1] = u[2*i3+1][2*i2+1][2*i1+1] +0.125*( z3[i1] + z3[i1+1] ); } } } } else { if (n1 == 3) { d1 = 
2; t1 = 1; } else { d1 = 1; t1 = 0; } if (n2 == 3) { d2 = 2; t2 = 1; } else { d2 = 1; t2 = 0; } if (n3 == 3) { d3 = 2; t3 = 1; } else { d3 = 1; t3 = 0; } #pragma omp parallel default(shared) private(i1,i2,i3) { #pragma omp for for ( i3 = d3; i3 <= mm3-1; i3++) { for ( i2 = d2; i2 <= mm2-1; i2++) { for ( i1 = d1; i1 <= mm1-1; i1++) { u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] = u[2*i3-d3-1][2*i2-d2-1][2*i1-d1-1] +z[i3-1][i2-1][i1-1]; } for ( i1 = 1; i1 <= mm1-1; i1++) { u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] = u[2*i3-d3-1][2*i2-d2-1][2*i1-t1-1] +0.5*(z[i3-1][i2-1][i1]+z[i3-1][i2-1][i1-1]); } } for ( i2 = 1; i2 <= mm2-1; i2++) { for ( i1 = d1; i1 <= mm1-1; i1++) { u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] = u[2*i3-d3-1][2*i2-t2-1][2*i1-d1-1] +0.5*(z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]); } for ( i1 = 1; i1 <= mm1-1; i1++) { u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] = u[2*i3-d3-1][2*i2-t2-1][2*i1-t1-1] +0.25*(z[i3-1][i2][i1]+z[i3-1][i2-1][i1] +z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]); } } } #pragma omp for nowait for ( i3 = 1; i3 <= mm3-1; i3++) { for ( i2 = d2; i2 <= mm2-1; i2++) { for ( i1 = d1; i1 <= mm1-1; i1++) { u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] = u[2*i3-t3-1][2*i2-d2-1][2*i1-d1-1] +0.5*(z[i3][i2-1][i1-1]+z[i3-1][i2-1][i1-1]); } for ( i1 = 1; i1 <= mm1-1; i1++) { u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] = u[2*i3-t3-1][2*i2-d2-1][2*i1-t1-1] +0.25*(z[i3][i2-1][i1]+z[i3][i2-1][i1-1] +z[i3-1][i2-1][i1]+z[i3-1][i2-1][i1-1]); } } for ( i2 = 1; i2 <= mm2-1; i2++) { for ( i1 = d1; i1 <= mm1-1; i1++) { u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] = u[2*i3-t3-1][2*i2-t2-1][2*i1-d1-1] +0.25*(z[i3][i2][i1-1]+z[i3][i2-1][i1-1] +z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]); } for ( i1 = 1; i1 <= mm1-1; i1++) { u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] = u[2*i3-t3-1][2*i2-t2-1][2*i1-t1-1] +0.125*(z[i3][i2][i1]+z[i3][i2-1][i1] +z[i3][i2][i1-1]+z[i3][i2-1][i1-1] +z[i3-1][i2][i1]+z[i3-1][i2-1][i1] +z[i3-1][i2][i1-1]+z[i3-1][i2-1][i1-1]); } } } } }//end #pragma omp parallel if (debug_vec[0] >= 1 ) { rep_nrm(z,mm1,mm2,mm3,"z: 
inter",k-1); rep_nrm(u,n1,n2,n3,"u: inter",k); } if ( debug_vec[5] >= k ) { showall(z,mm1,mm2,mm3); showall(u,n1,n2,n3); } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void norm2u3(double ***r, int n1, int n2, int n3, double *rnm2, double *rnmu, int nx, int ny, int nz) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c norm2u3 evaluates approximations to the L2 norm and the c uniform (or L-infinity or Chebyshev) norm, under the c assumption that the boundaries are periodic or zero. Add the c boundaries in with half weight (quarter weight on the edges c and eighth weight at the corners) for inhomogeneous boundaries. c-------------------------------------------------------------------*/ double s = 0.0; int i3, i2, i1, n; double a = 0.0, tmp = 0.0; n = nx*ny*nz; #pragma omp parallel for default(shared) private(i1,i2,i3,a) reduction(+:s) reduction(max:tmp) for (i3 = 1; i3 < n3-1; i3++) { for (i2 = 1; i2 < n2-1; i2++) { for (i1 = 1; i1 < n1-1; i1++) { s = s + r[i3][i2][i1] * r[i3][i2][i1]; a = fabs(r[i3][i2][i1]); if (a > tmp) tmp = a; } } } *rnmu = tmp; *rnm2 = sqrt(s/(double)n); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void rep_nrm(double ***u, int n1, int n2, int n3, char *title, int kk) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c report on norm c-------------------------------------------------------------------*/ double rnm2, rnmu; norm2u3(u,n1,n2,n3,&rnm2,&rnmu,nx[kk],ny[kk],nz[kk]); printf(" Level%2d in %8s: norms 
=%21.14e%21.14e\n", kk, title, rnm2, rnmu); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void comm3(double ***u, int n1, int n2, int n3, int kk) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c comm3 organizes the communication on all borders c-------------------------------------------------------------------*/ int i1, i2, i3; /* axis = 1 */ #pragma omp parallel default(shared) private(i1,i2,i3) { #pragma omp for for ( i3 = 1; i3 < n3-1; i3++) { for ( i2 = 1; i2 < n2-1; i2++) { u[i3][i2][n1-1] = u[i3][i2][1]; u[i3][i2][0] = u[i3][i2][n1-2]; } // } /* axis = 2 */ //#pragma omp for // for ( i3 = 1; i3 < n3-1; i3++) { for ( i1 = 0; i1 < n1; i1++) { u[i3][n2-1][i1] = u[i3][1][i1]; u[i3][0][i1] = u[i3][n2-2][i1]; } } /* axis = 3 */ #pragma omp for nowait for ( i2 = 0; i2 < n2; i2++) { for ( i1 = 0; i1 < n1; i1++) { u[n3-1][i2][i1] = u[1][i2][i1]; u[0][i2][i1] = u[n3-2][i2][i1]; } } }//end #pragma omp parallel } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void zran3(double ***z, int n1, int n2, int n3, int nx, int ny, int k) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c zran3 loads +1 at ten randomly chosen points, c loads -1 at a different ten random points, c and zero elsewhere. 
c-------------------------------------------------------------------*/ #define MM 10 static double __A=1220703125.e0; #define A __A #define X 314159265.e0 int i0, m0, m1; int i1, i2, i3, d1, e1, e2, e3; double xx, x0, x1, a1, a2, ai; double ten[MM][2], best; int i, j1[MM][2], j2[MM][2], j3[MM][2]; int jg[4][MM][2]; double rdummy; a1 = power( A, nx ); a2 = power( A, nx*ny ); zero3(z,n1,n2,n3); i = is1-1+nx*(is2-1+ny*(is3-1)); ai = power( A, i ); d1 = ie1 - is1 + 1; e1 = ie1 - is1 + 2; e2 = ie2 - is2 + 2; e3 = ie3 - is3 + 2; x0 = X; rdummy = randlc( &x0, ai ); for (i3 = 1; i3 < e3; i3++) { x1 = x0; for (i2 = 1; i2 < e2; i2++) { xx = x1; vranlc( d1, &xx, A, &(z[i3][i2][0])); rdummy = randlc( &x1, a1 ); } rdummy = randlc( &x0, a2 ); } /*-------------------------------------------------------------------- c call comm3(z,n1,n2,n3) c call showall(z,n1,n2,n3) c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c each processor looks for twenty candidates c-------------------------------------------------------------------*/ for (i = 0; i < MM; i++) { ten[i][1] = 0.0; j1[i][1] = 0; j2[i][1] = 0; j3[i][1] = 0; ten[i][0] = 1.0; j1[i][0] = 0; j2[i][0] = 0; j3[i][0] = 0; } for (i3 = 1; i3 < n3-1; i3++) { for (i2 = 1; i2 < n2-1; i2++) { for (i1 = 1; i1 < n1-1; i1++) { if ( z[i3][i2][i1] > ten[0][1] ) { ten[0][1] = z[i3][i2][i1]; j1[0][1] = i1; j2[0][1] = i2; j3[0][1] = i3; bubble( ten, j1, j2, j3, MM, 1 ); } if ( z[i3][i2][i1] < ten[0][0] ) { ten[0][0] = z[i3][i2][i1]; j1[0][0] = i1; j2[0][0] = i2; j3[0][0] = i3; bubble( ten, j1, j2, j3, MM, 0 ); } } } } /*-------------------------------------------------------------------- c Now which of these are globally best? 
c-------------------------------------------------------------------*/ i1 = MM - 1; i0 = MM - 1; for (i = MM - 1 ; i >= 0; i--) { best = z[j3[i1][1]][j2[i1][1]][j1[i1][1]]; if (best == z[j3[i1][1]][j2[i1][1]][j1[i1][1]]) { jg[0][i][1] = 0; jg[1][i][1] = is1 - 1 + j1[i1][1]; jg[2][i][1] = is2 - 1 + j2[i1][1]; jg[3][i][1] = is3 - 1 + j3[i1][1]; i1 = i1-1; } else { jg[0][i][1] = 0; jg[1][i][1] = 0; jg[2][i][1] = 0; jg[3][i][1] = 0; } ten[i][1] = best; best = z[j3[i0][0]][j2[i0][0]][j1[i0][0]]; if (best == z[j3[i0][0]][j2[i0][0]][j1[i0][0]]) { jg[0][i][0] = 0; jg[1][i][0] = is1 - 1 + j1[i0][0]; jg[2][i][0] = is2 - 1 + j2[i0][0]; jg[3][i][0] = is3 - 1 + j3[i0][0]; i0 = i0-1; } else { jg[0][i][0] = 0; jg[1][i][0] = 0; jg[2][i][0] = 0; jg[3][i][0] = 0; } ten[i][0] = best; } m1 = i1+1; m0 = i0+1; /* printf(" negative charges at"); for (i = 0; i < MM; i++) { if (i%5 == 0) printf("\n"); printf(" (%3d,%3d,%3d)", jg[1][i][0], jg[2][i][0], jg[3][i][0]); } printf("\n positive charges at"); for (i = 0; i < MM; i++) { if (i%5 == 0) printf("\n"); printf(" (%3d,%3d,%3d)", jg[1][i][1], jg[2][i][1], jg[3][i][1]); } printf("\n small random numbers were\n"); for (i = MM-1; i >= 0; i--) { printf(" %15.8e", ten[i][0]); } printf("\n and they were found on processor number\n"); for (i = MM-1; i >= 0; i--) { printf(" %4d", jg[0][i][0]); } printf("\n large random numbers were\n"); for (i = MM-1; i >= 0; i--) { printf(" %15.8e", ten[i][1]); } printf("\n and they were found on processor number\n"); for (i = MM-1; i >= 0; i--) { printf(" %4d", jg[0][i][1]); } printf("\n");*/ #pragma omp parallel for private(i2, i1) for (i3 = 0; i3 < n3; i3++) { for (i2 = 0; i2 < n2; i2++) { for (i1 = 0; i1 < n1; i1++) { z[i3][i2][i1] = 0.0; } } } for (i = MM-1; i >= m0; i--) { z[j3[i][0]][j2[i][0]][j1[i][0]] = -1.0; } for (i = MM-1; i >= m1; i--) { z[j3[i][1]][j2[i][1]][j1[i][1]] = 1.0; } comm3(z,n1,n2,n3,k); /*-------------------------------------------------------------------- c call showall(z,n1,n2,n3) 
c-------------------------------------------------------------------*/ } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void showall(double ***z, int n1, int n2, int n3) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i1,i2,i3; int m1, m2, m3; m1 = min(n1,18); m2 = min(n2,14); m3 = min(n3,18); printf("\n"); for (i3 = 0; i3 < m3; i3++) { for (i1 = 0; i1 < m1; i1++) { for (i2 = 0; i2 < m2; i2++) { printf("%6.3f", z[i3][i2][i1]); } printf("\n"); } printf(" - - - - - - - \n"); } printf("\n"); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static double power( double a, int n ) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c power raises an integer, disguised as a double c precision real, to an integer power c-------------------------------------------------------------------*/ double aj; int nj; double rdummy; double power; power = 1.0; nj = n; aj = a; while (nj != 0) { if( (nj%2) == 1 ) rdummy = randlc( &power, aj ); rdummy = randlc( &aj, aj ); nj = nj/2; } return (power); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void bubble( double ten[M][2], int j1[M][2], int j2[M][2], int j3[M][2], int m, int ind ) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c bubble does a bubble sort in direction dir 
c-------------------------------------------------------------------*/ double temp; int i, j_temp; if ( ind == 1 ) { for (i = 0; i < m-1; i++) { if ( ten[i][ind] > ten[i+1][ind] ) { temp = ten[i+1][ind]; ten[i+1][ind] = ten[i][ind]; ten[i][ind] = temp; j_temp = j1[i+1][ind]; j1[i+1][ind] = j1[i][ind]; j1[i][ind] = j_temp; j_temp = j2[i+1][ind]; j2[i+1][ind] = j2[i][ind]; j2[i][ind] = j_temp; j_temp = j3[i+1][ind]; j3[i+1][ind] = j3[i][ind]; j3[i][ind] = j_temp; } else { return; } } } else { for (i = 0; i < m-1; i++) { if ( ten[i][ind] < ten[i+1][ind] ) { temp = ten[i+1][ind]; ten[i+1][ind] = ten[i][ind]; ten[i][ind] = temp; j_temp = j1[i+1][ind]; j1[i+1][ind] = j1[i][ind]; j1[i][ind] = j_temp; j_temp = j2[i+1][ind]; j2[i+1][ind] = j2[i][ind]; j2[i][ind] = j_temp; j_temp = j3[i+1][ind]; j3[i+1][ind] = j3[i][ind]; j3[i][ind] = j_temp; } else { return; } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void zero3(double ***z, int n1, int n2, int n3) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i1, i2, i3; #pragma omp parallel for private(i1,i2,i3) for (i3 = 0;i3 < n3; i3++) { for (i2 = 0; i2 < n2; i2++) { for (i1 = 0; i1 < n1; i1++) { z[i3][i2][i1] = 0.0; } } } } /*---- end of program ------------------------------------------------*/
omp-parallel-nested.c
#include <omp.h>
#include <unistd.h>
#include <stdio.h>

#define THREADS 2

/*
 * Demonstrates two-level nested OpenMP parallelism.
 *
 * The outer region spawns THREADS threads; each outer thread spawns an
 * inner team of THREADS threads.  omp_set_max_active_levels(2) enables
 * the second active level (the modern replacement for the deprecated
 * omp_set_nested()).  Each inner thread prints its outer thread's id,
 * its own id and team size, and the current nesting level.
 */
int main(void)
{
    /* Allow up to two active (nested) parallel levels. */
    omp_set_max_active_levels(2);

#pragma omp parallel num_threads(THREADS)
    {
        /* Outer-team thread id; private to each outer thread, so it is
         * seen read-only by that thread's inner team below. */
        int outer_id = omp_get_thread_num();

#pragma omp parallel num_threads(THREADS)
        {
            int inner_id   = omp_get_thread_num();
            int team_size  = omp_get_num_threads();
            int level      = omp_get_level();

            printf("%d: %d/%d level=%d\n", outer_id, inner_id, team_size, level);
        }
    }
}
convolution_3x3_pack4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_pack4_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // 
interleave // src = 64-inch-outch // dst = 4b-4a-inch/4a-64-outch/4b; kernel_tm_pack4.create(2 * inch / 4, 64, (outch / 4) / 2 + (outch / 4) % 2, (size_t)2u * 16, 16); int q = 0; for (; q + 7 < outch; q += 8) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); const Mat k4 = kernel_tm.channel(q + 4); const Mat k5 = kernel_tm.channel(q + 5); const Mat k6 = kernel_tm.channel(q + 6); const Mat k7 = kernel_tm.channel(q + 7); Mat g0 = kernel_tm_pack4.channel(q / 8); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int p = 0; p + 3 < inch; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); const float* k40 = k4.row(p); const float* k41 = k4.row(p + 1); const float* k42 = k4.row(p + 2); const float* k43 = k4.row(p + 3); const float* k50 = k5.row(p); const float* k51 = k5.row(p + 1); const float* k52 = k5.row(p + 2); const float* k53 = k5.row(p + 3); const float* k60 = k6.row(p); const float* k61 = k6.row(p + 1); const float* k62 = k6.row(p + 2); const float* k63 = k6.row(p + 3); const float* k70 = k7.row(p); const float* k71 = k7.row(p + 1); const float* k72 = k7.row(p + 2); const float* k73 = k7.row(p + 3); g00[0] = (__fp16)k00[k]; g00[1] = (__fp16)k10[k]; g00[2] = (__fp16)k20[k]; g00[3] = (__fp16)k30[k]; g00[4] = (__fp16)k40[k]; g00[5] = (__fp16)k50[k]; g00[6] = (__fp16)k60[k]; g00[7] = (__fp16)k70[k]; g00[8] = (__fp16)k01[k]; g00[9] = (__fp16)k11[k]; g00[10] = 
// NOTE(review): the tokens below finish the preceding kernel-packing routine
// (its signature is above this chunk): they complete the 8-output-channel
// interleave, then handle the remaining output channels in groups of 4.
(__fp16)k21[k]; g00[11] = (__fp16)k31[k]; g00[12] = (__fp16)k41[k]; g00[13] = (__fp16)k51[k]; g00[14] = (__fp16)k61[k]; g00[15] = (__fp16)k71[k]; g00[16] = (__fp16)k02[k]; g00[17] = (__fp16)k12[k]; g00[18] = (__fp16)k22[k]; g00[19] = (__fp16)k32[k]; g00[20] = (__fp16)k42[k]; g00[21] = (__fp16)k52[k]; g00[22] = (__fp16)k62[k]; g00[23] = (__fp16)k72[k]; g00[24] = (__fp16)k03[k]; g00[25] = (__fp16)k13[k]; g00[26] = (__fp16)k23[k]; g00[27] = (__fp16)k33[k]; g00[28] = (__fp16)k43[k]; g00[29] = (__fp16)k53[k]; g00[30] = (__fp16)k63[k]; g00[31] = (__fp16)k73[k]; g00 += 32; } } } for (; q + 3 < outch; q += 4) { const Mat k0 = kernel_tm.channel(q); const Mat k1 = kernel_tm.channel(q + 1); const Mat k2 = kernel_tm.channel(q + 2); const Mat k3 = kernel_tm.channel(q + 3); Mat g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4); for (int k = 0; k < 64; k++) { __fp16* g00 = g0.row<__fp16>(k); for (int p = 0; p + 3 < inch; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); g00[0] = (__fp16)k00[k]; g00[1] = (__fp16)k10[k]; g00[2] = (__fp16)k20[k]; g00[3] = (__fp16)k30[k]; g00[4] = (__fp16)k01[k]; g00[5] = (__fp16)k11[k]; g00[6] = (__fp16)k21[k]; g00[7] = (__fp16)k31[k]; g00[8] = (__fp16)k02[k]; g00[9] = (__fp16)k12[k]; g00[10] = (__fp16)k22[k]; g00[11] = (__fp16)k32[k]; g00[12] = (__fp16)k03[k]; g00[13] = (__fp16)k13[k]; g00[14] = (__fp16)k23[k]; g00[15] = (__fp16)k33[k]; g00 += 16; } } } }
// conv3x3s1_winograd64_pack4_fp16sa_neon:
// 3x3 stride-1 convolution via the Winograd F(6x6,3x3) transform in fp16
// arithmetic on 4-channel-packed data.  The input is padded to a multiple of
// 6 plus a 2-pixel border, each 8x8 tile is transformed into 64 coefficients,
// multiplied against the pre-transformed kernel (kernel_tm), inverse
// transformed (8x8 -> 6x6), bias-added, and cropped back to the requested
// output size with copy_cut_border.
// NOTE(review): uses AArch64 fp16 NEON intrinsics and inline asm with
// .8h/.4h fmla, i.e. requires ARMv8.2-A FP16 arithmetic support — confirm
// the build guards this appropriately.
static void conv3x3s1_winograd64_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm,
const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); bottom_blob_tm.create(tiles, 64, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) {
// input transform: per input channel, each 8x8 tile is transformed row-wise
// into the tmp[][][4] scratch array and then column-wise into bottom_blob_tm
// (coefficients of the itm[8][8] matrix shown in the comment above).
const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); __fp16 tmp[8][8][4]; // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { float16x4_t _r00 = vld1_f16(r0); float16x4_t _r01 = vld1_f16(r0 + 4); float16x4_t _r02 = vld1_f16(r0 + 8); float16x4_t _r03 = vld1_f16(r0 + 12); float16x4_t _r04 = vld1_f16(r0 + 16); float16x4_t _r05 = vld1_f16(r0 + 20); float16x4_t _r06 = vld1_f16(r0 + 24); float16x4_t _r07 = vld1_f16(r0 + 28); float16x4_t _tmp0m = vfma_n_f16(vsub_f16(_r00, _r06), vsub_f16(_r04, _r02), 5.25f); float16x4_t _tmp7m = vfma_n_f16(vsub_f16(_r07, _r01), vsub_f16(_r03, _r05), 5.25f); vst1_f16(tmp[0][m], _tmp0m); vst1_f16(tmp[7][m], _tmp7m); // tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25; // tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25; float16x4_t _tmp12a = vfms_n_f16(vadd_f16(_r02, _r06), _r04, 4.25f); float16x4_t _tmp12b = vfms_n_f16(vadd_f16(_r01, _r05), _r03, 4.25f); // float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25); // float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25); float16x4_t _tmp1m = vadd_f16(_tmp12a, _tmp12b); float16x4_t _tmp2m = vsub_f16(_tmp12a, _tmp12b); vst1_f16(tmp[1][m], _tmp1m); vst1_f16(tmp[2][m], _tmp2m); // tmp[1][m] = tmp12a + tmp12b; // tmp[2][m] = tmp12a - tmp12b; float16x4_t _tmp34a = vfms_n_f16(vfma_n_f16(_r06, _r02, 0.25f), _r04, 1.25f); float16x4_t _tmp34b = vfma_n_f16(vfms_n_f16(vmul_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); // float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25); // float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2); float16x4_t _tmp3m = vadd_f16(_tmp34a, _tmp34b); float16x4_t _tmp4m = vsub_f16(_tmp34a, _tmp34b); vst1_f16(tmp[3][m], _tmp3m); vst1_f16(tmp[4][m], _tmp4m); // tmp[3][m] = tmp34a + tmp34b; // tmp[4][m] = tmp34a - tmp34b; float16x4_t _tmp56a = vfma_n_f16(_r06, vfms_n_f16(_r02, _r04, 1.25f), 4.f); float16x4_t _tmp56b = 
vfma_n_f16(vfms_n_f16(vmul_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); // float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4); // float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5); float16x4_t _tmp5m = vadd_f16(_tmp56a, _tmp56b); float16x4_t _tmp6m = vsub_f16(_tmp56a, _tmp56b); vst1_f16(tmp[5][m], _tmp5m); vst1_f16(tmp[6][m], _tmp6m); // tmp[5][m] = tmp56a + tmp56b; // tmp[6][m] = tmp56a - tmp56b; r0 += w * 4; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tm / 8 + j) * 4; __fp16* r0_tm_1 = r0_tm_0 + tiles * 4; __fp16* r0_tm_2 = r0_tm_0 + tiles * 8; __fp16* r0_tm_3 = r0_tm_0 + tiles * 12; __fp16* r0_tm_4 = r0_tm_0 + tiles * 16; __fp16* r0_tm_5 = r0_tm_0 + tiles * 20; __fp16* r0_tm_6 = r0_tm_0 + tiles * 24; __fp16* r0_tm_7 = r0_tm_0 + tiles * 28; for (int m = 0; m < 8; m++) { float16x4_t _tmp00 = vld1_f16(tmp[m][0]); float16x4_t _tmp01 = vld1_f16(tmp[m][1]); float16x4_t _tmp02 = vld1_f16(tmp[m][2]); float16x4_t _tmp03 = vld1_f16(tmp[m][3]); float16x4_t _tmp04 = vld1_f16(tmp[m][4]); float16x4_t _tmp05 = vld1_f16(tmp[m][5]); float16x4_t _tmp06 = vld1_f16(tmp[m][6]); float16x4_t _tmp07 = vld1_f16(tmp[m][7]); float16x4_t _r0tm0 = vfma_n_f16(vsub_f16(_tmp00, _tmp06), vsub_f16(_tmp04, _tmp02), 5.25f); float16x4_t _r0tm7 = vfma_n_f16(vsub_f16(_tmp07, _tmp01), vsub_f16(_tmp03, _tmp05), 5.25f); // r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25; // r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25; float16x4_t _tmp12a = vfms_n_f16(vadd_f16(_tmp02, _tmp06), _tmp04, 4.25f); float16x4_t _tmp12b = vfms_n_f16(vadd_f16(_tmp01, _tmp05), _tmp03, 4.25f); // float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25); // float tmp12b = (tmp0[1] + tmp0[5] - tmp0[3] * 4.25); float16x4_t _r0tm1 = vadd_f16(_tmp12a, _tmp12b); float16x4_t _r0tm2 = vsub_f16(_tmp12a, _tmp12b); // r0_tm[1] = tmp12a + tmp12b; // r0_tm[2] = tmp12a - tmp12b; float16x4_t _tmp34a = vfms_n_f16(vfma_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float16x4_t _tmp34b = 
vfma_n_f16(vfms_n_f16(vmul_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); // float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25); // float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2); float16x4_t _r0tm3 = vadd_f16(_tmp34a, _tmp34b); float16x4_t _r0tm4 = vsub_f16(_tmp34a, _tmp34b); // r0_tm[3] = tmp34a + tmp34b; // r0_tm[4] = tmp34a - tmp34b; float16x4_t _tmp56a = vfma_n_f16(_tmp06, vfms_n_f16(_tmp02, _tmp04, 1.25f), 4.f); float16x4_t _tmp56b = vfma_n_f16(vfms_n_f16(vmul_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); // float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4); // float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5); float16x4_t _r0tm5 = vadd_f16(_tmp56a, _tmp56b); float16x4_t _r0tm6 = vsub_f16(_tmp56a, _tmp56b); // r0_tm[5] = tmp56a + tmp56b; // r0_tm[6] = tmp56a - tmp56b; vst1_f16(r0_tm_0, _r0tm0); vst1_f16(r0_tm_1, _r0tm1); vst1_f16(r0_tm_2, _r0tm2); vst1_f16(r0_tm_3, _r0tm3); vst1_f16(r0_tm_4, _r0tm4); vst1_f16(r0_tm_5, _r0tm5); vst1_f16(r0_tm_6, _r0tm6); vst1_f16(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 32; r0_tm_1 += tiles * 32; r0_tm_2 += tiles * 32; r0_tm_3 += tiles * 32; r0_tm_4 += tiles * 32; r0_tm_5 += tiles * 32; r0_tm_6 += tiles * 32; r0_tm_7 += tiles * 32; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 2u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 
0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 7 < tiles; i += 8) { __fp16* tm2p = tm2.row<__fp16>(i / 8); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); r0 += bottom_blob_tm.cstep * 4; } } for (; i + 3 < tiles; i += 4) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x4 asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld4 {v0.4h, v1.4h, v2.4h, v3.4h}, [%0] \n" "st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); r0 += bottom_blob_tm.cstep * 4; } } for (; i < tiles; i++) { __fp16* tm2p = tm2.row<__fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #64] \n" "ld1 {v0.4h}, [%0] \n" "st1 {v0.4h}, [%1], #8 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); r0 += bottom_blob_tm.cstep * 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 2u * elempack, elempack, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; __fp16* output0_tm = top_blob_tm.channel(p); __fp16* output1_tm = top_blob_tm.channel(p + 1); const Mat kernel01_tm = kernel_tm.channel(pp); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = 
bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // k0123 "fmla v24.8h, v4.8h, v0.h[0] \n" "fmla v25.8h, v4.8h, v0.h[1] \n" "fmla v26.8h, v4.8h, v0.h[2] \n" "fmla v27.8h, v4.8h, v0.h[3] \n" "fmla v28.8h, v4.8h, v0.h[4] \n" "fmla v29.8h, v4.8h, v0.h[5] \n" "fmla v30.8h, v4.8h, v0.h[6] \n" "fmla v31.8h, v4.8h, v0.h[7] \n" "fmla v24.8h, v5.8h, v1.h[0] \n" "fmla v25.8h, v5.8h, v1.h[1] \n" "fmla v26.8h, v5.8h, v1.h[2] \n" "fmla v27.8h, v5.8h, v1.h[3] \n" "fmla v28.8h, v5.8h, v1.h[4] \n" "fmla v29.8h, v5.8h, v1.h[5] \n" "fmla v30.8h, v5.8h, v1.h[6] \n" "fmla v31.8h, v5.8h, v1.h[7] \n" "fmla v24.8h, v6.8h, v2.h[0] \n" "fmla v25.8h, v6.8h, v2.h[1] \n" "fmla v26.8h, v6.8h, v2.h[2] \n" "fmla v27.8h, v6.8h, v2.h[3] \n" "fmla v28.8h, v6.8h, v2.h[4] \n" "fmla v29.8h, v6.8h, v2.h[5] \n" "fmla v30.8h, v6.8h, v2.h[6] \n" "fmla v31.8h, v6.8h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v7.8h, v3.h[0] \n" "fmla v25.8h, v7.8h, v3.h[1] \n" "fmla v26.8h, v7.8h, v3.h[2] \n" "fmla v27.8h, v7.8h, v3.h[3] \n" "fmla v28.8h, v7.8h, v3.h[4] \n" "fmla v29.8h, v7.8h, v3.h[5] \n" "fmla v30.8h, v7.8h, v3.h[6] \n" "fmla v31.8h, v7.8h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "ext v28.16b, v28.16b, v28.16b, #8 
\n" "ext v29.16b, v29.16b, v29.16b, #8 \n" "ext v30.16b, v30.16b, v30.16b, #8 \n" "ext v31.16b, v31.16b, v31.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(kptr) // %4 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%3], #32 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%4], #64 \n" // k0123 "fmla v24.8h, v4.8h, v0.h[0] \n" "fmla v25.8h, v4.8h, v0.h[1] \n" "fmla v26.8h, v4.8h, v0.h[2] \n" "fmla v27.8h, v4.8h, v0.h[3] \n" "fmla v24.8h, v5.8h, v1.h[0] \n" "fmla v25.8h, v5.8h, v1.h[1] \n" "fmla v26.8h, v5.8h, v1.h[2] \n" "fmla v27.8h, v5.8h, v1.h[3] \n" "fmla v24.8h, v6.8h, v2.h[0] \n" "fmla v25.8h, v6.8h, v2.h[1] \n" "fmla v26.8h, v6.8h, v2.h[2] \n" "fmla v27.8h, v6.8h, v2.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v7.8h, v3.h[0] \n" "fmla v25.8h, v7.8h, v3.h[1] \n" "fmla v26.8h, v7.8h, v3.h[2] \n" "fmla v27.8h, v7.8h, v3.h[3] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "ext v24.16b, v24.16b, v24.16b, #8 \n" "ext v25.16b, v25.16b, v25.16b, #8 \n" "ext v26.16b, v26.16b, v26.16b, #8 \n" "ext v27.16b, v27.16b, v27.16b, #8 \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(r0), // %3 "=r"(kptr) // %4 : "0"(nn), 
"1"(output0_tm), "2"(output1_tm), "3"(r0), "4"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel01_tm.row<const __fp16>(r); float16x8_t _sum0 = vdupq_n_f16(0.f); for (int q = 0; q < inch; q++) { float16x4_t _r0 = vld1_f16(r0); float16x8_t _k0 = vld1q_f16(kptr); float16x8_t _k1 = vld1q_f16(kptr + 8); float16x8_t _k2 = vld1q_f16(kptr + 16); float16x8_t _k3 = vld1q_f16(kptr + 24); _sum0 = vfmaq_lane_f16(_sum0, _k0, _r0, 0); _sum0 = vfmaq_lane_f16(_sum0, _k1, _r0, 1); _sum0 = vfmaq_lane_f16(_sum0, _k2, _r0, 2); _sum0 = vfmaq_lane_f16(_sum0, _k3, _r0, 3); kptr += 32; r0 += 4; } vst1_f16(output0_tm, vget_low_f16(_sum0)); vst1_f16(output1_tm, vget_high_f16(_sum0)); output0_tm += 4; output1_tm += 4; } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { __fp16* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 2 + p % 2); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 7 < tiles; i += 8) { const __fp16* r0 = bb2.row<const __fp16>(i / 8); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // k0123 "fmla v24.4h, v4.4h, v0.h[0] \n" "fmla v25.4h, v4.4h, v0.h[1] \n" "fmla v26.4h, v4.4h, v0.h[2] \n" "fmla v27.4h, v4.4h, v0.h[3] \n" "fmla v28.4h, v4.4h, v0.h[4] 
\n" "fmla v29.4h, v4.4h, v0.h[5] \n" "fmla v30.4h, v4.4h, v0.h[6] \n" "fmla v31.4h, v4.4h, v0.h[7] \n" "fmla v24.4h, v5.4h, v1.h[0] \n" "fmla v25.4h, v5.4h, v1.h[1] \n" "fmla v26.4h, v5.4h, v1.h[2] \n" "fmla v27.4h, v5.4h, v1.h[3] \n" "fmla v28.4h, v5.4h, v1.h[4] \n" "fmla v29.4h, v5.4h, v1.h[5] \n" "fmla v30.4h, v5.4h, v1.h[6] \n" "fmla v31.4h, v5.4h, v1.h[7] \n" "fmla v24.4h, v6.4h, v2.h[0] \n" "fmla v25.4h, v6.4h, v2.h[1] \n" "fmla v26.4h, v6.4h, v2.h[2] \n" "fmla v27.4h, v6.4h, v2.h[3] \n" "fmla v28.4h, v6.4h, v2.h[4] \n" "fmla v29.4h, v6.4h, v2.h[5] \n" "fmla v30.4h, v6.4h, v2.h[6] \n" "fmla v31.4h, v6.4h, v2.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v7.4h, v3.h[0] \n" "fmla v25.4h, v7.4h, v3.h[1] \n" "fmla v26.4h, v7.4h, v3.h[2] \n" "fmla v27.4h, v7.4h, v3.h[3] \n" "fmla v28.4h, v7.4h, v3.h[4] \n" "fmla v29.4h, v7.4h, v3.h[5] \n" "fmla v30.4h, v7.4h, v3.h[6] \n" "fmla v31.4h, v7.4h, v3.h[7] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < tiles; i += 4) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); int nn = inch; // inch always > 0 asm volatile( "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n" // r01 r23 r45 r67 "prfm pldl1keep, [%3, #256] \n" "ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%3], #32 \n" // k0123 "fmla v24.4h, v4.4h, v0.h[0] \n" "fmla v25.4h, v4.4h, v0.h[1] \n" "fmla v26.4h, v4.4h, v0.h[2] \n" "fmla v27.4h, v4.4h, v0.h[3] \n" "fmla v24.4h, v5.4h, v1.h[0] \n" "fmla v25.4h, 
v5.4h, v1.h[1] \n" "fmla v26.4h, v5.4h, v1.h[2] \n" "fmla v27.4h, v5.4h, v1.h[3] \n" "fmla v24.4h, v6.4h, v2.h[0] \n" "fmla v25.4h, v6.4h, v2.h[1] \n" "fmla v26.4h, v6.4h, v2.h[2] \n" "fmla v27.4h, v6.4h, v2.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v7.4h, v3.h[0] \n" "fmla v25.4h, v7.4h, v3.h[1] \n" "fmla v26.4h, v7.4h, v3.h[2] \n" "fmla v27.4h, v7.4h, v3.h[3] \n" "bne 0b \n" "st1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(r0), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(output0_tm), "2"(r0), "3"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v24", "v25", "v26", "v27"); } for (; i < tiles; i++) { const __fp16* r0 = bb2.row<const __fp16>(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel0_tm.row<const __fp16>(r); float16x4_t _sum0 = vdup_n_f16(0.f); for (int q = 0; q < inch; q++) { float16x4_t _r0 = vld1_f16(r0); float16x4_t _k0 = vld1_f16(kptr); float16x4_t _k1 = vld1_f16(kptr + 4); float16x4_t _k2 = vld1_f16(kptr + 8); float16x4_t _k3 = vld1_f16(kptr + 12); _sum0 = vfma_lane_f16(_sum0, _k0, _r0, 0); _sum0 = vfma_lane_f16(_sum0, _k1, _r0, 1); _sum0 = vfma_lane_f16(_sum0, _k2, _r0, 2); _sum0 = vfma_lane_f16(_sum0, _k3, _r0, 3); kptr += 16; r0 += 4; } vst1_f16(output0_tm, _sum0); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, 2u * 4, 4, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + 
(r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; float16x4_t _bias0 = bias ? vld1_f16((const __fp16*)bias + p * 4) : vdup_n_f16(0.f); __fp16 tmp[6][8][4]; // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tm / 8 + j) * 4; const __fp16* output0_tm_1 = output0_tm_0 + tiles * 4; const __fp16* output0_tm_2 = output0_tm_0 + tiles * 8; const __fp16* output0_tm_3 = output0_tm_0 + tiles * 12; const __fp16* output0_tm_4 = output0_tm_0 + tiles * 16; const __fp16* output0_tm_5 = output0_tm_0 + tiles * 20; const __fp16* output0_tm_6 = output0_tm_0 + tiles * 24; const __fp16* output0_tm_7 = output0_tm_0 + tiles * 28; __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 4; // TODO neon optimize for (int m = 0; m < 8; m++) { float16x4_t _out0tm0 = vld1_f16(output0_tm_0); float16x4_t _out0tm1 = vld1_f16(output0_tm_1); float16x4_t _out0tm2 = vld1_f16(output0_tm_2); float16x4_t _out0tm3 = vld1_f16(output0_tm_3); float16x4_t _out0tm4 = vld1_f16(output0_tm_4); float16x4_t _out0tm5 = vld1_f16(output0_tm_5); float16x4_t _out0tm6 = vld1_f16(output0_tm_6); float16x4_t _out0tm7 = vld1_f16(output0_tm_7); float16x4_t _tmp024a = vadd_f16(_out0tm1, _out0tm2); float16x4_t _tmp135a = vsub_f16(_out0tm1, _out0tm2); // float tmp024a = output0_tm[1] + output0_tm[2]; // float tmp135a = output0_tm[1] - output0_tm[2]; float16x4_t _tmp024b = vadd_f16(_out0tm3, _out0tm4); 
float16x4_t _tmp135b = vsub_f16(_out0tm3, _out0tm4); // float tmp024b = output0_tm[3] + output0_tm[4]; // float tmp135b = output0_tm[3] - output0_tm[4]; float16x4_t _tmp024c = vadd_f16(_out0tm5, _out0tm6); float16x4_t _tmp135c = vsub_f16(_out0tm5, _out0tm6); // float tmp024c = output0_tm[5] + output0_tm[6]; // float tmp135c = output0_tm[5] - output0_tm[6]; float16x4_t _tmp0m = vadd_f16(vadd_f16(_out0tm0, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f)); float16x4_t _tmp2m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); float16x4_t _tmp4m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); vst1_f16(tmp[0][m], _tmp0m); vst1_f16(tmp[2][m], _tmp2m); vst1_f16(tmp[4][m], _tmp4m); // tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32; // tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8; // tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c; float16x4_t _tmp1m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f); float16x4_t _tmp3m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); float16x4_t _tmp5m = vadd_f16(vadd_f16(_out0tm7, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f)); vst1_f16(tmp[1][m], _tmp1m); vst1_f16(tmp[3][m], _tmp3m); vst1_f16(tmp[5][m], _tmp5m); // tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16; // tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4; // tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c; output0_tm_0 += tiles * 32; output0_tm_1 += tiles * 32; output0_tm_2 += tiles * 32; output0_tm_3 += tiles * 32; output0_tm_4 += tiles * 32; output0_tm_5 += tiles * 32; output0_tm_6 += tiles * 32; output0_tm_7 += tiles * 32; } for (int m = 0; m < 6; m++) { float16x4_t _tmp00 = vld1_f16(tmp[m][0]); float16x4_t _tmp01 = vld1_f16(tmp[m][1]); float16x4_t _tmp02 = vld1_f16(tmp[m][2]); float16x4_t _tmp03 = vld1_f16(tmp[m][3]); float16x4_t _tmp04 = vld1_f16(tmp[m][4]); float16x4_t _tmp05 = vld1_f16(tmp[m][5]); float16x4_t _tmp06 = vld1_f16(tmp[m][6]); float16x4_t _tmp07 = 
vld1_f16(tmp[m][7]); float16x4_t _tmp024a = vadd_f16(_tmp01, _tmp02); float16x4_t _tmp135a = vsub_f16(_tmp01, _tmp02); // float tmp024a = tmp0[1] + tmp0[2]; // float tmp135a = tmp0[1] - tmp0[2]; float16x4_t _tmp024b = vadd_f16(_tmp03, _tmp04); float16x4_t _tmp135b = vsub_f16(_tmp03, _tmp04); // float tmp024b = tmp0[3] + tmp0[4]; // float tmp135b = tmp0[3] - tmp0[4]; float16x4_t _tmp024c = vadd_f16(_tmp05, _tmp06); float16x4_t _tmp135c = vsub_f16(_tmp05, _tmp06); // float tmp024c = tmp0[5] + tmp0[6]; // float tmp135c = tmp0[5] - tmp0[6]; float16x4_t _out00 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp00, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f))); float16x4_t _out02 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f)); float16x4_t _out04 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f)); vst1_f16(output0, _out00); vst1_f16(output0 + 8, _out02); vst1_f16(output0 + 16, _out04); // output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32; // output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8; // output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c; float16x4_t _out01 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f)); float16x4_t _out03 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f)); float16x4_t _out05 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp07, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f))); vst1_f16(output0 + 4, _out01); vst1_f16(output0 + 12, _out03); vst1_f16(output0 + 20, _out05); // output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16; // output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4; // output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c; output0 += outw * 4; } } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
concurrent-computing.c
#include <stdio.h> #include <omp.h> int executeTask(int j){ const char *str[] = { "Enjoy", "Rosetta", "Code" }; for (int i = 0; i < 3; i++) printf("%d %s \n",j, str[i]); return j; } int main() { volatile int r; #pragma omp parallel for num_threads(3) for ( int j = 0; j < 1000000; ++j) { r = executeTask(j); } return 0; }
pair_dist.c
/* -*- mode: c; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /********************************************************************* * Clustal Omega - Multiple sequence alignment * * Copyright (C) 2010 University College Dublin * * Clustal-Omega is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This file is part of Clustal-Omega. * ********************************************************************/ /* * RCS $Id: pair_dist.c 301 2016-06-13 13:32:55Z fabian $ */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdlib.h> #include <ctype.h> #include <assert.h> #include <time.h> /* only neededfor iNumberOfThreads */ #include "clustal-omega.h" #include "ktuple_pair.h" #include "pair_dist.h" #include "progress.h" #include "util.h" /* Made iend/jend const unsigned long int (originally just int), FS, 2016-04-04 */ /* Up to rev 173 we had a USE_SYM_KTUPLE switch implemented here. When active * ktuple distances were computed twice for each pair and averaged. Idea was * to avoid assymmetries in the pairwise scores (score(a, b) is often not the * same as score(b, a)). 
Results on BAliBASE indicate that this is overkill: * * r92_default core columns: avg-sp=0.800656 avg-tc=0.47711 (of total 218) * r93-mod--norm-ktuple/ core columns: avg-sp=0.800656 avg-tc=0.47711 (of total 218) * r93-mod--sym-ktuple/ core columns: avg-sp=0.801083 avg-tc=0.476544 (of total 217) * r93-mod--rand-ktuple-1 core columns: avg-sp=0.799289 avg-tc=0.468028 (of total 218) * r93-mod--rand-ktuple-2 core columns: avg-sp=0.801654 avg-tc=0.47659 (of total 217) * r93-mod--rand-ktuple-3 core columns: avg-sp=0.800234 avg-tc=0.474908 (of total 218) * r93-mod--rand-ktuple-4 core columns: avg-sp=0.800573 avg-tc=0.476514 (of total 218) * r93-mod--rand-ktuple-5 core columns: avg-sp=0.799679 avg-tc=0.468716 (of total 218) * */ static double KimuraCorrection(double frac_id); static int SquidIdPairDist(symmatrix_t *tmat, mseq_t *mseq, int istart, const unsigned long int iend, int jstart, const unsigned long int jend, bool use_KimuraCorrection, progress_t *prProgress, unsigned long int *ulStepNo, unsigned long int ulTotalStepNo); /* Taken from Muscle's msadistkimura.cpp */ static int DAYHOFF_PAMS[]={ 195, /* 75.0% observed d; 195 PAMs estimated = 195% estimated d */ 196, /* 75.1% observed d; 196 PAMs estimated */ 197, 198, 199, 200, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 226, 227, 228, 229, 230, 231, 232, 233, 234, 236, 237, 238, 239, 240, 241, 243, 244, 245, 246, 248, 249, 250, /* 250 PAMs = 80.3% observed d */ 252, 253, 254, 255, 257, 258, 260, 261, 262, 264, 265, 267, 268, 270, 271, 273, 274, 276, 277, 279, 281, 282, 284, 285, 287, 289, 291, 292, 294, 296, 298, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 328, 330, 332, 335, 337, 339, 342, 344, 347, 349, 352, 354, 357, 360, 362, 365, 368, 371, 374, 377, 380, 383, 386, 389, 393, 396, 399, 403, 407, 410, 414, 418, 422, 426, 430, 434, 438, 442, 447, 451, 456, 461, 466, 471, 476, 482, 487, 493, 498, 504, 511, 
517, 524, 531, 538, 545, 553, 560, 569, 577, 586, 595, 605, 615, 626, 637, 649, 661, 675, 688, 703, 719, 736, 754, 775, 796, 819, 845, 874, 907, 945, /* 92.9% observed; 945 PAMs */ 988 /* 93.0% observed; 988 PAMs */ }; static int DAYHOFF_TABLE_ENTRIES = sizeof(DAYHOFF_PAMS)/sizeof(DAYHOFF_PAMS[0]); /** * * @brief Compute Kimura corrected distance. * * Original Muscle documentation following: * """ * This is defined to be: * log_e(1 - p - p*p/5) * where p is the fraction of residues that differ, i.e.: * p = (1 - fractional_conservation) * This measure is infinite for p = 0.8541 and is considered * unreliable for p >= 0.75 (according to the ClustalW docs). * ClustalW uses a table lookup for values > 0.75. The following table * was copied from the ClustalW file dayhoff.h. * """ * * @note copied from Muscle's msadistkimura.cpp:KimuraDist() * * @warning For protein only (uses Dayhoff substitution parameters) * * @param[in] p * distance, e.g. 1.0 - fractional/relative identity * * @return The Kimura corrected distance * */ double KimuraCorrection(double p) { int table_index; /* Typical case: use Kimura's empirical formula */ if (p < 0.75) return -log(1 - p - (p*p)/5); /* Per ClustalW, return 10.0 for anything over 93% */ if (p > 0.93) return 10.0; /* If 0.75 >= p <= 0.93, use table lookup */ table_index = (int) ((p - 0.75)*1000 + 0.5); if (table_index < 0 || table_index >= DAYHOFF_TABLE_ENTRIES) Log(&rLog, LOG_FATAL, "Internal error in %s:%s", __FILE__, __FUNCTION__); return DAYHOFF_PAMS[table_index] / 100.0; } /*** end: KimuraCorrection() ***/ /** * @brief Compute distances between all aligned sequence pairs using * squid's PairwiseIdentity, which is: idents / MIN(len1, len2) * * @param[out] tmat * Where to store the computed distances * @param[in] mseq * The aligned sequences * @param[in] istart * For distances [i][j] i>=istart, i<j * @param[in] iend * For distances [i][j] i<iend, i<j * @param[in] jstart * For distances [i][j] j>=jstart, i<j * @param[in] jend * For 
distances [i][j] i<j<jend, i<j
 * @param[in] use_kimura
 * Use Kimura corrected values (Proteins only)
 *
 * @return Non-zero on error
 *
 */
int SquidIdPairDist(symmatrix_t *tmat, mseq_t *mseq,
                    int istart, const unsigned long int iend,
                    int jstart, const unsigned long int jend,
                    bool use_kimura, progress_t *prProgress,
                    unsigned long int *ulStepNo, unsigned long int ulTotalStepNo)
{
    int i, j; /* aux */
    /* progress_t *prProgress; */
    bool bPrintCR = (rLog.iLogLevelEnabled<=LOG_VERBOSE) ? FALSE : TRUE;
    /* unsigned long int ulStepNo; unsigned long ulTotalStepNo; */

    assert(NULL != tmat);
    assert(NULL != mseq);

    if (TRUE != mseq->aligned) {
        Log(&rLog, LOG_ERROR, "Sequences need to be aligned (%s)", __FUNCTION__);
        return -1;
    }
    if (SEQTYPE_PROTEIN != mseq->seqtype && TRUE == use_kimura) {
        Log(&rLog, LOG_WARN,
            "Using Kimura distance corretion which includes Dayhoff substitution table lookup for non-protein sequences");
    }
    NewProgress(&prProgress, LogGetFP(&rLog, LOG_INFO),
                "Pairwise distance calculation progress", bPrintCR);

    /* estimation of total number of steps (if istart and jstart are
     * both 0) */
    /* ulTotalStepNo = iend*jend - iend*iend/2 + iend/2;
       ulStepNo = 0; */
    /*LOG_DEBUG("istart=%d iend=%d jstart=%d jend=%d", istart, iend, jstart, jend);*/

    for (i=istart; i<iend; ++i) {
        /* by definition a sequence compared to itself should give a
           score of 0 */
        SymMatrixSetValue(tmat, i, i, 0.0);

        /* progress output is serialized: several threads may run this routine
         * concurrently on disjoint i-chunks, all sharing *ulStepNo */
#ifdef HAVE_OPENMP
#pragma omp critical(squidid)
#endif
        {
            ProgressLog(prProgress, *ulStepNo, ulTotalStepNo, FALSE);
        }

        for (j=MAX(i+1, jstart); j<jend; ++j) {
            float dist;
            dist = 1.0 - PairwiseIdentity(mseq->seq[i], mseq->seq[j]);
            /* shared step counter: atomic increment, not a critical section */
#ifdef HAVE_OPENMP
#pragma omp atomic
#endif
            (*ulStepNo)++;
            /*LOG_DEBUG("%d:%d raw dist = %f", i, j, dist);*/
            if (use_kimura) {
                dist = KimuraCorrection(dist);
                /*LOG_DEBUG("cor dist = %f", dist);*/
            }
            SymMatrixSetValue(tmat, i, j, dist);
#ifdef HAVE_OPENMP
#pragma omp critical(squidid)
#endif
            {
                Log(&rLog, LOG_DEBUG,
                    "Aligned distance for sequence pair %d:%d= %lg", i+1, j+1, dist);
            }
        }
    }

    return 0;
}
/*** end: SquidIdPairDist() ***/

/**
 * @brief compute or read precomputed distances for given sequences
 *
 * @param[out] distmat
 * Distances will be written to this matrix. will be allocated here as
 * well. Caller must free with FreeSymMatrix()
 * @param[in] mseq
 * Distances will be computed for these sequences
 * @param[in] pairdist_type
 * Type of pairwise distance comparison
 * @param[in] fdist_in
 * If not NULL, distances will be read from this file instead of
 * being computed
 * @param[in] istart
 * Compute distances for sequences i:j, i>=istart, i<j.
 * Usually 0.
 * @param[in] iend
 * Compute distances for sequences i:j, i<iend, i<j
 * Usually mseq->nseqs.
 * @param[in] jstart
 * Compute distances for sequences i:j, j>=jstart, i<j
 * Usually 0.
 * @param[in] jend
 * Compute distances for sequences i:j, j<iend, i<j
 * Usually mseq->nseqs.
 * @param[in] fdist_out
 * If not NULL, distances will be written to this file
 *
 *
 */
int PairDistances(symmatrix_t **distmat, mseq_t *mseq, int pairdist_type, bool bPercID,
                  int istart, const unsigned long int iend,
                  int jstart, const unsigned long int jend,
                  char *fdist_in, char *fdist_out)
{
    int uSeqIndex;
    unsigned long int ulStepNo = 0, ulTotalStepNo; /* DD: moved from SquidIdPairDist so progress bar works multithreaded */
    int iChunk, iChunkStart, iChunkEnd;
    int iChunkStarts[iNumberOfThreads];
    int iChunkEnds[iNumberOfThreads];
    progress_t *prProgress = NULL;
    int iSquidSuccess = 0;
    bool bPrintCR = (rLog.iLogLevelEnabled<=LOG_VERBOSE) ?
FALSE : TRUE; assert(NULL!=distmat); assert(NULL!=mseq); assert(istart<iend); assert(jstart<jend); /* compute pairwise distances or read from file * */ #if 0 #include "random-dist.h" #else if (NULL != fdist_in) { Log(&rLog, LOG_WARN, "Please use distance matrix input only, if you know exactly what you're doing!"); if (SymMatrixRead(fdist_in, distmat, mseq)) { Log(&rLog, LOG_FATAL, "%s", "Reading distance matrix failed"); } } else { if (NewSymMatrix(distmat, iend, jend)!=0) { Log(&rLog, LOG_FATAL, "%s", "Memory allocation for distance matrix failed"); } /* break into chunks, one for each thread matrix is a triangle, not a square hence making even chunk sizes is slightly fiddlier */ ulTotalStepNo = iend*jend - iend*iend/2 + iend/2; /* FIXME: can get rid of iChunkStart, iChunkEnd now that we're using the arrays */ iChunkStart = iend; for(iChunk = 0; iChunk <= iNumberOfThreads; iChunk++) { iChunkEnd = iChunkStart; if (iChunk == iNumberOfThreads - 1){ iChunkStart = 0; } else if (iend == jend){ iChunkStart = iend - ((double)(iend - istart) * sqrt(((double)iChunk + 1.0)/(double)iNumberOfThreads)); } else { iChunkStart = iend - (iend - istart) * (iChunk + 1) / (double)(iNumberOfThreads); } iChunkStarts[iChunk] = iChunkStart; iChunkEnds[iChunk] = iChunkEnd; /*printf("%s:%d: C=%d, ie=%d, is=%d, je=%d, js=%d, Cstart=%d, Cend=%d, diff=%d\n", __FILE__, __LINE__, iChunk, iend, istart, jend, jstart, iChunkStart, iChunkEnd, iChunkEnd-iChunkStart);*/ } if (PAIRDIST_KTUPLE == pairdist_type) { Log(&rLog, LOG_INFO, "Calculating pairwise ktuple-distances..."); NewProgress(&prProgress, LogGetFP(&rLog, LOG_INFO), "Ktuple-distance calculation progress", bPrintCR); #ifdef HAVE_OPENMP #pragma omp parallel for private(iChunk) schedule(dynamic) #endif for(iChunk = 0; iChunk < iNumberOfThreads; iChunk++) { KTuplePairDist((*distmat), mseq, iChunkStarts[iChunk], iChunkEnds[iChunk], jstart, jend, NULL, prProgress, &ulStepNo, ulTotalStepNo); } #if 0 printf("total ops %d\n", ulStepNo); #endif /* 
old format: KTuplePairDist((*distmat), mseq, istart, iend, jstart, jend, NULL); */ } else if (PAIRDIST_SQUIDID == pairdist_type) { Log(&rLog, LOG_INFO, "Calculating pairwise aligned identity distances..."); NewProgress(&prProgress, LogGetFP(&rLog, LOG_INFO), "Pairwise identity calculation progress", bPrintCR); #ifdef HAVE_OPENMP #pragma omp parallel for private(iChunk) schedule(dynamic) #endif for(iChunk = 0; iChunk < iNumberOfThreads; iChunk++) { iSquidSuccess = SquidIdPairDist((*distmat), mseq, iChunkStarts[iChunk], iChunkEnds[iChunk], jstart, jend, FALSE, prProgress, &ulStepNo, ulTotalStepNo); } if(iSquidSuccess != 0) return -1; } else if (PAIRDIST_SQUIDID_KIMURA == pairdist_type) { Log(&rLog, LOG_INFO, "Calculating Kimura-corrected pairwise aligned identity distances..."); NewProgress(&prProgress, LogGetFP(&rLog, LOG_INFO), "Pairwise identity calculation progress", bPrintCR); #ifdef HAVE_OPENMP #pragma omp parallel for private(iChunk) schedule(dynamic) #endif for(iChunk = 0; iChunk < iNumberOfThreads; iChunk++) { iSquidSuccess = SquidIdPairDist((*distmat), mseq, iChunkStarts[iChunk], iChunkEnds[iChunk], jstart, jend, TRUE, prProgress, &ulStepNo, ulTotalStepNo); } if(iSquidSuccess != 0) return -1; } else { Log(&rLog, LOG_FATAL, "INTERNAL ERROR: don't know about pairdist_type %d", pairdist_type); } } #endif /* random/proper distance calculation */ /* optional printing of matrix to file */ if (NULL != fdist_out) { /* need a copy of sequence names for printing */ char **names; names = (char **)CKMALLOC(mseq->nseqs * sizeof(char*)); for (uSeqIndex=0; uSeqIndex<mseq->nseqs; uSeqIndex++) { names[uSeqIndex] = mseq->sqinfo[uSeqIndex].name; } SymMatrixPrint((*distmat), names, fdist_out, bPercID); Log(&rLog, LOG_INFO, "Pairwise distance matrix written to %s", fdist_out); CKFREE(names); } #if 0 #include "distance-distrib.h" #endif if (NULL != prProgress) { ProgressDone(prProgress); FreeProgress(&prProgress); } return 0; } /*** end: PairDistances() ***/
Matrix.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Matrix - Matrix stored and accessible by rows. Indices and values for * the matrix nonzeros are copied into the matrix a row at a time, in any * order using the MatrixGetRow function. The MatrixPutRow function returns * a pointer to the indices and values of a row. The matrix has a set of * row and column indices such that these indices begin at "beg" and end * at "end", where 0 <= "beg" <= "end". In other words, the matrix indices * have any nonnegative base value, and the base values of the row and column * indices must agree. * *****************************************************************************/ #include <stdlib.h> //#include <memory.h> #include "Common.h" #include "Matrix.h" #include "Numbering.h" #define MAX_NZ_PER_ROW 1000 /*-------------------------------------------------------------------------- * MatrixCreate - Return (a pointer to) a matrix object. 
 *--------------------------------------------------------------------------*/

/* Allocate a distributed Matrix owning rows [beg_row, end_row] on this rank.
 * Row storage comes from the matrix's private memory pool (mat->mem).
 * Collective over "comm": every rank must call it (Allgather below). */
Matrix *MatrixCreate(MPI_Comm comm, HYPRE_Int beg_row, HYPRE_Int end_row)
{
    HYPRE_Int num_rows, mype, npes;

    Matrix *mat = hypre_TAlloc(Matrix, 1, HYPRE_MEMORY_HOST);

    mat->comm = comm;

    mat->beg_row = beg_row;
    mat->end_row = end_row;

    /* pool from which all per-row arrays are carved; freed wholesale in
     * MatrixDestroy via MemDestroy */
    mat->mem = (Mem *) MemCreate();

    num_rows = mat->end_row - mat->beg_row + 1;

    mat->lens = (HYPRE_Int *)     MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int));
    mat->inds = (HYPRE_Int **)    MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int *));
    mat->vals = (HYPRE_Real **)   MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Real *));

    /* Send beg_row and end_row to all processors */
    /* This is needed in order to map row numbers to processors */

    hypre_MPI_Comm_rank(comm, &mype);   /* rank queried but not used here */
    hypre_MPI_Comm_size(comm, &npes);

    mat->beg_rows = (HYPRE_Int *) MemAlloc(mat->mem, npes * sizeof(HYPRE_Int));
    mat->end_rows = (HYPRE_Int *) MemAlloc(mat->mem, npes * sizeof(HYPRE_Int));

    hypre_MPI_Allgather(&beg_row, 1, HYPRE_MPI_INT, mat->beg_rows, 1, HYPRE_MPI_INT, comm);
    hypre_MPI_Allgather(&end_row, 1, HYPRE_MPI_INT, mat->end_rows, 1, HYPRE_MPI_INT, comm);

    /* communication state is set up later, by MatrixComplete */
    mat->num_recv = 0;
    mat->num_send = 0;

    mat->recv_req = NULL;
    mat->send_req = NULL;
    mat->recv_req2 = NULL;
    mat->send_req2 = NULL;
    mat->statuses = NULL;

    mat->sendind = NULL;
    mat->sendbuf = NULL;
    mat->recvbuf = NULL;

    mat->numb = NULL;

    return mat;
}

/*--------------------------------------------------------------------------
 * MatrixCreateLocal - Return (a pointer to) a matrix object.
 * The matrix created by this call is a local matrix, not a global matrix.
*--------------------------------------------------------------------------*/ Matrix *MatrixCreateLocal(HYPRE_Int beg_row, HYPRE_Int end_row) { HYPRE_Int num_rows; Matrix *mat = hypre_TAlloc(Matrix, 1, HYPRE_MEMORY_HOST); mat->comm = hypre_MPI_COMM_NULL; mat->beg_row = beg_row; mat->end_row = end_row; mat->mem = (Mem *) MemCreate(); num_rows = mat->end_row - mat->beg_row + 1; mat->lens = (HYPRE_Int *) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int)); mat->inds = (HYPRE_Int **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Int *)); mat->vals = (HYPRE_Real **) MemAlloc(mat->mem, num_rows * sizeof(HYPRE_Real *)); /* Send beg_row and end_row to all processors */ /* This is needed in order to map row numbers to processors */ mat->beg_rows = NULL; mat->end_rows = NULL; mat->num_recv = 0; mat->num_send = 0; mat->recv_req = NULL; mat->send_req = NULL; mat->recv_req2 = NULL; mat->send_req2 = NULL; mat->statuses = NULL; mat->sendind = NULL; mat->sendbuf = NULL; mat->recvbuf = NULL; mat->numb = NULL; return mat; } /*-------------------------------------------------------------------------- * MatrixDestroy - Destroy a matrix object "mat". 
 *--------------------------------------------------------------------------*/

/* Free all persistent MPI requests, communication buffers, the row-storage
 * pool and the Matrix struct itself.  Safe on a matrix for which
 * MatrixComplete was never called (all counts 0, pointers NULL). */
void MatrixDestroy(Matrix *mat)
{
    HYPRE_Int i;

    /* NOTE: recv_req2 entries are created per *send* partner and send_req2
     * entries per *recv* partner (see SetupSends/SetupReceives), so the
     * apparently swapped loop bounds below are correct. */
    for (i=0; i<mat->num_recv; i++)
        hypre_MPI_Request_free(&mat->recv_req[i]);

    for (i=0; i<mat->num_send; i++)
        hypre_MPI_Request_free(&mat->send_req[i]);

    for (i=0; i<mat->num_send; i++)
        hypre_MPI_Request_free(&mat->recv_req2[i]);

    for (i=0; i<mat->num_recv; i++)
        hypre_MPI_Request_free(&mat->send_req2[i]);

    hypre_TFree(mat->recv_req,HYPRE_MEMORY_HOST);
    hypre_TFree(mat->send_req,HYPRE_MEMORY_HOST);
    hypre_TFree(mat->recv_req2,HYPRE_MEMORY_HOST);
    hypre_TFree(mat->send_req2,HYPRE_MEMORY_HOST);
    hypre_TFree(mat->statuses,HYPRE_MEMORY_HOST);

    hypre_TFree(mat->sendind,HYPRE_MEMORY_HOST);
    hypre_TFree(mat->sendbuf,HYPRE_MEMORY_HOST);
    hypre_TFree(mat->recvbuf,HYPRE_MEMORY_HOST);

    /* releases lens/inds/vals and every per-row array in one shot */
    MemDestroy(mat->mem);

    if (mat->numb)
        NumberingDestroy(mat->numb);

    hypre_TFree(mat,HYPRE_MEMORY_HOST);
}

/*--------------------------------------------------------------------------
 * MatrixSetRow - Set a row in a matrix.  Only local rows can be set.
 * Once a row has been set, it should not be set again, or else the
 * memory used by the existing row will not be recovered until
 * the matrix is destroyed.  "row" is in global coordinate numbering.
 *--------------------------------------------------------------------------*/

/* Copy "len" (index, value) pairs into the matrix for global row "row".
 * ind/val may be NULL to allocate storage without filling it. */
void MatrixSetRow(Matrix *mat, HYPRE_Int row, HYPRE_Int len, HYPRE_Int *ind, HYPRE_Real *val)
{
    /* convert global row number to local (0-based) index */
    row -= mat->beg_row;

    mat->lens[row] = len;
    mat->inds[row] = (HYPRE_Int *) MemAlloc(mat->mem, len*sizeof(HYPRE_Int));
    mat->vals[row] = (HYPRE_Real *) MemAlloc(mat->mem, len*sizeof(HYPRE_Real));

    if (ind != NULL)
    {
        //hypre_TMemcpy(mat->inds[row], ind, HYPRE_Int, len, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
        memcpy(mat->inds[row], ind, sizeof(HYPRE_Int) * len);
    }

    if (val != NULL)
    {
        //hypre_TMemcpy(mat->vals[row], val, HYPRE_Real, len, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
        memcpy(mat->vals[row], val, sizeof(HYPRE_Real) * len);
    }
}

/*--------------------------------------------------------------------------
 * MatrixGetRow - Get a *local* row in a matrix.
 *--------------------------------------------------------------------------*/

/* Return (by pointer) the length, index array and value array of LOCAL row
 * "row" (0-based; caller must NOT free the returned arrays). */
void MatrixGetRow(Matrix *mat, HYPRE_Int row, HYPRE_Int *lenp, HYPRE_Int **indp, HYPRE_Real **valp)
{
    *lenp = mat->lens[row];
    *indp = mat->inds[row];
    *valp = mat->vals[row];
}

/*--------------------------------------------------------------------------
 * MatrixRowPe - Map "row" to a processor number.
 *--------------------------------------------------------------------------*/

/* Linear search of the gathered row-range tables; aborts the program if the
 * global row number falls outside every processor's range. */
HYPRE_Int MatrixRowPe(Matrix *mat, HYPRE_Int row)
{
    HYPRE_Int npes, pe;
    HYPRE_Int *beg = mat->beg_rows;
    HYPRE_Int *end = mat->end_rows;

    hypre_MPI_Comm_size(mat->comm, &npes);

    for (pe=0; pe<npes; pe++)
    {
        if (row >= beg[pe] && row <= end[pe])
            return pe;
    }

    hypre_printf("MatrixRowPe: could not map row %d.\n", row);
    PARASAILS_EXIT;

    return -1; /* for picky compilers */
}

/*--------------------------------------------------------------------------
 * MatrixNnz - Return total number of nonzeros in preconditioner.
 *--------------------------------------------------------------------------*/

/* Sum the stored row lengths locally, then Allreduce across the
 * communicator.  Collective: every rank must call it. */
HYPRE_Int MatrixNnz(Matrix *mat)
{
    HYPRE_Int num_local, i, total, alltotal;

    num_local = mat->end_row - mat->beg_row + 1;

    total = 0;
    for (i=0; i<num_local; i++)
        total += mat->lens[i];

    hypre_MPI_Allreduce(&total, &alltotal, 1, HYPRE_MPI_INT, hypre_MPI_SUM, mat->comm);

    return alltotal;
}

/*--------------------------------------------------------------------------
 * MatrixPrint - Print a matrix to a file "filename".  Each processor
 * appends to the file in order, but the file is overwritten if it exists.
 *--------------------------------------------------------------------------*/

/* Collective: ranks take turns (serialized by a barrier per iteration);
 * rank 0 truncates the file, later ranks append.  Column indices are
 * translated back to global numbering via mat->numb, so MatrixComplete
 * must have run first. */
void MatrixPrint(Matrix *mat, char *filename)
{
    HYPRE_Int mype, npes, pe;
    HYPRE_Int row, i, len, *ind;
    HYPRE_Real *val;

    hypre_MPI_Comm_rank(mat->comm, &mype);
    hypre_MPI_Comm_size(mat->comm, &npes);

    for (pe=0; pe<npes; pe++)
    {
        hypre_MPI_Barrier(mat->comm);

        if (mype == pe)
        {
            FILE *file = fopen(filename, (pe==0 ? "w" : "a"));
            hypre_assert(file != NULL);

            for (row=0; row<=mat->end_row - mat->beg_row; row++)
            {
                MatrixGetRow(mat, row, &len, &ind, &val);

                for (i=0; i<len; i++)
                    hypre_fprintf(file, "%d %d %.14e\n",
                                  row + mat->beg_row,
                                  mat->numb->local_to_global[ind[i]], val[i]);
            }

            fclose(file);
        }
    }
}

/*--------------------------------------------------------------------------
 * MatrixReadMaster - MatrixRead routine for processor 0.  Internal use.
 *--------------------------------------------------------------------------*/

/* Rank-0 half of MatrixRead: scan the (row-sorted) file once to find each
 * other rank's starting file offset and Isend it to them, then rewind and
 * parse only this rank's own rows into the matrix. */
static void MatrixReadMaster(Matrix *mat, char *filename)
{
    MPI_Comm comm = mat->comm;
    HYPRE_Int mype, npes;
    FILE *file;
    HYPRE_Int ret;
    HYPRE_Int num_rows, curr_proc;
    HYPRE_Int row, col;
    HYPRE_Real value;
    hypre_longint offset;
    hypre_longint outbuf;

    HYPRE_Int curr_row;
    HYPRE_Int len;
    HYPRE_Int ind[MAX_NZ_PER_ROW];
    HYPRE_Real val[MAX_NZ_PER_ROW];

    char line[100];
    HYPRE_Int oldrow;

    hypre_MPI_Request request;
    hypre_MPI_Status  status;

    hypre_MPI_Comm_size(mat->comm, &npes);
    hypre_MPI_Comm_rank(mat->comm, &mype);

    file = fopen(filename, "r");
    hypre_assert(file != NULL);

    /* header line carries the matrix dimensions */
    if (fgets(line, 100, file) == NULL)
    {
        hypre_fprintf(stderr, "Error reading file.\n");
        PARASAILS_EXIT;
    }
#ifdef EMSOLVE
    ret = hypre_sscanf(line, "%*d %d %*d %*d", &num_rows);
    for (row=0; row<num_rows; row++)
        hypre_fscanf(file, "%*d");
#else
    ret = hypre_sscanf(line, "%d %*d %*d", &num_rows);
#endif

    offset = ftell(file);
    hypre_fscanf(file, "%d %d %lf", &row, &col, &value);

    request = hypre_MPI_REQUEST_NULL;
    curr_proc = 1; /* proc for which we are looking for the beginning */
    while (curr_proc < npes)
    {
        if (row == mat->beg_rows[curr_proc])
        {
            /* wait for previous Isend's buffer (outbuf) to be reusable */
            hypre_MPI_Wait(&request, &status);
            outbuf = offset;
            hypre_MPI_Isend(&outbuf, 1, hypre_MPI_LONG, curr_proc, 0, comm, &request);
            curr_proc++;
        }
        offset = ftell(file);
        oldrow = row;
        hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
        if (oldrow > row)
        {
            hypre_fprintf(stderr, "Matrix file is not sorted by rows.\n");
            PARASAILS_EXIT;
        }
    }

    /* Now read our own part */
    rewind(file);

    if (fgets(line, 100, file) == NULL)
    {
        hypre_fprintf(stderr, "Error reading file.\n");
        PARASAILS_EXIT;
    }
#ifdef EMSOLVE
    ret = hypre_sscanf(line, "%*d %d %*d %*d", &num_rows);
    for (row=0; row<num_rows; row++)
        hypre_fscanf(file, "%*d");
#else
    ret = hypre_sscanf(line, "%d %*d %*d", &num_rows);
#endif

    ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
    curr_row = row;
    len = 0;

    /* accumulate entries of one row at a time into ind/val, flushing the
     * buffer into the matrix each time the row number changes */
    while (ret != EOF && row <= mat->end_row)
    {
        if (row != curr_row)
        {
            /* store this row */
            MatrixSetRow(mat, curr_row, len, ind, val);

            curr_row = row;

            /* reset row pointer */
            len = 0;
        }

        if (len >= MAX_NZ_PER_ROW)
        {
            hypre_fprintf(stderr, "The matrix has exceeded %d\n", MAX_NZ_PER_ROW);
            hypre_fprintf(stderr, "nonzeros per row. Internal buffers must be\n");
            hypre_fprintf(stderr, "increased to continue.\n");
            PARASAILS_EXIT;
        }

        ind[len] = col;
        val[len] = value;
        len++;

        ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
    }

    /* Store the final row */
    if (ret == EOF || row > mat->end_row)
        MatrixSetRow(mat, mat->end_row, len, ind, val);

    fclose(file);

    /* don't return before the last offset message has left the buffer */
    hypre_MPI_Wait(&request, &status);
}

/*--------------------------------------------------------------------------
 * MatrixReadSlave - MatrixRead routine for other processors.  Internal use.
 *--------------------------------------------------------------------------*/

/* Non-root half of MatrixRead: receive this rank's starting byte offset
 * from rank 0, seek there, and parse rows until past end_row. */
static void MatrixReadSlave(Matrix *mat, char *filename)
{
    MPI_Comm comm = mat->comm;
    hypre_MPI_Status status;
    HYPRE_Int mype;
    FILE *file;
    HYPRE_Int ret;
    HYPRE_Int row, col;
    HYPRE_Real value;
    hypre_longint offset;

    HYPRE_Int curr_row;
    HYPRE_Int len;
    HYPRE_Int ind[MAX_NZ_PER_ROW];
    HYPRE_Real val[MAX_NZ_PER_ROW];

    HYPRE_Real time0, time1;

    file = fopen(filename, "r");
    hypre_assert(file != NULL);

    hypre_MPI_Comm_rank(mat->comm, &mype);

    hypre_MPI_Recv(&offset, 1, hypre_MPI_LONG, 0, 0, comm, &status);
    time0 = hypre_MPI_Wtime();

    ret = fseek(file, offset, SEEK_SET);
    hypre_assert(ret == 0);

    ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
    curr_row = row;
    len = 0;

    while (ret != EOF && row <= mat->end_row)
    {
        if (row != curr_row)
        {
            /* store this row */
            MatrixSetRow(mat, curr_row, len, ind, val);

            curr_row = row;

            /* reset row pointer */
            len = 0;
        }

        if (len >= MAX_NZ_PER_ROW)
        {
            hypre_fprintf(stderr, "The matrix has exceeded %d\n", MAX_NZ_PER_ROW);
            hypre_fprintf(stderr, "nonzeros per row. Internal buffers must be\n");
            hypre_fprintf(stderr, "increased to continue.\n");
            PARASAILS_EXIT;
        }

        ind[len] = col;
        val[len] = value;
        len++;

        ret = hypre_fscanf(file, "%d %d %lf", &row, &col, &value);
    }

    /* Store the final row */
    if (ret == EOF || row > mat->end_row)
        MatrixSetRow(mat, mat->end_row, len, ind, val);

    fclose(file);

    time1 = hypre_MPI_Wtime();
    hypre_printf("%d: Time for slave read: %f\n", mype, time1-time0);
}

/*--------------------------------------------------------------------------
 * MatrixRead - Read a matrix file "filename" from disk and store in the
 * matrix "mat" which has already been created using MatrixCreate.  The format
 * assumes no nonzero rows, the rows are in order, and there will be at least
 * one row per processor.
 *--------------------------------------------------------------------------*/

/* Collective entry point: rank 0 runs the master reader, everyone else the
 * slave reader, then communication is finalized with MatrixComplete. */
void MatrixRead(Matrix *mat, char *filename)
{
    HYPRE_Int mype;
    HYPRE_Real time0, time1;

    hypre_MPI_Comm_rank(mat->comm, &mype);

    time0 = hypre_MPI_Wtime();
    if (mype == 0)
        MatrixReadMaster(mat, filename);
    else
        MatrixReadSlave(mat, filename);
    time1 = hypre_MPI_Wtime();
    hypre_printf("%d: Time for reading matrix: %f\n", mype, time1-time0);

    MatrixComplete(mat);
}

/*--------------------------------------------------------------------------
 * RhsRead - Read a right-hand side file "filename" from disk and store in the
 * location pointed to by "rhs".  "mat" is needed to provide the partitioning
 * information.  The expected format is: a header line (n, nrhs) followed
 * by n values.  Also allows isis format, indicated by 1 HYPRE_Int in first line.
 *--------------------------------------------------------------------------*/

/* Collective: rank 0 reads the whole file and ships each rank's slice of
 * the right-hand side with a point-to-point Send; other ranks just Recv
 * their num_local values into "rhs" and return. */
void RhsRead(HYPRE_Real *rhs, Matrix *mat, char *filename)
{
    FILE *file;
    hypre_MPI_Status status;
    HYPRE_Int mype, npes;
    HYPRE_Int num_rows, num_local, pe, i, converted;
    HYPRE_Real *buffer = NULL;
    HYPRE_Int buflen = 0;
    char line[100];
    HYPRE_Int dummy;

    hypre_MPI_Comm_size(mat->comm, &npes);
    hypre_MPI_Comm_rank(mat->comm, &mype);

    num_local = mat->end_row - mat->beg_row + 1;

    if (mype != 0)
    {
        hypre_MPI_Recv(rhs, num_local, hypre_MPI_REAL, 0, 0, mat->comm, &status);
        return;
    }

    file = fopen(filename, "r");
    hypre_assert(file != NULL);

    if (fgets(line, 100, file) == NULL)
    {
        hypre_fprintf(stderr, "Error reading file.\n");
        PARASAILS_EXIT;
    }
    /* one field in the header (converted == 1) signals isis format, where
     * each value line is prefixed with an index we skip */
    converted = hypre_sscanf(line, "%d %d", &num_rows, &dummy);
    hypre_assert(num_rows == mat->end_rows[npes-1]);

    /* Read own rows first */
    for (i=0; i<num_local; i++)
        if (converted == 1) /* isis format */
            hypre_fscanf(file, "%*d %lf", &rhs[i]);
        else
            hypre_fscanf(file, "%lf", &rhs[i]);

    for (pe=1; pe<npes; pe++)
    {
        num_local = mat->end_rows[pe] - mat->beg_rows[pe]+ 1;

        /* grow the staging buffer only when a larger slice comes along */
        if (buflen < num_local)
        {
            hypre_TFree(buffer,HYPRE_MEMORY_HOST);
            buflen = num_local;
            buffer = hypre_TAlloc(HYPRE_Real, buflen , HYPRE_MEMORY_HOST);
        }

        for (i=0; i<num_local; i++)
            if (converted == 1) /* isis format */
                hypre_fscanf(file, "%*d %lf", &buffer[i]);
            else
                hypre_fscanf(file, "%lf", &buffer[i]);

        hypre_MPI_Send(buffer, num_local, hypre_MPI_REAL, pe, 0, mat->comm);
    }

    hypre_TFree(buffer,HYPRE_MEMORY_HOST);
}

/*--------------------------------------------------------------------------
 * SetupReceives
 *--------------------------------------------------------------------------*/

/* Build the receive side of the matvec communication pattern.
 * reqind[0..reqlen-1] lists (sorted by owner) the external global row
 * indices this rank needs.  For each run of indices owned by one peer:
 * tell that peer which rows we want (tag 444), record the count in
 * outlist[peer], and create persistent requests for receiving those x
 * values (tag 555) and for sending them back in the transpose multiply
 * (tag 666).  Also allocates mat->recvbuf. */
static void SetupReceives(Matrix *mat, HYPRE_Int reqlen, HYPRE_Int *reqind, HYPRE_Int *outlist)
{
    HYPRE_Int i, j, this_pe, mype;
    hypre_MPI_Request request;
    MPI_Comm comm = mat->comm;
    HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;

    hypre_MPI_Comm_rank(comm, &mype);

    mat->num_recv = 0;

    /* Allocate recvbuf */
    /* recvbuf has numlocal entires saved for local part of x, used in matvec */
    mat->recvlen = reqlen; /* used for the transpose multiply */
    mat->recvbuf = hypre_TAlloc(HYPRE_Real, (reqlen+num_local) , HYPRE_MEMORY_HOST);

    for (i=0; i<reqlen; i=j) /* j is set below */
    {
        /* The processor that owns the row with index reqind[i] */
        this_pe = MatrixRowPe(mat, reqind[i]);

        /* Figure out other rows we need from this_pe */
        for (j=i+1; j<reqlen; j++)
        {
            /* if row is on different pe */
            if (reqind[j] < mat->beg_rows[this_pe] ||
                reqind[j] > mat->end_rows[this_pe])
                break;
        }

        /* Request rows in reqind[i..j-1] */
        hypre_MPI_Isend(&reqind[i], j-i, HYPRE_MPI_INT, this_pe, 444, comm, &request);
        hypre_MPI_Request_free(&request);

        /* Count of number of number of indices needed from this_pe */
        outlist[this_pe] = j-i;

        /* persistent recv of x values for matvec (started in MatrixMatvec) */
        hypre_MPI_Recv_init(&mat->recvbuf[i+num_local], j-i, hypre_MPI_REAL, this_pe, 555, comm,
                            &mat->recv_req[mat->num_recv]);
        /* persistent send of partial sums for the transpose multiply */
        hypre_MPI_Send_init(&mat->recvbuf[i+num_local], j-i, hypre_MPI_REAL, this_pe, 666, comm,
                            &mat->send_req2[mat->num_recv]);
        mat->num_recv++;
    }
}

/*--------------------------------------------------------------------------
 * SetupSends
 * This function will wait for all receives to complete.
 *--------------------------------------------------------------------------*/

/* Build the send side of the matvec pattern.  inlist[pe] is the number of
 * rows peer "pe" requested from us (from the Alltoall in MatrixComplete).
 * Receives the actual requested indices (tag 444), creates persistent
 * sends of x values (tag 555) and persistent receives of transpose partial
 * sums (tag 666), then converts the received indices to local numbering.
 * NOTE: recv_req2 is indexed by num_send here — the matching bound used in
 * MatrixDestroy. */
static void SetupSends(Matrix *mat, HYPRE_Int *inlist)
{
    HYPRE_Int i, j, mype, npes;
    hypre_MPI_Request *requests;
    hypre_MPI_Status *statuses;
    MPI_Comm comm = mat->comm;

    hypre_MPI_Comm_rank(comm, &mype);
    hypre_MPI_Comm_size(comm, &npes);

    requests = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
    statuses = hypre_TAlloc(hypre_MPI_Status, npes , HYPRE_MEMORY_HOST);

    /* Determine size of and allocate sendbuf and sendind */
    mat->sendlen = 0;
    for (i=0; i<npes; i++)
        mat->sendlen += inlist[i];
    mat->sendbuf = NULL;
    mat->sendind = NULL;
    if (mat->sendlen)
    {
        mat->sendbuf = hypre_TAlloc(HYPRE_Real, mat->sendlen , HYPRE_MEMORY_HOST);
        mat->sendind = hypre_TAlloc(HYPRE_Int, mat->sendlen , HYPRE_MEMORY_HOST);
    }

    j = 0;
    mat->num_send = 0;
    for (i=0; i<npes; i++)
    {
        if (inlist[i] != 0)
        {
            /* Post receive for the actual indices */
            hypre_MPI_Irecv(&mat->sendind[j], inlist[i], HYPRE_MPI_INT, i, 444, comm,
                            &requests[mat->num_send]);

            /* Set up the send */
            hypre_MPI_Send_init(&mat->sendbuf[j], inlist[i], hypre_MPI_REAL, i, 555, comm,
                                &mat->send_req[mat->num_send]);

            /* Set up the receive for the transpose */
            hypre_MPI_Recv_init(&mat->sendbuf[j], inlist[i], hypre_MPI_REAL, i, 666, comm,
                                &mat->recv_req2[mat->num_send]);

            mat->num_send++;
            j += inlist[i];
        }
    }

    /* block until every index list has arrived (matches SetupReceives'
     * Isends on the other ranks) */
    hypre_MPI_Waitall(mat->num_send, requests, statuses);
    hypre_TFree(requests,HYPRE_MEMORY_HOST);
    hypre_TFree(statuses,HYPRE_MEMORY_HOST);

    /* convert global indices to local indices */
    /* these are all indices on this processor */
    for (i=0; i<mat->sendlen; i++)
        mat->sendind[i] -= mat->beg_row;
}

/*--------------------------------------------------------------------------
 * MatrixComplete
 *--------------------------------------------------------------------------*/

/* Finalize a filled matrix for matvec use: allocate the persistent-request
 * tables, build the global<->local Numbering, set up both directions of
 * the communication pattern, and convert all stored column indices to
 * local numbering.  Collective over mat->comm. */
void MatrixComplete(Matrix *mat)
{
    HYPRE_Int mype, npes;
    HYPRE_Int *outlist, *inlist;
    HYPRE_Int row, len, *ind;
    HYPRE_Real *val;

    hypre_MPI_Comm_rank(mat->comm, &mype);
    hypre_MPI_Comm_size(mat->comm, &npes);

    /* worst case: one request per peer in each table */
    mat->recv_req = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
    mat->send_req = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
    mat->recv_req2 = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);
    mat->send_req2 = hypre_TAlloc(hypre_MPI_Request, npes , HYPRE_MEMORY_HOST);

    mat->statuses = hypre_TAlloc(hypre_MPI_Status, npes , HYPRE_MEMORY_HOST);

    outlist = hypre_CTAlloc(HYPRE_Int, npes, HYPRE_MEMORY_HOST);
    inlist = hypre_CTAlloc(HYPRE_Int, npes, HYPRE_MEMORY_HOST);

    /* Create Numbering object */
    mat->numb = NumberingCreate(mat, PARASAILS_NROWS);

    /* external indices live past the num_loc local ones in local_to_global */
    SetupReceives(mat, mat->numb->num_ind - mat->numb->num_loc,
                  &mat->numb->local_to_global[mat->numb->num_loc], outlist);

    /* exchange request counts so each rank learns what it must send */
    hypre_MPI_Alltoall(outlist, 1, HYPRE_MPI_INT, inlist, 1, HYPRE_MPI_INT, mat->comm);

    SetupSends(mat, inlist);

    hypre_TFree(outlist,HYPRE_MEMORY_HOST);
    hypre_TFree(inlist,HYPRE_MEMORY_HOST);

    /* Convert to local indices */
    for (row=0; row<=mat->end_row - mat->beg_row; row++)
    {
        MatrixGetRow(mat, row, &len, &ind, &val);
        NumberingGlobalToLocal(mat->numb, len, ind, ind);
    }
}

/*--------------------------------------------------------------------------
 * MatrixMatvec
 * Can be done in place.
 *--------------------------------------------------------------------------*/

/* y = A*x using the persistent communication pattern built by
 * MatrixComplete.  x and y may alias: x is fully staged into recvbuf/
 * sendbuf before y is written. */
void MatrixMatvec(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y)
{
    HYPRE_Int row, i, len, *ind;
    HYPRE_Real *val, temp;
    HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;

    /* Set up persistent communications */
    /* Assumes MatrixComplete has been called */

    /* Put components of x into the right outgoing buffers */
    for (i=0; i<mat->sendlen; i++)
        mat->sendbuf[i] = x[mat->sendind[i]];

    hypre_MPI_Startall(mat->num_recv, mat->recv_req);
    hypre_MPI_Startall(mat->num_send, mat->send_req);

    /* Copy local part of x into top part of recvbuf */
    for (i=0; i<num_local; i++)
        mat->recvbuf[i] = x[i];

    /* external x values land past the first num_local recvbuf slots */
    hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->statuses);

    /* do the multiply */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(row,len,ind,val,temp,i) schedule(static)
#endif
    for (row=0; row<=mat->end_row - mat->beg_row; row++)
    {
        MatrixGetRow(mat, row, &len, &ind, &val);

        temp = 0.0;
        for (i=0; i<len; i++)
        {
            temp = temp + val[i] * mat->recvbuf[ind[i]];
        }
        y[row] = temp;
    }

    /* sendbuf must stay valid until the sends complete */
    hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->statuses);
}

/* Same as MatrixMatvec but without the OpenMP-parallel multiply loop. */
void MatrixMatvecSerial(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y)
{
    HYPRE_Int row, i, len, *ind;
    HYPRE_Real *val, temp;
    HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;

    /* Set up persistent communications */
    /* Assumes MatrixComplete has been called */

    /* Put components of x into the right outgoing buffers */
    for (i=0; i<mat->sendlen; i++)
        mat->sendbuf[i] = x[mat->sendind[i]];

    hypre_MPI_Startall(mat->num_recv, mat->recv_req);
    hypre_MPI_Startall(mat->num_send, mat->send_req);

    /* Copy local part of x into top part of recvbuf */
    for (i=0; i<num_local; i++)
        mat->recvbuf[i] = x[i];

    hypre_MPI_Waitall(mat->num_recv, mat->recv_req, mat->statuses);

    /* do the multiply */
    for (row=0; row<=mat->end_row - mat->beg_row; row++)
    {
        MatrixGetRow(mat, row, &len, &ind, &val);

        temp = 0.0;
        for (i=0; i<len; i++)
        {
            temp = temp + val[i] * mat->recvbuf[ind[i]];
        }
        y[row] = temp;
    }

    hypre_MPI_Waitall(mat->num_send, mat->send_req, mat->statuses);
}

/*--------------------------------------------------------------------------
 * MatrixMatvecTrans
 * Can be done in place.
 *--------------------------------------------------------------------------*/

/* y = A^T*x: accumulate column-wise partial sums in recvbuf, ship the
 * nonlocal ones to their owners over the reverse (tag-666) pattern, and
 * add the incoming partials into y. */
void MatrixMatvecTrans(Matrix *mat, HYPRE_Real *x, HYPRE_Real *y)
{
    HYPRE_Int row, i, len, *ind;
    HYPRE_Real *val;
    HYPRE_Int num_local = mat->end_row - mat->beg_row + 1;

    /* Set up persistent communications */
    /* Assumes MatrixComplete has been called */

    /* Post receives for local parts of the solution y */
    hypre_MPI_Startall(mat->num_send, mat->recv_req2);

    /* initialize accumulator buffer to zero */
    for (i=0; i<mat->recvlen+num_local; i++)
        mat->recvbuf[i] = 0.0;

    /* do the multiply */
    for (row=0; row<=mat->end_row - mat->beg_row; row++)
    {
        MatrixGetRow(mat, row, &len, &ind, &val);

        for (i=0; i<len; i++)
        {
            mat->recvbuf[ind[i]] += val[i] * x[row];
        }
    }

    /* Now can send nonlocal parts of solution to other procs */
    hypre_MPI_Startall(mat->num_recv, mat->send_req2);

    /* copy local part of solution into y */
    for (i=0; i<num_local; i++)
        y[i] = mat->recvbuf[i];

    /* alternatively, loop over a wait any */
    hypre_MPI_Waitall(mat->num_send, mat->recv_req2, mat->statuses);

    /* add all the incoming partial sums to y */
    for (i=0; i<mat->sendlen; i++)
        y[mat->sendind[i]] += mat->sendbuf[i];

    hypre_MPI_Waitall(mat->num_recv, mat->send_req2, mat->statuses);
}
fc_compute.h
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/jit_kernel.h" namespace paddle { namespace operators { namespace math { template <typename DeviceContext, typename T> inline void FCCompute(const BlasT<DeviceContext, T>& blas, const int M, const int N, const int K, const T* X, const T* W, T* Y, const T* B = NULL, bool relu = false) { blas.MatMul(M, N, K, X, W, Y); if (B == NULL) { return; } if (relu) { const auto& vaddrelu = jitkernel::KernelPool::Instance() .template Get<jitkernel::VAddReluKernel<T>>(N); for (int i = 0; i < M; i++) { T* dst = Y + i * N; vaddrelu->Compute(B, dst, dst, N); } } else { const auto& vadd = jitkernel::KernelPool::Instance() .template Get<jitkernel::VAddKernel<T>>(N); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int i = 0; i < M; i++) { T* dst = Y + i * N; vadd->Compute(B, dst, dst, N); } } } } // namespace math } // namespace operators } // namespace paddle
GB_unop__exp_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): comments added below will be lost on regeneration; any
// lasting change belongs in the Generator/ template instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__exp_fc32_fc32)
// op(A') function:  GB (_unop_tran__exp_fc32_fc32)

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = cexpf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: single-precision complex exponential
#define GB_OP(z, x) \
    z = cexpf (x) ;

// casting (A and C share a type here, so this is a plain copy)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ; \
    Cx [pC] = cexpf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EXP || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cexpf entrywise to the anz entries of Ax, writing into Cx
// (which may alias Ax).  Ab, when non-NULL, is the bitmap of A: entries
// with Ab [p] == 0 are not present and are skipped.
GrB_Info GB (_unop_apply__exp_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full/sparse case: every entry in Ax [0..anz-1] is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = cexpf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = cexpf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is done by the shared template GB_unop_transpose.c,
// which expands using the GB_* macros defined above.
GrB_Info GB (_unop_tran__exp_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
eavlInfoTopologySparseMapOp.h
// Copyright 2010-2014 UT-Battelle, LLC.  See LICENSE.txt for more information.
#ifndef EAVL_INFO_TOPOLOGY_SPARSE_MAP_OP_H
#define EAVL_INFO_TOPOLOGY_SPARSE_MAP_OP_H

#include "eavlCUDA.h"
#include "eavlCellSet.h"
#include "eavlCellSetExplicit.h"
#include "eavlCellSetAllStructured.h"
#include "eavlDataSet.h"
#include "eavlArray.h"
#include "eavlOpDispatch.h"
#include "eavlOperation.h"
#include "eavlTopology.h"
#include "eavlException.h"
#include <time.h>
#ifdef HAVE_OPENMP
#include <omp.h>
#endif

#ifndef DOXYGEN

// Host-side executor: one OpenMP-parallel loop over the nitems dense
// indices; each dense index is translated through the sparse index array
// and the functor is applied with the cell's shape type.
template <class CONN>
struct eavlInfoTopologySparseMapOp_CPU
{
    static inline eavlArray::Location location() { return eavlArray::HOST; }
    template <class F, class IN, class OUT, class INDEX>
    static void call(int nitems, CONN &conn,
                     const IN inputs, OUT outputs, INDEX indices, F &functor)
    {
        // raw sparse-index array; indexer.index() maps the dense loop
        // variable into it
        int *sparseindices = get<0>(indices).array;
#pragma omp parallel for
        for (int denseindex = 0; denseindex < nitems; ++denseindex)
        {
            int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)];
            int shapeType = conn.GetShapeType(sparseindex);
            // NOTE(review): both the inputs AND the outputs are collected at
            // sparseindex (not denseindex) -- confirm this matches the class
            // documentation below, which says inputs/outputs are sparsely
            // indexed by the index array.
            collect(sparseindex, outputs) = functor(shapeType, collect(sparseindex, inputs));
        }
    }
};

#if defined __CUDACC__

// Device kernel: grid-stride loop over the dense indices, same per-item
// work as the CPU path above.
template <class CONN, class F, class IN, class OUT, class INDEX>
__global__ void
eavlInfoTopologySparseMapOp_kernel(int nitems, CONN conn,
                                   const IN inputs, OUT outputs, INDEX indices, F functor)
{
    int *sparseindices = get<0>(indices).array;
    const int numThreads = blockDim.x * gridDim.x;
    const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
    for (int denseindex = threadID; denseindex < nitems; denseindex += numThreads)
    {
        int sparseindex = sparseindices[get<0>(indices).indexer.index(denseindex)];
        int shapeType = conn.GetShapeType(sparseindex);
        collect(sparseindex, outputs) = functor(shapeType, collect(sparseindex, inputs));
    }
}

// Device-side executor: fixed launch configuration (32 blocks x 256
// threads); correctness for any nitems is provided by the kernel's
// grid-stride loop.
template <class CONN>
struct eavlInfoTopologySparseMapOp_GPU
{
    static inline eavlArray::Location location() { return eavlArray::DEVICE; }
    template <class F, class IN, class OUT, class INDEX>
    static void call(int nitems, CONN &conn,
                     const IN inputs, OUT outputs, INDEX indices, F &functor)
    {
        int numThreads = 256;
        dim3 threads(numThreads,   1, 1);
        dim3 blocks (32,           1, 1);
        eavlInfoTopologySparseMapOp_kernel<<< blocks, threads >>>(nitems, conn,
                                                                  inputs, outputs, indices, functor);
        CUDA_CHECK_ERROR();
    }
};

#endif

#endif

// ****************************************************************************
// Class:  eavlInfoTopologySparseMapOp
//
// Purpose:
///   Map from one element in a mesh to the same element, with
///   topological information passed along to the functor.
///   In this sparse version of the operation, the inputs on the destination
///   topology and the outputs are all sparsely indexed by the index array.
//
// Programmer:  Jeremy Meredith
// Creation:    August 1, 2013
//
// Modifications:
// ****************************************************************************
template <class I, class O, class INDEX, class F>
class eavlInfoTopologySparseMapOp : public eavlOperation
{
  protected:
    eavlCellSet *cells;       // cell set supplying the connectivity
    eavlTopology topology;    // which topological mapping to use
    I            inputs;      // input arrays (sparsely indexed)
    O            outputs;     // output arrays (sparsely indexed)
    INDEX        indices;     // dense-to-sparse index array
    F            functor;     // per-element operation
  public:
    eavlInfoTopologySparseMapOp(eavlCellSet *c, eavlTopology t,
                                I i, O o, INDEX ind, F f)
        : cells(c), topology(t), inputs(i), outputs(o), indices(ind), functor(f)
    {
    }
    // Execute on the host.  Dispatches to the CPU executor with either
    // explicit or regular (structured) connectivity, depending on the
    // dynamic type of the cell set; other cell-set types are silently
    // ignored.
    virtual void GoCPU()
    {
        eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
        eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
        // item count comes from the first output array
        int n = outputs.first.length();
        if (elExp)
        {
            eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology);
            eavlOpDispatch<eavlInfoTopologySparseMapOp_CPU<eavlExplicitConnectivity> >(n, conn, inputs, outputs, indices, functor);
        }
        else if (elStr)
        {
            eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology);
            eavlOpDispatch<eavlInfoTopologySparseMapOp_CPU<eavlRegularConnectivity> >(n, conn, inputs, outputs, indices, functor);
        }
    }
    // Execute on the GPU.  For explicit connectivity the connectivity
    // arrays are migrated to the device for the launch and back to the
    // host afterwards; throws when not compiled with CUDA.
    virtual void GoGPU()
    {
#ifdef HAVE_CUDA
        eavlCellSetExplicit *elExp = dynamic_cast<eavlCellSetExplicit*>(cells);
        eavlCellSetAllStructured *elStr = dynamic_cast<eavlCellSetAllStructured*>(cells);
        int n = outputs.first.length();
        if (elExp)
        {
            eavlExplicitConnectivity &conn = elExp->GetConnectivity(topology);

            conn.shapetype.NeedOnDevice();
            conn.connectivity.NeedOnDevice();
            conn.mapCellToIndex.NeedOnDevice();

            eavlOpDispatch<eavlInfoTopologySparseMapOp_GPU<eavlExplicitConnectivity> >(n, conn, inputs, outputs, indices, functor);

            conn.shapetype.NeedOnHost();
            conn.connectivity.NeedOnHost();
            conn.mapCellToIndex.NeedOnHost();
        }
        else if (elStr)
        {
            eavlRegularConnectivity conn = eavlRegularConnectivity(elStr->GetRegularStructure(),topology);
            eavlOpDispatch<eavlInfoTopologySparseMapOp_GPU<eavlRegularConnectivity> >(n, conn, inputs, outputs, indices, functor);
        }
#else
        THROW(eavlException,"Executing GPU code without compiling under CUDA compiler.");
#endif
    }
};

// helper function for type deduction
template <class I, class O, class INDEX, class F>
eavlInfoTopologySparseMapOp<I,O,INDEX,F> *new_eavlInfoTopologySparseMapOp(eavlCellSet *c, eavlTopology t, I i, O o, INDEX indices, F f)
{
    return new eavlInfoTopologySparseMapOp<I,O,INDEX,F>(c,t,i,o,indices,f);
}

#endif
tseb_eta.c
/*Norman and Kustas 2 source model */ /* code by Andrew French 2002 */ #include <stdio.h> #include <stddef.h> #include <stdlib.h> #include <omp.h> #include <math.h> #include <gdal.h> #include <string.h> #include "tseb_eta.h" void usage() { printf( "-----------------------------------------\n"); printf( "--Modis Processing OpenMP Code------------\n"); printf( "-----------------------------------------\n"); printf( "./eta_tseb inLst\n"); printf( "\tout_ETA_TSEB\n"); printf( "-----------------------------------------\n"); return; } int main(int argc,char *argv[]) { if( argc < 2) { usage(); return 1; } //Loading the input files names //----------------------------- char *inB1 = argv[1]; //LST char *smF = argv[2]; //Output Soil Moisture /**GDAL STUFF***************/ //Loading the input files //----------------------- GDALAllRegister(); GDALDatasetH hD1 = GDALOpen(inB1,GA_ReadOnly);//LST if(hD1==NULL){ printf("One or more input files "); printf("could not be loaded\n"); exit(1); } //Loading the file infos //---------------------- GDALDriverH hDr1 = GDALGetDatasetDriver(hD1); //Creating output file GDALDatasetH hDOut = GDALCreateCopy( hDr1, smF,hD1,FALSE,NULL,NULL,NULL); GDALRasterBandH hBOut = GDALGetRasterBand(hDOut,1); //Loading the file bands GDALRasterBandH hB1 = GDALGetRasterBand(hD1,1); int nX = GDALGetRasterBandXSize(hB1); int nY = GDALGetRasterBandYSize(hB1); int N=nX*nY; //rowxcol processing in Device Memory /* Allocate arrays on host*/ float *lst = (float*) malloc(N*sizeof(float)); float *sm = (float*) malloc(N*sizeof(float)); /* Read input files through GDAL */ GDALRasterIO(hB1,GF_Read,0,0,nX,nY,lst,nX,nY,GDT_Float32,0,0); float lst_h=0.0,lst_c=400.0; int i; #pragma omp parallel for default(none) \ private(i)\ shared(N, lst, lst_h, lst_c, sm ) for(i=0;i<N;i++){ sm[i]=0.0; if(lst[i]*0.02>=250.0&&lst[i]*0.02<345.0){ if (lst[i]*0.02>lst_h) lst_h=lst[i]*0.02; if (lst[i]*0.02<lst_c) lst_c=lst[i]*0.02; }} #pragma omp barrier printf("%f %f\n",lst_h, lst_c); meteo met 
={0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0}; tkstruct tk ={0.0,0.0,0.0,0.0}; soilabsemiss soilabs_ems ={0.0,0.0,0.0}; leafabsemiss leafabs_ems ={0.0,0.0,0.0,0.0}; radweights radwts ={0.0,0.0,0.8,0.2,0.9,0.1}; Choud choudparms ={0.0,0.0}; NDVIrng ndvi_rng ={0.0,0.0,0.0}; Cover vegcover ={0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0}; refhts Z ={0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0}; fluxstruct Flux ={0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0}; resistor resist ={0.0,0.0}; CanopyLight cpylight ={0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0}; LngWave RL ={0.0,0.0,0.0}; Albeds albedvegcan; /** PARSING INFO FILE **/ char dumrec[NUMCHAR];//dummy record used twice in the main() char infofil[NUMCHAR]; /************************************************************************************/ /* IN ORDER OF READING */ /************************************************************************************/ char inputimagefil[NUMCHAR],outputimagefil[NUMCHAR]; int numbands,numpixels,numlines; double tkscale,tkoffset; double ndviscale,ndvioffset; int numlandusetypes=200;//Number of land use types to read (initialized at 200) /*allocate space for land use data */ char **landusename = (char **) malloc(numlandusetypes*sizeof(char *)); if(landusename == NULL) { printf("Could not allocate space for landusename, exiting!\n"); exit(EXIT_FAILURE); } double *canopy_height = (double *) malloc(numlandusetypes*sizeof(double)); if(canopy_height==NULL) { printf("Could not allocate space for canopy heights, exiting!\n"); exit(EXIT_FAILURE); } int *landusecode = (int *) malloc(numlandusetypes*sizeof(int)); if(landusecode==NULL) { printf("Could not allocate space for land use codes, exiting!\n"); exit(EXIT_FAILURE); } double cG;//cG constant (g0) /************************************************************************************/ // END OF INPUT PARAMETERS TO CATCH FROM FILE 
/************************************************************************************/ char curlandusename[NUMCHAR]; int refinfo[5]; short int *invec,*outvec,*outvecz; int imageluval,imageluindex; int bct,pct,lct; int outbands = 12; int stabflag = 0; int exitflag = 0; int iternum; int condenseflag; int nodataval = 0; double tempval,esat,eslope,rsat,rhodryair; double lst,lndvi; double frac_cover,LAI; double ndviflt; double const Lmonobfinit = -1e9; double Lmonobfprev; double Re,kB; FILE *fimgp; FILE *fomgp; nodataval = NODATAVAL; /*grab control information from file designated on command line*/ if(argc < 2) { printf("you must enter a control file name!\n"); exit(EXIT_FAILURE); } strcpy(infofil,argv[1]); printf("information file is:\n %s\n",infofil); // parse_input_file(infofil); /**START PROCESSING**/ /*compute air density, vapor pressure, cp, esat, specific humidity*/ richesat_ptr(tk.air,&esat,&eslope); printf("esat is %f and eslope is %f\n",esat,eslope); met.esat = esat; met.desat_dtk = eslope; rsat = e2r(esat,met.presmbar); met.mixratio = met.rh*rsat; printf("mixing ratio is %f\n",met.mixratio); tempval = r2e(met.mixratio,met.presmbar); met.ea = tempval; printf("vapor pressure ea is %f\n",met.ea); tempval = e_mb2rhoden(met.ea,tk.air); met.vapden = 0.001*tempval; rhodryair = 0.001*rhodry(met.vapden,met.presmbar,tk.air); met.rhoair = e2rhomoist(met.ea,met.presmbar,tk.air); tempval = mixr2spechum(met.mixratio); met.spechum = tempval; tempval = cpd2cp(met.spechum); met.cp = tempval; met.lambdav = latenthtvap(k2c(tk.air)); met.gamma = gammafunc(tk.air,met.presmbar,met.cp,met.lambdav); /*Finished filling meteorological structure 'met' */ printf("air temperature(Kelvin): %f\n",tk.air); printf("met structure:\n"); printf("rh %f ea %f mixratio %f spechum %f\n",met.rh,met.ea,met.mixratio,met.spechum); printf("vapden %f rhoair %f cp %f esat %f\n",met.vapden,met.rhoair,met.cp,met.esat); /*allocate input vector */ invec = (short int *) malloc(numbands*sizeof(short int)); 
/*allocate output vector */ outvec = (short int *) malloc(outbands*sizeof(short int)); /*allocate and initialize nodata output vector */ outvecz = (short int *) malloc(outbands*sizeof(short int)); for(bct=0;bct<outbands;bct++) { outvecz[bct] = (short int) nodataval; } /*open input image file */ if((fimgp = fopen(inputimagefil,"rb"))==NULL) { printf("Cannot open input image file!!\n"); exit(EXIT_FAILURE); }; Z.d0bare = 0.01; /*open output image file */ if((fomgp = fopen(outputimagefil,"wb"))==NULL) { printf("Cannot open output image file!!\n"); exit(EXIT_FAILURE); } /*read input data, pixel by pixel */ for(lct=0;lct<numlines;lct++) { printf("=====\n"); printf("now on line: %d\n",lct+1); refinfo[0] = lct; for(pct=0;pct<numpixels;pct++) { if(pct==22 && lct == 4) { printf("stopping\n"); } refinfo[1] = pct; /* printf("now on pixel %d\n",pct+1);*/ fread(invec,sizeof(short int)*numbands,1,fimgp); refinfo[2] = invec[0]; refinfo[3] = invec[1]; refinfo[4] = invec[2]; /* printf("skipped to 300/300\n");*/ /* printf("values: %d %d\n",invec[0],invec[1]);*/ /*invec values are temperature in Kelvin*scale + offset (short int) */ /* and NDVI * scale+offset (short int) */ lst = (((double) invec[0])*tkscale+tkoffset)-273.15; /* printf("lst: %f\n",lst);*/ /* lndvi = ((double) invec[1])/10000.0;*/ lndvi = ((double) invec[1])*ndviscale+ndvioffset; /* CHECK THE NDVI RANGE INPUT */ if((lst < 0.00) || (lndvi < -0.5) || (lndvi > 1.0)) { /*bad or no data */ for(bct=0;bct<outbands;bct++) { fwrite(&nodataval,sizeof(short int),1,fomgp); } /*end of for bct loop */ } else { /*proceed with processing this pixel */ /*hard code land use for now, will need to insert landuse layer later*/ if(invec[2]==0) imageluval = 14; else imageluval = invec[2]; /*get canopy height*/ imageluindex = -1; do { imageluindex++; } while(imageluval != landusecode[imageluindex]); strcpy(dumrec,landusename[imageluindex]); sscanf(dumrec,"%s",curlandusename); /* printf("Landuse code is %d and canopy height is %f\n",*/ /* 
imageluval,canopy_height[imageluindex]);*/ /* printf("Current land use name is: %s\n",curlandusename);*/ vegcover.canopyheight = canopy_height[imageluindex]; /*compute clumping factor for given viewangle */ vegcover.clumpfactor = clump_factor(vegcover.clumpfactornadir,vegcover.canopyheight,vegcover.canopywidth,vegcover.viewangrad); /*set displacement and roughness lengths */ Z.d0 = 0.67*canopy_height[imageluindex]; Z.z0 = 0.125*canopy_height[imageluindex]; /*get fractional cover and lai values*/ ndviflt = lndvi; /*((double) invec[1])/1000.0;*/ frac_cover = frac_cover_choud(ndvi_rng.min,ndvi_rng.max,ndviflt,choudparms.p); LAI = LAI_choudfunc(frac_cover,choudparms.Beta); /* printf("fractional cover: %f LAI: %f\n",frac_cover,LAI);*/ vegcover.frac = frac_cover; vegcover.LAI = LAI; /*Set visible light diffusion paramter according to LAI value */ if(vegcover.LAI < 0.5) radwts.Kd = 0.9; else if(vegcover.LAI > 2.0) radwts.Kd = 0.7; else radwts.Kd = 0.6; /*copy remote sensing temperature to tk.composite member */ tk.composite = lst+273.15; /*((double) invec[0])/10.0; */ /*initialize canopy temperature */ if(vegcover.frac < 0.5) tk.canopy = 0.5*(tk.air+tk.composite); else tk.canopy = tk.air; /*initialize soil temperature */ if(vegcover.frac < 0.8) component_tempk_soil(vegcover.frac,&tk); /*if cover is thick, cant rely on getting good soil temperature */ /* from repartitioning tir data, so set it equal to composite value */ else tk.soil = tk.composite; /*guess stability condition */ /*set stabflag to 1 for unstable conditions */ Z.L = Lmonobfinit; if(tk.composite > tk.air) stabflag = 1; else stabflag = 0; /*if unstable conditions, compute Businger-Dyer psi and phi functions */ /* otherwise zero out the phi functions*/ /* printf("Computing stability correction functions.\n");*/ /* phiandpsi(&Z,&vegcover);*/ if(stabflag == 1) xandpsi(&Z,&vegcover); else stabphipsi(&Z); /*compute light conditions if there is a canopy */ /* select % cover as threshold */ /* printf("fractional 
cover: %f\n",vegcover.frac);*/ if(vegcover.frac > 0.1) { /*compute soil and canopy albedo values */ /*first compute canopy reflectivities and transmissivities*/ /* for vis and nir under direct and diffuse light */ canopyrho(&met,&vegcover,&radwts,&leafabs_ems,&soilabs_ems,&cpylight); /*compute reflectivity of soil and canopy*/ /* also canopy transmissivity */ rhosoil2albedo(&soilabs_ems,&albedvegcan); rhocpy2albedo(&cpylight,&albedvegcan,&radwts); } else { /* compute light conditions where no canopy exists */ /*set canopy reflectivitie to zero and transmissivities to 1.0 */ /* printf("light where no canopy\n"); */ cpylight.rhovisdir = 0.0; cpylight.rhonirdir = 0.0; cpylight.rhovisdif = 0.0; cpylight.rhonirdif = 0.0; cpylight.tauvisdir = 1.0; cpylight.taunirdir = 1.0; cpylight.tauvisdif = 1.0; cpylight.taunirdif = 1.0; cpylight.tautir = 1.0; rhosoil2albedo(&soilabs_ems,&albedvegcan); } /*end of else */ /*BEGIN ITERATION FOR UNSTABLE CONDITIONS */ iternum = 0; exitflag = 0; /* printf("curlandusename: %s\n",curlandusename);*/ while(exitflag == 0) { /* || stabflag == 1) {*/ iternum++; /* printf("iternum is %d\n",iternum);*/ /* printf("value of if: %d\n",strcmp(curlandusename,"bare"));*/ if((strcmp(curlandusename,"bare")==0) || (strcmp(curlandusename,"urban")==0) || (strcmp(curlandusename,"water")==0) || (invec[1] < -1000) || (ndviflt <= ndvi_rng.baresoil)) { /*if true then have a one layer case */ /*could be bare soil, urban or water */ /* printf("in one layer routine.\n"); */ /*compute wind speed and air resistance */ getwindbare(&met,&Z,&resist,&tk); /* printf("for bare, resist.air %f resist.soil %f\n",resist.air,resist.soil);*/ /*compute turbulent fluxes from bare soil; also check for condensation*/ onelayer(&Flux,&tk,cG,&met,&albedvegcan,&soilabs_ems, &leafabs_ems,&resist, &RL); /* exitflag = 1;*/ } else { /*in this case there are two layers, soil and canopy */ /*compute resistance of air, wind speeds at canopy top and soil surface */ /* printf("in two layer 
routine.\n");*/ getwind(&met,&Z,&vegcover,&resist); /* printf("two layers: resist.air %f resist.soil: %f\n",resist.air,resist.soil);*/ /* printf("met.usoil: %f met.windspeed: %f met.ustar: %f\n",met.usoil,met.windspeed,met.ustar);*/ /*compute long wave radiation from soil, sky and canopy */ getrls(&met,&tk,&soilabs_ems,&leafabs_ems,&RL); /*compute net radiation components and soil flux */ getRnG(&cpylight,&albedvegcan,&RL,&met,&Flux,cG,&vegcover,refinfo); printf("RL.soil %f RL.air %f RL.canopy %f\n",RL.soil,RL.air, RL.canopy); printf("Rn: %f Rncanopy: %f Rnsoil: %f\n",Flux.Rntotal,Flux.Rncanopy,Flux.Rnsoil); /*compute turbulent flux components */ twolayer(&tk,&Flux,&met,&resist,&vegcover); /*check for computed condensation condition */ /* if it exist,it is unrealistic, dont allow */ /*force LE to be at least zero on soil and canopy */ condenseflag = nocondense(&Flux,&tk,&resist,&met,&vegcover); } /*end of else */ /* sum soil and canopy fluxes of each kind */ Flux.Htotal = (Flux.Hsoil) + (Flux.Hcanopy); Flux.LEtotal = (Flux.LEsoil) + (Flux.LEcanopy); /*revise Monin-Obukhov stability length */ Lmonobfprev = Z.L; monobfs(&met,&tk,&Flux,&Z); /* printf("Z.L : %f Z.Lprevious: %f\n",Z.L,Lmonobfprev);*/ /*check if iteration still needed */ /* printf("Ldiff: %f\n",Z.L-Lmonobfprev);*/ /* printf("current exitflag value is %d\n",exitflag);*/ if((((Z.L)-Lmonobfprev ) < 0.001) && ((Z.L)-Lmonobfprev) > -0.001) exitflag = 1; /* printf("exitflag set to one, small difference\n");*/ else { if((Z.L) > 0.0 && (Lmonobfprev > 0.0)) { exitflag = 1; /* printf("set exitflag to one since both L's positive\n");*/ } else { if(iternum > MAXITER) { exitflag = 1; /* printf("set exitflag to one, exceeded MAXITER \n");*/ } else { exitflag = 0; xandpsi(&Z,&vegcover); } /*end of last if */ } /*end of second else */ } /*end of first else */ /* printf("end of while loop, exitflag is: %d\n",exitflag);*/ } /*end of while */ /*Do stable conditions */ if(stabflag == 0) { /*set Psi values to zero */ 
/*stable case doesnt need iteration */ stabphipsi(&Z); /*need to consider both two layer and one layer cases */ if((strcmp(curlandusename,"bare")==0) || (strcmp(curlandusename,"urban")==0)) { /*if true then have a one layer case */ /*could be bare soil or urban, do water differently */ /*compute wind speed and air resistance */ getwindbare(&met,&Z,&resist,&tk); /*compute turbulent fluxes from bare soil; also check for condensation;*/ onelayer(&Flux,&tk,cG,&met,&albedvegcan,&soilabs_ems, &leafabs_ems,&resist, &RL); } else { if(strcmp(curlandusename,"water")==0) { /*open water case */ getwindbare(&met,&Z,&resist,&tk); /*compute G and Rn for open water */ radiatewaters(&tk,&Flux,&met,&albedvegcan); /*compute Reynolds number for water */ Re = reynolds(&tk,&met,&Z); Z.z0h = (Z.z0)*7.48*exp(-2.46*pow(Re,0.25)); kB = log((Z.z0)/(Z.z0h)); resist.air = ((log(((Z.t)-(Z.d0))/(Z.z0)))+kB)/((met.ustar)*VONKARMAN); hfluxresist_soil(&tk,&resist,&Flux,&met); Flux.LEsoil = (Flux.Rnsoil)-(Flux.G)-(Flux.Hsoil); Flux.Hcanopy = 0.0; Flux.LEcanopy = 0.0; Flux.Htotal = Flux.Hsoil; Flux.LEtotal = Flux.LEsoil; /* N.B.-- member designation 'soil' here means 'water'!! 
*/ /*end of open water case */ } else { /*do stable two layer case */ /*computation is same as for unstable two layer case, except */ /* that the stability functions are not used */ getwind(&met,&Z,&vegcover,&resist); /*compute long wave radiation from soil, sky and canopy */ getrls(&met,&tk,&soilabs_ems,&leafabs_ems,&RL); getRnG(&cpylight,&albedvegcan,&RL,&met,&Flux,cG,&vegcover,refinfo); twolayer(&tk,&Flux,&met,&resist,&vegcover); condenseflag = nocondense(&Flux,&tk,&resist,&met,&vegcover); /*end of stable two layer case */ } } } /* end of if stabflag==0, ie stable conditions exist */ outvec[0] = (short int) Flux.Hsoil; outvec[1] = (short int) Flux.Hcanopy; outvec[2] = (short int) Flux.LEsoil; outvec[3] = (short int) Flux.LEcanopy; outvec[4] = (short int) Flux.G; outvec[5] = (short int) Flux.Rntotal; if(lct==22 && pct==4) { printf("Flux.Rntotal: %f lct: %d pct: %d\n",Flux.Rntotal,lct,pct); exit(1); } outvec[6] = (short int) invec[0]; outvec[7] = (short int) invec[1]; outvec[8] = (short int) imageluval; outvec[9] = (short int) (vegcover.LAI*1000); outvec[10] = (short int) resist.air; outvec[11] = (short int) resist.soil; fwrite(outvec,sizeof(short int)*outbands,1,fomgp); Z.L = Lmonobfinit ; for(bct=0;bct<outbands;bct++) { outvec[bct] = outvecz[bct]; } /*end of reset outvec to outvecz */ } /*end of else condition where data are deemed good */ } /*end of pct loop */ }/*end of lct loop */ fclose(fimgp); fclose(fomgp); printf("wrote:\n%s\n",outputimagefil); printf("Bands: %d Pixels: %d Lines: %d\n",outbands,numpixels,numlines); return(EXIT_SUCCESS); } /*end of main() */
GB_unop__identity_bool_uint8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): comments added below will be lost on regeneration; any
// lasting change belongs in the generator template instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_bool_uint8)
// op(A') function:  GB (_unop_tran__identity_bool_uint8)

// C type:   bool
// A type:   uint8_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: identity (the cast below does the uint8_t -> bool work)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    bool z = (bool) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    bool z = (bool) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (0 here: uint8_t -> bool does typecast, so the memcpy fast path below
// is compiled out)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Casts the anz entries of Ax (uint8_t) into Cx (bool).  Ab, when
// non-NULL, is the bitmap of A; entries with Ab [p] == 0 are skipped.
GrB_Info GB (_unop_apply__identity_bool_uint8)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identical types: a flat parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
        #else
        // general case: cast each entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            bool z = (bool) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual work is done by the shared template GB_unop_transpose.c,
// which expands using the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_bool_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Example_tasking.17.c
/*
 * @@name: tasking.17c
 * @@type: C
 * @@compilable: yes
 * @@linkable: yes
 * @@expect: success
 * @@version: omp_4.0
 */
/* Demonstrates task ordering via the depend clause: two tasks both
 * declare depend(out: x), and an "out" dependence following another
 * "out" on the same list item serializes the tasks in creation order.
 * The second task therefore runs after the first, and after the
 * taskwait this program always prints "x = 2". */
#include <stdio.h>
int main() {
   int x;
   #pragma omp parallel
   #pragma omp single
   {
      /* first task: generated first, so it runs first */
      #pragma omp task shared(x) depend(out: x)
          x = 1;
      /* second task: out-after-out dependence on x forces it to wait
       * for the first task */
      #pragma omp task shared(x) depend(out: x)
          x = 2;
      /* wait for both child tasks before reading x */
      #pragma omp taskwait
      printf("x = %d\n", x);
   }
   return 0;
}
GB_binop__second_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__second_int32) // A.*B function (eWiseMult): GB (_AemultB_08__second_int32) // A.*B function (eWiseMult): GB (_AemultB_02__second_int32) // A.*B function (eWiseMult): GB (_AemultB_04__second_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__second_int32) // A*D function (colscale): GB (_AxD__second_int32) // D*A function (rowscale): GB (_DxB__second_int32) // C+=B function (dense accum): GB (_Cdense_accumB__second_int32) // C+=b function (dense accum): GB (_Cdense_accumb__second_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_int32) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: int32_t // A type: int32_t // A pattern? 1 // B type: int32_t // B pattern? 
0 // BinaryOp: cij = bij #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ ; // true if values of A are not used #define GB_A_IS_PATTERN \ 1 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = y ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 1 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SECOND || GxB_NO_INT32 || GxB_NO_SECOND_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__second_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__second_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__second_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__second_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t 
*restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__second_int32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__second_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int32_t alpha_scalar ; int32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int32_t *) alpha_scalar_in)) ; beta_scalar = (*((int32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__second_int32) ( GrB_Matrix C, const int C_sparsity, 
const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__second_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__second_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__second_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = bij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = y ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = y ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
pi.c
#include <stdio.h>
#include <omp.h>

/* Number of rectangles used in the midpoint-rule quadrature. */
static long num_steps = 100000;

/* Rectangle width and the final estimate of pi (file scope, as before). */
double step, pi;

/*
 * Estimate pi by integrating 4/(1+x^2) over [0,1] with the midpoint rule.
 * The quadrature loop is parallelized with an OpenMP sum reduction; each
 * iteration's midpoint is a loop-local variable, so no private clause is
 * needed.
 */
int main() {
    double sum = 0.0;

    step = 1.0 / (double) num_steps;

#pragma omp parallel for reduction(+:sum)
    for (long i = 0; i < num_steps; i++) {
        double x = ((double) i + 0.5) * step;
        sum += 4.0 / (1.0 + x * x);
    }

    pi = step * sum;
    printf("Pi = %f\n", pi);
    return 0;
}
GB_unaryop__identity_uint32_int8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__identity_uint32_int8
// op(A') function: GB_tran__identity_uint32_int8

// C type: uint32_t
// A type: int8_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij

// type of the entries of the input matrix A
#define GB_ATYPE \
    int8_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

// entry of C at position p
#define GB_CX(p) Cx [p]

// unary operator (identity: z receives x unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting (int8_t widened to uint32_t; follows the C rules for conversion
// to an unsigned integer type)
#define GB_CASTING(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (cast (aij)): read one entry of A, cast it, apply the operator,
// and store the result into C
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator elementwise: Cx [p] = (uint32_t) Ax [p] for all p in
// [0, anz).  The loop is parallelized with OpenMP using a static schedule
// over nthreads threads.  Aliasing Cx == Ax is safe here because iteration p
// reads and writes only index p (GB_CAST_OP (p, p)).
// Returns GrB_SUCCESS, or GrB_NO_VALUE when this operator is compiled out
// via GB_DISABLE.
GrB_Info GB_unop__identity_uint32_int8
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    int8_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Thin type-specific wrapper: the actual transpose/typecast work is in the
// GB_unaryop_transpose.c template (not visible in this file), which consumes
// the GB_* macros defined above.  Returns GrB_SUCCESS, or GrB_NO_VALUE when
// the operator is compiled out via GB_DISABLE.
GrB_Info GB_tran__identity_uint32_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_pack1to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convolution_pack1to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; // kernel offsets std::vector<int> _space_ofs(maxk); int* space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { int* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { int32x4_t _sum0 = vdupq_n_s32(0); const signed char* kptr = weight_data_int8.channel(p); // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w; for (int k = 0; k < maxk; k++) { int8x8_t _val = vdup_n_s8(sptr[space_ofs[k]]); int8x8_t _w = vld1_s8(kptr); int16x8_t _s0 = vmull_s8(_val, _w); _sum0 
= vaddw_s16(_sum0, vget_low_s16(_s0)); kptr += 8; } } vst1q_s32(outptr + j * 4, _sum0); } outptr += outw * 4; } } }
GB_binop__lt_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__lt_uint16 // A.*B function (eWiseMult): GB_AemultB__lt_uint16 // A*D function (colscale): GB_AxD__lt_uint16 // D*A function (rowscale): GB_DxB__lt_uint16 // C+=B function (dense accum): GB_Cdense_accumB__lt_uint16 // C+=b function (dense accum): GB_Cdense_accumb__lt_uint16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lt_uint16 // C=scalar+B GB_bind1st__lt_uint16 // C=scalar+B' GB_bind1st_tran__lt_uint16 // C=A+scalar GB_bind2nd__lt_uint16 // C=A'+scalar GB_bind2nd_tran__lt_uint16 // C type: bool // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x < y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_UINT16 || GxB_NO_LT_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__lt_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__lt_uint16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__lt_uint16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__lt_uint16 ( GrB_Matrix 
C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__lt_uint16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__lt_uint16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = 
NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__lt_uint16 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__lt_uint16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = Bx [p] ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd 
//------------------------------------------------------------------------------ GrB_Info GB_bind2nd__lt_uint16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = Ax [p] ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB_bind1st_tran__lt_uint16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = Ax [pA] ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB_bind2nd_tran__lt_uint16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__lt_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lt_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__lt_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__lt_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__lt_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_fp32) // A*D function (colscale): GB (_AxD__lt_fp32) // D*A function (rowscale): GB (_DxB__lt_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__lt_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__lt_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_fp32) // C=scalar+B GB (_bind1st__lt_fp32) // C=scalar+B' GB (_bind1st_tran__lt_fp32) // C=A+scalar GB (_bind2nd__lt_fp32) // C=A'+scalar GB (_bind2nd_tran__lt_fp32) // C type: bool // A type: float // A pattern? 0 // B type: float // B pattern? 
0 // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LT || GxB_NO_FP32 || GxB_NO_LT_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__lt_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__lt_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__lt_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__lt_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool 
// NOTE(review): auto-generated SuiteSparse:GraphBLAS kernels for the binary
// operator z = (x < y) on fp32 inputs with a bool result (visible in the
// bind1st/bind2nd bodies below).  Each kernel body is stamped out by the
// #include'd *_template.c file; edits belong in the code generator, not here.

// (tail of the preceding column-scale kernel, C = A*D; its signature is
// above this chunk)
*restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C->x holds the bool result of the comparison
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lt_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are assigned only in the eWiseUnion case
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lt_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lt_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lt_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    // GBB skips entries absent from the bitmap; GBX fetches B(i,j)
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lt_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    float aij = GBX (Ax, pA, false) ;       \
    Cx [pC] = (x < aij) ;                   \
}

GrB_Info GB (_bind1st_tran__lt_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (generator artifact: redefines to the same type)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    float aij = GBX (Ax, pA, false) ;       \
    Cx [pC] = (aij < y) ;                   \
}

GrB_Info GB (_bind2nd_tran__lt_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Volumes.h
#pragma once #include <iostream> #include <iomanip> #include <string> #include <sstream> #include <fstream> #include "UniformGrid.h" namespace cpp_mc { /** * Use boolean operation to construct implicite surfaces * Given two sets A, B * F(intersection(A,B)) = MAX(A,B) * F(union(A,B)) = MIN(A,B) * F(subtraction(A,B)) = MAX(A,-B) */ class Volumes { public: using uchar = unsigned char; using ushort = unsigned short; using uint = unsigned int; using Vertex = cpp_mc::Vector; using Point = cpp_mc::Vector; using Normal = cpp_mc::Vector; using UGrid = cpp_mc::UniformGrid; using Index = UGrid::Index; using BBox = UGrid::BBox; public: // surface cases that can be computed with this class enum class Surface { Sphere, Torus, TwoHoledTorus, MonkeySaddle, GenusTwo, iWP, Neovius, SternerRoman }; public: // read uniform grid from file template<typename T> void readFromFile(const std::string& i_file, UGrid& ugrid) { std::ifstream ifile; ifile.open(i_file, std::ios::binary); if (!ifile.is_open()) { exit(1); } int nx, ny, nz; float dx, dy, dz; ifile.read(reinterpret_cast<char*>(&nx), sizeof(int)); ifile.read(reinterpret_cast<char*>(&ny), sizeof(int)); ifile.read(reinterpret_cast<char*>(&nz), sizeof(int)); ifile.read(reinterpret_cast<char*>(&dx), sizeof(float)); ifile.read(reinterpret_cast<char*>(&dy), sizeof(float)); ifile.read(reinterpret_cast<char*>(&dz), sizeof(float)); double xmax = static_cast<double>(dx * (nx - 1)); double ymax = static_cast<double>(dy * (ny - 1)); double zmax = static_cast<double>(dz * (nz - 1)); BBox bbox; bbox[0] = Point{ 0, 0, 0 }; bbox[1] = Point{ xmax, 0, 0 }; bbox[2] = Point{ 0, ymax, 0 }; bbox[3] = Point{ xmax, ymax, 0 }; bbox[4] = Point{ 0, 0, zmax }; bbox[5] = Point{ xmax, 0, zmax }; bbox[6] = Point{ 0, ymax, zmax }; bbox[7] = Point{ xmax, ymax, zmax }; ugrid.init(nx, ny, nz, bbox, 0); ugrid.set_dx(dx); ugrid.set_dx(dz); ugrid.set_dx(dy); size_t size_ = static_cast<size_t>(nx) * static_cast<size_t>(ny) * static_cast<size_t>(nz); std::vector<double> 
v_data(size_); //ushort* t_buff = new ushort[size_]; std::vector<T> t_buff(size_); ifile.read(reinterpret_cast<char*>(&t_buff[0]), size_ * sizeof(T)); ifile.close(); for (int k = 0; k < nz; k++) { for (int j = 0; j < ny; j++) { { #pragma omp parallel for for (int i = 0; i < nx; i++) { ugrid.scalar(i, j, k, static_cast<double>(t_buff[k*ny*nx + j*nx + i])); } } } } // compute gradient for shading purpose ugrid.estimateGradient(); ugrid.flip_gradient(); } // computes the scalar values of the implicit function template<Surface T> void scalar(UGrid& ugrid, const int nx, const int ny, const int nz) { // center volume in [-1,1]^3 initUGrid(ugrid, nx, ny, nz); const double minX = ugrid.minX(); const double minY = ugrid.minX(); const double minZ = ugrid.minX(); const double dx = ugrid.dx(); const double dy = ugrid.dy(); const double dz = ugrid.dz(); double x = minX; for (int i = 0; i < ugrid.x_size(); i++) { double y = minY; for (int j = 0; j < ugrid.y_size(); j++) { { double z = minZ; for (int k = 0; k < ugrid.z_size(); k++) { //ugrid.scalar(i, j, k, x * x + y * y + z * z); ugrid.scalar(i, j, k, surface<T>(x, y, z)); z += dz; } } y += dy; } x += dx; } // compute gradient for shading purpose ugrid.estimateGradient(); ugrid.flip_gradient(); }; template<Surface T> double surface(const double x, const double y, const double z) { return x * x + y * y + z * z; } private: void initUGrid(UGrid& ugrid, const int nx, const int ny, const int nz) { BBox bb; bb[0] = { -1,-1,-1 }; bb[1] = { 1,-1,-1 }; bb[2] = { -1, 1,-1 }; bb[3] = { 1, 1,-1 }; bb[4] = { -1,-1, 1 }; bb[5] = { 1,-1, 1 }; bb[6] = { -1, 1, 1 }; bb[7] = { 1, 1, 1 }; ugrid.init(nx, ny, nz, bb); } double square(const double x) { return x * x; } double pi{ 3.14159265358979323846 }; }; template<> double Volumes::surface<Volumes::Surface::Sphere>(const double x, const double y, const double z) { return x * x + y * y + z * z; } template<> double Volumes::surface<Volumes::Surface::Torus>(const double x, const double y, const double 
z) { const double R = 0.6 * 0.6; const double r = 0.3 * 0.3; double val = (x * x + y * y + z * z + R - r); val = val * val; val = val - 4 * R * (x * x + y * y); return val; } template<> double Volumes::surface<Volumes::Surface::TwoHoledTorus>(const double x, const double y, const double z) { // center one torus at (-1/2,0,0), the other at (1/2,0,0) const double R = square(0.4); const double r = square(0.2); const double x1 = x + 0.4; const double x2 = x - 0.4; double val1 = square((square(x1) + square(y) + square(z) + R - r)); val1 = val1 - 4 * R * (square(x1) + square(y)); double val2 = square((square(x2) + square(y) + square(z) + R - r)); val2 = val2 - 4 * R * (square(x2) + square(y)); return std::min(val1, val2); } template<> double Volumes::surface<Volumes::Surface::MonkeySaddle>(const double x_, const double y_, const double z_) { const double alpha = 0.5; const double x = alpha * x_; const double y = alpha * y_; const double z = alpha * z_; return z - x * x * x - 3 * x * y * y; } template<> double Volumes::surface<Volumes::Surface::GenusTwo>(const double x_, const double y_, const double z_) { double alpha = 1.0; double x = (x_ + 1.0) / 2.0; double y = (y_ + 1.0) / 2.0; double z = (z_ + 1.0) / 2.0; x = alpha * (4 * x - 2); y = alpha * (4 * y - 2); z = alpha * (4 * z - 2); double val = 2 * y * (y * y - 3 * x * x) * (1 - z * z) + (x * x + y * y) * (x * x + y * y) - (9 * z * z - 1) * (1 - z * z); return val; } template<> double Volumes::surface<Volumes::Surface::iWP>(const double x_, const double y_, const double z_) { const float alpha = 5.01; //const float alpha = 1.01; const float x = alpha * (x_ + 1) * pi; const float y = alpha * (y_ + 1) * pi; const float z = alpha * (z_ + 1) * pi; return cos(x) * cos(y) + cos(y) * cos(z) + cos(z) * cos(x) - cos(x) * cos(y) * cos(z); // iso-value = 0 } template<> double Volumes::surface<Volumes::Surface::Neovius>(const double x_, const double y_, const double z_) { const float alpha = 1; const float x = alpha * (x_ + 1) * 
pi; const float y = alpha * (y_ + 1) * pi; const float z = alpha * (z_ + 1) * pi; return 3 * (cos(x) + cos(y) + cos(z)) + 4 * cos(x) * cos(y) * cos(z); // iso_value = 0.0 } template<> double Volumes::surface<Volumes::Surface::SternerRoman>(const double x_, const double y_, const double z_) { const float alpha = 1.5f; const float x = alpha * x_; const float y = alpha * y_; const float z = alpha * z_; auto sq = [](const double v) { return v * v; }; return sq(x * x + y * y + z * z - 1.0f) - (sq(z - 1) - 2.0f * x * x) * (sq(z + 1) - 2 * y * y); } } // namespace cpp_mc
GB_unop__identity_int64_int16.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__identity_int64_int16
// op(A') function: GB_unop_tran__identity_int64_int16

// C type: int64_t
// A type: int16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int64_t z = (int64_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    int16_t aij = Ax [pA] ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    int64_t z = (int64_t) aij ;     \
    Cx [pC] = z ;                   \
}

// true if operator is the identity op with no typecasting
// (0 here: int16 -> int64 requires a cast, so the memcpy fast path
// below is compiled out for this type pair)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_int64_int16
(
    int64_t *Cx,                    // Cx and Ax may be aliased
    const int16_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry 0..anz-1 is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // NOTE(review): dead for this type pair (macro is 0 above); if ever
        // enabled the copy size uses the A type, which the macro guarantees
        // matches the C type
        GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int16_t aij = Ax [p] ;
            int64_t z = (int64_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int16_t aij = Ax [p] ;
            int64_t z = (int64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_int64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body supplied by the shared transpose template, driven by the
    // GB_CAST_OP macro defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
nvptx_target_printf_codegen.c
// Test target codegen - host bc file has to be created first. // RUN: %clang_cc1 -verify -fopenmp -x c -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc // RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64 // RUN: %clang_cc1 -verify -fopenmp -x c -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc // RUN: %clang_cc1 -verify -fopenmp -x c -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32 #include <stdarg.h> // expected-no-diagnostics extern int printf(const char *, ...); extern int vprintf(const char *, va_list); // Check a simple call to printf end-to-end. // CHECK: [[SIMPLE_PRINTF_TY:%[a-zA-Z0-9_]+]] = type { i32, i64, double } int CheckSimple() { // CHECK: define {{.*}}void [[T1:@__omp_offloading_.+CheckSimple.+]]_worker() #pragma omp target { // Entry point. // CHECK: define {{.*}}void [[T1]]() // Alloca in entry block. 
// CHECK: [[BUF:%[a-zA-Z0-9_]+]] = alloca [[SIMPLE_PRINTF_TY]] // CHECK: {{call|invoke}} void [[T1]]_worker() // CHECK: br label {{%?}}[[EXIT:.+]] // // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]] // // CHECK: [[MASTER]] // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: [[MTMP1:%.+]] = sub i32 [[MNTH]], [[MWS]] // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]] // printf in master-only basic block. // CHECK: [[FMT:%[0-9]+]] = load{{.*}}%fmt const char* fmt = "%d %lld %f"; // CHECK: [[PTR0:%[0-9]+]] = getelementptr inbounds [[SIMPLE_PRINTF_TY]], [[SIMPLE_PRINTF_TY]]* [[BUF]], i32 0, i32 0 // CHECK: store i32 1, i32* [[PTR0]], align 4 // CHECK: [[PTR1:%[0-9]+]] = getelementptr inbounds [[SIMPLE_PRINTF_TY]], [[SIMPLE_PRINTF_TY]]* [[BUF]], i32 0, i32 1 // CHECK: store i64 2, i64* [[PTR1]], align 8 // CHECK: [[PTR2:%[0-9]+]] = getelementptr inbounds [[SIMPLE_PRINTF_TY]], [[SIMPLE_PRINTF_TY]]* [[BUF]], i32 0, i32 2 // CHECK: store double 3.0{{[^,]*}}, double* [[PTR2]], align 8 // CHECK: [[BUF_CAST:%[0-9]+]] = bitcast [[SIMPLE_PRINTF_TY]]* [[BUF]] to i8* // CHECK: [[RET:%[0-9]+]] = call i32 @vprintf(i8* [[FMT]], i8* [[BUF_CAST]]) printf(fmt, 1, 2ll, 3.0); } return 0; } void CheckNoArgs() { // CHECK: define {{.*}}void [[T2:@__omp_offloading_.+CheckNoArgs.+]]_worker() #pragma omp target { // Entry point. 
// CHECK: define {{.*}}void [[T2]]() // CHECK: {{call|invoke}} void [[T2]]_worker() // CHECK: br label {{%?}}[[EXIT:.+]] // // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]] // // CHECK: [[MASTER]] // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: [[MTMP1:%.+]] = sub i32 [[MNTH]], [[MWS]] // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]] // printf in master-only basic block. // CHECK: call i32 @vprintf({{.*}}, i8* null){{$}} printf("hello, world!"); } } // Check that printf's alloca happens in the entry block, not inside the if // statement. int foo; void CheckAllocaIsInEntryBlock() { // CHECK: define {{.*}}void [[T3:@__omp_offloading_.+CheckAllocaIsInEntryBlock.+]]_worker() #pragma omp target { // Entry point. // CHECK: define {{.*}}void [[T3]]( // Alloca in entry block. // CHECK: alloca %printf_args // CHECK: {{call|invoke}} void [[T3]]_worker() // CHECK: br label {{%?}}[[EXIT:.+]] // // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]], // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]] // // CHECK: [[MASTER]] // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() // CHECK: [[MTMP1:%.+]] = sub i32 [[MNTH]], [[MWS]] // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]] if (foo) { printf("%d", 42); } } }
Pragma.h
//===--- Pragma.h - Pragma registration and handling ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PragmaHandler and PragmaTable interfaces.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LEX_PRAGMA_H
#define LLVM_CLANG_LEX_PRAGMA_H

#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>

namespace clang {
  class Preprocessor;
  class Token;
  class IdentifierInfo;
  class PragmaNamespace;

  /**
   * \brief Describes how the pragma was introduced, e.g., with \#pragma,
   * _Pragma, or __pragma.
   */
  enum PragmaIntroducerKind {
    /**
     * \brief The pragma was introduced via \#pragma.
     */
    PIK_HashPragma,

    /**
     * \brief The pragma was introduced via the C99 _Pragma(string-literal).
     */
    PIK__Pragma,

    /**
     * \brief The pragma was introduced via the Microsoft
     * __pragma(token-string).
     */
    PIK___pragma
  };

/// PragmaHandler - Instances of this interface defined to handle the various
/// pragmas that the language front-end uses.  Each handler optionally has a
/// name (e.g. "pack") and the HandlePragma method is invoked when a pragma with
/// that identifier is found.  If a handler does not match any of the declared
/// pragmas the handler with a null identifier is invoked, if it exists.
///
/// Note that the PragmaNamespace class can be used to subdivide pragmas, e.g.
/// we treat "\#pragma STDC" and "\#pragma GCC" as namespaces that contain other
/// pragmas.
class PragmaHandler {
  // Pragma identifier this handler responds to; empty for the catch-all
  // (null) handler.
  std::string Name;
public:
  explicit PragmaHandler(StringRef name) : Name(name) {}
  // Default constructor creates the null-named (catch-all) handler.
  PragmaHandler() {}
  virtual ~PragmaHandler();

  StringRef getName() const { return Name; }
  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                            Token &FirstToken) = 0;

  /// getIfNamespace - If this is a namespace, return it.  This is equivalent to
  /// using a dynamic_cast, but doesn't require RTTI.
  virtual PragmaNamespace *getIfNamespace() { return nullptr; }
};

/// EmptyPragmaHandler - A pragma handler which takes no action, which can be
/// used to ignore particular pragmas.
class EmptyPragmaHandler : public PragmaHandler {
public:
  EmptyPragmaHandler();

  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                    Token &FirstToken) override;
};

/// PragmaNamespace - This PragmaHandler subdivides the namespace of pragmas,
/// allowing hierarchical pragmas to be defined.  Common examples of namespaces
/// are "\#pragma GCC", "\#pragma STDC", and "\#pragma omp", but any namespaces
/// may be (potentially recursively) defined.
class PragmaNamespace : public PragmaHandler {
  /// Handlers - This is a map of the handlers in this namespace with their name
  /// as key.
  /// NOTE(review): handlers are stored as raw pointers; the deletion policy
  /// lives in the implementation file (not visible here) — confirm ownership
  /// before adding stack-allocated handlers.
  llvm::StringMap<PragmaHandler*> Handlers;
public:
  explicit PragmaNamespace(StringRef Name) : PragmaHandler(Name) {}
  virtual ~PragmaNamespace();

  /// FindHandler - Check to see if there is already a handler for the
  /// specified name.  If not, return the handler for the null name if it
  /// exists, otherwise return null.  If IgnoreNull is true (the default) then
  /// the null handler isn't returned on failure to match.
  PragmaHandler *FindHandler(StringRef Name,
                             bool IgnoreNull = true) const;

  /// AddPragma - Add a pragma to this namespace.
  ///
  void AddPragma(PragmaHandler *Handler);

  /// RemovePragmaHandler - Remove the given handler from the
  /// namespace.
  void RemovePragmaHandler(PragmaHandler *Handler);

  // True when this namespace currently has no registered handlers.
  bool IsEmpty() {
    return Handlers.empty();
  }

  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                    Token &FirstToken) override;

  PragmaNamespace *getIfNamespace() override { return this; }
};

}  // end namespace clang

#endif
lock.c
/* Copyright (c) 2015-2019, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Simone Atzeni (simone@cs.utah.edu), Joachim Protze (joachim.protze@tu-dresden.de), Jonas Hahnfeld (hahnfeld@itc.rwth-aachen.de), Ganesh Gopalakrishnan, Zvonimir Rakamaric, Dong H. Ahn, Gregory L. Lee, Ignacio Laguna, and Martin Schulz. LLNL-CODE-773957 All rights reserved. This file is part of Archer. For details, see https://pruners.github.io/archer. Please also read https://github.com/PRUNERS/archer/blob/master/LICENSE. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. 
DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
   INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
   OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

// RUN: %libarcher-compile-and-run | FileCheck %s
#include <omp.h>
#include <stdio.h>

// Race-free test case for the archer data-race detector: two threads
// increment a shared counter, each increment guarded by an OpenMP lock,
// so no race should be reported and the final value must be 2.
int main(int argc, char* argv[])
{
  int var = 0;

  omp_lock_t lock;
  omp_init_lock(&lock);

#pragma omp parallel num_threads(2) shared(var)
  {
    // critical section: the lock serializes the increments
    omp_set_lock(&lock);
    var++;
    omp_unset_lock(&lock);
  }

  omp_destroy_lock(&lock);
  fprintf(stderr, "DONE\n");
  // exit status 0 only if both increments happened
  int error = (var != 2);
  return error;
}

// CHECK: DONE
j3d27pt.c
#define BENCH_DIM 3
// flops per grid point: 27 multiplies + 26 adds + 1 divide = 54
#define BENCH_FPP 54
// stencil radius (one-cell halo on every face)
#define BENCH_RAD 1

#include "common.h"

// 27-point, radius-1 Jacobi stencil over a cubic domain.
// A1 holds two time planes of size dimsize^3 each; the update reads plane
// t%2 and writes plane (t+1)%2 (double buffering).  compsize is the interior
// size; the halo cells are read but never written.  The scop branch is the
// region a polyhedral tool (e.g. PLUTO) transforms; the else branch is the
// plain OpenMP reference.
// Returns elapsed seconds.  NOTE(review): end_time is never assigned in this
// function, so the return always evaluates sb_time() - start_time here.
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
  double start_time = sb_time(), end_time = 0.0;
  int dimsize = compsize + BENCH_RAD * 2;
  // view the flat buffer as [2][dimsize][dimsize][dimsize] (VLA pointer)
  SB_TYPE (*A)[dimsize][dimsize][dimsize]
    = (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;

  if (scop) {
#pragma scop
    for (int t = 0; t < timestep; t++)
      for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
        for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
          for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
            A[(t+1)%2][i][j][k] =
              (1.500f*A[t%2][i-1][j][k] +
               0.500f*A[t%2][i-1][j-1][k-1] + 0.700f*A[t%2][i-1][j-1][k] +
               0.900f*A[t%2][i-1][j-1][k+1] + 1.200f*A[t%2][i-1][j][k-1] +
               1.201f*A[t%2][i-1][j][k+1] + 0.901f*A[t%2][i-1][j+1][k-1] +
               0.701f*A[t%2][i-1][j+1][k] + 0.501f*A[t%2][i-1][j+1][k+1] +
               1.510f*A[t%2][i][j][k] +
               0.510f*A[t%2][i][j-1][k-1] + 0.710f*A[t%2][i][j-1][k] +
               0.910f*A[t%2][i][j-1][k+1] + 1.210f*A[t%2][i][j][k-1] +
               1.211f*A[t%2][i][j][k+1] + 0.911f*A[t%2][i][j+1][k-1] +
               0.711f*A[t%2][i][j+1][k] + 0.511f*A[t%2][i][j+1][k+1] +
               1.520f*A[t%2][i+1][j][k] +
               0.520f*A[t%2][i+1][j-1][k-1] + 0.720f*A[t%2][i+1][j-1][k] +
               0.920f*A[t%2][i+1][j-1][k+1] + 1.220f*A[t%2][i+1][j][k-1] +
               1.221f*A[t%2][i+1][j][k+1] + 0.921f*A[t%2][i+1][j+1][k-1] +
               0.721f*A[t%2][i+1][j+1][k] + 0.521f*A[t%2][i+1][j+1][k+1]) / 159;
#pragma endscop
  }
  else {
    for (int t = 0; t < timestep; t++)
// i-slabs are independent within one time step, so parallelize over i
#pragma omp parallel for
      for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
        for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
          for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
            A[(t+1)%2][i][j][k] =
              (1.500f*A[t%2][i-1][j][k] +
               0.500f*A[t%2][i-1][j-1][k-1] + 0.700f*A[t%2][i-1][j-1][k] +
               0.900f*A[t%2][i-1][j-1][k+1] + 1.200f*A[t%2][i-1][j][k-1] +
               1.201f*A[t%2][i-1][j][k+1] + 0.901f*A[t%2][i-1][j+1][k-1] +
               0.701f*A[t%2][i-1][j+1][k] + 0.501f*A[t%2][i-1][j+1][k+1] +
               1.510f*A[t%2][i][j][k] +
               0.510f*A[t%2][i][j-1][k-1] + 0.710f*A[t%2][i][j-1][k] +
               0.910f*A[t%2][i][j-1][k+1] + 1.210f*A[t%2][i][j][k-1] +
               1.211f*A[t%2][i][j][k+1] + 0.911f*A[t%2][i][j+1][k-1] +
               0.711f*A[t%2][i][j+1][k] + 0.511f*A[t%2][i][j+1][k+1] +
               1.520f*A[t%2][i+1][j][k] +
               0.520f*A[t%2][i+1][j-1][k-1] + 0.720f*A[t%2][i+1][j-1][k] +
               0.920f*A[t%2][i+1][j-1][k+1] + 1.220f*A[t%2][i+1][j][k-1] +
               1.221f*A[t%2][i+1][j][k+1] + 0.921f*A[t%2][i+1][j+1][k-1] +
               0.721f*A[t%2][i+1][j+1][k] + 0.521f*A[t%2][i+1][j+1][k+1]) / 159;
  }

  return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
array_args.h
#ifndef LIGHTGBM_UTILS_ARRAY_AGRS_H_
#define LIGHTGBM_UTILS_ARRAY_AGRS_H_

#include <vector>
#include <algorithm>

#include <LightGBM/utils/openmp_wrapper.h>

namespace LightGBM {

/*!
* \brief Contains some operation for a array, e.g. ArgMax, TopK.
*/
template<typename VAL_T>
class ArrayArgs {
 public:
  // Multi-threaded argmax: split the array into one contiguous chunk per
  // OpenMP thread, take a per-chunk argmax in parallel, then reduce the
  // per-chunk winners sequentially.
  inline static size_t ArgMaxMT(const std::vector<VAL_T>& array) {
    int num_threads = 1;
    // query the team size from inside a parallel region
#pragma omp parallel
#pragma omp master
    {
      num_threads = omp_get_num_threads();
    }
    // chunk size = ceil(size / num_threads), at least 1
    int step = std::max(1, (static_cast<int>(array.size()) + num_threads - 1) / num_threads);
    std::vector<size_t> arg_maxs(num_threads, 0);
#pragma omp parallel for schedule(static,1)
    for (int i = 0; i < num_threads; ++i) {
      size_t start = step * i;
      // trailing threads may have no work when the array is small
      if (start >= array.size()) { continue; }
      size_t end = std::min(array.size(), start + step);
      size_t arg_max = start;
      for (size_t j = start + 1; j < end; ++j) {
        if (array[j] > array[arg_max]) {
          arg_max = j;
        }
      }
      arg_maxs[i] = arg_max;
    }
    // sequential reduction over the per-chunk winners; ties keep the
    // earliest chunk's index
    size_t ret = arg_maxs[0];
    for (int i = 1; i < num_threads; ++i) {
      if (array[arg_maxs[i]] > array[ret]) {
        ret = arg_maxs[i];
      }
    }
    return ret;
  }

  // Index of the maximum element; 0 for an empty array.  Falls back to the
  // multi-threaded version for arrays larger than 1024 entries.
  inline static size_t ArgMax(const std::vector<VAL_T>& array) {
    if (array.empty()) {
      return 0;
    }
    if (array.size() > 1024) {
      return ArgMaxMT(array);
    } else {
      size_t arg_max = 0;
      for (size_t i = 1; i < array.size(); ++i) {
        if (array[i] > array[arg_max]) {
          arg_max = i;
        }
      }
      return arg_max;
    }
  }

  // Index of the minimum element; 0 for an empty array.
  inline static size_t ArgMin(const std::vector<VAL_T>& array) {
    if (array.empty()) {
      return 0;
    }
    size_t arg_min = 0;
    for (size_t i = 1; i < array.size(); ++i) {
      if (array[i] < array[arg_min]) {
        arg_min = i;
      }
    }
    return arg_min;
  }

  // Raw-pointer overload.  Note: n is size_t, so "n <= 0" only guards n == 0.
  inline static size_t ArgMax(const VAL_T* array, size_t n) {
    if (n <= 0) {
      return 0;
    }
    size_t arg_max = 0;
    for (size_t i = 1; i < n; ++i) {
      if (array[i] > array[arg_max]) {
        arg_max = i;
      }
    }
    return arg_max;
  }

  // Raw-pointer overload.  Note: n is size_t, so "n <= 0" only guards n == 0.
  inline static size_t ArgMin(const VAL_T* array, size_t n) {
    if (n <= 0) {
      return 0;
    }
    size_t arg_min = 0;
    for (size_t i = 1; i < n; ++i) {
      if (array[i] < array[arg_min]) {
        arg_min = i;
      }
    }
    return arg_min;
  }

  // Three-way partition of (*arr)[start:end) in DESCENDING order around the
  // pivot v = last element (Bentley-McIlroy style: equal keys are parked at
  // the ends during the scan, then swapped into the middle).  On return,
  // indices <= *l hold values > v, indices >= *r hold values < v, and the
  // indices strictly between *l and *r hold values equal to v.
  inline static void Partition(std::vector<VAL_T>* arr, int start, int end, int* l, int* r) {
    int i = start - 1;
    int j = end - 1;
    int p = i;
    int q = j;
    if (start >= end) {
      return;
    }
    std::vector<VAL_T>& ref = *arr;
    VAL_T v = ref[end - 1];
    for (;;) {
      // scan from the left past elements greater than the pivot
      while (ref[++i] > v);
      // scan from the right past elements smaller than the pivot
      while (v > ref[--j]) {
        if (j == start) {
          break;
        }
      }
      if (i >= j) {
        break;
      }
      std::swap(ref[i], ref[j]);
      // park pivot-equal elements at the outer ends
      if (ref[i] == v) { p++; std::swap(ref[p], ref[i]); }
      if (v == ref[j]) { q--; std::swap(ref[j], ref[q]); }
    }
    // move the pivot into place, then swap the parked equals inward
    std::swap(ref[i], ref[end - 1]);
    j = i - 1;
    i = i + 1;
    for (int k = start; k <= p; k++, j--) {
      std::swap(ref[k], ref[j]);
    }
    for (int k = end - 2; k >= q; k--, i++) {
      std::swap(ref[i], ref[k]);
    }
    *l = j;
    *r = i;
  };

  // Note: k refer to index here. e.g. k=0 means get the max number.
  // Quickselect on the descending partition above: after the call,
  // (*arr)[k] is the element of rank k (0 = largest).
  inline static int ArgMaxAtK(std::vector<VAL_T>* arr, int start, int end, int k) {
    if (start >= end - 1) {
      return start;
    }
    int l = start;
    int r = end - 1;
    Partition(arr, start, end, &l, &r);
    // if find or all elements are the same.
    if ((k > l && k < r) || (l == start - 1 && r == end - 1)) {
      return k;
    } else if (k <= l) {
      return ArgMaxAtK(arr, start, l + 1, k);
    } else {
      return ArgMaxAtK(arr, r, end, k);
    }
  }

  // Note: k is 1-based here. e.g. k=3 means get the top-3 numbers.
  // Copies the array into *out and truncates it to its k largest values
  // (they are not guaranteed to be fully sorted).
  inline static void MaxK(const std::vector<VAL_T>& array, int k, std::vector<VAL_T>* out) {
    out->clear();
    if (k <= 0) {
      return;
    }
    for (auto val : array) {
      out->push_back(val);
    }
    if (static_cast<size_t>(k) >= array.size()) {
      return;
    }
    ArgMaxAtK(out, 0, static_cast<int>(out->size()), k - 1);
    out->erase(out->begin() + k, out->end());
  }

  // Resize *array to n elements, all set to t.
  inline static void Assign(std::vector<VAL_T>* array, VAL_T t, size_t n) {
    array->resize(n);
    for (size_t i = 0; i < array->size(); ++i) {
      (*array)[i] = t;
    }
  }

  // True when every element equals VAL_T(0).
  inline static bool CheckAllZero(const std::vector<VAL_T>& array) {
    for (size_t i = 0; i < array.size(); ++i) {
      if (array[i] != VAL_T(0)) {
        return false;
      }
    }
    return true;
  }

  // True when every element equals t.
  inline static bool CheckAll(const std::vector<VAL_T>& array, VAL_T t) {
    for (size_t i = 0; i < array.size(); ++i) {
      if (array[i] != t) {
        return false;
      }
    }
    return true;
  }
};

}  // namespace LightGBM

#endif   // LightGBM_UTILS_ARRAY_AGRS_H_
sort-algorithms.c
// // Created by Paige Riley Weber on 6/4/21. // #include "sort-algorithms.h" void insertionSort(float* arr, size_t n) { for (size_t i = 1; i < n; i++) { size_t j = i; while (j > 0 && arr[j] < arr[j-1]) { swap(&arr[j], &arr[j-1]); j--; } } } // assumes a and b are contiguous in memory, with 'a' first void merge(float * a, size_t a_n, float * b, size_t b_n, float * buffer) { size_t a_i = 0; // index of array a size_t b_i = 0; // index of array b size_t buffer_i = 0; while(a_i < a_n && b_i < b_n) { if(a[a_i] > b[b_i]) { // then b should come first buffer[buffer_i] = b[b_i]; b_i++; } else { // else a is <= b so we put a next buffer[buffer_i] = a[a_i]; a_i++; } buffer_i++; } // if either 'a' or 'b' has items left, copy them to 'merged' if(a_i < a_n) { memcpy(buffer + buffer_i, a + a_i, sizeof(float) * (a_n - a_i)); } if(b_i < b_n) { memcpy(buffer + buffer_i, b + b_i, sizeof(float) * (b_n - b_i)); } // then move stuff from 'buffer' back into original array memcpy(a, buffer, sizeof(float) * (a_n+b_n)); } // recursively merge-sorts an array in-place. void mergesort(float * array, float * buffer, size_t n) { // base case: an array of size 1 is always sorted if(n == 1) return; // create new pointers and sizes for the left and right halves. 
This is how // we "split" the array size_t left_n = n/2; float * left_array = array; float * left_buffer = buffer; size_t right_n = n - n/2; float * right_array = array + left_n; float * right_buffer = buffer + left_n; // mergesort each half mergesort(left_array, left_buffer, left_n); mergesort(right_array, right_buffer, right_n); // merge the sorted portions back together merge(left_array, left_n, right_array, right_n, buffer); } // helper function to match signature of other functions void mergeSort(float* arr, size_t n) { float * buffer = aligned_alloc(ALIGNMENT, sizeof(float) * n); mergesort(arr, buffer, n); free(buffer); } void mergesort_parallel(float * array, float * buffer, size_t n) { // there is no free lunch: this parameter will need tuning per-machine const size_t MIN_PARALLEL_N = (size_t)1e6; if (n < MIN_PARALLEL_N) { // for small n, use sequential code. Avoid creating very small tasks. mergesort(array, buffer, n/2); mergesort(array + n/2, buffer + n/2, n - n/2); } else { #pragma omp task mergesort_parallel(array, buffer, n/2); #pragma omp task mergesort_parallel(array + n/2, buffer + n/2, n - n/2); } // taskwait ensures that both mergesorts are completed before merging #pragma omp taskwait merge(array, n/2, array + n/2, n - n/2, buffer); } void mergeSortParallel(float* arr, size_t n) { float * buffer = aligned_alloc(ALIGNMENT, sizeof(float) * n); mergesort_parallel(arr, buffer, n); free(buffer); } // turn array[root:n] into max heap void heapify(float* arr, size_t n, size_t root) { int largest = root; int left = 2*root + 1; int right = 2*root + 2; // compare root to its children to see which is largest if (left < n && arr[left] > arr[largest]) largest = left; if (right < n && arr[right] > arr[largest]) largest = right; if (largest != root) { // then we are not done. 
Swap and heapify again swap(&arr[root], &arr[largest]); heapify(arr, n, largest); } } void heapSort(float* arr, size_t n) { // heapify all subtrees (create initial max heap) // we want to move from right to left, because big numbers will be towards // the right // start at i = n/2 because anything at indices greater than n/2 will be a // leaf node for (size_t i = n/2 - 1; i > 0; i--) { heapify(arr, n, i); } // last iteration (since size_t is unsigned we can't rely on it being // negative as a condition to end the for loop) heapify(arr, n, 0); // move largest element to end for (size_t i = n-1; i > 0; i--) { swap(&arr[0], &arr[i]); // heapify with 0 as root. Only heapify to i heapify(arr, i, 0); } } // recursive part of quicksort void quicksort(float* arr, size_t i, size_t n) { if ((n - i) < 2) return; // pivot is first item size_t pivot_i = i; for (size_t j = i+1; j < n; j++) { // check if other is less than pivot. If so, move pivot to the right and // place other immediately before pivot. if (arr[j] < arr[pivot_i]) { swap(&arr[pivot_i], &arr[pivot_i+1]); pivot_i++; // this case is important! This covers the instance where j immediately // follows the pivot, and so moving the pivot also swapped other before // the pivot. if(j != pivot_i) { swap(&arr[j], &arr[pivot_i-1]); } } } // recursively quicksort left and right halves quicksort(arr, i, pivot_i); quicksort(arr, pivot_i+1, n); } // wrapper to match function signature void quickSort(float* arr, size_t n) { quicksort(arr, 0, n); } // recursive part of modified quicksort void quicksort_modified(float* arr, size_t i, size_t n) { // insertion sort on small parts if (n - i < 16) { insertionSort(arr + i, n-i); return; } // the three indices for median-of-three size_t hi = n-1; size_t lo = i; size_t mid = lo + ((hi - lo) / 2); // these three if statements ensure elements at lo, mid, and hi are sorted in // relation to each other. This way we know that mid is our median-of-three. 
if (arr[mid] < arr[lo]) swap(&arr[mid], &arr[lo]); if (arr[hi] < arr[lo]) swap(&arr[hi], &arr[lo]); if (arr[mid] > arr[hi]) swap(&arr[mid], &arr[hi]); lo++; hi--; // check left side of pivot for anything that is larger than pivot while (lo < mid) { if (arr[lo] > arr[mid]) { swap(&arr[mid], &arr[mid-1]); mid--; if (lo != mid) swap(&arr[lo], &arr[mid+1]); } else lo++; } // check right side of pivot for anything smaller than pivot while (hi > mid) { if (arr[hi] < arr[mid]) { swap(&arr[mid], &arr[mid+1]); mid++; if (hi != mid) swap(&arr[hi], &arr[mid-1]); } else hi--; } // recursively quicksort left and right halves quicksort_modified(arr, i, mid); quicksort_modified(arr, mid+1, n); } // wrapper to match function signature void quickSortModified(float* arr, size_t n) { quicksort_modified(arr, 0, n); }
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef gebp_traits<RhsScalar,LhsScalar> Traits;

  typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  // A row-major product C = A*B is evaluated as the column-major product
  // C^T = B^T * A^T: swap the operands (and their storage orders and
  // conjugation flags) and forward to the ColMajor specialization below.
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};

/* Specialization for a col-major destination matrix
 * => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{

typedef gebp_traits<LhsScalar,RhsScalar> Traits;

typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar;
// Computes res += alpha * lhs * rhs using cache blocking: panels of lhs and
// blocks of rhs are packed into contiguous scratch buffers, then multiplied
// by the gebp micro-kernel. When `info` is non-null this is running inside
// an OpenMP parallel region and the threads cooperate via `info[]`.
static void run(Index rows, Index cols, Index depth,
  const LhsScalar* _lhs, Index lhsStride,
  const RhsScalar* _rhs, Index rhsStride,
  ResScalar* _res, Index resStride,
  ResScalar alpha,
  level3_blocking<LhsScalar,RhsScalar>& blocking,
  GemmParallelInfo<Index>* info = 0)
{
  typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
  typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
  typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
  LhsMapper lhs(_lhs,lhsStride);
  RhsMapper rhs(_rhs,rhsStride);
  ResMapper res(_res, resStride);

  Index kc = blocking.kc();                   // cache block size along the K direction
  Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
  Index nc = (std::min)(cols,blocking.nc());  // cache block size along the N direction

  gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
  gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
  gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
  if(info)
  {
    // this is the parallel version!
    Index tid = omp_get_thread_num();
    Index threads = omp_get_num_threads();

    // blockA is shared between the threads; each thread owns the A'_i slice
    // delimited by info[tid].lhs_start / info[tid].lhs_length
    LhsScalar* blockA = blocking.blockA();
    eigen_internal_assert(blockA!=0);

    std::size_t sizeB = kc*nc;
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);

    // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
    for(Index k=0; k<depth; k+=kc)
    {
      const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

      // In order to reduce the chance that a thread has to wait for the other,
      // let's start by packing B'.
      pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);

      // Pack A_k to A' in a parallel fashion:
      // each thread packs the sub block A_k,i to A'_i where i is the thread id.

      // However, before copying to A'_i, we have to make sure that no other thread is still using it,
      // i.e., we test that info[tid].users equals 0.
      // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
      while(info[tid].users!=0) {}  // spin-wait until every reader released our slice
      info[tid].users += threads;

      pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);

      // Notify the other threads that the part A'_i is ready to go.
      info[tid].sync = k;

      // Computes C_i += A' * B' per A'_i
      for(Index shift=0; shift<threads; ++shift)
      {
        Index i = (tid+shift)%threads;  // visit slices starting with our own

        // At this point we have to make sure that A'_i has been updated by the thread i,
        // we use testAndSetOrdered to mimic a volatile access.
        // However, no need to wait for the B' part which has been updated by the current thread!
        if (shift>0) {
          while(info[i].sync!=k) {
          }
        }

        gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
      }

      // Then keep going as usual with the remaining B'
      for(Index j=nc; j<cols; j+=nc)
      {
        const Index actual_nc = (std::min)(j+nc,cols)-j;

        // pack B_k,j to B'
        pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);

        // C_j += A' * B'
        gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
      }

      // Release all the sub blocks A'_i of A' for the current thread,
      // i.e., we simply decrement the number of users by 1
      #pragma omp critical
      {
        for(Index i=0; i<threads; ++i)
          #pragma omp atomic
          --(info[i].users);
      }
    }
  }
  else
#endif // EIGEN_HAS_OPENMP
  {
    EIGEN_UNUSED_VARIABLE(info);

    // this is the sequential version!
    std::size_t sizeA = kc*mc;
    std::size_t sizeB = kc*nc;

    ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
    ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());

    // if B fits entirely in one block, it only needs to be packed once
    const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;

    // For each horizontal panel of the rhs, and corresponding panel of the lhs...
    for(Index i2=0; i2<rows; i2+=mc)
    {
      const Index actual_mc = (std::min)(i2+mc,rows)-i2;

      for(Index k2=0; k2<depth; k2+=kc)
      {
        const Index actual_kc = (std::min)(k2+kc,depth)-k2;

        // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
        // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
        // Note that this panel will be read as many times as the number of blocks in the rhs's
        // horizontal panel which is, in practice, a very low number.
        pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);

        // For each kc x nc block of the rhs's horizontal panel...
        for(Index j2=0; j2<cols; j2+=nc)
        {
          const Index actual_nc = (std::min)(j2+nc,cols)-j2;

          // We pack the rhs's block into a sequential chunk of memory (L2 caching)
          // Note that this block will be read a very high number of times, which is equal to the number of
          // micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
          if((!pack_rhs_once) || i2==0)
            pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);

          // Everything is packed, we can now call the panel * block kernel:
          gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
        }
      }
    }
  }
}

};

/*********************************************************************************
*  Specialization of generic_product_impl for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

// Functor binding a concrete lhs/rhs/dest triple to the low-level GEMM
// kernel; invoked (possibly per-thread on row ranges) by parallelize_gemm.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  // Prepare the shared blocking state and packed-lhs buffer for a
  // multi-threaded run.
  void initParallelSession(Index num_threads) const
  {
    m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
    m_blocking.allocateA();
  }

  // Run the product on the [row,row+rows) x [col,col+cols) part of the
  // destination; cols == -1 means "all remaining columns".
  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  typedef typename Gemm::Traits Traits;

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

// Holds the cache-blocking parameters (mc, nc, kc) and the packing buffers
// shared by the GEMM kernels above.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;

    Index m_mc;
    Index m_nc;
    Index m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline Index mc() const { return m_mc; }
    inline Index nc() const { return m_nc; }
    inline Index kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
};

// Fixed-size variant: all dimensions known at compile time, so the packing
// buffers are statically sized members and no heap allocation happens.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth
    };

    EIGEN_ALIGN_DEFAULT LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN_DEFAULT RhsScalar m_staticB[SizeB];

  public:

    gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
    }

    void initParallel(Index, Index, Index, Index)
    {}

    // nothing to allocate: the buffers are static members
    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateAll() {}
};

// Dynamic-size variant: blocking sizes are computed at run time and the
// packing buffers are lazily heap-allocated and freed in the destructor.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index m_sizeA;
    Index m_sizeB;

  public:

    gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      if(l3_blocking)
      {
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
      }
      else  // no l3 blocking
      {
        // pass copies so that m_mc/m_nc keep their full-problem values
        Index m = this->m_mc;
        Index n = this->m_nc;
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, n, num_threads);
      }

      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void initParallel(Index rows, Index cols, Index depth, Index num_threads)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
      Index m = this->m_mc;
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
    }
};

} // end namespace internal

namespace internal {

// High-level product evaluator for "large" dense*dense products: dispatches
// small products to the lazy coefficient-based path and everything else to
// the blocked (optionally parallel) GEMM above.
template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  typedef typename Lhs::Scalar LhsScalar;
  typedef typename Rhs::Scalar RhsScalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum {
    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
  };

  typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;

  // dst = lhs * rhs; the size-20 heuristic routes tiny products to the
  // lazy evaluator to avoid the GEMM setup overhead
  template<typename Dst>
  static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::evalTo(dst, lhs, rhs);
    else
    {
      dst.setZero();
      scaleAndAddTo(dst, lhs, rhs, Scalar(1));
    }
  }

  // dst += lhs * rhs
  template<typename Dst>
  static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::addTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst,lhs, rhs, Scalar(1));
  }

  // dst -= lhs * rhs
  template<typename Dst>
  static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::subTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
  }

  // dst += alpha * lhs * rhs, via the blocked GEMM (parallelized when the
  // destination is large enough at compile time)
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
  {
    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
    if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
      return;  // empty product: nothing to accumulate

    // strip scalar multiples and transpose/conjugate wrappers from the
    // operands, folding their scalar factors into alpha
    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                               * RhsBlasTraits::extractScalarFactor(a_rhs);

    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
            Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

    typedef internal::gemm_functor<
      Scalar, Index,
      internal::general_matrix_matrix_product<
        Index,
        LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
        RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
        (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
      ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;

    BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
    internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
        (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), Dest::Flags&RowMajorBit);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
reduce_demo.c
//------------------------------------------------------------------------------
// GraphBLAS/Demo/Program/reduce_demo: reduce a matrix to a scalar
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

#include "GraphBLAS.h"
#if defined ( _OPENMP )
#include <omp.h>
#endif

// matrix dimension: the demo builds a dense N-by-N matrix
// #define N 65536
#define N 16384

// Builds a dense N*N int64 matrix and reduces it to a scalar with the
// PLUS monoid, timing the reduction at every thread count from 1 to the
// configured maximum to show the parallel speedup.
int main (void)
{
    #if defined ( _OPENMP )
    double t0 = omp_get_wtime ( ) ;
    #endif

    // start GraphBLAS
    GrB_init (GrB_NONBLOCKING) ;
    int nthreads ;
    GxB_Global_Option_get (GxB_GLOBAL_NTHREADS, &nthreads) ;
    printf ("demo: reduce a matrix to a scalar, nthreads: %d\n", nthreads) ;

    int nthreads_max ;
    GxB_Global_Option_get (GxB_GLOBAL_NTHREADS, &nthreads_max) ;
    printf ("# of threads: %d\n", nthreads_max) ;

    #if defined ( _OPENMP )
    t0 = omp_get_wtime ( ) - t0 ;
    // NOTE(review): this elapsed time actually covers GrB_init and the option
    // queries above; the "GPU warmup" label is runtime output, left unchanged.
    printf ("GPU warmup time: %g\n", t0) ;
    t0 = omp_get_wtime ( ) ;
    #endif

    GrB_Index nrows = N ;
    GrB_Index ncols = N ;
    GrB_Matrix A ;
    GrB_Matrix_new (&A, GrB_INT64, nrows, ncols) ;

    // build the tuples (I,J,X) for every entry of the dense N-by-N matrix
    GrB_Index *I = (GrB_Index *) malloc (nrows * ncols * sizeof (GrB_Index)) ;
    GrB_Index *J = (GrB_Index *) malloc (nrows * ncols * sizeof (GrB_Index)) ;
    int64_t *X = (int64_t *) malloc (nrows * ncols * sizeof (int64_t)) ;

    int64_t k ;
    #pragma omp parallel for num_threads(nthreads_max) schedule(static)
    for (k = 0 ; k < N*N ; k++)
    {
        // k = i * N + j ;
        int64_t i = k / N ;
        int64_t j = k % N ;
        // deterministic values in 0..255 (the rand() variant is kept for
        // reference but would not be reproducible across runs)
        // int x = (int) (rand ( ) & 0xFF) ;
        int x = (int) (k & 0xFF) ;
        I [k] = i ;
        J [k] = j ;
        X [k] = x ;
    }

    GrB_Index nvals = N*N ;
    GrB_Matrix_build_INT64 (A, I, J, X, nvals, GrB_PLUS_INT64) ;

    // the tuples have been copied into A; the staging arrays can go
    free (I) ;
    free (J) ;
    free (X) ;

    #if defined ( _OPENMP )
    t0 = omp_get_wtime ( ) - t0 ;
    printf ("time to create matrix: %g\n", t0) ;
    #endif

    GrB_Index result ;

    // t1 records the single-thread time, used as the speedup baseline;
    // it is always set on the first (nthreads == 1) iteration before use
    double t1 ;
    printf ("\nreduce to a scalar:\n") ;

    for (int nthreads = 1 ; nthreads <= nthreads_max ; nthreads++)
    {
        GxB_Global_Option_set (GxB_GLOBAL_NTHREADS, nthreads) ;
        #if defined ( _OPENMP )
        double t = omp_get_wtime ( ) ;
        #endif
        // sum all entries of A into `result` (typecast to uint64 on output)
        GrB_Matrix_reduce_UINT64 (&result, NULL, GrB_PLUS_MONOID_INT64, A, NULL) ;
        #if defined ( _OPENMP )
        t = omp_get_wtime ( ) - t ;
        if (nthreads == 1) t1 = t ;
        printf ("nthreads %3d time: %12.6f speedup %8.2f\n", nthreads, t, t1/t) ;
        #endif
    }

    // NOTE(review): result is a GrB_Index (unsigned) printed with PRId64
    // (signed); harmless for this sum, but worth confirming upstream.
    printf ("result %" PRId64 "\n", result) ;

    // free everything
    GrB_Matrix_free (&A) ;
    GrB_finalize ( ) ;
}
depend_iterator_bug.c
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -triple x86_64-unknown-linux-gnu \ // RUN: -disable-llvm-passes -emit-llvm %s -o - | FileCheck %s // expected-no-diagnostics int x[100]; int y[100]; // CHECK-LABEL: @many_iterators_single_clause( // CHECK: [[VLA:%.*]] = alloca [[STRUCT_KMP_DEPEND_INFO:%.*]], i64 10, align 16 // CHECK: = call i32 @__kmpc_omp_task_with_deps(%struct.ident_t* {{.*}}, i32 {{.*}}, i8* {{.*}}, i32 10, i8* {{.*}}, i32 0, i8* null) void many_iterators_single_clause(void) { #pragma omp task depend(iterator(j=0:5), in: x[j], y[j]) { } } // CHECK-LABEL: @many_iterators_many_clauses( // CHECK: [[VLA:%.*]] = alloca [[STRUCT_KMP_DEPEND_INFO:%.*]], i64 10, align 16 // CHECK: = call i32 @__kmpc_omp_task_with_deps(%struct.ident_t* {{.*}}, i32 {{.*}}, i8* {{.*}}, i32 10, i8* {{.*}}, i32 0, i8* null) void many_iterators_many_clauses(void) { #pragma omp task depend(iterator(j=0:5), in: x[j]) \ depend(iterator(j=0:5), in: y[j]) { } }
decoder.c
/*! @file * @brief * * @version 1.0.0 * * (C) Copyright 2017 GoPro Inc (http://gopro.com/). * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "config.h" #include "timing.h" #if WARPSTUFF #include "WarpLib.h" #endif //#include <stdlib.h> #include <stddef.h> #include <math.h> #include <memory.h> #include <time.h> //#include <stdint.h> #ifndef DEBUG #define DEBUG (1 && _DEBUG) #endif #ifndef TIMING #define TIMING (1 && _TIMING) #endif #ifndef XMMOPT #define XMMOPT (1 && _XMMOPT) #endif #define GEN_LICENSE 0 #ifndef PI #define PI 3.14159265359f #endif #ifdef _WIN32 #include <windows.h> #elif __APPLE__ #include "macdefs.h" #else #ifndef ZeroMemory #define ZeroMemory(p,s) memset(p,0,s) #endif #endif #include <stdio.h> #include <assert.h> #include <emmintrin.h> // Intel aligned alloc and free #include "dump.h" #include "decoder.h" #include "codec.h" #include "vlc.h" #include "codebooks.h" // References to the codebooks #include "debug.h" #include "color.h" // Color formats supported by image processing routines #include "image.h" #include "filter.h" #include "spatial.h" #include "temporal.h" //#include "logo40x5.h" #include "convert.h" #include "wavelet.h" #include "bitstream.h" #include "frame.h" #include "cpuid.h" #include "bayer.h" #include "metadata.h" #include "DemoasicFrames.h" //TODO: Change filename to lower case #include "swap.h" #include "draw.h" #include "RGB2YUV.h" #include "lutpath.h" #include "exception.h" extern void FastVignetteInplaceWP13(DECODER *decoder, 
int displayWidth, int width, int height, int y, float r1, float r2, float gain, int16_t *sptr, int resolution, int pixelsize); extern void FastSharpeningBlurHinplaceWP13(int width, int16_t *sptr, float sharpness, int resolution, int pixelsize); extern void FastSharpeningBlurVWP13(short *Aptr, short *Bptr, short *Cptr, short *Dptr, short *Eptr, int pitch, int edgenear, short *output, int pixels, float sharpness, int resolution, int channel_blend_type); extern void FastSharpeningBlurVW13A(short *Aptr, short *Bptr, short *Cptr, short *Dptr, short *Eptr, int pitch, int edgenear, short *output, int pixels, float sharpness, int resolution, int channel_blend_type); #ifdef SPI_LOADER #include "spi.h" #include "keyframes.h" #endif #ifndef DUMP #define DUMP (0 && _DUMP) #endif #define ERROR_TOLERANT 1 #if defined(_WIN32) && DEBUG #include <tchar.h> // For printing debug string in the console window #endif #define _DECODE_TRANSFORM 1 // Enable concurrent decoding and inverse transform #define _TRANSFORM_FIELDPLUS 1 // Use the field plus transform #if _SIF // In SIF resolution, enable the _DECODE_TRANSFORM switch #if _DECODE_TRANSFORM == 0 #define _DECODE_TRANSFORM 1 #endif #endif #ifndef _FSMBUFFER #define _FSMBUFFER 0 #endif // Turn off saturation in this file #ifdef SATURATE #undef SATURATE #endif //#define SATURATE(x) (assert(PIXEL_MIN <= (x) && (x) <= PIXEL_MAX), (x)) //#define SATURATE8S(x) (assert(PIXEL8S_MIN <= (x) && (x) <= PIXEL8S_MAX), (x)) #define SATURATE8S(x) SATURATE_8S(x) #define SATURATE(x) (x) // Enable or disable function inlining #if 1 //DEBUG #define inline #else #define inline __forceinline #endif // Pixel size used for computing the compression ratio #define BITS_PER_PIXEL 8 // Default processor capabilities #define DEFAULT_FEATURES (_CPU_FEATURE_MMX ) #define DEMOSAIC_DELAYLINES 4 // Forward references void AllocDecoderGroup(DECODER *decoder); bool AllocDecoderBuffer(DECODER *decoder, int width, int height, int format); void EraseDecoderFrames(DECODER 
*decoder); TRANSFORM *AllocGroupTransform(GROUP *group, int channel); void EraseOutputBuffer(uint8_t *buffer, int width, int height, int32_t pitch, int format); #if _DEBUG bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, FILE *logfile); #else bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch); #endif bool DecodeBandFSM16sNoGapHighByte(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant); bool DecodeBandFSM16sNoGap2Pass(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant); void CopyLowpassRGB444ToBuffer(DECODER *decoder, IMAGE *image_array[], int num_channels, uint8_t *output_buffer, int32_t output_pitch, FRAME_INFO *info, int chroma_offset, int precision); extern void Row16uQuarter2OutputFormat(DECODER *decoder, FRAME_INFO *info, int thread_index, uint8_t *output, int pitch, int frame, void *scratch, size_t scratch_size, int threading, uint8_t *channeldata[TRANSFORM_MAX_CHANNELS], // used in quarter res decodes int channelpitch[TRANSFORM_MAX_CHANNELS]); // used in quarter res decodes); //extern void ComputeCube(DECODER *decoder); extern bool NeedCube(DECODER *decoder); extern void LoadTweak(); //extern int g_topdown; //extern int g_bottomup; // Performance measurements #if _TIMING extern TIMER tk_decompress; // Timers extern TIMER tk_decoding; extern TIMER tk_convert; extern TIMER tk_inverse; extern COUNTER decode_byte_count; // Counters extern COUNTER sample_byte_count; extern COUNTER alloc_group_count; extern COUNTER alloc_transform_count; extern COUNTER alloc_buffer_count; extern COUNTER spatial_decoding_count; extern COUNTER temporal_decoding_count; extern COUNTER progressive_decode_count; #endif #if 0 // Table that maps from decoded format to pixel size static const int PixelSize[] = { 0, // DECODED_FORMAT_UNSUPPORTED 2, // DECODED_FORMAT_YUYV 2, // DECODED_FORMAT_UYVY 2, 
    // DECODED_FORMAT_420
    4,  // DECODED_FORMAT_RGB32
    3,  // DECODED_FORMAT_RGB24
    2,  // DECODED_FORMAT_RGB555
    2,  // DECODED_FORMAT_RGB565
#if 0
    2,  // DECODED_FORMAT_YUYV_INVERTED
    2,  // DECODED_FORMAT_UYVY_INVERTED
    2,  // DECODED_FORMAT_420_INVERTED
#endif
    4,  // DECODED_FORMAT_RGB32_INVERTED
    3,  // DECODED_FORMAT_RGB24_INVERTED
    2,  // DECODED_FORMAT_RGB555_INVERTED
    2,  // DECODED_FORMAT_RGB565_INVERTED
    3,  // DECODED_FORMAT_V210,
    4,  // DECODED_FORMAT_YU64,  // Custom 16 bits per channel (all data scaled up) YUYV format.
    4,  // DECODED_FORMAT_YR16   // Rows of YUV with 16 bits per channel
};

#if _DEBUG
char *decoded_format_string[] =
{
    "Unsupported",
    "YUYV",
    "UYUV",
    "420",
    "RGB32",
    "RGB24",
    "RGB555",
    "RGB565",
#if 0
    "YUYV Inverted",
    "UYVY Inverted",
    "420 Inverted",
#endif
//#if BUILD_PROSPECT
    "RGB32 Inverted",
    "RGB24 Inverted",
    "RGB555 Inverted",
    "RGB565 Inverted",
    "V210"
//#endif
};
#endif

#else

// Pixel size in bytes indexed by COLOR_FORMAT code.
// A zero entry means the size is unknown or the format is planar
// (YV12/I420) and has no single per-pixel byte count.
static const int pixel_size_table[] =
{
    0,  // COLOR_FORMAT_UNKNOWN
    2,  // COLOR_FORMAT_UYVY
    2,  // COLOR_FORMAT_YUYV
    2,  // COLOR_FORMAT_YVYU
    0,  // COLOR_FORMAT_YV12
    0,  // COLOR_FORMAT_I420
    2,  // COLOR_FORMAT_RGB16
    3,  // COLOR_FORMAT_RGB24
    4,  // COLOR_FORMAT_RGB32
    0,
    3,  // COLOR_FORMAT_V210
    0,  // COLOR_FORMAT_RGB10
    4,  // COLOR_FORMAT_YU64
    4,  // COLOR_FORMAT_YR16
    4,  // COLOR_FORMAT_YUVA
};

static const int pixel_size_table_length = sizeof(pixel_size_table)/sizeof(pixel_size_table[0]);

// Return the pixel size in bytes for a COLOR_FORMAT code.
// Small format codes are resolved through pixel_size_table; Avid, Bayer,
// and the deep RGB formats are handled explicitly below.
// Returns 0 for formats with no meaningful per-pixel byte count.
static int PixelSize(int format)
{
    int pixel_size = 0;

    // Mask off the other fields in the format descriptor
    // Use the lookup table to determine the pixel size (if possible)
    if (0 <= format && format < pixel_size_table_length) {
        pixel_size = pixel_size_table[format];
        //return pixel_size;
    }
    //TODO: Change the rest of this routine into one big switch statement
    // Is this an Avid format?
    else if (COLOR_FORMAT_AVID <= format && format <= COLOR_FORMAT_AVID_END)
    {
        switch (format)
        {
        case COLOR_FORMAT_CbYCrY_8bit:
        case COLOR_FORMAT_CbYCrY_10bit_2_8:     // Only valid for the lower plane
            pixel_size = 1;
            break;

        case COLOR_FORMAT_CbYCrY_16bit:
        case COLOR_FORMAT_CbYCrY_16bit_2_14:
        case COLOR_FORMAT_CbYCrY_16bit_10_6:
            pixel_size = 2;
            break;

        default:
            assert(0);
            pixel_size = 2;     // Assume 16 bits per pixel if the format is unknown
            break;
        }
    }
    // Is this a Bayer format?
    else if (COLOR_FORMAT_BAYER <= format && format <= COLOR_FORMAT_BAYER_END)
    {
        // NOTE(review): derives the size from the format code offset and
        // clamps it to 2 bytes -- presumably the Bayer codes start at 100;
        // confirm against the COLOR_FORMAT_BAYER enum values.
        pixel_size = (format - 100);
        if(pixel_size > 2) pixel_size = 2;
    }
    else if (format == COLOR_FORMAT_RG48)
        pixel_size = 6;
    else if (format == COLOR_FORMAT_RG64)
        pixel_size = 8;
    else if (format == COLOR_FORMAT_B64A)
    {
        pixel_size = 8;
    }

    return pixel_size;
}
#endif

// Return the pixel size in bytes for a DECODED_FORMAT code.
// Asserts (and returns 0) for formats that do not have a fixed
// per-pixel size (V210 and the packed 10-bit 2/8 format) and for
// unrecognized formats.
int DecodedPixelSize(DECODED_FORMAT format)
{
    int pixel_size = 0;

    // Compute the pixel size
    switch (format)
    {
    case DECODED_FORMAT_YUYV:
        pixel_size = 2;
        break;

    case DECODED_FORMAT_RGB32:
        pixel_size = 4;
        break;

    case DECODED_FORMAT_RG48:
        pixel_size = 6;
        break;

    case DECODED_FORMAT_CT_UCHAR:
        pixel_size = 2;
        break;

    case DECODED_FORMAT_CT_SHORT:
    case DECODED_FORMAT_CT_SHORT_2_14:
    case DECODED_FORMAT_CT_USHORT_10_6:
        pixel_size = 4;
        break;

    case DECODED_FORMAT_CT_10Bit_2_8:
    case DECODED_FORMAT_V210:
        // This routine should not be called to compute the pixel sizes for these formats
        assert(0);
        return 0;
        break;

    case DECODED_FORMAT_ROW16U:
        pixel_size = 4;
        break;

    default:
        assert(0);
        return 0;
        break;
    }

    return pixel_size;
}

#if 0
// Convert FOURCC code to a string
static void str4cc(char *string, uint32_t marker)
{
    char *p = (char *)&marker + 3;
    char *s = string;
    int i;

    for (i = 0; i < 4; i++)
        *(s++) = *(p--);
    *s = '\0';
}
#endif

// Compute the display aspect ratio for the current frame and return it
// through *w and *h.  The encoded dimensions are first scaled back up to
// the original frame size according to the decoded resolution, then the
// aspect ratio is taken from the codec state, the pixel aspect ratio, or
// guessed from common video geometries (16:9 for HD) as a last resort.
void GetDisplayAspectRatio(DECODER *decoder, int *w, int *h)
{
    int origw,origh, guess = 0;
    origw = decoder->frame.width;
    origh = decoder->frame.height;

    // Undo the resolution scaling so origw/origh are the source dimensions
    switch(decoder->frame.resolution)
    {
    case DECODED_RESOLUTION_FULL:
        break;
    case DECODED_RESOLUTION_HALF:
        origw *= 2;
        origh *= 2;
        break;
    case DECODED_RESOLUTION_QUARTER:
        origw *= 4;
        origh *= 4;
        break;
    case DECODED_RESOLUTION_LOWPASS_ONLY:
        origw *= 8;
        origh *= 8;
        break;
    case DECODED_RESOLUTION_FULL_DEBAYER:
        break;
    case DECODED_RESOLUTION_HALF_NODEBAYER:
        origw *= 2;
        origh *= 2;
        break;
    case DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED:
        origw *= 4;
        origh *= 4;
        break;
    case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
        //origw *= 2; //DAN20110129 -- seems the width has been corrected elsewhere or was never halved.
        break;
    case DECODED_RESOLUTION_HALF_HORIZONTAL:
        origw *= 2;
        break;
    case DECODED_RESOLUTION_HALF_VERTICAL:
        origh *= 2;
        break;
    }

    if(decoder->codec.picture_aspect_x <= 0 || decoder->codec.picture_aspect_y <= 0)
        guess = 1;

    // if guess default values, we can't trust them
    if(decoder->codec.picture_aspect_x == 16 && decoder->codec.picture_aspect_y == 9)
        guess = 1;

    if(decoder->pixel_aspect_x && decoder->pixel_aspect_y)
    {
        // Derive the picture aspect from the pixel aspect ratio, then
        // reduce the fraction by repeatedly dividing out common factors.
        int j,den,num;
        decoder->codec.picture_aspect_x = num = (origw * decoder->pixel_aspect_x) / decoder->pixel_aspect_y;
        decoder->codec.picture_aspect_y = den = origh;

        for(j=2; j<num+den; j++)
        {
            while(num == (num/j)*j && den == (den/j)*j)
            {
                num /= j;
                den /= j;
            }
        }
        decoder->codec.picture_aspect_x = num;
        decoder->codec.picture_aspect_y = den;
        guess = 0;
    }

    if(guess)
    {
        if(origw > 720) //HD.
        {
            if(origh == 1080)
            {
                if(origw == 2048)
                    *w=origw,*h=origh;
                else
                    *w=16,*h=9; // assume 16x9
            }
            else if(origh == 720)
            {
                *w=16,*h=9; // assume 16x9
            }
            else
            {
                *w=origw,*h=origh; // assume square pixel.
            }
        }
        else
        {
            if(origh == 720)
            {
                *w=16,*h=9; // assume 16x9
            }
            else
            {
                *w=origw,*h=origh; // assume square pixel.
            }
        }
    }
    else
    {
        *w=decoder->codec.picture_aspect_x;
        *h=decoder->codec.picture_aspect_y;
    }
}

// Return true if the resolution code is one the decoder can produce.
bool IsValidFrameResolution(int resolution)
{
    switch (resolution)
    {
    case DECODED_RESOLUTION_FULL:
    case DECODED_RESOLUTION_HALF:
    case DECODED_RESOLUTION_QUARTER:
    case DECODED_RESOLUTION_LOWPASS_ONLY:
    case DECODED_RESOLUTION_HALF_HORIZONTAL:
    case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
        return true;

    default:
        return false;
    }
}

// Return true if this decoder can decode to quarter resolution
bool IsQuarterResolutionEnabled(DECODER *decoder)
{
    return true;
}

// Size in bytes of the DECODER structure, for callers that allocate it themselves.
size_t DecoderSize()
{
    return sizeof(DECODER);
}

// Initialize a decoder instance: clear all state, record the logfile, and
// install the codebooks used for entropy decoding.  If cs is NULL the
// default codeset (cs9) is used for codeset zero only.
// NOTE(review): decoder->thread_cntrl is read before the memset below, so
// the caller must pass a zeroed or previously-initialized DECODER.
void InitDecoder(DECODER *decoder, FILE *logfile, CODESET *cs)
{
#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "InitDecoder, decoder: 0x%p\n", decoder);
    }
#endif
    {
        //TODO: Clear the decoder before setting the CPU limit and affinity
        int i;
        //int thread_limit=0, thread_affinity=0, set_thread_params=0, capabilities=0;

        //save key params
        Thread_cntrl saved_params = decoder->thread_cntrl;

        // Clear everything
        memset(decoder, 0, sizeof(DECODER));

        //restore key params
        if(saved_params.set_thread_params == 1) // used by the DShow Interface
        {
            decoder->thread_cntrl = saved_params;
        }

#if _TIMING
        InitTiming();
#endif
        // Set the file for status information during decoding
        decoder->logfile = logfile;

        // Initialize the decoding error to no error
        decoder->error = CODEC_ERROR_OKAY;

        // Most recent marker found during decoding
        decoder->marker = 0;

        // Count of frames decoded
        decoder->frame_count = 0;

        // Set the codebooks that will be used for decoding
        if (cs != NULL)
        {
            // Use the codeset provided in the call
            for(i=0; i<CODEC_NUM_CODESETS; i++)
            {
                // Codebook for decoding highpass coefficients
                decoder->magsbook[i] = cs[i].magsbook;

                // Codebook for decoding runs of coefficients
                decoder->runsbook[i] = cs[i].runsbook;

                // Lookup table for fast codebook search
                decoder->fastbook[i] = cs[i].fastbook;
            }
        }
        else
        {
            // Use the default codeset
            decoder->magsbook[0] = cs9.magsbook;
            decoder->runsbook[0] = cs9.runsbook;
            decoder->fastbook[0] = cs9.fastbook;
        }

        // Initialize the codec state
        InitCodecState(&decoder->codec);

        InitScratchBuffer(&decoder->scratch, NULL, 0);

#if _DUMP
        // Initialize the descriptor for controlling debug output
        decoder->dump.enabled = false;
        decoder->dump.channel_mask = 0;
        decoder->dump.wavelet_mask = 0;
        memset(decoder->dump.directory, 0, sizeof(decoder->dump.directory));
        memset(decoder->dump.filename, 0, sizeof(decoder->dump.filename));
#endif
    }

    //REDTEST
    decoder->frm = 0;
    decoder->run = 1;

#if _ALLOCATOR
    decoder->allocator = NULL;
#endif

    decoder->initialized = 1; //DAN20060912
}

// Store the license key in the decoder, but only if no license has been
// set yet (an all-zero key means "unlicensed").
void InitDecoderLicense(DECODER *decoder, const unsigned char *licensekey)
{
    if (decoder && licensekey)
    {
        const unsigned char unlicensed[16] = {0};
        //memset(unlicensed, 0, sizeof(unlicensed));

        // Has the license been set?
        if (memcmp(decoder->licensekey, unlicensed, sizeof(decoder->licensekey)) == 0)
        {
            // Copy the license into the decoder
            memcpy(decoder->licensekey, licensekey, sizeof(decoder->licensekey));
        }
    }
}

// Free data allocated within the decoder
void ClearDecoder(DECODER *decoder)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
#if _ALLOCATOR
    ALLOCATOR *allocator = decoder->allocator;
#endif
    // Free the transforms allocated in the decoder
    int i;

    if(decoder->initialized == 0)
        return; // nothing to free //DAN20060912

#if _GRAPHICS
    DrawClose(decoder);
#endif

    // Release the per-priority metadata databases
    for(i=0; i<=METADATA_PRIORITY_MAX; i++)
    {
        if(decoder->DataBases[i])
        {
#if _ALLOCATOR
            Free(decoder->allocator, decoder->DataBases[i]);
#else
            MEMORY_FREE(decoder->DataBases[i]);
#endif
            decoder->DataBases[i] = NULL;
            decoder->DataBasesSize[i] = 0;
            decoder->DataBasesAllocSize[i] = 0;
        }
    }

    if(decoder->sqrttable)
    {
#if _ALLOCATOR
        Free(decoder->allocator, decoder->sqrttable);
#else
        MEMORY_FREE(decoder->sqrttable);
#endif
        decoder->sqrttable = NULL;
    }

    // Release the wavelet transforms for every channel
    for (i = 0; i < TRANSFORM_MAX_CHANNELS; i++)
    {
#if _ALLOCATOR
        FreeTransform(allocator, decoder->transform[i]);
#else
        FreeTransform(decoder->transform[i]);
#endif
        decoder->transform[i] = NULL;
    }

    if(decoder->aligned_sample_buffer)
    {
#if _ALLOCATOR
        FreeAligned(decoder->allocator, decoder->aligned_sample_buffer);
#else
        MEMORY_ALIGNED_FREE(decoder->aligned_sample_buffer);
#endif
        decoder->aligned_sample_buffer = NULL;
        decoder->aligned_sample_buffer_size = 0;
    }

    if(decoder->tools)
    {
#if _ALLOCATOR
        Free(decoder->allocator, decoder->tools);
#else
        MEMORY_FREE(decoder->tools);
#endif
        decoder->tools = NULL;
    }

    // Free the buffer allocated for decoding
    if (decoder->buffer != NULL)
    {
#if DEBUG_BUFFER_USAGE
        // Debug-only accounting: scan back from the end of the buffer for
        // the high-water mark (bytes still holding the 0x01 fill pattern).
        int i;
        char *ptr = (char *)decoder->buffer;
        FILE *fp = fopen("C:/free.txt", "a");
        fprintf(fp, "decoder->buffer = %08x buffer_size = %d\n", decoder->buffer ,decoder->buffer_size);
        i = decoder->buffer_size-1;
        while(ptr[i] == 1) i--;
        fprintf(fp, "used %2.3f percent\n", 100.0*(float)i/(float)decoder->buffer_size);
        fclose(fp);
#endif
#if _ALLOCATOR
        FreeAligned(allocator, decoder->buffer);
#else
        MEMORY_ALIGNED_FREE(decoder->buffer);
#endif
        decoder->buffer = NULL;
        decoder->buffer_size = 0;

        // Clear the fields in the scratch buffer descriptor
        memset(&decoder->scratch, 0, sizeof(SCRATCH));

        // Eventually the buffer and buffer size fields will be obsolete
    }

    // Release the per-thread scratch buffers
    for(i=0;i<_MAX_CPUS;i++)
    {
        if(decoder->threads_buffer[i])
        {
#if _ALLOCATOR
            FreeAligned(decoder->allocator, decoder->threads_buffer[i]);
#else
            MEMORY_ALIGNED_FREE(decoder->threads_buffer[i]);
#endif
            decoder->threads_buffer[i] = NULL;
        }
    }
    decoder->threads_buffer_size = 0;

    // Do not attempt to free the codebooks since the
    // codebook pointers are references to static tables

    // Can free some of the data structures allocated by the decoder
    FreeCodebooks(decoder);

#if _INTERLACED_WORKER_THREADS
    if(decoder->interlaced_worker.lock_init) // threads started
    {
        int i;

        // Signal this thread to stop
        SetEvent(decoder->interlaced_worker.stop_event);

        // Free all handles used by the worker threads
        for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
        {
            WaitForSingleObject(decoder->interlaced_worker.handle[i], INFINITE); //JY20080307
            CloseHandle(decoder->interlaced_worker.handle[i]);
            CloseHandle(decoder->interlaced_worker.start_event[i]);
            CloseHandle(decoder->interlaced_worker.done_event[i]);
        }
        CloseHandle(decoder->interlaced_worker.row_semaphore);
        CloseHandle(decoder->interlaced_worker.stop_event);

        for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.handle[i] = 0;
            decoder->interlaced_worker.start_event[i] = 0;
            decoder->interlaced_worker.done_event[i] = 0;
        }
        decoder->interlaced_worker.row_semaphore = 0;
        decoder->interlaced_worker.stop_event = 0;
    }

    // Free the critical section used by the worker threads
    DeleteCriticalSection(&decoder->interlaced_worker.lock);
    decoder->interlaced_worker.lock_init = 0;
#endif

#if _THREADED
    // Tear down the thread pools created by DecodeInit / DecodeEntropyInit
    if(decoder->entropy_worker_new.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->entropy_worker_new.pool);
        DeleteLock(&decoder->entropy_worker_new.lock);
    }
    if(decoder->worker_thread.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->worker_thread.pool);
        DeleteLock(&decoder->worker_thread.lock);
    }
    if(decoder->draw_thread.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->draw_thread.pool);
        DeleteLock(&decoder->draw_thread.lock);
    }
/*  if(decoder->qt_convert_worker.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->qt_convert_worker.pool);
        DeleteLock(&decoder->qt_convert_worker.lock);
    }
    if(decoder->qt_scale_worker.pool.thread_count)
    {
        ThreadPoolDelete(&decoder->qt_scale_worker.pool);
        DeleteLock(&decoder->qt_scale_worker.lock);
    } */
    if(decoder->parallelDecoder)
    {
        // Recursively clear and free the secondary (parallel) decoder
        if(decoder->parallelDecoder->decoder_thread.pool.thread_count)
        {
            ThreadPoolDelete(&decoder->parallelDecoder->decoder_thread.pool);
            DeleteLock(&decoder->parallelDecoder->decoder_thread.lock);
            decoder->parallelDecoder->decoder_thread.pool.thread_count = 0;
        }

        ClearDecoder(decoder->parallelDecoder);
#if _ALLOCATOR
        Free(decoder->allocator, decoder->parallelDecoder);
#else
        MEMORY_FREE(decoder->parallelDecoder);
#endif
        decoder->parallelDecoder = NULL;
    }
#endif

    //MEMORY_ALIGNED_FREE(RawBayer16);
#if _ALLOCATOR
    if(decoder->RGBFilterBuffer16)
    {
        FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
        decoder->RGBFilterBuffer16 = 0;
        decoder->RGBFilterBufferSize = 0;
    }
    if(decoder->RawBayer16)
    {
        FreeAligned(decoder->allocator, decoder->RawBayer16);
        decoder->RawBayer16 = 0;
        decoder->RawBayerSize = 0;
    }
    if(decoder->StereoBuffer)
    {
        FreeAligned(decoder->allocator, decoder->StereoBuffer);
        decoder->StereoBuffer = 0;
        decoder->StereoBufferSize = 0;
    }
    if(decoder->RawCube)
    {
        FreeAligned(decoder->allocator, decoder->RawCube);
        decoder->RawCube = 0;
    }
    if(decoder->Curve2Linear)
    {
        FreeAligned(decoder->allocator, decoder->Curve2Linear);
        decoder->Curve2Linear = 0;
    }
    if(decoder->Linear2CurveRed)
    {
        FreeAligned(decoder->allocator, decoder->Linear2CurveRed);
        decoder->Linear2CurveRed = NULL;
    }
    if(decoder->Linear2CurveGrn)
    {
        FreeAligned(decoder->allocator, decoder->Linear2CurveGrn);
        decoder->Linear2CurveGrn = NULL;
    }
    if(decoder->Linear2CurveBlu)
    {
        FreeAligned(decoder->allocator, decoder->Linear2CurveBlu);
        decoder->Linear2CurveBlu = NULL;
    }
    if(decoder->BYR4LinearRestore)
    {
        FreeAligned(decoder->allocator, decoder->BYR4LinearRestore);
        decoder->BYR4LinearRestore = NULL;
    }
    if(decoder->GammaContrastRed)
    {
        FreeAligned(decoder->allocator, decoder->GammaContrastRed);
        decoder->GammaContrastRed = NULL;
    }
    if(decoder->GammaContrastGrn)
    {
        FreeAligned(decoder->allocator, decoder->GammaContrastGrn);
        decoder->GammaContrastGrn = NULL;
    }
    if(decoder->GammaContrastBlu)
    {
        FreeAligned(decoder->allocator, decoder->GammaContrastBlu);
        decoder->GammaContrastBlu = NULL;
    }

    //3d LUT
    {
        if(decoder->LUTcache)
            Free(decoder->allocator, decoder->LUTcache);
        decoder->LUTcache = NULL;
        decoder->LUTcacheCRC = 0;
    }

#if WARPSTUFF
    {
        if (decoder->lens_correct_buffer)
#if _ALLOCATOR
            Free(decoder->allocator, decoder->lens_correct_buffer);
#else
            MEMORY_ALIGNED_FREE(decoder->lens_correct_buffer);
#endif
        if (decoder->mesh)
            geomesh_destroy(decoder->mesh);
        decoder->lastLensOffsetX = 0;
        decoder->lastLensOffsetY = 0;
        decoder->lastLensOffsetZ = 0;
        decoder->lastLensOffsetR = 0;
        decoder->lastLensZoom = 0;
        decoder->lastLensFishFOV = 0;
        decoder->lastLensGoPro = 0;
        decoder->lastLensSphere = 0;
        decoder->lastLensFill = 0;
        decoder->lastLensStyleSel = 0;
        memset(decoder->lastLensCustomSRC, 0, sizeof(decoder->lastLensCustomSRC));
        memset(decoder->lastLensCustomDST, 0, sizeof(decoder->lastLensCustomDST));
        decoder->mesh = NULL;
        decoder->lens_correct_buffer = NULL;
    }
#endif

    if(decoder->overrideData)
    {
        Free(decoder->allocator, decoder->overrideData);
        decoder->overrideData = NULL;
        decoder->overrideSize = 0;
    }

    for(i=0; i<64; i++)
    {
        if(decoder->mdc[i])
            Free(decoder->allocator, decoder->mdc[i]);
        decoder->mdc[i] = NULL;
        decoder->mdc_size[i] = 0;
    }
#else
    // Non-allocator build: same cleanup using the MEMORY_* macros
    if(decoder->RGBFilterBuffer16)
    {
        MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
        decoder->RGBFilterBuffer16 = NULL;
    }
    if(decoder->RawBayer16)
    {
        MEMORY_ALIGNED_FREE(decoder->RawBayer16);
        decoder->RawBayer16 = NULL;
    }
    if(decoder->StereoBuffer)
    {
        MEMORY_ALIGNED_FREE(decoder->StereoBuffer);
        decoder->StereoBuffer = NULL;
        decoder->StereoBufferSize = 0;
    }
    if(decoder->RawCube)
    {
        MEMORY_ALIGNED_FREE(decoder->RawCube);
        decoder->RawCube = NULL;
    }
    if(decoder->Curve2Linear)
    {
        MEMORY_ALIGNED_FREE(decoder->Curve2Linear);
        decoder->Curve2Linear = NULL;
    }
    if(decoder->BYR4LinearRestore)
    {
        MEMORY_ALIGNED_FREE(decoder->BYR4LinearRestore);
        decoder->BYR4LinearRestore = NULL;
    }
    if(decoder->Linear2CurveRed)
    {
        MEMORY_ALIGNED_FREE(decoder->Linear2CurveRed);
        decoder->Linear2CurveRed = NULL;
    }
    if(decoder->Linear2CurveGrn)
    {
        MEMORY_ALIGNED_FREE(decoder->Linear2CurveGrn);
        decoder->Linear2CurveGrn = NULL;
    }
    if(decoder->Linear2CurveBlu)
    {
        MEMORY_ALIGNED_FREE(decoder->Linear2CurveBlu);
        decoder->Linear2CurveBlu = NULL;
    }
    if(decoder->GammaContrastRed)
    {
        MEMORY_ALIGNED_FREE(decoder->GammaContrastRed);
        decoder->GammaContrastRed = NULL;
    }
    if(decoder->GammaContrastGrn)
    {
        MEMORY_ALIGNED_FREE(decoder->GammaContrastGrn);
        decoder->GammaContrastGrn = NULL;
    }
    if(decoder->GammaContrastBlu)
    {
MEMORY_ALIGNED_FREE(decoder->GammaContrastBlu); decoder->GammaContrastBlu = NULL; } //3d LUT { if(decoder->LUTcache) MEMORY_FREE(decoder->LUTcache); decoder->LUTcache = NULL; decoder->LUTcacheCRC = 0; } #if WARPSTUFF { if (decoder->lens_correct_buffer) #if _ALLOCATOR Free(decoder->allocator, decoder->lens_correct_buffer); #else MEMORY_ALIGNED_FREE(decoder->lens_correct_buffer); #endif if (decoder->mesh) geomesh_destroy(mesh); decoder->mesh = NULL; decoder->lens_correct_buffer = NULL; decoder->lastLensOffsetX = 0; decoder->lastLensOffsetY = 0; decoder->lastLensOffsetZ = 0; decoder->lastLensOffsetR = 0; decoder->lastLensZoom = 0; decoder->lastLensFishFOV = 0; decoder->lastLlensGoPro = 0; decoder->lastLlensSphere = 0; decoder->lastLlensFill = 0; decoder->lastLlensStyleSel = 0; memset(decoder->lastLensCustomSRC, 0, sizeof(decoder->lastLensCustomSRC)); memset(decoder->lastLensCustomDST, 0, sizeof(decoder->lastLensCustomDST)); } #endif if(decoder->overrideData) { MEMORY_FREE(decoder->overrideData); decoder->overrideData = NULL; decoder->overrideSize = 0; } for(i=0; i<64; i++) { if(decoder->mdc[i]) MEMORY_FREE(decoder->mdc[i]); decoder->mdc[i] = NULL; decoder->mdc_size[i] = 0; } #endif #ifdef SPI_LOADER SPIReleaseAll(decoder); //KeyframesReleaseAll(decoder); #endif decoder->initialized = 0;// cleared } void ExitDecoder(DECODER *decoder) { // Let the caller keep the logfile open or choose to close it //if (logfile) fclose(logfile); // Free data allocated within the decoder ClearDecoder(decoder); } // Allocate the data structures for decoding a group void AllocDecoderGroup(DECODER *decoder) { #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; #endif //CODEC_STATE *codec = &decoder->codec; //int num_channels = codec->num_channels;//DAN07022004 int channel; assert(decoder->codec.num_channels <= TRANSFORM_MAX_CHANNELS); //DAN07022004 for (channel = 0; channel < TRANSFORM_MAX_CHANNELS; channel++)//DAN07022004 { TRANSFORM *transform = decoder->transform[channel]; // Need 
        // to allocate a transform data structure?
        if (transform == NULL)
        {
#if _ALLOCATOR
            transform = (TRANSFORM *)Alloc(allocator, sizeof(TRANSFORM));
#else
            transform = (TRANSFORM *)MEMORY_ALLOC(sizeof(TRANSFORM));
#endif
            assert(transform != NULL);
            if (transform == NULL) {
                decoder->error = CODEC_ERROR_TRANSFORM_MEMORY;
                return;
            }
            memset(transform, 0, sizeof(TRANSFORM));
            decoder->transform[channel] = transform;
#if _TIMING
            alloc_transform_count++;
#endif
        }
    }
}

// Allocate the buffer used for intermediate results during decoding.
// The base size is derived from the frame width and padded per output
// format; an existing buffer is reused when large enough.  Also allocates
// one scratch buffer per worker thread.  Returns false on allocation failure.
bool AllocDecoderBuffer(DECODER *decoder, int width, int height, int format)
{
    int cpus;
    size_t size;
    size_t row_size;
    char *buffer;

#if 0
    // Allocate a buffer large enough for six rows of cache lines
    size = width * sizeof(PIXEL);
    size = ALIGN(size, _CACHE_LINE_SIZE);
    size = 2 * TRANSFORM_MAX_CHANNELS * size;
#else
    // Allocate a buffer large enough for nine rows of cache lines
    size = width * sizeof(PIXEL) * 4;
    size = ALIGN(size, _CACHE_LINE_SIZE);
    size = 3 * TRANSFORM_MAX_CHANNELS * size;
#endif

    // Pad the buffer according to the row footprint of the output format
    switch (format)
    {
    case DECODED_FORMAT_V210:
    case DECODED_FORMAT_YU64:
        // Increase the buffer size for decoding to the V210 format
        row_size = 4 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 4 * 2 * row_size;
        break;

    case DECODED_FORMAT_YR16:
    case DECODED_FORMAT_CbYCrY_10bit_2_8:
    case DECODED_FORMAT_CbYCrY_16bit_2_14:
    case DECODED_FORMAT_CbYCrY_16bit_10_6:
        // Increase the buffer size for decoding to the YUV16 format
        row_size = 4 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 8 * 2 * row_size;
        break;

    case DECODED_FORMAT_RG48:
    case DECODED_FORMAT_WP13:
        // Increase the buffer size for decoding to the YUV16 format
        row_size = 6 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 12 * 2 * row_size;
        break;

    case DECODED_FORMAT_RG64:
        // Increase the buffer size for decoding to the YUV16 format
        row_size = 8 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 16 * 2 * row_size;
        break;

    case DECODED_FORMAT_BYR3:
        // Increase the buffer size for decoding to the YUV16 format
        row_size = 2 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 4 * 2 * row_size;
        break;

    case DECODED_FORMAT_BYR4:
        // Increase the buffer size for decoding to the YUV16 format
        row_size = 2 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 4 * 2 * row_size;
        break;

    case DECODED_FORMAT_B64A:
    case DECODED_FORMAT_W13A:
        // Increase the buffer size for decoding to the B64A format
        row_size = 8 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 16 * 2 * row_size;
        break;

    default:
        // Increase the buffer size for YUV to RGB conversion
        row_size = 3 * width * sizeof(PIXEL);
        row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
        size += 2 * 2 * row_size;
        break;
    }

    // Scale the buffer up for machines with many cores (more concurrent work)
    cpus = decoder->thread_cntrl.capabilities >> 16;
    if(cpus > 4)
        size *= 4;
    if(cpus > 16) //DAN20120803 -- 4444 clips
        size *= 2;

    // Has a buffer already been allocated?
    if (decoder->buffer != NULL)
    {
        // Is the buffer large enough?
        if (decoder->buffer_size < size)
        {
            // Free the previous buffer
#if _ALLOCATOR
            FreeAligned(decoder->allocator, decoder->buffer);
#else
            MEMORY_ALIGNED_FREE(decoder->buffer);
#endif
            decoder->buffer = NULL;
            decoder->buffer_size = 0;
        }
        else
        {
            return true;
        }
    }

    buffer = decoder->buffer;
    if(buffer == NULL)
    {
        // Allocate the decoding buffer
#if _ALLOCATOR
        buffer = (char *)AllocAligned(decoder->allocator, size, _CACHE_LINE_SIZE);
#else
        buffer = (char *)MEMORY_ALIGNED_ALLOC(size, _CACHE_LINE_SIZE);
#endif
        if(buffer == NULL)
        {
            return false;
        }
    }

#if DEBUG_BUFFER_USAGE
    // Fill with a known pattern so ClearDecoder can measure usage
    memset(buffer, 1, size);
#endif

    // Save the buffer and its size in the decoder
    decoder->buffer = buffer;
    decoder->buffer_size = size;

    // Initialize the scratch space descriptor
    InitScratchBuffer(&decoder->scratch, buffer, size);

    // allocate buffer for each debayer/color formatting thread
    {
        int i;
        size = (width+16)*3*2*4*2*4;// sixteen lines

        if(height*4 > width*3) //square or tall images where running out of scratch space for zooms.
            size *= 1 + ((height+(width/2))/width);

        // Existing per-thread buffers too small? Free them all first.
        if (decoder->threads_buffer_size < size)
        {
            for(i=0;i<_MAX_CPUS;i++)
            {
                if(decoder->threads_buffer[i])
                {
#if _ALLOCATOR
                    FreeAligned(decoder->allocator, decoder->threads_buffer[i]);
#else
                    MEMORY_ALIGNED_FREE(decoder->threads_buffer[i]);
#endif
                    decoder->threads_buffer[i] = NULL;
                }
            }
            decoder->threads_buffer_size = 0;
        }

        // One scratch buffer per worker thread (cpu)
        for(i=0;i<cpus;i++)
        {
            if(decoder->threads_buffer[i] == NULL)
            {
#if _ALLOCATOR
                decoder->threads_buffer[i] = (char *)AllocAligned(decoder->allocator, size, _CACHE_LINE_SIZE);
#else
                decoder->threads_buffer[i] = (char *)MEMORY_ALIGNED_ALLOC(size, _CACHE_LINE_SIZE);
#endif
                if(decoder->threads_buffer[i] == NULL)
                {
                    return false;
                }
            }
        }
        decoder->threads_buffer_size = size;
    }

    // Eventually the scratch space descriptor will replace the buffer and buffer_size fields
    return true;
}

// Resize the decoding buffer for new frame dimensions or output format.
bool ResizeDecoderBuffer(DECODER *decoder, int width, int height, int format)
{
    // Check that the dimensions are valid
    assert(width > 0);
    assert(height > 0);

    // Just call the allocation routine
    // the allocation routine (it reuses the buffer when already large enough)
    return AllocDecoderBuffer(decoder, width, height, format);
}

// Clear the "band decoded/started" flags on every wavelet of every
// channel transform so a new sample can be decoded into them.
void ClearTransformFlags(DECODER *decoder)
{
    TRANSFORM **transform_array = decoder->transform;
    int channel;

    for (channel = 0; channel < TRANSFORM_MAX_CHANNELS; channel++)
    {
        TRANSFORM *transform = transform_array[channel];
        int index;

        if (transform == NULL) break;

        for (index = 0; index < TRANSFORM_MAX_WAVELETS; index++)
        {
            IMAGE *wavelet = transform->wavelet[index];
            if (wavelet != NULL) {
                wavelet->band_valid_flags = 0;
                wavelet->band_started_flags = 0;
            }
        }
    }
}

// Initialize the tables for decoding the wavelet transforms
// (copies the subband-to-wavelet and subband-to-band index maps into the
// decoder; entries beyond num_subbands are zeroed first).
void InitWaveletDecoding(DECODER *decoder, int subband_wavelet_index[], int subband_band_index[], int num_subbands)
{
    size_t subband_table_size = num_subbands * sizeof(int);

    memset(decoder->subband_wavelet_index, 0, sizeof(decoder->subband_wavelet_index));
    memcpy(decoder->subband_wavelet_index, subband_wavelet_index, subband_table_size);

    memset(decoder->subband_band_index, 0, sizeof(decoder->subband_band_index));
    memcpy(decoder->subband_band_index, subband_band_index, subband_table_size);
}

#if 0
static bool IsValidFormat(int format)
{
    bool valid_format = true;

    //TODO: Change this routine into a switch statement

    if(format == COLOR_FORMAT_BYR5) return true; // can decode to BYR5
    if(format == COLOR_FORMAT_BYR4) return true; // can decode to BYR4
    if(format == COLOR_FORMAT_BYR3) return true; // can decode to BYR3
    if(format == COLOR_FORMAT_BYR2) return true; // can decode to BYR2
    if(format == COLOR_FORMAT_RG48) return true; // can decode to RGB48
    if(format == COLOR_FORMAT_RG64) return true; // can decode to RGBA64

    if (format == COLOR_FORMAT_B64A) {
        return true; // Can decode to B64A
    }

    if (!(COLOR_FORMAT_UNKNOWN < format && format <= MAX_DECODED_COLOR_FORMAT)) {
        valid_format = false;
    }

    return valid_format;
}
#endif

#if _INTERLACED_WORKER_THREADS
// Create the events, semaphore, critical section, and worker threads used
// for interlaced decoding.  Idempotent: does nothing if lock_init is set.
void StartInterlaceWorkerThreads(DECODER *decoder)
{
    int i;

    if(decoder->interlaced_worker.lock_init == 0)
    {
        // Create events for starting the worker threads
        for(i=0; i<THREADS_IN_LAST_WAVELET; i++) {
            decoder->interlaced_worker.start_event[i] = CreateEvent(NULL, false, false, NULL);
        }

        // Create a semaphore to signal the worker threads to process rows
        decoder->interlaced_worker.row_semaphore = CreateSemaphore(NULL, 0, LONG_MAX, NULL);

        // Create an event for each worker thread to signal that it has finished
        for(i=0; i<THREADS_IN_LAST_WAVELET; i++) {
            decoder->interlaced_worker.done_event[i] = CreateEvent(NULL, false, false, NULL);
        }

        // Create an event for forcing the worker threads to terminate
        decoder->interlaced_worker.stop_event = CreateEvent(NULL, true, false, NULL);

        // Zero the count of worker threads that are active
        decoder->interlaced_worker.thread_count = 0;

        // Initialize the lock for controlling access to the worker thread data
        InitializeCriticalSection(&decoder->interlaced_worker.lock);
        decoder->interlaced_worker.lock_init = 1;

        for (i = 0; i < THREADS_IN_LAST_WAVELET; i++)
        {
            decoder->interlaced_worker.id[i] = 0;
            decoder->interlaced_worker.handle[i] = CreateThread(NULL, 0, InterlacedWorkerThreadProc, decoder, 0, &decoder->interlaced_worker.id[i]);
            assert(decoder->interlaced_worker.handle[i] != NULL);
        }
    }
}
#endif

#if 0
int TestException(int x)
{
    static volatile int y1 = 100;
    volatile int x1 = x;
    return y1 / x1;
}
#endif

// Process device driver request to initialize the decoder
// (full setup: codesets, codebooks, FSMs, thread pools, and the decoding
// buffers for the requested output width/height/format/resolution).
#if _ALLOCATOR
bool DecodeInit(ALLOCATOR *allocator, DECODER *decoder, int width, int height, int format, int resolution, FILE *logfile)
#else
bool DecodeInit(DECODER *decoder, int width, int height, int format, int resolution, FILE *logfile)
#endif
{
    CODESET codesets[CODEC_NUM_CODESETS];
    int i;
    int cpus;
    //int x = 0;

    // Copy the static codesets into a local array (freed again at the end)
#if CODEC_NUM_CODESETS == 3
    memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
    memcpy(&codesets[1], &SECOND_CODESET, sizeof(CODESET));
    memcpy(&codesets[2], &THIRD_CODESET, sizeof(CODESET));
#elif CODEC_NUM_CODESETS == 2
    memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
    memcpy(&codesets[1], &SECOND_CODESET, sizeof(CODESET));
#else
    memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
#endif

#ifdef _WIN32
    // Set the handler for system exceptions
    SetDefaultExceptionHandler();
#endif

    //TestException(x);

    // Clear all decoder fields except the logfile and set the codebooks for decoding
    InitDecoder(decoder, logfile, &codesets[0]);

#if _ALLOCATOR
    decoder->allocator = allocator;
#endif

    if(decoder->thread_cntrl.capabilities == 0)
    {
        // Determine the processor capabilities
        SetDecoderCapabilities(decoder);
    }
    // Upper 16 bits of the capabilities word hold the CPU count
    cpus = decoder->thread_cntrl.capabilities >> 16;
    assert(cpus > 0 && cpus <= _MAX_CPUS);

    // Decode to half resolution?
    if (resolution == DECODED_RESOLUTION_HALF)
    {
        // Reduce the frame size by half in each dimension
        width = width/2;
        height = height/2;
    }
    else if (resolution == DECODED_RESOLUTION_QUARTER)
    {
        // Reduce the frame size by one fourth in each dimension
        width = width/4;
        height = height/4;
    }

    // Initialize the codebooks
#if _ALLOCATOR
    if (!InitCodebooks(decoder->allocator, codesets)) {
        //decoder->error = CODEC_ERROR_INIT_CODEBOOKS;
        // The subroutine has already set the error code
        return false;
    }
#else
    if (!InitCodebooks(codesets)) {
        //decoder->error = CODEC_ERROR_INIT_CODEBOOKS;
        // The subroutine has already set the error code
        return false;
    }
#endif

    // Initialize the FSM
    InitDecoderFSM(decoder, &codesets[0]);

    // Check the frame dimensions and format
    //assert(width > 0);
    //assert(height > 0);
    // assert(IsValidFormat(format));

#if _THREADED_DECODER
    // Create a semaphore to signal the transform thread to begin processing

    // Initialize the transform queue
    decoder->transform_queue.started = 0;
    decoder->transform_queue.num_entries = 0;
    decoder->transform_queue.next_entry = 0;
    decoder->transform_queue.free_entry = 0;
    memset(decoder->transform_queue.queue, 0, sizeof(decoder->transform_queue.queue));
#endif

#if _INTERLACED_WORKER_THREADS && _DELAY_THREAD_START==0
    StartInterlaceWorkerThreads(decoder);
#endif

#if _THREADED
#if !_DELAY_THREAD_START //start threads now if not _DELAY_THREAD_START
    if(cpus > 1)
    {
        // Entropy decoding pool is capped at four threads
        int threads = cpus;
        if(threads > 4) threads = 4;
        CreateLock(&decoder->entropy_worker_new.lock);
        // Initialize the pool of transform worker threads
        ThreadPoolCreate(&decoder->entropy_worker_new.pool, threads, EntropyWorkerThreadProc, decoder);
    }

    // Initialize the lock that controls access to the generic worker thread data
    CreateLock(&decoder->worker_thread.lock);
    // Initialize the pool of transform worker threads
    ThreadPoolCreate(&decoder->worker_thread.pool, cpus, WorkerThreadProc, decoder);
#endif
#endif

    // Set the frame dimensions and format
    SetDecoderFormat(decoder, width, height, format, resolution);

    // Allocate the data structure for decoding the samples
    AllocDecoderGroup(decoder);

    // Note that this code assumes that the samples to decode are groups
    // as opposed to isolated frames which are not supported in this code

    // Allocate a buffer for storing intermediate results during decoding
    if (!AllocDecoderBuffer(decoder, width, height, format)) {
        return false;
    }

    // Should check that the finite state machine tables were initialized
    // NOTE(review): uses a negative flags value as the "initialized" marker --
    // confirm the FSM table init sets flags < 0.
    assert(decoder->fsm[0].table.flags < 0);

    // Initialize the finite state machine for this decoder
    for(i=0; i<CODEC_NUM_CODESETS; i++)
    {
        InitFSM(&decoder->fsm[i], codesets[i].fsm_table);
#if _COMPANDING
        // Scale the values in the finite state machine entries for companding
        ScaleFSM(&decoder->fsm[i].table);
#endif
    }

    // Indicate that the decoder has been initialized
    decoder->state = DECODER_STATE_INITIALIZED;

#if (1 && DUMP)
    // Write the wavelet bands as images
    SetDumpDirectory(CODEC_TYPE(decoder), DUMP_DECODER_DIRECTORY);
    SetDumpFilename(CODEC_TYPE(decoder), DUMP_DEFAULT_FILENAME);
    SetDumpChannelMask(CODEC_TYPE(decoder), 1/*ULONG_MAX*/);
//  SetDumpWaveletMask(CODEC_TYPE(decoder), 7<<4 | 1/*ULONG_MAX*/);
    SetDumpWaveletMask(CODEC_TYPE(decoder), ULONG_MAX);

    // Set this flag to enable output
    decoder->dump.enabled = true;
#endif

#if _TIMING
    // Initialize the global timers and counters
// ...global timers and counters
InitTiming();
#endif

	//DAN20160203 Fix for a memory leak in InitCookbooks
	// Free the temporary codebook tables for every codeset; the decoder keeps
	// its own initialized copies, so these allocations are no longer needed.
	for (i = 0; i < CODEC_NUM_CODESETS; i++)
	{
#if _ALLOCATOR
		Free(allocator, codesets[i].codebook_runbook);
		codesets[i].codebook_runbook = NULL;
		Free(allocator, codesets[i].fastbook);
		codesets[i].fastbook = NULL;
		Free(allocator, codesets[i].valuebook);
		codesets[i].valuebook = NULL;
#else
		MEMORY_FREE(codesets[i].codebook_runbook);
		codesets[i].codebook_runbook = NULL;
		MEMORY_FREE(codesets[i].fastbook);
		codesets[i].fastbook = NULL;
		MEMORY_FREE(codesets[i].valuebook);
		codesets[i].valuebook = NULL;
#endif
	}

	// The decoder has been initialized successfully
	return true;
}

// Prepare the decoder for entropy decoding: determine the processor count
// (honoring any user-specified CPU limit stored in cfhddata.cpu_limit) and,
// when built with _THREADED and _DELAY_THREAD_START, lazily create the
// entropy worker thread pool on first use.
void DecodeEntropyInit(DECODER *decoder)
{
	int cpus = 1;

	if(decoder->thread_cntrl.capabilities == 0)
	{
		// Determine the processor capabilities
		SetDecoderCapabilities(decoder);
	}

	// The CPU count is stored in the upper 16 bits of the capabilities word
	cpus = decoder->thread_cntrl.capabilities >> 16;

	// Clamp the CPU count to the user-specified limit (if any) and write the
	// clamped count back into the capabilities word
	if (cpus > (int)decoder->cfhddata.cpu_limit && decoder->cfhddata.cpu_limit)
	{
		cpus = decoder->cfhddata.cpu_limit;
		decoder->thread_cntrl.limit = cpus;
		decoder->thread_cntrl.set_thread_params = 1;
		decoder->thread_cntrl.capabilities &= 0xffff;
		decoder->thread_cntrl.capabilities |= cpus<<16;
	}

	assert(cpus > 0 && cpus <= _MAX_CPUS);

#if _THREADED
#if _DELAY_THREAD_START	// start the threads here (they were delayed at decoder init)
	// Create the entropy worker pool once (at most four threads)
	if(cpus > 1 && decoder->entropy_worker_new.pool.thread_count == 0)
	{
		int threads = cpus;
		if(threads > 4) threads = 4;

		CreateLock(&decoder->entropy_worker_new.lock);

		// Initialize the pool of transform worker threads
		ThreadPoolCreate(&decoder->entropy_worker_new.pool,
						 threads,
						 EntropyWorkerThreadProc,
						 decoder);
	}
#endif
#endif
}

// Install a new block of metadata override data in the decoder, replacing
// (and freeing) any previous override data.  Passing overrideSize == 0
// clears the override metadata databases instead.  Always returns true.
bool DecodeOverrides(DECODER *decoder, unsigned char *overrideData, int overrideSize)
{
	// Release any override data from a previous call
	if(decoder->overrideData)
	{
#if _ALLOCATOR
		Free(decoder->allocator, decoder->overrideData);
#else
		MEMORY_FREE(decoder->overrideData);
#endif
		decoder->overrideData = NULL;
		decoder->overrideSize = 0;
	}

	if(overrideSize)
	{
#if _ALLOCATOR
		decoder->overrideData =
			Alloc(decoder->allocator, overrideSize);
#else
		decoder->overrideData = MEMORY_ALLOC(overrideSize);
#endif
		// Keep a private copy of the caller's override data
		if(decoder->overrideData)
		{
			memcpy(decoder->overrideData, overrideData, overrideSize);
			decoder->overrideSize = overrideSize;
		}
	}
	else
	{
		int i;
		// Clear the override metadata databases.
		//This was 0 to max but that cause right eye primary corrections(side-by-side) mode to flicker.
		// This database clearing was added but I don't know why.
		for(i=METADATA_PRIORITY_OVERRIDE; i<=METADATA_PRIORITY_MAX; i++)
		{
			if(decoder->DataBases[i])
			{
#if _ALLOCATOR
				Free(decoder->allocator, decoder->DataBases[i]);
#else
				MEMORY_FREE(decoder->DataBases[i]);
#endif
				decoder->DataBases[i] = NULL;
				decoder->DataBasesSize[i] = 0;
				decoder->DataBasesAllocSize[i] = 0;
			}
		}
	}

	return true;
}

// Return the transform data structure for the specified channel in the
// group, allocating (and zeroing) it on first use.  Returns NULL if the
// channel number is out of range or the allocation fails.
TRANSFORM *AllocGroupTransform(GROUP *group, int channel)
{
#if _ALLOCATOR
	//TODO:ALLOC Change this routine to take an allocator as the first argument
	ALLOCATOR *allocator = NULL;
#endif
	TRANSFORM *transform;

	// Channel zero is a special case because it may mean
	// that the group header has not been decoded yet
	if (channel != 0)
	{
		// Make sure that the channel number is in range
		assert(0 <= channel && channel < group->header.num_channels);
		if (!(0 <= channel && channel < group->header.num_channels))
			return NULL;
	}

	transform = group->transform[channel];

	// Need to allocate a transform data structure?
if (transform == NULL) { #if _ALLOCATOR transform = (TRANSFORM *)Alloc(allocator, sizeof(TRANSFORM)); #else transform = (TRANSFORM *)MEMORY_ALLOC(sizeof(TRANSFORM)); #endif assert(transform != NULL); if (transform == NULL) return NULL; memset(transform, 0, sizeof(TRANSFORM)); group->transform[channel] = transform; #if _TIMING alloc_transform_count++; #endif } return transform; } //extern FILE *logfile; void EraseOutputBuffer(uint8_t *buffer, int width, int height, int32_t pitch, int format) { size_t size = height * pitch; union { uint8_t byte[4]; uint32_t word; } output; switch (format) { case DECODED_FORMAT_YUYV: output.byte[0] = COLOR_LUMA_BLACK; output.byte[1] = COLOR_CHROMA_ZERO; output.byte[2] = COLOR_LUMA_BLACK; output.byte[3] = COLOR_CHROMA_ZERO; break; default: //if (logfile) fprintf(logfile,"**Unknown format: %d\n", format); //assert(0); output.word = 0; break; } memset(buffer, output.word, size); } // Decode the coefficients in a subband bool DecodeSampleSubband(DECODER *decoder, BITSTREAM *input, int subband); // Decode the coefficients in a lowpass band bool DecodeSampleLowPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet); // Decode the coefficients in a highpass band bool DecodeSampleHighPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band, int threading); // Decode an empty band bool DecodeSampleEmptyBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band); bool DecodeBand16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height); bool DecodeBand16sLossless(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height); // Decode a sample channel header bool DecodeSampleChannelHeader(DECODER *decoder, BITSTREAM *input); // Apply the inverse horizontal-temporal transform to reconstruct the output frame void ReconstructSampleFrameToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch); #if 0 // Reconstruct the frame to quarter resolution 
// ...at full frame rate
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
							 uint8_t *frame1, uint8_t *frame2, int output_pitch,
							 FRAME_INFO *info, char *buffer, size_t buffer_size);
#else
// Reconstruct the frame to quarter resolution at full frame rate
void ReconstructQuarterFrame(DECODER *decoder, int num_channels, int frame_index,
							 uint8_t *output, int output_pitch, FRAME_INFO *info,
							 const SCRATCH *scratch, int precision);
#endif

// Copy the quarter resolution lowpass channels from the spatial transform
void CopyQuarterFrameToBuffer(TRANSFORM **transform_array, int num_channels,
							  uint8_t *output, int output_pitch, FRAME_INFO *info, int precision);

// Convert the quarter resolution lowpass channels to the specified output format
void ConvertQuarterFrameToBuffer(DECODER *decoder, TRANSFORM **transform_array, int num_channels,
								 uint8_t *output, int output_pitch, FRAME_INFO *info, int precision);

// Routines for converting the new encoded formats to the requested output format
CODEC_ERROR ReconstructSampleFrameRGB444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameRGBA4444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameYUVA4444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);

// The first Bayer routine calls the other Bayer routines for the decoded resolution
CODEC_ERROR ReconstructSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameDeBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerHalfToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerQuarterToBuffer(DECODER *decoder, int frame,
													   uint8_t *output, int pitch);

CODEC_ERROR UncompressedSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameYUVToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameRGBToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);

// New code for handling the original YUV 4:2:2 encoded format
CODEC_ERROR ReconstructSampleFrameYUV422ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);

// Return true if the rest of the channel does not have to be decoded
// (enough subbands have already been decoded for the requested resolution).
static bool CanSkipChannel(DECODER *decoder, int resolution)
{
	CODEC_STATE *codec = &decoder->codec;
	int channel = codec->channel;
	TRANSFORM *transform = decoder->transform[channel];
	int transform_type = transform->type;

	// Can the rest of the channel be skipped?
	if (transform_type == TRANSFORM_TYPE_FIELDPLUS)
	{
		switch (resolution)
		{
		case DECODED_RESOLUTION_HALF:
			// All subbands needed for half resolution already decoded?
			if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
				return ((codec->decoded_subband_flags & DECODED_SUBBAND_MASK_HALF) == DECODED_SUBBAND_MASK_HALF);
			break;

		case DECODED_RESOLUTION_QUARTER:
			if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
				return ((codec->decoded_subband_flags & DECODED_SUBBAND_MASK_QUARTER) == DECODED_SUBBAND_MASK_QUARTER);
			break;

		case DECODED_RESOLUTION_LOWPASS_ONLY:
			// Only the lowpass band (bit zero) is required
			return (codec->decoded_subband_flags & 1);
			break;

		default:
			if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
			{
				if(decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY)
				{
					// If we are requesting a YUV decode we don't need the 4th channel
					if(codec->channel == 3)
					{
						return true;
					}
				}
			}
			break;
		}
	}
	else
	{
		// Subband masks for the spatial transform type
		const uint32_t decoded_subband_mask_half = 0x7F;
		const uint32_t decoded_subband_mask_quarter = 0x0F;

		//assert(transform_type == TRANSFORM_TYPE_SPATIAL);
		if (transform_type != TRANSFORM_TYPE_SPATIAL)
		{
			decoder->error = CODEC_ERROR_BAD_FRAME;
			// Bad transform type: report the error and skip the channel
			return true;
		}

		switch (resolution)
		{
		case DECODED_RESOLUTION_HALF:
			if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
				return ((codec->decoded_subband_flags & decoded_subband_mask_half) == decoded_subband_mask_half);
			break;

		case DECODED_RESOLUTION_QUARTER:
			if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
				return ((codec->decoded_subband_flags & decoded_subband_mask_quarter) == decoded_subband_mask_quarter);
			break;

		case DECODED_RESOLUTION_LOWPASS_ONLY:
			return (codec->decoded_subband_flags & 1);
			break;

		default:
			if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
			{
				if(decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY)
				{
					// If we are requesting a YUV decode we don't need the 4th channel
					if(codec->channel == 3)
					{
						return true;
					}
				}
			}
			break;
		}
	}

	// Cannot skip the rest of the channel
	return false;
}

#if 0
static bool CanSkipSubband(DECODER *decoder, int subband)
{
	// Bitmask indicates which subbands must be decoded for quarter resolution
	static uint32_t quarter_resolution_mask = 0x008F;

	// Convert the subband number into a bitmask (could use a lookup table)
	uint32_t subband_mask = SUBBAND_MASK(subband);

	// Select the resolution of the fully decoded frames
	int resolution = decoder->frame.resolution;

	switch (resolution)
	{
	case DECODED_RESOLUTION_QUARTER:
		//if (4 <= subband && subband <= 6)
		if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
		{
			if ((subband_mask & quarter_resolution_mask) == 0)
			{
				return true;
			}
		}
		break;

	default:
		// Assume that the subband must be decoded
		break;
	}

	return false;
}
#endif

// Return true if the wavelet exists and all bands are valid
static bool AllBandsValid(IMAGE *wavelet)
{
	return (wavelet != NULL && BANDS_ALL_VALID(wavelet));
}

#if DEBUG || 1
// Return true if every channel has a wavelet allocated at the frame index
// (despite the name, only the wavelet pointer is checked, not the bands)
static bool AllTransformBandsValid(TRANSFORM *transform_array[], int num_channels, int frame_index)
{
	int channel;

	// Validate the channel count
	if (!(1 <= num_channels && num_channels <= TRANSFORM_MAX_CHANNELS)) {
		//assert(0);
		return false;
	}

	// Validate the frame index
	if (!(0 <= frame_index
		  && frame_index < TRANSFORM_MAX_FRAMES)) {
		//assert(0);
		return false;
	}

	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform_array[channel]->wavelet[frame_index];
		//if (!AllBandsValid(wavelet))
		if(wavelet == NULL)
		{
			return false;
		}
	}

	// All wavelet bands in all channels are valid
	return true;
}

// Return true if the lowpass band (band zero) is valid in every channel
static bool AllLowpassBandsValid(TRANSFORM *transform_array[], int num_channels, int frame_index)
{
	int channel;

	if (!(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS)) {
		return false;
	}

	if (!(0 <= frame_index && frame_index < TRANSFORM_MAX_FRAMES)) {
		return false;
	}

	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform_array[channel]->wavelet[frame_index];
		if (!(wavelet != NULL && wavelet->band_valid_flags & BAND_VALID_MASK(0)))
		{
			return false;
		}
	}

	// All lowpass bands in all channels are valid
	return true;
}
#endif

// Estimate the encoded frame dimensions from the dimensions of the smallest
// (first) wavelet; used for old bitstreams that lack frame dimension tags.
static bool ComputeFrameDimensionsFromFirstWavelet(int transform_type,
												   int first_wavelet_width,
												   int first_wavelet_height,
												   int *frame_width_out,
												   int *frame_height_out)
{
	int frame_width;
	int frame_height;

	// Expansion factor from smallest wavelet to full frame
	// (presumably three wavelet levels => 8x per dimension — TODO confirm)
	int expansion = 8;

	switch (transform_type)
	{
	case TRANSFORM_TYPE_SPATIAL:
		frame_width = first_wavelet_width * expansion;
		frame_height = first_wavelet_height * expansion;
		break;

	case TRANSFORM_TYPE_FIELDPLUS:
		frame_width = first_wavelet_width * expansion;
		frame_height = first_wavelet_height * expansion;
		break;

	default:
		//assert(0);
		return false;
	}

	// Return the frame dimensions
	*frame_width_out = frame_width;
	*frame_height_out = frame_height;

	return true;
}

// Decode the sample header to determine the type of sample and other parameters
bool ParseSampleHeader(BITSTREAM *input, SAMPLE_HEADER *header)
{
	TAGVALUE segment;
	int sample_type;
	int sample_size = 0;

	// Group index
	uint32_t channel_size[TRANSFORM_MAX_CHANNELS];

	// Number of channels in the group index
	int channel_count = 0;

	// Values used for computing the frame width and height (if necessary)
	int transform_type = -1;
	int first_wavelet_width
= 0; int first_wavelet_height = 0; int display_height = 0; int current_channel = 0; int currentVideoChannel = header->videoChannels; int find_lowpass_bands = header->find_lowpass_bands & 1; int find_uncompressed = header->find_lowpass_bands & 2 ? 1 : 0; int find_header_info_only = header->find_lowpass_bands & 4 ? 1 : 0; if (header == NULL) { return false; } if(currentVideoChannel == 0) currentVideoChannel = 1; // Clear the entire sample header to prevent early return from this routine memset(header, 0, sizeof(SAMPLE_HEADER)); // Clear the error code header->error = CODEC_ERROR_OKAY; // Initialize the frame dimensions to unknown header->width = 0; header->height = 0; header->videoChannels = 1; // Initialize the original pixel format to unknown header->input_format = COLOR_FORMAT_UNKNOWN; // Initialize the encoded format to unknown header->encoded_format = ENCODED_FORMAT_UNKNOWN; // Clear the frame number in case it is not present in the sample header->frame_number = 0; // The video is not progressive if the sample flags are not present header->hdr_progressive = false; #if _BITSTREAM_UNALIGNED // Record the alignment of the bitstream within the sample SetBitstreamAlignment(input, 0); #endif sample_size = input->nWordsUsed; // Get the type of sample (should be the first tag value pair) segment = GetTagValue(input); //assert(segment.tuple.tag == CODEC_TAG_SAMPLE); if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) { header->error = CodecErrorBitstream(input); return false; } sample_type = segment.tuple.value; switch (sample_type) { case SAMPLE_TYPE_GROUP: // Group of frames header->key_frame = true; header->difference_frame = false; header->droppable_frame = false; break; case SAMPLE_TYPE_FRAME: // The second or later frame in a group header->key_frame = false; header->difference_frame = true; header->droppable_frame = true; break; case SAMPLE_TYPE_IFRAME: // One frame in the group header->key_frame = true; header->difference_frame = false; header->droppable_frame 
							= true;
		break;

	case SAMPLE_TYPE_SEQUENCE_HEADER:
		// Treat the video sequence header like a keyframe that can be dropped
		header->key_frame = true;
		header->difference_frame = false;
		header->droppable_frame = true;
		break;

	default:
		// Unknown type of sample
		header->error = CODEC_ERROR_SAMPLE_TYPE;
		return false;
		break;
	}

	// Continue parsing the sample header until all of the information has been found
	while ( (find_lowpass_bands == 1 && current_channel < 3) || //parse all
			(find_uncompressed == 1 && current_channel < 1) ||
			display_height == 0 ||
			header->width == 0 ||
			header->height == 0 ||
			header->input_format == COLOR_FORMAT_UNKNOWN ||
			header->frame_number == 0 ||
			(header->interlaced_flags == 0 && header->hdr_progressive == 0))
	{
		int chunksize = 0;

		// Get the next tag value pair from the bitstream
		segment = GetSegment(input);

		// Did the bitstream end before the last tag was found?
		if (input->error == BITSTREAM_ERROR_UNDERFLOW) {
			break;
		}

		// Did an error occur while reading the bitstream?
		if (input->error != BITSTREAM_ERROR_OKAY) {
			header->error = CodecErrorBitstream(input);
			return false;
		}

		// Is this an optional tag?
		if (segment.tuple.tag < 0) {
			segment.tuple.tag = NEG(segment.tuple.tag);
		}

		// Tags with bit 13 set carry a 24-bit chunk size (the low byte of
		// the tag holds the high eight bits); bit 14 marks a 16-bit size
		if(segment.tuple.tag & 0x2000)
		{
			chunksize = segment.tuple.value;
			chunksize &= 0xffff;
			chunksize += ((segment.tuple.tag&0xff)<<16);
		}
		else if(segment.tuple.tag & 0x4000)
		{
			chunksize = segment.tuple.value;
			chunksize &= 0xffff;
		}
		// else if(tag == CODEC_TAG_INDEX) // handled below
		// {
		//	chunksize = value;
		//	chunksize &= 0xffff;
		// }
		else
		{
			chunksize = 0;
		}

		if((int)(segment.tuple.tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || segment.tuple.tag & 0x6000)
		{
			int skip = 1;

			if((segment.tuple.tag & 0xff00) == 0x2200) //sample size
			{
				// An undersized sample means only the header info is present
				if(sample_size < chunksize*4)
					find_header_info_only = 1;

				skip = find_header_info_only;

				// Stereo (two-channel) samples: locate and parse the second eye
				if(currentVideoChannel <= 1 && header->videoChannels == 2 && !find_header_info_only)
				{
					BITSTREAM input2;
					SAMPLE_HEADER header2;
					BITWORD *eye2 = (BITWORD *)(input->lpCurrentWord + chunksize*4);
					int eye_offset = sample_size - input->nWordsUsed + chunksize*4; //approx
					int eye_sample_size = input->nWordsUsed - eye_offset;

					// Search for first sample of the next frame
					while((eye2[1] != (uint8_t)CODEC_TAG_SAMPLE || eye2[0] != 0 || eye2[2] != 0) && eye_sample_size > 0)
					{
						eye2 += 4;
						chunksize ++;
						eye_offset += 4;
						eye_sample_size -= 4;
					}

					// Save the offset to the right stereo sample
					header->left_sample_size = eye_offset;

					{
						InitBitstreamBuffer(&input2, eye2, eye_sample_size, BITSTREAM_ACCESS_READ);

						memset(&header2, 0, sizeof(SAMPLE_HEADER));
						header2.find_lowpass_bands = 1;
						currentVideoChannel++;
						header2.videoChannels = currentVideoChannel;

						// Recursively parse the second (right eye) sample header
						if(ParseSampleHeader(&input2, &header2))
						{
							int i;
							for(i=0;i<4;i++)
							{
								if(header2.thumbnail_channel_offsets[i])
									header->thumbnail_channel_offsets_2nd_Eye[i] = eye_offset + header2.thumbnail_channel_offsets[i];
							}
						}
					}
				}
			}

			if((segment.tuple.tag & 0xff00) == 0x2300) //uncompressed sample size
			{
				header->hdr_uncompressed = 1;
				skip = 1;
				if(find_lowpass_bands != 1)
					break;
			}

			if((segment.tuple.tag & 0xff00) == 0x2100) //level
			{
				if(find_lowpass_bands == 1)
				{
					skip = 0;
				}
				else
				{
					skip = 1;
					// no header data after the fix level
					break;
				}
			}

			// Sized chunk: either skip over it or fall through and parse it
			if(chunksize)
			{
				if(skip)
				{
					input->lpCurrentWord += chunksize*4;
					input->nWordsUsed -= chunksize*4;
				}
			}
			else
			{
				TAGWORD value = segment.tuple.value;

				switch (segment.tuple.tag)
				{
				case CODEC_TAG_VERSION:
					// Version number of the encoder used in each GOP.
					header->encoder_version = (((value>>12) & 0xf)<<16) | (((value>>8) & 0xf)<<8) | ((value) & 0xff);
					break;

				case CODEC_TAG_INDEX:
					// Get the number of channels in the index to skip
					channel_count = value;
					if (channel_count <= TRANSFORM_MAX_CHANNELS)
						DecodeGroupIndex(input, (uint32_t*)&channel_size[0], channel_count);
					else
					{
						header->width = header->height = 0;
						return false;
					}
					break;

				case CODEC_TAG_FRAME_WIDTH:
					// Record the frame width in the sample header
					if (value > 0 && value <= 32768)
						header->width = value;
					else
					{
						header->width = header->height = 0;
						return false;
					}
					break;

				case CODEC_TAG_FRAME_HEIGHT:
					// Record the frame height in the sample header
					if (value > 0 && value <= 32768)
						header->height = value;
					else
					{
						header->width = header->height = 0;
						return false;
					}
					break;

				case CODEC_TAG_FRAME_DISPLAY_HEIGHT:
					// Display height must be within 16 lines of the frame height
					if (value > 0 && (int)value >= (int)header->height-16 && (int)value <= (int)header->height)
						display_height = value;
					else
					{
						header->width = header->height = 0;
						return false;
					}
					break;

				case CODEC_TAG_LOWPASS_WIDTH:
					// Save the width of the smallest wavelet for computing the frame dimensions
					if (value > 0 && value < (int)header->width / 4)
						first_wavelet_width = value;
					else
					{
						header->width = header->height = 0;
						return false;
					}
					break;

				case CODEC_TAG_LOWPASS_HEIGHT:
					// Save the height of the smallest wavelet for computing the frame dimensions
					if (value > 0 && value < (int)header->height / 4)
						first_wavelet_height = value;
					else
					{
						header->width = header->height = 0;
						return false;
					}
					break;

				case CODEC_TAG_TRANSFORM_TYPE:
					// Save the type of transform for computing the frame dimensions (if necessary)
					if (TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST)
						transform_type = value;
					else
					{
						header->width = header->height = 0;
						return false;
					}
					break;

				case CODEC_TAG_INPUT_FORMAT:
					// Record the original format of the encoded frames
					header->input_format = (COLOR_FORMAT)value;
					break;

				case CODEC_TAG_ENCODED_FORMAT:
				case CODEC_TAG_OLD_ENCODED_FORMAT:
					// Record the encoded format (internal representation)
					header->encoded_format = (ENCODED_FORMAT)value;
					// Three channels cannot be RGBA 4:4:4:4; downgrade to RGB 4:4:4
					if(header->encoded_format == ENCODED_FORMAT_RGBA_4444 && channel_count == 3)
						header->encoded_format = ENCODED_FORMAT_RGB_444;
					break;

				case CODEC_TAG_FRAME_NUMBER:
					// Record the frame number for debugging
					header->frame_number = value;
					break;

				case CODEC_TAG_INTERLACED_FLAGS:
					// Record the flags that indicate the field type
					header->interlaced_flags = value;
					break;

				case CODEC_TAG_SAMPLE_FLAGS:
					// The sample flags specify progressive versus interlaced decoding
					header->hdr_progressive = !!(value & SAMPLE_FLAGS_PROGRESSIVE);
					if (header->hdr_progressive) {
						// Clear the interlaced flags
						header->interlaced_flags = 0;
					}
					break;

				case CODEC_TAG_LOWPASS_SUBBAND:
					if(value == 0) // low pass band
					{
						// Scan ahead for the lowpass band marker to record
						// the thumbnail offset for this channel
						int count = 8;
						uint32_t *lptr = (uint32_t *)input->lpCurrentWord;
						do
						{
							uint32_t longword = SwapInt32(lptr[count]);
							unsigned short t,v;
							t = (longword>>16) & 0xffff;
							v = (longword) & 0xffff;
							if (t == CODEC_TAG_MARKER && IsLowPassBandMarker(v) && current_channel < 4)
							{
								header->thumbnail_channel_offsets[current_channel] = (sample_size - input->nWordsUsed) + count*4 + 4;
								break;
							}
							count++;
						} while(count < 32);

						current_channel++;
					}
					break;

				case CODEC_TAG_ENCODED_CHANNELS:
					if(header->videoChannels == 1)
					{
						header->videoChannels = value;
						if(header->videoChannels < 1)
							header->videoChannels = 1;
						// At most two video channels (stereo) are supported
						if (header->videoChannels > 2)
						{
							header->width = header->height = 0;
							return false;
						}
					}
					break;

				case CODEC_TAG_QUALITY_L:
					// Low 16 bits of the encode quality
					// header->encode_quality &= 0xffff0000;
					header->encode_quality |= value;
					break;

				case CODEC_TAG_QUALITY_H:
					// High 16 bits of the encode quality
					// header->encode_quality &= 0xffff;
					header->encode_quality |= value<<16;
					break;
				}

				// Have the encoded frame dimensions been computed?
				if (header->width == 0 || header->height == 0)
				{
					// Found the first wavelet in the bitstream?
					if (transform_type >= 0 && first_wavelet_width > 0 && first_wavelet_height > 0)
					{
						// The group header did not contain tags for the frame dimensions
						// prior to the release of support for RGB 4:4:4, so must attempt to
						// compute the frame dimensions from the dimensions of the lowpass band.
						int frame_width = 0;
						int frame_height = 0;

						// Use the dimensions of the first wavelet to compute the frame width and height
						if (!ComputeFrameDimensionsFromFirstWavelet(transform_type,
																	first_wavelet_width,
																	first_wavelet_height,
																	&frame_width,
																	&frame_height))
						{
							// Could not compute the frame dimensions
							header->error = CODEC_ERROR_FRAME_DIMENSIONS;
							return false;
						}

						// Save the frame dimensions in the sample header
						header->width = frame_width;
						header->height = frame_height;

						// No more header information after finding the lowpass band
						break;
					}
				}

				if(find_lowpass_bands != 1 && find_uncompressed != 1)
				{
					// No more header information after the first encoded band
					if (segment.tuple.tag == CODEC_TAG_BAND_NUMBER) {
						// Stop looking for header information
						break;
					}

					// No more header information after the frame index
					if (segment.tuple.tag == CODEC_TAG_FRAME_INDEX) {
						// Stop looking for header information
						break;
					}

					// No more header information after the lowpass band header
					if (segment.tuple.tag == CODEC_TAG_PIXEL_DEPTH) {
						// Stop looking for header information
						break;
					}
				}
			}
		}
	}

	if (header->width == 0 || header->height == 0) {
		//assert(0);
		return false;
	}

	// Fill in the encoded format if it was not present in the header
	if (header->encoded_format == ENCODED_FORMAT_UNKNOWN && channel_count > 0)
	{
		header->encoded_format = GetEncodedFormat(header->input_format, header->encode_quality, channel_count);
	}

	// Use the display height (if present) as the reported height
	if (display_height > 0)
	{
		header->height = display_height;
	}

	// Bayer frames are encoded at half size in each dimension
	if (header->encoded_format == ENCODED_FORMAT_BAYER)
	{
		header->width *= 2;
		header->height *= 2;
		if(display_height == 0)
		{
			// Trim a 1088-line encode to the displayable 1080 lines
			if(header->height == 1088)
				header->height = 1080;
		}
	}

	// Return true if the header was parsed completely and correctly
	return (header->width > 0 &&
			header->height > 0 &&
			((sample_type == SAMPLE_TYPE_FRAME) ||
			 (header->input_format != COLOR_FORMAT_UNKNOWN &&
			  header->encoded_format != ENCODED_FORMAT_UNKNOWN)));

	// It is not an error if the frame number was not found in the sample header
}

// Print the sample header tags to the logfile until the lowpass band
// dimensions have been seen.  Returns false on a bitstream read error.
bool DumpSampleHeader(BITSTREAM *input, FILE *logfile)
{
	TAGVALUE segment;
	int lowpass_width = 0;
	int lowpass_height = 0;

	// Parse the sample header until the lowpass band is found
	while (lowpass_width == 0 && lowpass_height == 0)
	{
		// Get the next tag value pair from the bitstream
		segment = GetSegment(input);

		// Did an error occur while reading the bitstream?
		if (input->error != BITSTREAM_ERROR_OKAY) {
			return false;
		}

		// Is this an optional tag?
		if (segment.tuple.tag < 0) {
			segment.tuple.tag = NEG(segment.tuple.tag);
		}

		// Check that the tag is valid
		assert(CODEC_TAG_ZERO < segment.tuple.tag && segment.tuple.tag <= CODEC_TAG_LAST_NON_SIZED);

		switch (segment.tuple.tag)
		{
		case CODEC_TAG_SAMPLE:
			fprintf(logfile, "Sample type: %d\n", segment.tuple.value);
			break;

		case CODEC_TAG_FRAME_WIDTH:
			fprintf(logfile, "Frame width: %d\n", segment.tuple.value);
			break;

		case CODEC_TAG_FRAME_HEIGHT:
			fprintf(logfile, "Frame height: %d\n", segment.tuple.value);
			break;

		case CODEC_TAG_LOWPASS_WIDTH:
			lowpass_width = segment.tuple.value;
			fprintf(logfile, "Lowpass width: %d\n", lowpass_width);
			break;

		case CODEC_TAG_LOWPASS_HEIGHT:
			lowpass_height = segment.tuple.value;
			fprintf(logfile, "Lowpass height: %d\n", lowpass_height);
			break;

		case CODEC_TAG_TRANSFORM_TYPE:
			fprintf(logfile, "Transform type: %d\n", segment.tuple.value);
			break;

		case CODEC_TAG_INPUT_FORMAT:
			fprintf(logfile, "Input format: %d\n", segment.tuple.value);
			break;

		case CODEC_TAG_ENCODED_FORMAT:
		case CODEC_TAG_OLD_ENCODED_FORMAT:
			fprintf(logfile, "Encoded format: %d\n", segment.tuple.value);
			break;

		case CODEC_TAG_FRAME_NUMBER:
			fprintf(logfile,
					"Frame number: %d\n", segment.tuple.value);
			break;
		}
	}

	return true;
}

// Advance the bitstream past the current eye of a stereo (3D) sample when a
// later video channel was requested.  Returns the number of encoded video
// channels found (falls back to decoder->real_channels for tiny P-frames).
int SkipVideoChannel(DECODER *decoder, BITSTREAM *input, int skip_to_channel)	// 3D work
{
	TAGWORD tag,value=1;
	unsigned char *pos = NULL;
	int readsize = input->nWordsUsed;

	if(readsize > 4096) // only need to scan the first few tuplets
	{
		readsize = 4096;
	}
	else
	{
		//Tiny therefore P-frame, nothing to be read so:
		value=decoder->real_channels; // return the last value.
		return value;
	}

	// Find the encoded-channels tuplet within the first part of the sample
	pos = GetTupletAddr(input->lpCurrentBuffer, readsize, CODEC_TAG_ENCODED_CHANNELS, &value);

	if(pos && value>1 && skip_to_channel>1)
	{
		int chunksize = 0;
		intptr_t offset;
		int count = 0;

		// Scan forward (big-endian byte pairs) for the sample-size tuplet
		do
		{
			tag = *pos++<<8;
			tag |= *pos++;
			value = *pos++<<8;
			value |= *pos++;

			if (tag < 0)
			{
				tag = NEG(tag);
			}
		} while((tag & 0xff00) != CODEC_TAG_SAMPLE_SIZE && count++ < 10);

		if((tag & 0xff00) == CODEC_TAG_SAMPLE_SIZE)
		{
			// Reassemble the 24-bit chunk size and skip past the first eye
			chunksize = value;
			chunksize &= 0xffff;
			chunksize += ((tag&0xff)<<16);

			offset = ((intptr_t)pos - (intptr_t)input->lpCurrentWord) + chunksize*4;
			input->lpCurrentWord += offset;
			input->nWordsUsed -= (int)offset;

			{
				// NOTE(review): this local shadows the outer TAGWORD tag
				uint8_t *tag = (uint8_t *)input->lpCurrentWord;

				// Search for first sample of the next frame
				while((tag[1] != (uint8_t)CODEC_TAG_SAMPLE || tag[0] != 0 || tag[2] != 0) && input->nWordsUsed > 0)
				{
					input->lpCurrentWord += 4;
					input->nWordsUsed -= 4;
					tag += 4;
				}
			}
		}
	}

	//if(value == 0) value = 1; // old non-stereo file

	return value;
}

#define SUBPIXEL 64

// Subpixel interpolation coefficients: SUBPIXEL+1 phases of a 4-tap filter,
// scaled by 128 (0x7fff appears to mark a saturated/unity tap — TODO confirm)
static short gains[SUBPIXEL+1][4] =
{
	{0*128,0*128,0x7fff,0*128},
	{0*128,2*128,0x7fff,-2*128},
	{0*128,5*128,255*128,-4*128},
	{0*128,8*128,254*128,-6*128},
	{0*128,11*128,253*128,-8*128},
	{0*128,14*128,252*128,-10*128},
	{0*128,18*128,250*128,-12*128},
	{0*128,21*128,248*128,-13*128},
	{-1*128,25*128,247*128,-15*128},
	{-1*128,29*128,244*128,-16*128},
	{-1*128,33*128,241*128,-17*128},
	{-2*128,37*128,239*128,-18*128},
	{-2*128,41*128,236*128,-19*128},
	{-3*128,46*128,233*128,-20*128},
	{-3*128,50*128,229*128,-20*128},
	{-4*128,55*128,226*128,-21*128},
	{-4*128,60*128,221*128,-21*128},
{-5*128,65*128,217*128,-21*128}, {-5*128,70*128,213*128,-22*128}, {-6*128,75*128,209*128,-22*128}, {-7*128,80*128,205*128,-22*128}, {-7*128,85*128,199*128,-21*128}, {-8*128,91*128,194*128,-21*128}, {-9*128,96*128,190*128,-21*128}, {-10*128,102*128,185*128,-21*128}, {-10*128,107*128,179*128,-20*128}, {-11*128,113*128,174*128,-20*128}, {-12*128,118*128,169*128,-19*128}, {-13*128,124*128,164*128,-19*128}, {-14*128,129*128,159*128,-18*128}, {-14*128,135*128,152*128,-17*128}, {-15*128,141*128,147*128,-17*128}, {-16*128,144*128,144*128,-16*128}, {-17*128,147*128,141*128,-15*128}, {-17*128,152*128,135*128,-14*128}, {-18*128,159*128,129*128,-14*128}, {-19*128,164*128,124*128,-13*128}, {-19*128,169*128,118*128,-12*128}, {-20*128,174*128,113*128,-11*128}, {-20*128,179*128,107*128,-10*128}, {-21*128,185*128,102*128,-10*128}, {-21*128,190*128,96*128,-9*128}, {-21*128,194*128,91*128,-8*128}, {-21*128,199*128,85*128,-7*128}, {-22*128,205*128,80*128,-7*128}, {-22*128,209*128,75*128,-6*128}, {-22*128,213*128,70*128,-5*128}, {-21*128,217*128,65*128,-5*128}, {-21*128,221*128,60*128,-4*128}, {-21*128,226*128,55*128,-4*128}, {-20*128,229*128,50*128,-3*128}, {-20*128,233*128,46*128,-3*128}, {-19*128,236*128,41*128,-2*128}, {-18*128,239*128,37*128,-2*128}, {-17*128,241*128,33*128,-1*128}, {-16*128,244*128,29*128,-1*128}, {-15*128,247*128,25*128,-1*128}, {-13*128,248*128,21*128,0*128}, {-12*128,250*128,18*128,0*128}, {-10*128,252*128,14*128,0*128}, {-8*128,253*128,11*128,0*128}, {-6*128,254*128,8*128,0*128}, {-4*128,255*128,5*128,0*128}, {-2*128,0x7fff,2*128,0*128}, {0*128,0*128,0x7fff,0*128} }; static int lanczos[256] = { 0, -2, -8, -18, -33, -53, -77, -106, -141, -179, -223, -272, -325, -384, -447, -514, -586, -662, -742, -826, -913, -1004, -1097, -1193, -1290, -1389, -1490, -1591, -1692, -1792, -1892, -1990, -2086, -2179, -2269, -2355, -2436, -2511, -2580, -2643, -2697, -2744, -2781, -2809, -2826, -2832, -2826, -2808, -2776, -2730, -2670, -2594, -2503, -2395, -2271, -2129, -1969, 
-1790, -1593, -1377, -1141, -886, -611, -315, 0, 336, 692, 1069, 1466, 1884, 2321, 2778, 3255, 3750, 4265, 4797, 5347, 5914, 6498, 7097, 7711, 8340, 8982, 9636, 10301, 10977, 11663, 12357, 13058, 13765, 14477, 15192, 15910, 16630, 17349, 18066, 18781, 18871, 19580, 20285, 20986, 21678, 22361, 23035, 23697, 24348, 24983, 25604, 26206, 26790, 27354, 27898, 28419, 28915, 29387, 29832, 30249, 30638, 30997, 31326, 31623, 31886, 32117, 32314, 32476, 32603, 32695, 32749, 32767, //was 32768, issue for SSE2 32749, 32695, 32603, 32476, 32314, 32117, 31886, 31623, 31326, 30997, 30638, 30249, 29832, 29387, 28915, 28419, 27898, 27354, 26790, 26206, 25604, 24983, 24348, 23697, 23035, 22361, 21678, 20986, 20285, 19580, 18871, 18159, 18066, 17349, 16630, 15910, 15192, 14477, 13765, 13058, 12357, 11663, 10977, 10301, 9636, 8982, 8340, 7711, 7097, 6498, 5914, 5347, 4797, 4265, 3750, 3255, 2778, 2321, 1884, 1466, 1069, 692, 336, 0, -315, -611, -886, -1141, -1377, -1593, -1790, -1969, -2129, -2271, -2395, -2503, -2594, -2670, -2730, -2776, -2808, -2826, -2832, -2826, -2809, -2781, -2744, -2697, -2643, -2580, -2511, -2436, -2355, -2269, -2179, -2086, -1990, -1892, -1792, -1692, -1591, -1490, -1389, -1290, -1193, -1097, -1004, -913, -826, -742, -662, -586, -514, -447, -384, -325, -272, -223, -179, -141, -106, -77, -53, -33, -18, -8, -2, }; void RGB48VerticalShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int widthbytes, int height, int pitch, float offset, float zoom) { float yposf,ystepf; int x; //int endofSSEline = 0; unsigned short *scanline[4]; //int spitch = pitch/2; int neg = 0,step; __m128i lA,lB,lC,lD,gA,gB,gC,gD,o128,t1; __m128i *lineA, *lineB, *lineC, *lineD, *outline128; offset = -offset; yposf = height * offset; yposf = (float)height*(0.5f - 1.0f/(2.0f*zoom) - offset); ystepf = 1.0f/zoom; if(yposf < 0.0) neg = 1; if(pitch < 0) yposf -= ystepf; /* yposi = floor(yposf); remainf = yposf - (float)yposi; tablepos = (remainf*(float)SUBPIXEL); yposi = 
abs(yposi); if(yposi==0 && tablepos == 0) return; // no move required */
// -3 , 0 best small notch at zero?
//
// 'step' is the byte stride consumed per SSE pass below: 8-bit formats use
// 16 bytes, 16-bit formats 32 bytes.
switch(decoder->StereoBufferFormat)
{
case DECODED_FORMAT_RGB32:
case DECODED_FORMAT_RGB24:
case DECODED_FORMAT_YUYV:
	step = 16;
	break;
case DECODED_FORMAT_W13A:
case DECODED_FORMAT_RG64:
case DECODED_FORMAT_WP13:
case DECODED_FORMAT_RG48:
default:
	step = 32;
	break;
}

{
	static char zeroline[1024] = {0};
	int y,yoffset = ((int)(yposf-2.0)),yend = ((int)(yposf+2.0+ystepf*height));
	unsigned char *src = (unsigned char *)RGB48;
	unsigned char *dst = (unsigned char *)RGB48;
	unsigned char *ptr = (unsigned char *)buffer;

	if(yoffset < 0) yoffset = 0;
	if(yend > height) yend = height;

	// Stage every source row that can contribute to an output row into
	// 'buffer', so the in-place resample below reads unmodified data.
	src += pitch * yoffset;
	for(y=yoffset; y<yend; y++)
	{
		memcpy(ptr, src, widthbytes);
		ptr += widthbytes;
		src += pitch;
	}

	ptr = (unsigned char *)buffer;
	for(y=0;y<height; y++)
	{
		int i,t,yp = ((int)yposf);
		int rmdr = 63-((int)(yposf*64.0) & 63); // 1/64th subpixel phase into the lanczos table
		int gains[4];

		yp -= 1; // use -2 cause a image down shift //DAN20100225

		// Pick the four contributing rows and their filter weights;
		// out-of-range rows read from the static zero line instead.
		t = 0;
		for(i=0; i<4; i++)
		{
			if(yp<0 || yp>= height) // skip 0 line as the top line was zagged
			{
				t += gains[i] = lanczos[rmdr];
				scanline[i] = (unsigned short *)zeroline;
			}
			else
			{
				t += gains[i] = lanczos[rmdr];
				scanline[i] = (unsigned short *)&ptr[widthbytes*(yp-yoffset)];
			}
			yp++;
			rmdr+=64;
		}

		if(t)
		{
			__m128i half;
			gA = _mm_set1_epi16(gains[0]);
			gB = _mm_set1_epi16(gains[1]);
			gC = _mm_set1_epi16(gains[2]);
			gD = _mm_set1_epi16(gains[3]);

			outline128 = (__m128i *)dst;
			lineA = (__m128i *)scanline[0];
			lineB = (__m128i *)scanline[1];
			lineC = (__m128i *)scanline[2];
			lineD = (__m128i *)scanline[3];

			switch(decoder->StereoBufferFormat)
			{
			case DECODED_FORMAT_W13A:
			case DECODED_FORMAT_WP13:
				// Signed 13-bit data: weighted sum of four rows, clamp, restore range.
				for(x=0;x<widthbytes; x+=step)
				{
					lA = _mm_loadu_si128(lineA++);
					lB = _mm_loadu_si128(lineB++);
					lC = _mm_loadu_si128(lineC++);
					lD = _mm_loadu_si128(lineD++);
					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD);
					o128 = _mm_adds_epi16(o128,t1);
					// upper limit to 32767
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_slli_epi16(o128,1);
					_mm_storeu_si128(outline128++, o128);

					// Second 16 bytes of the 32-byte step (loop unrolled x2.)
					lA = _mm_loadu_si128(lineA++);
					lB = _mm_loadu_si128(lineB++);
					lC = _mm_loadu_si128(lineC++);
					lD = _mm_loadu_si128(lineD++);
					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD);
					o128 = _mm_adds_epi16(o128,t1);
					// upper limit to 32767
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_slli_epi16(o128,1);
					_mm_storeu_si128(outline128++, o128);
				}
				break;

			case DECODED_FORMAT_RG64:
			case DECODED_FORMAT_RG48:
				// Unsigned 16-bit data: drop to 13-bit so the signed multiply works.
				for(x=0;x<widthbytes; x+=step)
				{
					lA = _mm_loadu_si128(lineA++);
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_loadu_si128(lineB++);
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_loadu_si128(lineC++);
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_loadu_si128(lineD++);
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD);
					o128 = _mm_adds_epi16(o128,t1);
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);
					_mm_storeu_si128(outline128++, o128);

					lA = _mm_loadu_si128(lineA++);
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_loadu_si128(lineB++);
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_loadu_si128(lineC++);
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_loadu_si128(lineD++);
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD);
					o128 = _mm_adds_epi16(o128,t1);
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);
					_mm_storeu_si128(outline128++, o128);
				}
				break;

			case DECODED_FORMAT_RGB32:
			case DECODED_FORMAT_RGB24:
			case DECODED_FORMAT_YUYV:
				// 8-bit data: widen each 16-byte load into two 16-bit halves
				// (high half first, low half second), filter both, repack below.
				for(x=0;x<widthbytes; x+=step)
				{
					lA = _mm_loadu_si128(lineA);
					lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
					lB = _mm_loadu_si128(lineB);
					lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
					lC = _mm_loadu_si128(lineC);
					lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
					lD = _mm_loadu_si128(lineD);
					lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD);
					o128 = _mm_adds_epi16(o128,t1);
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);
					half = o128;

					lA = _mm_loadu_si128(lineA++);
					lA = _mm_unpacklo_epi8 (_mm_setzero_si128(), lA);
					lB = _mm_loadu_si128(lineB++);
					lB = _mm_unpacklo_epi8 (_mm_setzero_si128(), lB);
					lC = _mm_loadu_si128(lineC++);
					lC = _mm_unpacklo_epi8 (_mm_setzero_si128(), lC);
					lD = _mm_loadu_si128(lineD++);
					lD = _mm_unpacklo_epi8 (_mm_setzero_si128(), lD);
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB);
					o128 =
_mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD);
					o128 = _mm_adds_epi16(o128,t1);
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);
					// Repack both filtered 16-bit halves back to 8-bit and store.
					half = _mm_srli_epi16(half,8);
					o128 = _mm_srli_epi16(o128,8);
					o128 = _mm_packus_epi16(o128, half);
					_mm_storeu_si128(outline128++, o128);
				}
				break;
			}
		}
		else
		{
			// All four weights were zero for this row: fill with "black".
			if(decoder->StereoBufferFormat == DECODED_FORMAT_YUYV)
			{
				// NOTE(review): memset keeps only the low byte of its value
				// argument, so this fills with 0x80 (mid grey), not the
				// 0x10/0x80 YUYV black pattern the constant suggests -- confirm.
				memset(dst, 0x10801080, widthbytes);
			}
			else
			{
				memset(dst, 0, widthbytes);
			}
		}
		yposf += ystepf;
		dst += pitch;
	}

	/*ptr = (unsigned char *)buffer; for(y=0;y<height; y++) { int r,g,b,yp = ((int)yposf); yposf += ystepf; if(yp<0 || yp>= height) { memset(dst, 0, widthbytes); } else { memcpy(dst, &ptr[widthbytes*yp], widthbytes); } dst += pitch; }*/
}
}


// Fine-grained variant of RGB48VerticalShiftZoom: per output row it computes a
// single filtered pixel (3-4 components) instead of a whole SSE register's
// worth, using byte-granular source pointers.
// NOTE(review): parameter 'xx' is not referenced in the visible body, and
// 'neg'/'step' are computed but unused here -- confirm intended use.
void RGB48VerticalShiftZoomFine(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int widthbytes, int height, int pitch, float offset, float zoom, int xx)
{
	float yposf,ystepf;
	//int endofSSEline = 0;
	unsigned short *scanline[4];
	//int spitch = pitch/2;
	int neg = 0,step;
	__m128i lA,lB,lC,lD,gA,gB,gC,gD,o128,t1;
	uint8_t *lineAPos, *lineBPos, *lineCPos, *lineDPos;
	uint8_t *outlinePos8;
	uint16_t *outlinePos16;

	offset = -offset;
	//yposf = height * offset;
	yposf = (float)height*(0.5f - 1.0f/(2.0f*zoom) - offset);
	ystepf = 1.0f/zoom;
	if(yposf < 0.0) neg = 1;
	if(pitch < 0) yposf -= ystepf;
	/* yposi = floor(yposf); remainf = yposf - (float)yposi; tablepos = (remainf*(float)SUBPIXEL); yposi = abs(yposi); if(yposi==0 && tablepos == 0) return; // no move required */
	// -3 , 0 best small notch at zero?
	//
	// Per-format byte stride of one pixel (used to advance the line pointers.)
	switch(decoder->StereoBufferFormat)
	{
	case DECODED_FORMAT_RGB32:
		step = 4;
		break;
	case DECODED_FORMAT_RGB24:
		step = 3;
		break;
	case DECODED_FORMAT_YUYV:
		step = 4;
		break;
	case DECODED_FORMAT_W13A:
	case DECODED_FORMAT_RG64:
		step = 8;
		break;
	case DECODED_FORMAT_WP13:
	case DECODED_FORMAT_RG48:
		step = 6;
		break;
	default:
		assert(0);
		break;
	}

	{
		static char zeroline[1024] = {0};
		int y,yoffset = ((int)(yposf-2.0)),yend = ((int)(yposf+2.0+ystepf*height));
		unsigned char *src = (unsigned char *)RGB48;
		unsigned char *dst = (unsigned char *)RGB48;
		unsigned char *ptr = (unsigned char *)buffer;

		if(yoffset < 0) yoffset = 0;
		if(yend > height) yend = height;

		// Stage the contributing source rows into 'buffer'.
		src += pitch * yoffset;
		for(y=yoffset; y<yend; y++)
		{
			memcpy(ptr, src, widthbytes);
			ptr += widthbytes;
			src += pitch;
		}

		ptr = (unsigned char *)buffer;
		for(y=0;y<height; y++)
		{
			int i,t,yp = ((int)yposf);
			int rmdr = 63-((int)(yposf*64.0) & 63); // 1/64th subpixel phase into the lanczos table
			int gains[4];

			yp -= 1; // use -2 cause a image down shift //DAN20100225

			t = 0;
			for(i=0; i<4; i++)
			{
				if(yp<0 || yp>= height) // skip 0 line as the top line was zagged
				{
					t += gains[i] = lanczos[rmdr];
					scanline[i] = (unsigned short *)zeroline;
				}
				else
				{
					t += gains[i] = lanczos[rmdr];
					scanline[i] = (unsigned short *)&ptr[widthbytes*(yp-yoffset)];
				}
				yp++;
				rmdr+=64;
			}

			if(t)
			{
				gA = _mm_set1_epi16(gains[0]);
				gB = _mm_set1_epi16(gains[1]);
				gC = _mm_set1_epi16(gains[2]);
				gD = _mm_set1_epi16(gains[3]);

				outlinePos8 = (uint8_t *)dst;
				outlinePos16 = (uint16_t *)dst;
				lineAPos = (uint8_t *)scanline[0];
				lineBPos = (uint8_t *)scanline[1];
				lineCPos = (uint8_t *)scanline[2];
				lineDPos = (uint8_t *)scanline[3];

				switch(decoder->StereoBufferFormat)
				{
				case DECODED_FORMAT_W13A: // 4 components of 13-bit signed
					lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=8;
					lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=8;
					lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=8;
					lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=8;
					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD);
					o128 = _mm_adds_epi16(o128,t1);
					// upper limit to 32767
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_slli_epi16(o128,1);
					//_mm_storeu_si128((__m128i *)outlinePos, o128);
					outlinePos16[0] = _mm_extract_epi16(o128, 0);
					outlinePos16[1] = _mm_extract_epi16(o128, 1);
					outlinePos16[2] = _mm_extract_epi16(o128, 2);
					outlinePos16[3] = _mm_extract_epi16(o128, 3);
					outlinePos16+=4;
					break;

				case DECODED_FORMAT_WP13: // 3 components of 13-bit signed
					lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=6;
					lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=6;
					lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=6;
					lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=6;
					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD);
					o128 = _mm_adds_epi16(o128,t1);
					// upper limit to 32767
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_slli_epi16(o128,1);
					//_mm_storeu_si128((__m128i *)outlinePos, o128);
					outlinePos16[0] = _mm_extract_epi16(o128, 0);
					outlinePos16[1] = _mm_extract_epi16(o128, 1);
					outlinePos16[2] = _mm_extract_epi16(o128, 2);
					outlinePos16+=3;
					break;

				case DECODED_FORMAT_RG64: // 4 components of 16-bit unsigned
					lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=8;
					lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=8;
					lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=8;
					lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=8;
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD);
					o128 = _mm_adds_epi16(o128,t1);
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);
					//_mm_storeu_si128((__m128i *)outlinePos, o128);
					outlinePos16[0] = _mm_extract_epi16(o128, 0);
					outlinePos16[1] = _mm_extract_epi16(o128, 1);
					outlinePos16[2] = _mm_extract_epi16(o128, 2);
					outlinePos16[3] = _mm_extract_epi16(o128, 3);
					outlinePos16+=4;
					break;

				case DECODED_FORMAT_RG48: // 3 components of 16-bit unsigned
					lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=6;
					lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=6;
					lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=6;
					lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=6;
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD);
					o128 = _mm_adds_epi16(o128,t1);
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);
					//_mm_storeu_si128((__m128i *)outlinePos, o128);
					outlinePos16[0] = _mm_extract_epi16(o128, 0);
					outlinePos16[1] = _mm_extract_epi16(o128, 1);
					outlinePos16[2] = _mm_extract_epi16(o128, 2);
					outlinePos16+=3;
					break;

				case DECODED_FORMAT_RGB32: // 8-bit formats: widen, filter, narrow
				case DECODED_FORMAT_YUYV:
					lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=4;
					lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
					lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=4;
					lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
					lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=4;
					lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
					lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=4;
					lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					o128 = _mm_mulhi_epi16(lA, gA);
					t1 = _mm_mulhi_epi16(lB, gB);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lC, gC);
					o128 = _mm_adds_epi16(o128,t1);
					t1 = _mm_mulhi_epi16(lD, gD);
					o128 = _mm_adds_epi16(o128,t1);
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_srli_epi16(o128,4);
					outlinePos8[0] = _mm_extract_epi16(o128, 0);
					outlinePos8[1] = _mm_extract_epi16(o128, 1);
					outlinePos8[2] = _mm_extract_epi16(o128, 2);
					outlinePos8[3] = _mm_extract_epi16(o128, 3);
					outlinePos8+=4;
					break;

				case DECODED_FORMAT_RGB24:
					{
						// Scalar path: SSE2 has no 3-byte-aligned load (see the
						// disabled code below.)
						int r,g,b;
						b = ((lineAPos[0] * gains[0])>>7) + ((lineBPos[0] * gains[1])>>7) + ((lineCPos[0] * gains[2])>>7) + ((lineDPos[0] * gains[3])>>7); //16-bit
						g = ((lineAPos[1] * gains[0])>>7) + ((lineBPos[1] * gains[1])>>7) + ((lineCPos[1] * gains[2])>>7) + ((lineDPos[1] * gains[3])>>7); //16-bit
						r = ((lineAPos[2] * gains[0])>>7) + ((lineBPos[2] * gains[1])>>7) + ((lineCPos[2] * gains[2])>>7) + ((lineDPos[2] * gains[3])>>7); //16-bit
						if(r<0) r = 0;
						if(r>65535) r = 65535;
						if(g<0) g = 0;
						if(g>65535) g = 65535;
						if(b<0) b = 0;
						if(b>65535) b = 65535;
						lineAPos+=3;
						lineBPos+=3;
						lineCPos+=3;
						lineDPos+=3;
						outlinePos8[0] = b >> 8; //b
						outlinePos8[1] = g >> 8; //g
						outlinePos8[2] = r >> 8; //r
						outlinePos8+=3;
						/* SSE2 can't load byte alligned lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=3; lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA); lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=3; lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB); lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=3; lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC); lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=3; lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD); lA = _mm_srli_epi16(lA,3); //13-bit unsigned lB = _mm_srli_epi16(lB,3); //13-bit unsigned lC
= _mm_srli_epi16(lC,3); //13-bit unsigned lD = _mm_srli_epi16(lD,3); //13-bit unsigned o128 = _mm_mulhi_epi16(lA, gA); t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1); o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_srli_epi16(o128,4); outlinePos8[0] = _mm_extract_epi16(o128, 0); //b outlinePos8[1] = _mm_extract_epi16(o128, 1); //g outlinePos8[2] = _mm_extract_epi16(o128, 2); //r outlinePos8+=3; */
					}
					break;
				}
			}
			else
			{
				// All four weights were zero for this row: fill with "black".
				if(decoder->StereoBufferFormat == DECODED_FORMAT_YUYV)
				{
					// NOTE(review): memset keeps only the low byte of its value
					// argument, so this fills with 0x80 (mid grey), not the
					// 0x10/0x80 YUYV black pattern the constant suggests -- confirm.
					memset(dst, 0x10801080, widthbytes);
				}
				else
				{
					memset(dst, 0, widthbytes);
				}
			}
			yposf += ystepf;
			dst += pitch;
		}
	}
}


// Whole-image vertical shift by a fixed fractional offset (no zoom): each
// output row is a 4-tap blend of neighbouring source rows using the 'gains'
// weight table indexed by the subpixel remainder.  Works in place on RGB48,
// cycling four scratch scanlines held in 'buffer'.  A negative offset
// processes rows bottom-up.
void RGB48VerticalShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int widthbytes, int height, int pitch, float offset)
{
	float yposf,remainf;
	int yposi,tablepos,x,y;
	int gainA,gainB,gainC,gainD;
	//int endofSSEline = 0;
	unsigned short *scanline[4], *tline;
	int spitch = pitch/2;
	int neg = 0,shift = 0,skip,step;
	int origwidthbytes = widthbytes;
	int origwidthextra;
	__m128i lA, lB, lC, lD, gA, gB, gC, gD, o128, t1;
	__m128i *lineA, *lineB, *lineC, *lineD, *outline128;

	// offset = -offset;
	if(offset < 0.0) neg = 1;
	yposf = height * offset;
	yposi = (int)floor(yposf);
	remainf = yposf - (float)yposi;
	tablepos = (int)(remainf*(float)SUBPIXEL);
	yposi = abs(yposi);
	if(yposi==0 && tablepos == 0) return; // no move required

	// -3 , 0 best small notch at zero?
	//
	// Pick the four filter weights; for the negative direction the weight
	// order is reversed and the base row index shifts by one.
	if(neg)
	{
		yposi -= 2;
		gainA = gains[tablepos][0];
		gainB = gains[tablepos][1];
		gainC = gains[tablepos][2];
		gainD = gains[tablepos][3];
	}
	else
	{
		yposi -= 1; //offset inherent in the table
		gainD = gains[tablepos][0];
		gainC = gains[tablepos][1];
		gainB = gains[tablepos][2];
		gainA = gains[tablepos][3];
	}

	gA = _mm_set1_epi16(gainA);
	gB = _mm_set1_epi16(gainB);
	gC = _mm_set1_epi16(gainC);
	gD = _mm_set1_epi16(gainD);

	// 'skip' = components per pixel, 'step' = bytes consumed per SSE pass.
	switch(decoder->StereoBufferFormat)
	{
	case DECODED_FORMAT_RGB32:
		skip = 4;
		step = 16;
		break;
	case DECODED_FORMAT_RGB24:
		skip = 3;
		step = 16;
		break;
	case DECODED_FORMAT_YUYV:
		skip = 2;
		step = 16;
		break;
	case DECODED_FORMAT_WP13:
	case DECODED_FORMAT_RG48:
	case DECODED_FORMAT_W13A:
	case DECODED_FORMAT_RG64:
	default:
		skip = 6;
		step = 32;
		break;
	}

	// scanline[0] = buffer;
	// scanline[1] = buffer + width*skip/2;
	// scanline[2] = buffer + width*skip/2*2;
	// scanline[3] = buffer + width*skip/2*3;

	// Round the working width up to a whole number of SSE steps; the ragged
	// tail of the final register is stored via a bounce through scanline[0].
	widthbytes += (step - 1);
	widthbytes -= (widthbytes % step);
	origwidthextra = (origwidthbytes % step);

	scanline[0] = buffer;
	scanline[1] = buffer + widthbytes/2;
	scanline[2] = buffer + widthbytes/2*2;
	scanline[3] = buffer + widthbytes/2*3;

	// Prime the four scratch scanlines with the first contributing rows
	// (zeros where the source row falls outside the image.)
	for(y=0; y<4; y++)
	{
		if(yposi+y >=0 && yposi+y<height)
		{
			unsigned short *ptr = RGB48;
			if(neg)
				ptr += (height-1-yposi-y)*spitch;
			else
				ptr += (yposi+y)*spitch;
			memcpy(scanline[y], ptr, origwidthbytes);
		}
		else
		{
			memset(scanline[y], 0, origwidthbytes);
		}
	}

	{
		for(y=0;y<height; y++)
		{
			unsigned short *ptr = RGB48;
			if(neg)
				ptr += (height-y-1)*spitch;
			else
				ptr += y*spitch;

			outline128 = (__m128i *)ptr;
			lineA = (__m128i *)scanline[0];
			lineB = (__m128i *)scanline[1];
			lineC = (__m128i *)scanline[2];
			lineD = (__m128i *)scanline[3];

			//for(x=0;x<width*skip/2; x+=step)
			for(x=0;x<widthbytes; x+=step)
			{
				__m128i half;

				// First 16 bytes: load per format ('shift' selects the clamp
				// range used after the weighted sum.)
				switch(decoder->StereoBufferFormat)
				{
				case DECODED_FORMAT_W13A:
				case DECODED_FORMAT_WP13:
					{
						lA = _mm_loadu_si128(lineA++);
						lB = _mm_loadu_si128(lineB++);
						lC = _mm_loadu_si128(lineC++);
						lD = _mm_loadu_si128(lineD++);
						shift = 0;
					}
					break;
				case DECODED_FORMAT_RG64:
				case DECODED_FORMAT_RG48:
					{
						lA = _mm_loadu_si128(lineA++);
						lA = _mm_srli_epi16(lA,3); //13-bit unsigned
						lB = _mm_loadu_si128(lineB++);
						lB = _mm_srli_epi16(lB,3); //13-bit unsigned
						lC = _mm_loadu_si128(lineC++);
						lC = _mm_srli_epi16(lC,3); //13-bit unsigned
						lD = _mm_loadu_si128(lineD++);
						lD = _mm_srli_epi16(lD,3); //13-bit unsigned
						shift = 3;
					}
					break;
				case DECODED_FORMAT_RGB32:
				case DECODED_FORMAT_RGB24:
				case DECODED_FORMAT_YUYV:
					lA = _mm_loadu_si128(lineA);
					lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
					lB = _mm_loadu_si128(lineB);
					lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
					lC = _mm_loadu_si128(lineC);
					lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
					lD = _mm_loadu_si128(lineD);
					lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					shift = 3;
					break;
				}

				o128 = _mm_mulhi_epi16(lA, gA);
				t1 = _mm_mulhi_epi16(lB, gB);
				o128 = _mm_adds_epi16(o128,t1);
				t1 = _mm_mulhi_epi16(lC, gC);
				o128 = _mm_adds_epi16(o128,t1);
				t1 = _mm_mulhi_epi16(lD, gD);
				o128 = _mm_adds_epi16(o128,t1);

				if(shift)
				{
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);
				}
				else
				{
					// upper limit to 32767
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_slli_epi16(o128,1);
				}

				if(skip == 6) //RGB48 || WP13
				{
					// 16-bit formats store directly; the ragged tail goes via
					// scanline[0] so the store never writes past the row end.
					if(widthbytes == origwidthbytes || x+16 < origwidthbytes)
						_mm_storeu_si128(outline128++, o128);
					else
					{
						//if(x < origwidthbytes+16/*bytes in an SSE2 reg*/)
						_mm_storeu_si128((__m128i *)scanline[0], o128);
						memcpy((char *)outline128, (char *)scanline[0], origwidthextra);
						outline128++;
					}
				}
				else
				{
					half = o128;
				}

				// Second 16 bytes (8-bit formats take the low byte halves here
				// and repack both halves below.)
				switch(decoder->StereoBufferFormat)
				{
				case DECODED_FORMAT_W13A:
				case DECODED_FORMAT_WP13:
					{
						lA = _mm_loadu_si128(lineA++);
						lB = _mm_loadu_si128(lineB++);
						lC = _mm_loadu_si128(lineC++);
						lD = _mm_loadu_si128(lineD++);
						shift = 0;
					}
					break;
				case DECODED_FORMAT_RG64:
				case DECODED_FORMAT_RG48:
					{
						lA = _mm_loadu_si128(lineA++);
						lA = _mm_srli_epi16(lA,3); //13-bit unsigned
						lB = _mm_loadu_si128(lineB++);
						lB = _mm_srli_epi16(lB,3); //13-bit unsigned
						lC = _mm_loadu_si128(lineC++);
						lC = _mm_srli_epi16(lC,3); //13-bit unsigned
						lD = _mm_loadu_si128(lineD++);
						lD = _mm_srli_epi16(lD,3); //13-bit unsigned
						shift = 3;
					}
					break;
				case DECODED_FORMAT_RGB32:
				case DECODED_FORMAT_RGB24:
				case DECODED_FORMAT_YUYV:
					lA = _mm_loadu_si128(lineA++);
					lA = _mm_unpacklo_epi8 (_mm_setzero_si128(), lA);
					lB = _mm_loadu_si128(lineB++);
					lB = _mm_unpacklo_epi8 (_mm_setzero_si128(), lB);
					lC = _mm_loadu_si128(lineC++);
					lC = _mm_unpacklo_epi8 (_mm_setzero_si128(), lC);
					lD = _mm_loadu_si128(lineD++);
					lD = _mm_unpacklo_epi8 (_mm_setzero_si128(), lD);
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					shift = 3;
					break;
				}

				o128 = _mm_mulhi_epi16(lA, gA);
				t1 = _mm_mulhi_epi16(lB, gB);
				o128 = _mm_adds_epi16(o128,t1);
				t1 = _mm_mulhi_epi16(lC, gC);
				o128 = _mm_adds_epi16(o128,t1);
				t1 = _mm_mulhi_epi16(lD, gD);
				o128 = _mm_adds_epi16(o128,t1);

				if(shift)
				{
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);
				}
				else
				{
					// upper limit to 32767
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_slli_epi16(o128,1);
				}

				if(skip != 6) //!RGB48 || !WP13
				{
					half = _mm_srli_epi16(half,8);
					o128 = _mm_srli_epi16(o128,8);
					o128 = _mm_packus_epi16(o128, half);
				}

				if(widthbytes == origwidthbytes || x+32 < origwidthbytes)
				{
					_mm_storeu_si128(outline128++, o128);
				}
				else
				{
					//if(x+16 < origwidthbytes+16)
					if(origwidthextra > 16)
					{
_mm_storeu_si128((__m128i *)scanline[0], o128);
						memcpy((char *)outline128, (char *)scanline[0], origwidthextra - 16);
					}
					outline128++;
				}
			}

			// Rotate the four scratch scanlines down one and pull in the next
			// contributing source row (or zeros past the image edge.)
			tline = scanline[0];
			scanline[0] = scanline[1];
			scanline[1] = scanline[2];
			scanline[2] = scanline[3];
			scanline[3] = tline;
			if(yposi+y+4 >=0 && yposi+y+4<height)
			{
				unsigned short *ptr = RGB48;
				if(neg)
					ptr += (height-1-(yposi+y+4))*spitch;
				else
					ptr += (yposi+y+4)*spitch;
				memcpy(scanline[3], ptr, origwidthbytes);
			}
			else
			{
				memset(scanline[3], 0, origwidthbytes);
			}
		}
	}
}


// Horizontal shift + zoom (with optional mirror, frame tilt and a "dynamic
// stretch" region around FrameHDynCenter) of a single RGB48 scanline, using a
// 4-tap lanczos resample along x.  'line' is the row index, used for the
// rotation-offset term; 'buffer' receives a staging copy of the scanline.
void RGB48HoriShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye)
{
	float xposf,xstepf;
	int x;
	//int endofSSEline = 0;
	unsigned short *scanline = (unsigned short *)buffer;
	short *sscanline = (short *)buffer;
	int neg = 0;
	float offset = hoffset;

	if(flip)
	{
		// Mirror the scanline in place, swapping 3-component pixels from
		// the two ends.
		unsigned short *ptrL = RGB48;
		unsigned short *ptrR = RGB48;
		ptrR += (width*3) - 3;
		for(x=0;x<width/2;x++)
		{
			int t;
			t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t;
			t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t;
			t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t;
			ptrR -= 6;
		}
	}

	// Frame tilt zooms one eye in and the other out.
	if(eye > 0)
	{
		zoom *= 1.0f + frameTilt;
	}
	else
	{
		zoom /= 1.0f + frameTilt;
	}

	xposf = (float)width*(0.5f - 1.0f/(2.0f*zoom) - offset);
	xposf -= width * roffset * 0.5f / zoom;
	xposf += (float)line * ((float)width* roffset / ((float)height*zoom));
	if(xposf < 0.0) neg = 1;
	xstepf = 1.0f/zoom;

	// Stage the source scanline (3 shorts per pixel) so the resample can
	// write back into RGB48 in place.
	memcpy(scanline, RGB48, width*3*2);

	{
		//unsigned short zeroline[3] = {0};
		int xx = 0;
		int ixpos = (int)(xposf * 65536.0f);   // 16.16 fixed-point x position
		int ixstep = (int)(xstepf * 65536.0f); // 16.16 fixed-point x step
		float xbase = xposf / (float)width;
		float xstep = xstepf / (float)width;
		float z = (decoder->cfhddata.FrameHDynamic - 1.0f)*2.0f;
		// int holdstart = width*5/10; // Use to specify a area of uniform stretch
		// int holdend = width*5/10;
		int holdstart = (int)((decoder->cfhddata.FrameHDynCenter - decoder->cfhddata.FrameHDynWidth*0.125)*(float)width);
		int holdend = (int)((decoder->cfhddata.FrameHDynCenter + decoder->cfhddata.FrameHDynWidth*0.125)*(float)width);
		float flatxstep;
		float modified_xstep_avg;
		float bottomxstep;
		float basexstepstart;
		float basexstepend;
		float range;
#if MMXSUPPORTED //TODO DANREMOVE
		__m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff);
#endif

		// Clamp the uniform-stretch ("hold") region to the scanline.
		if(holdstart < 0) holdstart = 0, holdend = (int)((decoder->cfhddata.FrameHDynWidth*0.5)*(float)width);
		if(holdend > width) holdend = width, holdstart = (int)((1.0 - decoder->cfhddata.FrameHDynWidth*0.5)*(float)width);

		// Derive the step ramps so the hold region advances at 'flatxstep'
		// while the two edge regions absorb the difference, keeping the
		// average step equal to 'xstep' over the whole line.
		range = (float)(holdend - holdstart);
		flatxstep = xstep-z*0.5f*xstep;
		modified_xstep_avg = (xstep * (float)width - range * flatxstep) / ((float)width - range);
		bottomxstep = modified_xstep_avg - (flatxstep - modified_xstep_avg);

		if(holdstart == (width-holdend))
		{
			basexstepstart = bottomxstep;
			basexstepend = bottomxstep;
		}
		else if(holdstart < (width-holdend))
		{
			float a = (float)holdstart / (float)(width-holdend);
			float startavg = a * modified_xstep_avg + (1.0f - a) * flatxstep;
			float endavg = (modified_xstep_avg * ((float)width-range) - startavg * (float)holdstart) / (float)(width-holdend);
			basexstepstart = startavg - (flatxstep - startavg);
			basexstepend = endavg - (flatxstep - endavg);
		}
		else
		{
			float a = (float)(width-holdend) / (float)holdstart;
			float endavg = a * modified_xstep_avg + (1.0f - a) * flatxstep;
			float startavg = (modified_xstep_avg * ((float)width-range) - endavg * (float)(width-holdend)) / (float)holdstart;
			basexstepstart = startavg - (flatxstep - startavg);
			basexstepend = endavg - (flatxstep - endavg);
		}

		if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
		{
			// Signed 13-bit path: data read through 'sscanline'.
			float fxpos = xbase;
			for(x=0;x<width; x++) //RGB
			{
				int gains = 0;
				int xp, rmdr;

				if(z != 0.0)
				{
					// Dynamic stretch: ramp the step through the edge regions,
					// constant step inside the hold region.
					if(x<holdstart)
					{
						fxpos += basexstepstart*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
					}
					else if(x>holdend)
					{
						int diff = width - x;
						int range = width - holdend;
						fxpos += basexstepend*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
					}
					else
					{
						fxpos += flatxstep;
					}
					xp = (int)(fxpos * 65536.0f*(float)width);
					rmdr = 63-((xp>>10) & 63);
					xp >>= 16;
				}
				else
				{
					xp = ixpos>>16;
					rmdr = 63-((ixpos>>10) & 63);
					ixpos += ixstep;
				}

				xp -= 1;// was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
				if(xp>4 && xp<width-4 && xx < (width-1)*3) //We need 3 values for RGB< yet we write 4, so the last pixel can't be done with MMX
				{
					__m64 *src64;
					__m64 *dst64;
					__m64 sumx16;
					__m64 rgbx16;
					__m64 gain16;
					int linepos = (xp-1)*3;
					src64 = (__m64 *)&sscanline[linepos];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
					sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit

					src64 = (__m64 *)&sscanline[linepos+3];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);

					src64 = (__m64 *)&sscanline[linepos+6];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);

					src64 = (__m64 *)&sscanline[linepos+9];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);

					sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
					sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
					sumx16 = _mm_slli_pi16(sumx16, 1);

					dst64 = (__m64 *)&RGB48[xx];
					*dst64 = sumx16;
				}
				else
#endif
				{
					// Scalar edge fallback: out-of-range taps donate their
					// weight to the next in-range tap via accumulated 'gains'.
					int i,r=0,g=0,b=0;
					for(i=0; i<4; i++)
					{
						if(xp<=0 || xp>= width)
						{
							gains += lanczos[rmdr]>>1;
						}
						else
						{
							gains += lanczos[rmdr]>>1;
							r += (gains * sscanline[xp*3]);
							g += (gains * sscanline[xp*3+1]);
							b += (gains * sscanline[xp*3+2]);
							gains = 0;
						}
						xp++;
						rmdr+=64;
					}
					r >>= 14;
					g >>= 14;
					b >>= 14;
					if(r<0) r=0; else if(r>65535) r=65535;
					if(g<0) g=0; else if(g>65535) g=65535;
					if(b<0) b=0; else if(b>65535) b=65535;
					RGB48[xx] = r;
					RGB48[xx+1] = g;
					RGB48[xx+2] = b;
				}
				xx+=3;
			}
		}
		else
		{
			// Unsigned 16-bit path: data read through 'scanline'.
			float fxpos = xbase;
			for(x=0;x<width; x++) //RGB
			{
				int gains = 0;
				int xp, rmdr;

				if(z != 0.0)
				{
					if(x<holdstart)
					{
						fxpos += basexstepstart*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
					}
					else if(x>holdend)
					{
						int diff = width - x;
						int range = width - holdend;
						fxpos += basexstepend*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
					}
					else
					{
						fxpos += flatxstep;
					}
					xp = (int)(fxpos * 65536.0f*(float)width);
					rmdr = 63-((xp>>10) & 63);
					xp >>= 16;
				}
				else
				{
					xp = ixpos>>16;
					rmdr = 63-((ixpos>>10) & 63);
					ixpos += ixstep;
				}

				xp -= 1; // was -2 causing a right shift //DAN20100225
#if MMXSUPPORTED //TODO DANREMOVE
				if(xp>4 && xp<width-4)
				{
					__m64 *src64;
					__m64 *dst64;
					__m64 sumx16;
					__m64 rgbx16;
					__m64 gain16;
					int linepos = (xp-0)*3; //DAN20102602 -- fix left edge error.
					src64 = (__m64 *)&scanline[linepos];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit

					src64 = (__m64 *)&scanline[linepos+3];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);

					src64 = (__m64 *)&scanline[linepos+6];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);

					src64 = (__m64 *)&scanline[linepos+9];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);

					sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
					sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
					sumx16 = _mm_slli_pi16(sumx16, 2);

					dst64 = (__m64 *)&RGB48[xx];
					*dst64 = sumx16;
				}
				else
#endif
				{
					int i,r=0,g=0,b=0;
					for(i=0; i<4; i++)
					{
						if(xp<=0 || xp>= width)
						{
gains += lanczos[rmdr]>>1; } else { gains += lanczos[rmdr]>>1; r += (gains * scanline[xp*3]); g += (gains * scanline[xp*3+1]); b += (gains * scanline[xp*3+2]); gains = 0; } xp++; rmdr+=64; } r >>= 14; g >>= 14; b >>= 14; if(r<0) r=0; else if(r>65535) r=65535; if(g<0) g=0; else if(g>65535) g=65535; if(b<0) b=0; else if(b>65535) b=65535; RGB48[xx] = r; RGB48[xx+1] = g; RGB48[xx+2] = b; } xx+=3; } } } #if MMXSUPPORTED //TODO DANREMOVE //_mm_empty(); #endif } #if 0 //Why is this not used? void RGB48HoriShiftZoomFine(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye) { float xposf,remainf,xstepf; int xposi,tablepos,x; int Ra,Rb,Rc,Rd; int Ga,Gb,Gc,Gd; int Ba,Bb,Bc,Bd; int gainA,gainB,gainC,gainD; int endofSSEline = 0; unsigned short *scanline = (unsigned short *)buffer; short *sscanline = (short *)buffer; int neg = 0,shift = 0; float offset = hoffset; __m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2; __m128i *line128, *outline128; if(flip) { unsigned short *ptrL = RGB48; unsigned short *ptrR = RGB48; ptrR += (width*3) - 3; for(x=0;x<width/2;x++) { int t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; ptrR -= 6; } } if(eye > 0) { zoom *= 1.0 + frameTilt; } else { zoom /= 1.0 + frameTilt; } xposf = (float)width*(0.5 - 1.0/(2.0*zoom) - offset); xposf -= width * roffset * 0.5 / zoom; xposf += (float)line * ((float)width* roffset / ((float)height*zoom)); if(xposf < 0.0) neg = 1; xstepf = 1.0/zoom; memcpy(scanline, RGB48, width*3*2); { unsigned short zeroline[3] = {0}; int xx = 0; int ixpos = xposf * 65536.0; int ixstep = xstepf * 65536.0; float xbase = xposf / (float)width; float xstep = xstepf / (float)width; float z = (decoder->cfhddata.FrameHDynamic - 1.0)*2.0; int holdstart = width*5/10; // Use to specify a area of uniform stretch int holdend = width*5/10; float flatxstep = 
xstep-z*0.5*xstep; float modified_xstep_avg = (xstep * (float)width - (float)(holdend - holdstart) * flatxstep) / (float)(width - (holdend - holdstart)); float bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg); __m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff); if(bottomxstep < 0.0) { bottomxstep = 0.0; flatxstep = modified_xstep_avg + modified_xstep_avg; } if(flatxstep < 0.0) { flatxstep = 0.0; bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg); } if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13) { float fxpos = xbase; for(x=0;x<width; x++) //RGB { int gains = 0; int xp, rmdr; if(z != 0.0) { if(x<holdstart) { fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart); } else if(x>holdend) { int diff = width - x; int range = width - holdend; fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range); } else { fxpos += flatxstep; } /* fxpos = xbase + xstep * x;//(float)ixpos/(65536.0*(float)width); if(fxpos >= 0.0 && fxpos <= 1.0) { if(z > 0.0) { fxpos = 1.8*fxpos - 2.4*fxpos*fxpos + (1.6*fxpos*fxpos*fxpos); fxpos = fxpos * (z) + (xbase + xstep * x) * (1.0-z); } else { fxpos = 3.0*fxpos*fxpos - 2.0*fxpos*fxpos*fxpos; fxpos = fxpos * (-z) + (xbase + xstep * x) * (1.0+z); } } */ xp = (fxpos * 65536.0*(float)width); rmdr = 63-((xp>>10) & 63); xp >>= 16; } else { xp = ixpos>>16; rmdr = 63-((ixpos>>10) & 63); ixpos += ixstep; } xp -= 1;// was -2 causing a right shift //DAN20100225 if(xp>4 && xp<width-4) { __m64 *src64; __m64 *dst64; __m64 sumx16; __m64 rgbx16; __m64 gain16; int linepos = (xp-1)*3; src64 = (__m64 *)&sscanline[linepos]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit src64 = (__m64 *)&sscanline[linepos+3]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); 
src64 = (__m64 *)&sscanline[linepos+6]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&sscanline[linepos+9]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); sumx16 = _mm_adds_pi16(sumx16, overflowprotect); sumx16 = _mm_subs_pu16(sumx16, overflowprotect); sumx16 = _mm_slli_pi16(sumx16, 1); dst64 = (__m64 *)&RGB48[xx]; *dst64 = sumx16; } else { int i,t,r=0,g=0,b=0; for(i=0; i<4; i++) { if(xp<=0 || xp>= width) { /* if(i == 3) //DAN20101112 this code was crashing disparity zoom { gains = lanczos[rmdr]>>1; r += (gains * sscanline[(xp-1)*3]); g += (gains * sscanline[(xp-1)*3+1]); b += (gains * sscanline[(xp-1)*3+2]); } else */ { gains += lanczos[rmdr]>>1; } } else { gains += lanczos[rmdr]>>1; r += (gains * sscanline[xp*3]); g += (gains * sscanline[xp*3+1]); b += (gains * sscanline[xp*3+2]); gains = 0; } xp++; rmdr+=64; } r >>= 14; g >>= 14; b >>= 14; if(r<0) r=0; else if(r>65535) r=65535; if(g<0) g=0; else if(g>65535) g=65535; if(b<0) b=0; else if(b>65535) b=65535; RGB48[xx] = r; RGB48[xx+1] = g; RGB48[xx+2] = b; } xx+=3; } } else { float fxpos = xbase; for(x=0;x<width; x++) //RGB { int gains = 0; int xp, rmdr; if(z != 0.0) { if(x<holdstart) { fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart); } else if(x>holdend) { int diff = width - x; int range = width - holdend; fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range); } else { fxpos += flatxstep; } /* fxpos = xbase + xstep * x;//(float)ixpos/(65536.0*(float)width); if(fxpos >= 0.0 && fxpos <= 1.0) { if(z > 0.0) { fxpos = 1.8*fxpos - 2.4*fxpos*fxpos + (1.6*fxpos*fxpos*fxpos); fxpos = fxpos * (z) + (xbase + xstep * x) * (1.0-z); } else { fxpos = 3.0*fxpos*fxpos - 2.0*fxpos*fxpos*fxpos; 
fxpos = fxpos * (-z) + (xbase + xstep * x) * (1.0+z); } } */ xp = (fxpos * 65536.0*(float)width); rmdr = 63-((xp>>10) & 63); xp >>= 16; } else { xp = ixpos>>16; rmdr = 63-((ixpos>>10) & 63); ixpos += ixstep; } xp -= 1; // was -2 causing a right shift //DAN20100225 if(xp>4 && xp<width-4) { __m64 *src64; __m64 *dst64; __m64 sumx16; __m64 rgbx16; __m64 gain16; int linepos = (xp-0)*3; //DAN20102602 -- fix left edge error. src64 = (__m64 *)&scanline[linepos]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit src64 = (__m64 *)&scanline[linepos+3]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&scanline[linepos+6]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&scanline[linepos+9]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); sumx16 = _mm_adds_pi16(sumx16, overflowprotect); sumx16 = _mm_subs_pu16(sumx16, overflowprotect); sumx16 = _mm_slli_pi16(sumx16, 2); dst64 = (__m64 *)&RGB48[xx]; *dst64 = sumx16; } else { int i,t,r=0,g=0,b=0; for(i=0; i<4; i++) { if(xp<=0 || xp>= width) { /* if(i == 3) //DAN20101112 this code was crashing disparity zoom { gains = lanczos[rmdr]>>1; r += (gains * scanline[(xp-1)*3]); g += (gains * scanline[(xp-1)*3+1]); b += (gains * scanline[(xp-1)*3+2]); } else */ { gains += lanczos[rmdr]>>1; } } else { gains += lanczos[rmdr]>>1; r += (gains * scanline[xp*3]); g += (gains * scanline[xp*3+1]); b += (gains * scanline[xp*3+2]); gains = 0; 
} xp++; rmdr+=64; } r >>= 14; g >>= 14; b >>= 14; if(r<0) r=0; else if(r>65535) r=65535; if(g<0) g=0; else if(g>65535) g=65535; if(b<0) b=0; else if(b>65535) b=65535; RGB48[xx] = r; RGB48[xx+1] = g; RGB48[xx+2] = b; } xx+=3; } } } /* memcpy(scanline, RGB48, width*3*2); { for(x=0;x<width*3; x+=3) //RGB { int r,g,b,xp = ((int)xposf)*3; xposf += xstepf; if(xp<0 || xp>= width*3) { RGB48[x] = 0; RGB48[x+1] = 0; RGB48[x+2] = 0; } else { r = scanline[xp]; g = scanline[xp+1]; b = scanline[xp+2]; RGB48[x] = r; RGB48[x+1] = g; RGB48[x+2] = b; } } } */ //_mm_empty(); } #endif void RGBA64HoriShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye) { float xposf,xstepf; int x; //int endofSSEline = 0; unsigned short *scanline = (unsigned short *)buffer; short *sscanline = (short *)buffer; int neg = 0; float offset = hoffset; if(flip) { unsigned short *ptrL = RGB48; unsigned short *ptrR = RGB48; ptrR += (width*4) - 4; for(x=0;x<width/2;x++) { int t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; ptrR -= 4; } } if(eye > 0) { zoom *= 1.0f + frameTilt; } else { zoom /= 1.0f + frameTilt; } xposf = (float)width*(0.5f - 1.0f/(2.0f*zoom) - offset); xposf -= width * roffset * 0.5f; xposf += line * (width* roffset / ((float)height*zoom)); if(xposf < 0.0) neg = 1; xstepf = 1.0f/zoom; memcpy(scanline, RGB48, width*4*2); { //unsigned short zeroline[3] = {0}; int xx = 0; int ixpos = (int)(xposf * 65536.0f); int ixstep = (int)(xstepf * 65536.0f); float xbase = xposf / (float)width; float xstep = xstepf / (float)width; float z = (decoder->cfhddata.FrameHDynamic - 1.0f)*2.0f; int holdstart = width*5/10; // Use to specify a area of uniform stretch int holdend = width*5/10; float flatxstep = xstep-z*0.5f*xstep; float modified_xstep_avg = (xstep * 
(float)width - (float)(holdend - holdstart) * flatxstep) / (float)(width - (holdend - holdstart)); float bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg); #if MMXSUPPORTED //TODO DANREMOVE __m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff); #endif if(bottomxstep < 0.0) { bottomxstep = 0.0; flatxstep = modified_xstep_avg + modified_xstep_avg; } if(flatxstep < 0.0) { flatxstep = 0.0; bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg); } if(decoder->StereoBufferFormat == DECODED_FORMAT_W13A) { float fxpos = xbase; for(x=0;x<width; x++) //RGB { int gains = 0; int xp, rmdr; if(z != 0.0) { if(x<holdstart) { fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart); } else if(x>holdend) { int diff = width - x; int range = width - holdend; fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range); } else { fxpos += flatxstep; } xp = (int)(fxpos * 65536.0f*(float)width); rmdr = 63-((xp>>10) & 63); xp >>= 16; } else { xp = ixpos>>16; rmdr = 63-((ixpos>>10) & 63); ixpos += ixstep; } xp -= 1;// was -2 causing a right shift //DAN20100225 #if MMXSUPPORTED //TODO DANREMOVE if(xp>4 && xp<width-4) { __m64 *src64; __m64 *dst64; __m64 sumx16; __m64 rgbx16; __m64 gain16; int linepos = (xp-1)*4; src64 = (__m64 *)&sscanline[linepos]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit src64 = (__m64 *)&sscanline[linepos+4]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&sscanline[linepos+8]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&sscanline[linepos+12]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit rgbx16 = 
_mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); sumx16 = _mm_adds_pi16(sumx16, overflowprotect); sumx16 = _mm_subs_pu16(sumx16, overflowprotect); sumx16 = _mm_slli_pi16(sumx16, 1); dst64 = (__m64 *)&RGB48[xx]; *dst64 = sumx16; } else #endif { int i,r=0,g=0,b=0,a=0; for(i=0; i<4; i++) { if(xp<=0 || xp>= width) { gains += lanczos[rmdr]>>1; } else { gains += lanczos[rmdr]>>1; r += (gains * sscanline[xp*4]); g += (gains * sscanline[xp*4+1]); b += (gains * sscanline[xp*4+2]); a += (gains * sscanline[xp*4+3]); gains = 0; } xp++; rmdr+=64; } r >>= 14; g >>= 14; b >>= 14; a >>= 14; if(r<0) r=0; else if(r>65535) r=65535; if(g<0) g=0; else if(g>65535) g=65535; if(b<0) b=0; else if(b>65535) b=65535; if(a<0) a=0; else if(a>65535) a=65535; RGB48[xx] = r; RGB48[xx+1] = g; RGB48[xx+2] = b; RGB48[xx+3] = a; } xx+=4; } } else { float fxpos = xbase; for(x=0;x<width; x++) //RGB { int gains = 0; int xp, rmdr; if(z != 0.0) { if(x<holdstart) { fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart); } else if(x>holdend) { int diff = width - x; int range = width - holdend; fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range); } else { fxpos += flatxstep; } xp = (int)(fxpos * 65536.0f*(float)width); rmdr = 63-((xp>>10) & 63); xp >>= 16; } else { xp = ixpos>>16; rmdr = 63-((ixpos>>10) & 63); ixpos += ixstep; } xp -= 1; // was -2 causing a right shift //DAN20100225 #if MMXSUPPORTED //TODO DANREMOVE if(xp>4 && xp<width-4) { __m64 *src64; __m64 *dst64; __m64 sumx16; __m64 rgbx16; __m64 gain16; int linepos = (xp-0)*4; //DAN20102602 -- fix left edge error. 
src64 = (__m64 *)&scanline[linepos]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit src64 = (__m64 *)&scanline[linepos+4]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&scanline[linepos+8]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&scanline[linepos+12]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); sumx16 = _mm_adds_pi16(sumx16, overflowprotect); sumx16 = _mm_subs_pu16(sumx16, overflowprotect); sumx16 = _mm_slli_pi16(sumx16, 2); dst64 = (__m64 *)&RGB48[xx]; *dst64 = sumx16; } else #endif { int i,r=0,g=0,b=0,a=0; for(i=0; i<4; i++) { if(xp<=0 || xp>= width) { gains += lanczos[rmdr]>>1; } else { gains += lanczos[rmdr]>>1; r += (gains * scanline[xp*4]); g += (gains * scanline[xp*4+1]); b += (gains * scanline[xp*4+2]); a += (gains * scanline[xp*4+3]); gains = 0; } xp++; rmdr+=64; } r >>= 14; g >>= 14; b >>= 14; a >>= 14; if(r<0) r=0; else if(r>65535) r=65535; if(g<0) g=0; else if(g>65535) g=65535; if(b<0) b=0; else if(b>65535) b=65535; if(a<0) a=0; else if(a>65535) a=65535; RGB48[xx] = r; RGB48[xx+1] = g; RGB48[xx+2] = b; RGB48[xx+3] = a; } xx+=4; } } } #if MMXSUPPORTED //TODO DANREMOVE //_mm_empty(); #endif } void RGB48WindowMask(DECODER *decoder, unsigned short *RGB48, int width, int channel, float windowMask) { float line = (float)width * fabsf(windowMask); int pixelbytes = 6; float frac = (float)(line-(float)((int)line)); 
switch(decoder->StereoBufferFormat) { case DECODED_FORMAT_RGB32: case DECODED_FORMAT_W13A: case DECODED_FORMAT_RG64: pixelbytes = 8; break; } if(decoder->StereoBufferFormat == DECODED_FORMAT_W13A || decoder->StereoBufferFormat == DECODED_FORMAT_WP13) // signed math needed { short *ptrL = (short *)RGB48; short *ptrR = (short *)RGB48; if(windowMask < 0) channel = channel == 0 ? 1 : 0; if(pixelbytes == 6) { if(channel == 0) { memset(ptrL, 0, 6*(int)line); ptrL += ((int)line*3); ptrL[0] = (int)((float)ptrL[0] * (1.0-frac)); ptrL[1] = (int)((float)ptrL[1] * (1.0-frac)); ptrL[2] = (int)((float)ptrL[2] * (1.0-frac)); } else { ptrR += ((width-(int)line)*3); memset(ptrR, 0, 6*(int)line); ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac)); ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac)); ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac)); } } else { if(channel == 0) { memset(ptrL, 0, 8*(int)line); ptrL += ((int)line*4); ptrL[0] = (int)((float)ptrL[0] * (1.0-frac)); ptrL[1] = (int)((float)ptrL[1] * (1.0-frac)); ptrL[2] = (int)((float)ptrL[2] * (1.0-frac)); ptrL[3] = (int)((float)ptrL[3] * (1.0-frac)); } else { ptrR += ((width-(int)line)*4); memset(ptrR, 0, 8*(int)line); ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac)); ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac)); ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac)); ptrR[-4] = (int)((float)ptrR[-4] * (1.0-frac)); } } } else { unsigned short *ptrL = RGB48; unsigned short *ptrR = RGB48; if(windowMask < 0) channel = channel == 0 ? 
1 : 0; if(pixelbytes == 6) { if(channel == 0) { memset(ptrL, 0, 6*(int)line); ptrL += ((int)line*3); ptrL[0] = (int)((float)ptrL[0] * (1.0-frac)); ptrL[1] = (int)((float)ptrL[1] * (1.0-frac)); ptrL[2] = (int)((float)ptrL[2] * (1.0-frac)); } else { ptrR += ((width-(int)line)*3); memset(ptrR, 0, 6*(int)line); ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac)); ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac)); ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac)); } } else { if(channel == 0) { memset(ptrL, 0, 8*(int)line); ptrL += ((int)line*4); ptrL[0] = (int)((float)ptrL[0] * (1.0-frac)); ptrL[1] = (int)((float)ptrL[1] * (1.0-frac)); ptrL[2] = (int)((float)ptrL[2] * (1.0-frac)); ptrL[3] = (int)((float)ptrL[3] * (1.0-frac)); } else { ptrR += ((width-(int)line)*4); memset(ptrR, 0, 8*(int)line); ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac)); ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac)); ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac)); ptrR[-4] = (int)((float)ptrR[-4] * (1.0-frac)); } } } } void RGB48HoriShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offset, int flip) { float xposf,remainf; int xposi,tablepos,x; int gainA,gainB,gainC,gainD; //int endofSSEline = 0; unsigned short *scanline = (unsigned short *)buffer; int neg = 0,shift = 0; __m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2; __m128i *line128, *outline128; if(flip) { unsigned short *ptrL = RGB48; unsigned short *ptrR = RGB48; ptrR += (width*3) - 3; for(x=0;x<width/2;x++) { int t1,t2,t3; t1 = ptrL[0]; ptrL[0] = ptrR[0]; ptrR[0] = t1; t2 = ptrL[1]; ptrL[1] = ptrR[1]; ptrR[1] = t2; t3 = ptrL[2]; ptrL[2] = ptrR[2]; ptrR[2] = t3; ptrL += 3; ptrR -= 3; } } if(offset < 0.0) neg = 1; xposf = width * offset; xposi = (int)floorf(xposf); remainf = xposf - (float)xposi; tablepos = (int)(remainf*(float)SUBPIXEL); xposi = abs(xposi); if(xposi==0 && tablepos == 0) return; // no move required gainA = gains[tablepos][0]; gainB = gains[tablepos][1]; gainC = gains[tablepos][2]; gainD = gains[tablepos][3]; 
if(neg == 0) { unsigned short *ptr = scanline; int nwidth = width-xposi+16; if(nwidth > width) nwidth = width; for(x=0;x<xposi+2;x++) { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b } memcpy(ptr, RGB48, (nwidth)*3*2); ptr += (nwidth)*3; for(x=0;x<16;x++) { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b } } else { unsigned short *ptr = scanline; for(x=0;x<2;x++) { if(x+xposi-2>=0) { *ptr++ = RGB48[(x+xposi-2)*3];//r *ptr++ = RGB48[(x+xposi-2)*3+1];//g *ptr++ = RGB48[(x+xposi-2)*3+2];//b } else { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b } } memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2); ptr += (width-xposi)*3; for(x=0;x<xposi+16;x++) { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b } } gA = _mm_set1_epi16(gainA); gB = _mm_set1_epi16(gainB); gC = _mm_set1_epi16(gainC); gD = _mm_set1_epi16(gainD); line128 = (__m128i *)&scanline[0]; //outline128 = line128; outline128 = (__m128i *)&RGB48[0]; //l1 = load128;//r1,g1,b1,r2,g2,b2,r3,g3, //l2 = load128;//b3,r4,g4,b4,r5,g5,b5,r6 //l3 = load128;//g6,b6,r7,g7,b7,r8,g8,b8 if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13) { l1 = _mm_loadu_si128(line128++); l2 = _mm_loadu_si128(line128++); l3 = _mm_loadu_si128(line128++); shift = 0; } else { l1 = _mm_loadu_si128(line128++); l1 = _mm_srli_epi16(l1,3); //13-bit unsigned l2 = _mm_loadu_si128(line128++); l2 = _mm_srli_epi16(l2,3); //13-bit unsigned l3 = _mm_loadu_si128(line128++); l3 = _mm_srli_epi16(l3,3); //13-bit unsigned shift = 3; } for(x=0;x<width*3; x+=8) { //o=l1* gainA o128 = _mm_mulhi_epi16(l1, gA); //t1 = l1<<3*16 //t1 = r2,g2,b2,r3,g3, 0 0 0 //t2 = l2>>16*5 //t2 = 0 0 0 0 0 b3,r4,g4 //t1 += t2; //t1 = r2,g2,b2,r3,g3,b3,r4,g4 //l1 = t1 //l1 = r2,g2,b2,r3,g3,b3,r4,g4 //t1 *= gainB //o += t1 t1 = _mm_srli_si128(l1,3*2); t2 = _mm_slli_si128(l2,5*2); t1 = _mm_adds_epi16(t1,t2); l1 = t1; t1 = _mm_mulhi_epi16(t1, gB); o128 = _mm_adds_epi16(o128,t1); //t1 = l1<<3*16 //t1 = r3,g3,b3,r4,g4 0 0 0 //t2 = l2<<3*16; //t2 = b4,r5,g5,b5,r6 0 0 0 //t2 >>= 5*16; //t2 = 0 0 0 0 0 
b4,r5,g5 //t1 += t2 //t1 = r3,g3,b3,r4,g4,b4,r5,g5 //l1 = t1 //l1 = r3,g3,b3,r4,g4,b4,r5,g5 //t1 *= gainC //o += t1 t1 = _mm_srli_si128(l1,3*2); t2 = _mm_srli_si128(l2,3*2); t2 = _mm_slli_si128(t2,5*2); t1 = _mm_adds_epi16(t1,t2); l1 = t1; t1 = _mm_mulhi_epi16(t1, gC); o128 = _mm_adds_epi16(o128,t1); //t1 = l1<<3*16 //t1 = r4,g4,b4,r5,g5 0 0 0 //t2 = l2<<6*16 //t2 = b5,r6 0 0 0 0 0 0 //t2 >>= 5 * 16; //t2 = 0 0 0 0 0 b5,r6 0 //t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6, 0 //t2 = l3>>7*16 //t2 = 0 0 0 0 0 0 0 g6 //t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6,g6 //t1 *= gainD //o += t1 t1 = _mm_srli_si128(l1,3*2); t2 = _mm_srli_si128(l2,6*2); t2 = _mm_slli_si128(t2,5*2); t1 = _mm_adds_epi16(t1,t2); t2 = _mm_slli_si128(l3,7*2); t1 = _mm_adds_epi16(t1,t2); t1 = _mm_mulhi_epi16(t1, gD); o128 = _mm_adds_epi16(o128,t1); l1 = l2; l2 = l3; l3 = _mm_loadu_si128(line128++); if(shift) { l3 = _mm_srli_epi16(l3,3); //13-bit unsigned o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_slli_epi16(o128,4); } else { // upper limit to 32767 o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_slli_epi16(o128,1); } _mm_storeu_si128(outline128++, o128); } } void RGBA64HoriShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offset, int flip) { float xposf,remainf; int xposi,tablepos,x; int gainA,gainB,gainC,gainD; //int endofSSEline = 0; unsigned short *scanline = (unsigned short *)buffer; int neg = 0,shift = 0; __m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2; __m128i *line128, *outline128; if(flip) { unsigned short *ptrL = RGB48; unsigned short *ptrR = RGB48; ptrR += (width*4) - 4; for(x=0;x<width/2;x++) { int t1,t2,t3,t4; t1 = ptrL[0]; ptrL[0] = ptrR[0]; ptrR[0] = t1; t2 = ptrL[1]; ptrL[1] = ptrR[1]; ptrR[1] = t2; t3 = ptrL[2]; ptrL[2] = ptrR[2]; ptrR[2] = t3; t4 = ptrL[2]; ptrL[3] = ptrR[3]; ptrR[3] = t4; 
ptrL += 4; ptrR -= 4; } } if(offset < 0.0) neg = 1; xposf = width * offset; xposi = (int)floorf(xposf); remainf = xposf - (float)xposi; tablepos = (int)(remainf*(float)SUBPIXEL); xposi = abs(xposi); if(xposi==0 && tablepos == 0) return; // no move required gainA = gains[tablepos][0]; gainB = gains[tablepos][1]; gainC = gains[tablepos][2]; gainD = gains[tablepos][3]; if(neg == 0) { unsigned short *ptr = scanline; int nwidth = width-xposi+16; if(nwidth > width) nwidth = width; for(x=0;x<xposi+2;x++) { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b *ptr++ = 0;//a } memcpy(ptr, RGB48, (nwidth)*4*2); ptr += (nwidth)*4; for(x=0;x<16;x++) { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b *ptr++ = 0;//a } } else { unsigned short *ptr = scanline; for(x=0;x<2;x++) { if(x+xposi-2>=0) { *ptr++ = RGB48[(x+xposi-2)*4];//r *ptr++ = RGB48[(x+xposi-2)*4+1];//g *ptr++ = RGB48[(x+xposi-2)*4+2];//b *ptr++ = RGB48[(x+xposi-2)*4+3];//a } else { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b *ptr++ = 0;//a } } memcpy(ptr, &RGB48[xposi*4], (width-xposi)*4*2); ptr += (width-xposi)*4; for(x=0;x<xposi+16;x++) { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b *ptr++ = 0;//a } } gA = _mm_set1_epi16(gainA); gB = _mm_set1_epi16(gainB); gC = _mm_set1_epi16(gainC); gD = _mm_set1_epi16(gainD); line128 = (__m128i *)&scanline[0]; //outline128 = line128; outline128 = (__m128i *)&RGB48[0]; //l1 = load128;//r1,g1,b1,a1,r2,g2,b2,a2, //l2 = load128;//r3,g3,b3,a3,r4,g4,b4,a4, //l3 = load128;//r5,g5,b5,a5,r6,g6,b6,a6, //l4 = load128;//r7,g7,b7,a7,r8,g8,b8,a8, if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A) { l1 = _mm_loadu_si128(line128++); l2 = _mm_loadu_si128(line128++); l3 = _mm_loadu_si128(line128++); shift = 0; } else { l1 = _mm_loadu_si128(line128++); l1 = _mm_srli_epi16(l1,3); //13-bit unsigned l2 = _mm_loadu_si128(line128++); l2 = _mm_srli_epi16(l2,3); //13-bit unsigned l3 = _mm_loadu_si128(line128++); l3 = _mm_srli_epi16(l3,3); //13-bit unsigned 
shift = 3; } for(x=0;x<width*4; x+=8) { //o=l1* gainA o128 = _mm_mulhi_epi16(l1, gA); //t1 = l1<<4*16 //t1 = r2,g2,b2,a2,0, 0 0 0 //t2 = l2>>4*16 //t2 = 0 0 0 0 r3,g3,b3,a4 //t1 += t2; //t1 = r2,g2,b2,a2,r3,g3,b3,a4 //l1 = t1 //l1 = r2,g2,b2,a2,r3,g3,b3,a4 //t1 *= gainB //o += t1 t1 = _mm_srli_si128(l1,4*2); t2 = _mm_slli_si128(l2,4*2); t1 = _mm_adds_epi16(t1,t2); l1 = t1; t1 = _mm_mulhi_epi16(t1, gB); o128 = _mm_adds_epi16(o128,t1); //t1 = l1<<4*16 //t1 = r3,g3,b3,a3, 0 0 0 0 //t2 = l2<<4*16;//t2 = r4,g4,b4,a4, 0 0 0 0 //t2 >>= 4*16; //t2 = 0 0 0 0 r4,g4,b4,a4 //t1 += t2 //t1 = r3,g3,b3,a4,r4,g4,b4,a4 //l1 = t1 //l1 = r3,g3,b3,a4,r4,g4,b4,a4 //t1 *= gainC //o += t1 t1 = _mm_srli_si128(l1,4*2); t2 = _mm_srli_si128(l2,4*2); t2 = _mm_slli_si128(t2,4*2); t1 = _mm_adds_epi16(t1,t2); l1 = t1; t1 = _mm_mulhi_epi16(t1, gC); o128 = _mm_adds_epi16(o128,t1); //t1 = l1<<4*16 //t1 = r4,g4,b4,a4,0 0 0 0 //t2 = l3>>4*16 //t2 = 0 0 0 0 r5,g5,b5,a5 //t1 += t2 //t1 = r4,g4,b4,a4,r5,g5,b5,a5 //t1 *= gainD //o += t1 t1 = _mm_srli_si128(l1,4*2); t2 = _mm_slli_si128(l3,4*2); t1 = _mm_adds_epi16(t1,t2); t1 = _mm_mulhi_epi16(t1, gD); o128 = _mm_adds_epi16(o128,t1); l1 = l2; l2 = l3; l3 = _mm_loadu_si128(line128++); if(shift) { l3 = _mm_srli_epi16(l3,3); //13-bit unsigned o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_slli_epi16(o128,4); } else { // upper limit to 32767 o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_slli_epi16(o128,1); } _mm_storeu_si128(outline128++, o128); } } void RGB48HoriShiftAnaglyph(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offsetR, float offsetG, float offsetB , int flipR, int flipG, int flipB) { float Rxposf,Rremainf; int Rxposi,Rtablepos; float Gxposf,Gremainf; int Gxposi,Gtablepos; float Bxposf,Bremainf; int Bxposi,Btablepos; int x; int 
RgainA,RgainB,RgainC,RgainD; int GgainA,GgainB,GgainC,GgainD; int BgainA,BgainB,BgainC,BgainD; //int endofSSEline = 0; unsigned short *scanline = (unsigned short *)buffer; int negR = 0; int negG = 0; int negB = 0; int shift = 0; __m128i l1,l2,l3,o128,t1,t2; __m128i *line128, *outline128; __m128i gA1,gB1,gC1,gD1,gA2,gB2,gC2,gD2,gA3,gB3,gC3,gD3; if(flipR) { unsigned short *ptrL = RGB48; unsigned short *ptrR = RGB48; ptrR += (width*3) - 3; for(x=0;x<width/2;x++) { int t; t = *ptrL; *ptrL = *ptrR; *ptrR = t; ptrL += 3; ptrR -= 3; } } if(flipG) { unsigned short *ptrL = &RGB48[1]; unsigned short *ptrR = &RGB48[1]; ptrR += (width*3) - 3; for(x=0;x<width/2;x++) { int t; t = *ptrL; *ptrL = *ptrR; *ptrR = t; ptrL += 3; ptrR -= 3; } } if(flipB) { unsigned short *ptrL = &RGB48[2]; unsigned short *ptrR = &RGB48[2]; ptrR += (width*3) - 3; for(x=0;x<width/2;x++) { int t; t = *ptrL; *ptrL = *ptrR; *ptrR = t; ptrL += 3; ptrR -= 3; } } if(offsetR < 0.0) negR = 1; if(offsetG < 0.0) negG = 1; if(offsetB < 0.0) negB = 1; Rxposf = width * offsetR; Rxposi = (int)floorf(Rxposf); Rremainf = Rxposf - (float)Rxposi; Rtablepos = (int)(Rremainf*(float)SUBPIXEL); Gxposf = width * offsetG; Gxposi = (int)floorf(Gxposf); Gremainf = Gxposf - (float)Gxposi; Gtablepos = (int)(Gremainf*(float)SUBPIXEL); Bxposf = width * offsetB; Bxposi = (int)floorf(Bxposf); Bremainf = Bxposf - (float)Bxposi; Btablepos = (int)(Bremainf*(float)SUBPIXEL); Rxposi = abs(Rxposi); Gxposi = abs(Gxposi); Bxposi = abs(Bxposi); if(Rxposi==0 && Rtablepos == 0) return; // no move required RgainA = gains[Rtablepos][0]; RgainB = gains[Rtablepos][1]; RgainC = gains[Rtablepos][2]; RgainD = gains[Rtablepos][3]; GgainA = gains[Gtablepos][0]; GgainB = gains[Gtablepos][1]; GgainC = gains[Gtablepos][2]; GgainD = gains[Gtablepos][3]; BgainA = gains[Btablepos][0]; BgainB = gains[Btablepos][1]; BgainC = gains[Btablepos][2]; BgainD = gains[Btablepos][3]; if(negR == 0) { unsigned short *ptr = scanline; int nwidth = width-Rxposi+16; if(nwidth > 
width) nwidth = width; for(x=0;x<Rxposi+2;x++) { *ptr++ = 0;//r ptr++;//g ptr++;//b } for(x=0;x<nwidth;x++) { *ptr++ = RGB48[x*3];//r ptr++;//g ptr++;//b } for(x=0;x<16;x++) { *ptr++ = 0;//r ptr++;//g ptr++;//b } } else { unsigned short *ptr = scanline; for(x=0;x<2;x++) { if(x+Rxposi-2>=0) { *ptr++ = RGB48[(x+Rxposi-2)*3];//r ptr++;//g ptr++;//b } else { *ptr++ = 0;//r ptr++;//g ptr++;//b } } //memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2); //ptr += (width-xposi)*3; for(x=Rxposi;x<width;x++) { *ptr++ = RGB48[x*3];//r ptr++;//g ptr++;//b } for(x=0;x<Rxposi+16;x++) { *ptr++ = 0;//r ptr++;//g ptr++;//b } } if(negG == 0) { unsigned short *ptr = scanline; int nwidth = width-Gxposi+16; if(nwidth > width) nwidth = width; for(x=0;x<Gxposi+2;x++) { ptr++;//r *ptr++ = 0;//g ptr++;//b } for(x=0;x<nwidth;x++) { ptr++;//r *ptr++ = RGB48[x*3+1];//g ptr++;//b } for(x=0;x<16;x++) { ptr++;//r *ptr++ = 0;//g ptr++;//b } } else { unsigned short *ptr = scanline; for(x=0;x<2;x++) { if(x+Gxposi-2>=0) { ptr++;//r *ptr++ = RGB48[(x+Gxposi-2)*3+1];//g ptr++;//b } else { ptr++;//r *ptr++ = 0;//g ptr++;//b } } //memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2); //ptr += (width-xposi)*3; for(x=Gxposi;x<width;x++) { ptr++;//r *ptr++ = RGB48[x*3+1];//g ptr++;//b } for(x=0;x<Gxposi+16;x++) { ptr++;//r *ptr++ = 0;//g ptr++;//b } } if(negB == 0) { unsigned short *ptr = scanline; int nwidth = width-Bxposi+16; if(nwidth > width) nwidth = width; for(x=0;x<Bxposi+2;x++) { ptr++;//r ptr++;//g *ptr++ = 0;//b } for(x=0;x<nwidth;x++) { ptr++;//r ptr++;//g *ptr++ = RGB48[x*3+2];//b } for(x=0;x<16;x++) { ptr++;//r ptr++;//g *ptr++ = 0;//b } } else { unsigned short *ptr = scanline; for(x=0;x<2;x++) { if(x+Bxposi-2>=0) { ptr++;//r ptr++;//g *ptr++ = RGB48[(x+Bxposi-2)*3+2];//b } else { ptr++;//r ptr++;//g *ptr++ = 0;//b } } //memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2); //ptr += (width-xposi)*3; for(x=Bxposi;x<width;x++) { ptr++;//r ptr++;//g *ptr++ = RGB48[x*3+2];//b } for(x=0;x<Bxposi+16;x++) { 
ptr++;//r ptr++;//g *ptr++ = 0;//b } } gA1 = _mm_set_epi16(RgainA,GgainA,BgainA,RgainA,GgainA,BgainA,RgainA,GgainA); gA2 = _mm_set_epi16(BgainA,RgainA,GgainA,BgainA,RgainA,GgainA,BgainA,RgainA); gA3 = _mm_set_epi16(GgainA,BgainA,RgainA,GgainA,BgainA,RgainA,GgainA,BgainA); gB1 = _mm_set_epi16(RgainB,GgainB,BgainB,RgainB,GgainB,BgainB,RgainB,GgainB); gB2 = _mm_set_epi16(BgainB,RgainB,GgainB,BgainB,RgainB,GgainB,BgainB,RgainB); gB3 = _mm_set_epi16(GgainB,BgainB,RgainB,GgainB,BgainB,RgainB,GgainB,BgainB); gC1 = _mm_set_epi16(RgainC,GgainC,BgainC,RgainC,GgainC,BgainC,RgainC,GgainC); gC2 = _mm_set_epi16(BgainC,RgainC,GgainC,BgainC,RgainC,GgainC,BgainC,RgainC); gC3 = _mm_set_epi16(GgainC,BgainC,RgainC,GgainC,BgainC,RgainC,GgainC,BgainC); gD1 = _mm_set_epi16(RgainD,GgainD,BgainD,RgainD,GgainD,BgainD,RgainD,GgainD); gD2 = _mm_set_epi16(BgainD,RgainD,GgainD,BgainD,RgainD,GgainD,BgainD,RgainD); gD3 = _mm_set_epi16(GgainD,BgainD,RgainD,GgainD,BgainD,RgainD,GgainD,BgainD); line128 = (__m128i *)&scanline[0]; //outline128 = line128; outline128 = (__m128i *)&RGB48[0]; //l1 = load128;//r1,g1,b1,r2,g2,b2,r3,g3, //l2 = load128;//b3,r4,g4,b4,r5,g5,b5,r6 //l3 = load128;//g6,b6,r7,g7,b7,r8,g8,b8 if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13) { l1 = _mm_loadu_si128(line128++); l2 = _mm_loadu_si128(line128++); l3 = _mm_loadu_si128(line128++); shift = 0; } else { l1 = _mm_loadu_si128(line128++); l1 = _mm_srli_epi16(l1,3); //13-bit unsigned l2 = _mm_loadu_si128(line128++); l2 = _mm_srli_epi16(l2,3); //13-bit unsigned l3 = _mm_loadu_si128(line128++); l3 = _mm_srli_epi16(l3,3); //13-bit unsigned shift = 3; } for(x=0;x<width*3; x+=8) { //o=l1* gainA o128 = _mm_mulhi_epi16(l1, gA1); //t1 = l1<<3*16 //t1 = r2,g2,b2,r3,g3, 0 0 0 //t2 = l2>>16*5 //t2 = 0 0 0 0 0 b3,r4,g4 //t1 += t2; //t1 = r2,g2,b2,r3,g3,b3,r4,g4 //l1 = t1 //l1 = r2,g2,b2,r3,g3,b3,r4,g4 //t1 *= gainB //o += t1 t1 = _mm_srli_si128(l1,3*2); t2 = _mm_slli_si128(l2,5*2); t1 = _mm_adds_epi16(t1,t2); l1 = t1; t1 = 
_mm_mulhi_epi16(t1, gB1); o128 = _mm_adds_epi16(o128,t1);
// NOTE(review): the statements above/below are the tail of an SSE2 horizontal
// filter loop whose head lies before this chunk; tokens are unchanged here.
//t1 = l1<<3*16
//t1 = r3,g3,b3,r4,g4 0 0 0
//t2 = l2<<3*16;
//t2 = b4,r5,g5,b5,r6 0 0 0
//t2 >>= 5*16;
//t2 = 0 0 0 0 0 b4,r5,g5
//t1 += t2
//t1 = r3,g3,b3,r4,g4,b4,r5,g5
//l1 = t1
//l1 = r3,g3,b3,r4,g4,b4,r5,g5
//t1 *= gainC
//o += t1
t1 = _mm_srli_si128(l1,3*2);
t2 = _mm_srli_si128(l2,3*2);
t2 = _mm_slli_si128(t2,5*2);
t1 = _mm_adds_epi16(t1,t2);
l1 = t1;
t1 = _mm_mulhi_epi16(t1, gC1);
o128 = _mm_adds_epi16(o128,t1);
//t1 = l1<<3*16
//t1 = r4,g4,b4,r5,g5 0 0 0
//t2 = l2<<6*16
//t2 = b5,r6 0 0 0 0 0 0
//t2 >>= 5 * 16;
//t2 = 0 0 0 0 0 b5,r6 0
//t1 += t2
//t1 = r4,g4,b4,r5,g5,b5,r6, 0
//t2 = l3>>7*16
//t2 = 0 0 0 0 0 0 0 g6
//t1 += t2
//t1 = r4,g4,b4,r5,g5,b5,r6,g6
//t1 *= gainD
//o += t1
t1 = _mm_srli_si128(l1,3*2);
t2 = _mm_srli_si128(l2,6*2);
t2 = _mm_slli_si128(t2,5*2);
t1 = _mm_adds_epi16(t1,t2);
t2 = _mm_slli_si128(l3,7*2);
t1 = _mm_adds_epi16(t1,t2);
t1 = _mm_mulhi_epi16(t1, gD1);
o128 = _mm_adds_epi16(o128,t1);
// rotate the three gain-vector phases so the next iteration picks up the
// correct tap alignment, then slide the 8-pixel input window one step
t1 = gA1; gA1 = gA2; gA2 = gA3; gA3 = t1;
t1 = gB1; gB1 = gB2; gB2 = gB3; gB3 = t1;
t1 = gC1; gC1 = gC2; gC2 = gC3; gC3 = t1;
t1 = gD1; gD1 = gD2; gD2 = gD3; gD3 = t1;
l1 = l2; l2 = l3;
l3 = _mm_loadu_si128(line128++);
if(shift)
{
	l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
	// saturating add/sub pair clamps to 0..0x0fff before scaling up by 16
	o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
	o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
	o128 = _mm_slli_epi16(o128,4);
}
else
{
	// upper limit to 32767
	o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
	o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
	o128 = _mm_slli_epi16(o128,1);
}
_mm_storeu_si128(outline128++, o128);
}
}

/*
 * HistogramLine -- accumulate one scanline's worth of scope statistics.
 *
 * For every step-th pixel of the line at 'sbase' an 8-bit R,G,B triple is
 * derived according to 'format', then folded into:
 *   tools->histR/G/B[v]        -- whole-frame per-channel histograms
 *   tools->waveR/G/B[pos][v]   -- per-column waveform monitor bins
 *   tools->scopeUV[U][V]       -- vectorscope bins (U,V computed from RGB,
 *                                 or taken directly for YUV source formats)
 * 'step' is doubled until width/step <= 360, so the waveform is at most 360
 * columns wide (stored in tools->waveformWidth).  whitepoint==13 remaps
 * RG64 -> W13A and every other format -> WP13 so 13-bit white-point data
 * takes the ">>5" decode paths below.  Returns immediately when
 * decoder->tools is NULL.
 */
void HistogramLine(DECODER *decoder, unsigned short *sbase, int width, int format, int whitepoint)
{
	int x,val,ypos=0,upos=1,vpos=3;
	int step = 1,pos=0;
	short *ssbase = (short *)sbase;
	uint32_t *lbase = (uint32_t *)sbase;
	ToolsHandle *tools = decoder->tools;
	int scaledvectorscope = 0;

	if(tools == NULL) return;

	if(whitepoint == 13)
	{
		if(format == DECODED_FORMAT_RG64)
			format = DECODED_FORMAT_W13A;
		else
			format = DECODED_FORMAT_WP13;
	}

	// subsample so the waveform display never exceeds 360 columns
	while(width/step > 360)
	{
		step*=2;
	}
	tools->waveformWidth = width/step;

	decoder->tools->blurUVdone = 0;

	switch(format & 0xffffff)
	{
	case DECODED_FORMAT_WP13:
		// 13-bit white-point RGB, 3 signed shorts per pixel; >>5 -> 8-bit
		decoder->tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int32_t R,G,B,U,V;
			R = ssbase[0]>>5;
			G = ssbase[1]>>5;
			B = ssbase[2]>>5;
			if(R > 255) R = 255; if(R < 0) R = 0;
			if(G > 255) G = 255; if(G < 0) G = 0;
			if(B > 255) B = 255; if(B < 0) B = 0;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0; if(U>255) U=255;
			if(V<0) V=0; if(V>255) V=255;
			tools->scopeUV[U][V]++;
			ssbase += step*3;
		}
		break;
	case DECODED_FORMAT_W13A:
		// 13-bit white-point RGBA, 4 signed shorts per pixel (alpha skipped)
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int32_t R,G,B,U,V;
			R = ssbase[0]>>5;
			G = ssbase[1]>>5;
			B = ssbase[2]>>5;
			if(R > 255) R = 255; if(R < 0) R = 0;
			if(G > 255) G = 255; if(G < 0) G = 0;
			if(B > 255) B = 255; if(B < 0) B = 0;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0; if(U>255) U=255;
			if(V<0) V=0; if(V>255) V=255;
			tools->scopeUV[U][V]++;
			ssbase += step*4;
		}
		break;
	case DECODED_FORMAT_RG48:
		// unsigned 16-bit RGB, 3 shorts per pixel; >>8 -> 8-bit (no clamp needed)
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int32_t R,G,B,U,V;
			R = sbase[0]>>8;
			G = sbase[1]>>8;
			B = sbase[2]>>8;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0; if(U>255) U=255;
			if(V<0) V=0; if(V>255) V=255;
			tools->scopeUV[U][V]++;
			sbase += step*3;
		}
		break;
	case DECODED_FORMAT_AB10:
	case DECODED_FORMAT_RG30:
		// 10-bit-per-channel packed in a 32-bit word; top 8 bits of each channel
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int32_t R,G,B,U,V;
			val = lbase[x];
			R = (val>>22)&0xff;
			G = (val>>12)&0xff;
			B = (val>>02)&0xff;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0; if(U>255) U=255;
			if(V<0) V=0; if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case DECODED_FORMAT_AR10:
		// same packing as above with R and B swapped
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int32_t R,G,B,U,V;
			val = lbase[x];
			B = (val>>22)&0xff;
			G = (val>>12)&0xff;
			R = (val>>02)&0xff;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0; if(U>255) U=255;
			if(V<0) V=0; if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case DECODED_FORMAT_R210:
		// big-endian 10-bit packed; byte-swap first, then extract top 8 bits
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int32_t R,G,B,U,V;
			val = SwapInt32BtoN(lbase[x]);
			R = (val>>22)&0xff;
			G = (val>>12)&0xff;
			B = (val>>02)&0xff;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0; if(U>255) U=255;
			if(V<0) V=0; if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case DECODED_FORMAT_DPX0:
		// DPX packing: 10-bit channels at bits 22/12/2, here read via top-8 shifts
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int32_t R,G,B,U,V;
			val = SwapInt32BtoN(lbase[x]);
			R = (val>>24)&0xff;
			G = (val>>14)&0xff;
			B = (val>>04)&0xff;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0; if(U>255) U=255;
			if(V<0) V=0; if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case DECODED_FORMAT_RG64:
	case DECODED_FORMAT_B64A:
		// 16-bit ARGB, 4 shorts per pixel; channel order A,R,G,B so RGB at [1..3]
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int32_t R,G,B,U,V;
			R = sbase[1]>>8;
			G = sbase[2]>>8;
			B = sbase[3]>>8;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0; if(U>255) U=255;
			if(V<0) V=0; if(V>255) V=255;
			tools->scopeUV[U][V]++;
			sbase += step*4;
		}
		break;
	case COLOR_FORMAT_UYVY:
		ypos=1,upos=0,vpos=2;
		// fall through -- shares the YUYV loop below, with swapped byte offsets
	case DECODED_FORMAT_CbYCrY_8bit: // CMD: 20100109
	case COLOR_FORMAT_YUYV:
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int Y,U,V,R,G,B;
			uint8_t *bptr = (uint8_t *)sbase;
			bptr += x * 2;
			Y = bptr[ypos]-16;
			U = bptr[upos]-128;
			Y+= bptr[ypos+2]-16; Y>>=1; // average the two lumas of the 4:2:2 pair
			V = bptr[vpos]-128;
			R = (9535*Y + 14688*V)>>13; //13-bit white
			G = (9535*Y - 4375*V - 1745*U)>>13;
			B = (9535*Y + 17326*U)>>13;
			//TODO much -20 to 120 RGB range.
			if(R > 255) R = 255; if(R < 0) R = 0;
			if(G > 255) G = 255; if(G < 0) G = 0;
			if(B > 255) B = 255; if(B < 0) B = 0;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			if(scaledvectorscope)
			{
				U *= 255; U /= 314;
				V *= 255; V /= 244;
			}
			//* 255.0/314.0
			//* 255.0/244.0
			U += 128;
			V += 128;
			if(U<0) U=0; if(U>255) U=255;
			if(V<0) V=0; if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case COLOR_FORMAT_YU64:
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int Y,U,V,R,G,B;
			uint8_t *bptr = (uint8_t *)sbase;
			bptr += x * 4;
			bptr++; //read only the high byte out of the 16-bit
			Y = bptr[0]-16;
			V = bptr[2]-128;
			Y+= bptr[4]-16; Y>>=1;
			U = bptr[6]-128;
			R = (9535*Y + 14688*V)>>13; //13-bit white
			G = (9535*Y - 4375*V - 1745*U)>>13;
			B = (9535*Y + 17326*U)>>13;
			if(R > 255) R = 255; if(R < 0) R = 0;
			if(G > 255) G = 255; if(G < 0) G = 0;
			if(B > 255) B = 255; if(B < 0) B = 0;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			if(scaledvectorscope)
			{
				U *= 255; U /= 314;
				V *= 255; V /= 244;
			}
			U += 128;
			V += 128;
			if(U<0) U=0; if(U>255) U=255;
			if(V<0) V=0; if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case COLOR_FORMAT_V210:
		// v210: 6 pixels packed in 4 little-endian 32-bit words; the switch
		// below walks the packing for each phase of x%6, averaging luma pairs
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int Y,U,V,R,G,B;
			uint32_t *lptr = (uint32_t *)sbase;
			lptr += (x/6)*4;
			switch(x % 6)
			{
			case 0:
				V = ((*lptr>>02) & 0xff) - 128;
				Y = ((*lptr>>12) & 0xff) - 16;
				U = ((*lptr>>22) & 0xff) - 128;
				lptr++;
				Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1;
				break;
			case 1:
				lptr++;
				Y = ((*lptr>>02) & 0xff) - 16;
				V = ((*lptr>>12) & 0xff) - 128;
				Y+= ((*lptr>>22) & 0xff) - 16; Y>>=1;
				lptr--;
				U = ((*lptr>>22) & 0xff) - 128;
				break;
			case 2:
				lptr++;
				Y = ((*lptr>>22) & 0xff) - 16;
				lptr++;
				U = ((*lptr>>02) & 0xff) - 128;
				Y+= ((*lptr>>12) & 0xff) - 16; Y>>=1;
				V = ((*lptr>>22) & 0xff) - 128;
				break;
			case 3:
				lptr++;
				V = ((*lptr>>12) & 0xff) - 128;
				lptr++;
				U = ((*lptr>>02) & 0xff) - 128;
				Y = ((*lptr>>12) & 0xff) - 16;
				lptr++;
				Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1;
				break;
			case 4:
				lptr+=2;
				V = ((*lptr>>22) & 0xff) - 128;
				lptr++;
				Y = ((*lptr>>02) & 0xff) - 16;
				U = ((*lptr>>12) & 0xff) - 128;
				Y+= ((*lptr>>22) & 0xff) - 16; Y>>=1;
				break;
			case 5:
				lptr+=2;
				V = ((*lptr>>22) & 0xff) - 128;
				lptr++;
				U = ((*lptr>>12) & 0xff) - 128;
				Y = ((*lptr>>22) & 0xff) - 16;
				lptr++;
				Y+= ((*lptr>>02) & 0xff) - 16; Y>>=1;
				break;
			}
			R = (9535*Y + 14688*V)>>13; //13-bit white
			G = (9535*Y - 4375*V - 1745*U)>>13;
			B = (9535*Y + 17326*U)>>13;
			if(R > 255) R = 255; if(R < 0) R = 0;
			if(G > 255) G = 255; if(G < 0) G = 0;
			if(B > 255) B = 255; if(B < 0) B = 0;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			if(scaledvectorscope)
			{
				U *= 255; U /= 314;
				V *= 255; V /= 244;
			}
			U += 128;
			V += 128;
			if(U<0) U=0; if(U>255) U=255;
			if(V<0) V=0; if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case COLOR_FORMAT_RGB24:
		// 8-bit BGR byte order, 3 bytes per pixel
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int R,G,B,U,V;
			uint8_t *bptr = (uint8_t *)sbase;
			bptr += x * 3;
			R = bptr[2];
			G = bptr[1];
			B = bptr[0];
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0; if(U>255) U=255;
			if(V<0) V=0; if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case COLOR_FORMAT_RGB32:
		// 8-bit BGRA byte order, 4 bytes per pixel (alpha ignored)
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int R,G,B,U,V;
			uint8_t *bptr = (uint8_t *)sbase;
			bptr += x * 4;
			R = bptr[2];
			G = bptr[1];
			B = bptr[0];
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0; if(U>255) U=255;
			if(V<0) V=0; if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case COLOR_FORMAT_BYR2:
	case COLOR_FORMAT_BYR4:
		//do nothing
		break;
	default:
		assert(0);
#if (0 && DEBUG)
		fprintf(stderr,"decoder.HistogramLine: Unsupported pixel format\n");
#endif
		break;
	}
}

/*
 * GhostBust -- cross-feed ("ghost") suppression for a left/right stereo
 * line pair of 16-bit RGB triplets.
 *
 * Each channel is reduced to 10 bits and squared (20-bit, an approximate
 * linearization); a leak fraction ileakL/ileakR (>>6 -> 0..1023 scale) of
 * the opposite eye is subtracted with a white-level offset, then the value
 * is converted back through a lazily-filled square-root table.
 * NOTE(review): decoder->sqrttable entries equal to 65535 are treated as
 * "not yet computed" -- presumably the table is pre-filled with 0xffff
 * elsewhere; confirm against the allocation site.
 */
void GhostBust(DECODER *decoder, unsigned short *sbaseL, unsigned short *sbaseR, int width, int ileakL, int ileakR)
{
#if 1
	int x,RL,GL,BL,RR,GR,BR;
	int nRL,nGL,nBL;
	int nRR,nGR,nBR;
	int max = 1024*1024-1;
	unsigned short *sqrttable = decoder->sqrttable;

	ileakL>>=6;
	ileakR>>=6;

	if(sqrttable == NULL) return;

	for(x=0;x<width;x++)
	{
		RL = sbaseL[0]>>6;
		GL = sbaseL[1]>>6; //10-bit
		BL = sbaseL[2]>>6;
		RL*=RL; GL*=GL; //20-bit
		BL*=BL;

		RR = sbaseR[0]>>6;
		GR = sbaseR[1]>>6; //10-bit
		BR = sbaseR[2]>>6;
		RR*=RR; GR*=GR; //20-bit
		BR*=BR;

		// left eye: blend toward white by ileakL, subtract right-eye leak
		nRL = RL*(1023-ileakL) + ileakL*max - RR*ileakL; //30-bit
		nGL = GL*(1023-ileakL) + ileakL*max - GR*ileakL;
		nBL = BL*(1023-ileakL) + ileakL*max - BR*ileakL;
		nRL >>= 10; //20-bit
		nGL >>= 10;
		nBL >>= 10;
		if(nRL>max) nRL=max; if(nRL<0) nRL=0;
		if(nGL>max) nGL=max; if(nGL<0) nGL=0;
		if(nBL>max) nBL=max; if(nBL<0) nBL=0;
		// lazy sqrt lookup: fill on first use (sentinel 65535 = unfilled)
		if(sqrttable[nRL] == 65535) sqrttable[nRL] = (int)sqrt(nRL);
		if(sqrttable[nGL] == 65535) sqrttable[nGL] = (int)sqrt(nGL);
		if(sqrttable[nBL] == 65535) sqrttable[nBL] = (int)sqrt(nBL);
		sbaseL[0] = sqrttable[nRL]<<6;
		sbaseL[1] = sqrttable[nGL]<<6;
		sbaseL[2] = sqrttable[nBL]<<6;
		sbaseL += 3;

		// right eye: symmetric, subtracting the left-eye leak
		nRR = RR*(1023-ileakR) + ileakR*max - RL*ileakR; //30-bit
		nGR = GR*(1023-ileakR) + ileakR*max - GL*ileakR;
		nBR = BR*(1023-ileakR) + ileakR*max - BL*ileakR;
		nRR >>= 10; //20-bit
		nGR >>=
10; // completes "nGR >>= 10" begun on the previous line
		nBR >>= 10;
		if(nRR>max) nRR=max; if(nRR<0) nRR=0;
		if(nGR>max) nGR=max; if(nGR<0) nGR=0;
		if(nBR>max) nBR=max; if(nBR<0) nBR=0;
		// lazy sqrt lookup: fill on first use (sentinel 65535 = unfilled)
		if(sqrttable[nRR] == 65535) sqrttable[nRR] = (int)sqrt(nRR);
		if(sqrttable[nGR] == 65535) sqrttable[nGR] = (int)sqrt(nGR);
		if(sqrttable[nBR] == 65535) sqrttable[nBR] = (int)sqrt(nBR);
		sbaseR[0] = sqrttable[nRR]<<6;
		sbaseR[1] = sqrttable[nGR]<<6;
		sbaseR[2] = sqrttable[nBR]<<6;
		sbaseR += 3;
	}
#else
	// works and fast but has not image linearization, not as good
	__m128i *ptrL = (__m128i *)sbaseL;
	__m128i *ptrR = (__m128i *)sbaseR;
	__m128i t,L,R,nL,nR;
	int x,width8 = (width*3) & ~7;
	__m128i white_epi16 = _mm_set1_epi16(32767);
	__m128i leak_epi16 = _mm_set1_epi16(ileak>>1);
	__m128i oneNegLeak_epi16 = _mm_set1_epi16(32767-(ileak>>1));

	for(x=0;x<width8;x+=8)
	{
		L = _mm_load_si128(ptrL);
		R = _mm_load_si128(ptrR);
		L = _mm_srli_epi16(L,1); //15-bit
		R = _mm_srli_epi16(R,1); //15-bit

		nL = _mm_mulhi_epi16(L, oneNegLeak_epi16);
		t = _mm_mulhi_epi16(white_epi16, leak_epi16);
		nL = _mm_adds_epi16(nL, t);
		t = _mm_mulhi_epi16(R, leak_epi16);
		nL = _mm_subs_epu16(nL, t);

		nR = _mm_mulhi_epi16(R, oneNegLeak_epi16);
		t = _mm_mulhi_epi16(white_epi16, leak_epi16);
		nR = _mm_adds_epi16(nR, t);
		t = _mm_mulhi_epi16(L, leak_epi16);
		nR = _mm_subs_epu16(nR, t);

		L = _mm_slli_epi16(nL,2);
		R = _mm_slli_epi16(nR,2);

		_mm_store_si128(ptrL++, L);
		_mm_store_si128(ptrR++, R);
	}
#endif
}

/*
 * GhostBustRC -- in-place ghost suppression within a single RGB line, for
 * channel pairs in the red-vs-cyan arrangement (name suggests Red/Cyan
 * anaglyph -- TODO confirm): R leaks against the mean of (G,B) by ileakL,
 * while G and B each leak against R by ileakR.  Uses the same
 * 10-bit-square / lazy-sqrt-table arithmetic as GhostBust().
 * The two "#elif 0" variants below (a scalar float version and an SSE2
 * float version) are disabled and kept for reference only.
 */
void GhostBustRC(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
#if 1
	int x,R,G,B;
	int nR,nG,nB;
	int max = 1024*1024-1;
	unsigned short *sqrttable = decoder->sqrttable;

	ileakL>>=6;
	ileakR>>=6;

	if(sqrttable == NULL) return;

	for(x=0;x<width;x++)
	{
		R = sbase[0]>>6;
		G = sbase[1]>>6; //10-bit
		B = sbase[2]>>6;
		R*=R; G*=G; //20-bit
		B*=B;

		nR = R*(1023-ileakL) + ileakL*max - ((G+B)>>1)*ileakL; //30-bit
		nG = G*(1023-ileakR) + ileakR*max - R*ileakR;
		nB = B*(1023-ileakR) + ileakR*max - R*ileakR;
		nR >>= 10; //20-bit
		nG >>= 10;
		nB >>= 10;
		if(nR>max) nR=max; if(nR<0) nR=0;
		if(nG>max) nG=max; if(nG<0) nG=0;
		if(nB>max) nB=max; if(nB<0) nB=0;
		// lazy sqrt lookup: fill on first use (sentinel 65535 = unfilled)
		if(sqrttable[nR] == 65535) sqrttable[nR] = (int)sqrt(nR);
		if(sqrttable[nG] == 65535) sqrttable[nG] = (int)sqrt(nG);
		if(sqrttable[nB] == 65535) sqrttable[nB] = (int)sqrt(nB);
		sbase[0] = sqrttable[nR]<<6;
		sbase[1] = sqrttable[nG]<<6;
		sbase[2] = sqrttable[nB]<<6;
		sbase += 3;
	}
#elif 0
	// disabled reference implementation: float math, same formula
	int x;
	float R,G,B;
	float nR,nG,nB;
	float fleakL = (float)ileakL / 65535.0;
	float fleakR = (float)ileakR / 65535.0;

	for(x=0;x<width;x++)
	{
		R = sbase[0];
		G = sbase[1];
		B = sbase[2];
		R /= 65535.0;
		G /= 65535.0;
		B /= 65535.0;
		R *= R;
		G *= G;
		B *= B;
		nR = R*(1.0-fleakL) + fleakL - (G+B)*0.5*fleakL;
		nG = G*(1.0-fleakR) + fleakR - R*fleakR;
		nB = B*(1.0-fleakR) + fleakR - R*fleakR;
		if(nR<0) nR=0;
		if(nG<0) nG=0;
		if(nB<0) nB=0;
		nR = sqrt(nR);
		nG = sqrt(nG);
		nB = sqrt(nB);
		sbase[0] = nR * 65535.0;
		sbase[1] = nG * 65535.0;
		sbase[2] = nB * 65535.0;
		sbase += 3;
	}
#elif 0
	// disabled reference implementation: SSE2 float, two RGB pixels per pass
	__m128i RGBRGB,rgb_epi32,RGB1,RGB2;
	__m128i zero_epi128 = _mm_setzero_si128();
	int x,width6 = (width*3) / 6 * 6;
	__m128 white_ps = _mm_set1_ps(1.0);
	__m128 mul_neg_leak_ps = _mm_set_ps(1.0 - ((float)ileakL/65536.0), 1.0 - ((float)ileakR/65536.0), 1.0 - ((float)ileakR/65536.0), 1.0 - ((float)ileakL/65536.0));
	__m128 leak_ps = _mm_set_ps((float)ileakL/65536.0, (float)ileakR/65536.0, (float)ileakR/65536.0, (float)ileakL/65536.0);
	__m128 scale_ps = _mm_set1_ps(65535.0);
	__m128 scalehalf_ps = _mm_set1_ps(32767.0);
	__m128 zero_ps = _mm_set1_ps(0.0);
	__m128 rgb_ps, alt_rgb_ps;
	__m128i sub_epi32;
	__m128 sub_ps;

	for(x=0;x<width6;x+=6) // two RGB pairs
	{
		int R,G,B;
		RGBRGB = _mm_loadu_si128((__m128i *)sbase);

		R = _mm_extract_epi16(RGBRGB, 0);
		G = _mm_extract_epi16(RGBRGB, 1);
		B = _mm_extract_epi16(RGBRGB, 2);
		G+=B; G>>=1;
		sub_epi32 = _mm_set_epi32(G,R,R,G);
		sub_ps = _mm_cvtepi32_ps(sub_epi32); // range 0 to 65535.0
		sub_ps = _mm_div_ps(sub_ps, scale_ps); // range 0 to 1.0
		sub_ps = _mm_mul_ps(sub_ps, sub_ps); // square

		rgb_epi32 = _mm_unpacklo_epi16(RGBRGB, zero_epi128);
		rgb_ps = _mm_cvtepi32_ps(rgb_epi32); // range 0 to 65535.0
		rgb_ps = _mm_div_ps(rgb_ps, scale_ps); // range 0 to 1.0
		rgb_ps = _mm_mul_ps(rgb_ps, rgb_ps); // square

		rgb_ps = _mm_mul_ps(rgb_ps, mul_neg_leak_ps); // [R*(1.0-fleakL)] + fleakL - (G+B)*0.5*fleakL;
		rgb_ps = _mm_add_ps(rgb_ps, leak_ps); // R*(1.0-fleakL) [+ fleakL] - (G+B)*0.5*fleakL;
		sub_ps = _mm_mul_ps(sub_ps, leak_ps); // R*(1.0-fleakL) + fleakL - [(G+B)*0.5*fleakL;]
		rgb_ps = _mm_sub_ps(rgb_ps, sub_ps); // R*(1.0-fleakL) + fleakL] [- (G+B)*0.5*fleakL;]
		rgb_ps = _mm_max_ps(rgb_ps, zero_ps); // if(x < 0) x= 0;
		rgb_ps = _mm_sqrt_ps(rgb_ps); // sqrt()
		rgb_ps = _mm_mul_ps(rgb_ps, scalehalf_ps); // range 0 to 32767

		RGB1 = _mm_cvtps_epi32(rgb_ps);
		RGB1 = _mm_packs_epi32 (RGB1, zero_epi128);
		RGB1 = _mm_slli_si128(RGB1, 10);
		RGB1 = _mm_srli_si128(RGB1, 10);

		RGBRGB = _mm_srli_si128(RGBRGB, 6);

		R = _mm_extract_epi16(RGBRGB, 0);
		G = _mm_extract_epi16(RGBRGB, 1);
		B = _mm_extract_epi16(RGBRGB, 2);
		G+=B; G>>=1;
		sub_epi32 = _mm_set_epi32(G,R,R,G);
		sub_ps = _mm_cvtepi32_ps(sub_epi32); // range 0 to 65535.0
		sub_ps = _mm_div_ps(sub_ps, scale_ps); // range 0 to 1.0
		sub_ps = _mm_mul_ps(sub_ps, sub_ps); // square

		rgb_epi32 = _mm_unpacklo_epi16(RGBRGB, zero_epi128);
		rgb_ps = _mm_cvtepi32_ps(rgb_epi32); // range 0 to 65535.0
		rgb_ps = _mm_div_ps(rgb_ps, scale_ps); // range 0 to 1.0
		rgb_ps = _mm_mul_ps(rgb_ps, rgb_ps); // square

		rgb_ps = _mm_mul_ps(rgb_ps, mul_neg_leak_ps); // [R*(1.0-fleakL)] + fleakL - (G+B)*0.5*fleakL;
		rgb_ps = _mm_add_ps(rgb_ps, leak_ps); // R*(1.0-fleakL) [+ fleakL] - (G+B)*0.5*fleakL;
		sub_ps = _mm_mul_ps(sub_ps, leak_ps); // R*(1.0-fleakL) + fleakL - [(G+B)*0.5*fleakL;]
		rgb_ps = _mm_sub_ps(rgb_ps, sub_ps); // R*(1.0-fleakL) + fleakL] [- (G+B)*0.5*fleakL;]
		rgb_ps = _mm_max_ps(rgb_ps, zero_ps); // if(x < 0) x= 0;
		rgb_ps = _mm_sqrt_ps(rgb_ps); // sqrt()
		rgb_ps = _mm_mul_ps(rgb_ps, scalehalf_ps); // range 0 to 32767

		RGB2 = _mm_cvtps_epi32(rgb_ps);
		RGB2 = _mm_packs_epi32 (RGB2, zero_epi128);
		RGB2 = _mm_slli_si128(RGB2, 6);

		RGB1 = _mm_adds_epi16(RGB1, RGB2);
		RGB1 = _mm_slli_epi16(RGB1, 1);
		RGB1 = _mm_slli_si128(RGB1, 4);
		RGB1 = _mm_srli_si128(RGB1, 4);

		RGBRGB = _mm_srli_si128(RGBRGB, 6);
		RGBRGB = _mm_slli_si128(RGBRGB, 12);
		RGBRGB = _mm_adds_epi16(RGB1, RGBRGB);

		_mm_storeu_si128((__m128i *)sbase, RGBRGB);
		sbase += 6;
	}
#endif
}

/*
 * GhostBustAB -- in-place ghost suppression, amber/blue style pairing
 * (name suggests Amber/Blue anaglyph -- TODO confirm): R and G each leak
 * against B by ileakL, while B leaks against the mean of (R,G) by ileakR.
 * Same 10-bit-square / lazy-sqrt-table arithmetic as GhostBust().
 */
void GhostBustAB(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
	int x,R,G,B;
	int nR,nG,nB;
	int max = 1024*1024-1;
	unsigned short *sqrttable = decoder->sqrttable;

	ileakL>>=6;
	ileakR>>=6;

	if(sqrttable == NULL) return;

	for(x=0;x<width;x++)
	{
		R = sbase[0]>>6;
		G = sbase[1]>>6; //10-bit
		B = sbase[2]>>6;
		R*=R; G*=G; //20-bit
		B*=B;

		nR = R*(1023-ileakL) + ileakL*max - B*ileakL;
		nG = G*(1023-ileakL) + ileakL*max - B*ileakL;
		nB = B*(1023-ileakR) + ileakR*max - ((R+G)>>1)*ileakR;
		nR >>= 10; //20-bit
		nG >>= 10;
		nB >>= 10;
		if(nR>max) nR=max; if(nR<0) nR=0;
		if(nG>max) nG=max; if(nG<0) nG=0;
		if(nB>max) nB=max; if(nB<0) nB=0;
		// lazy sqrt lookup: fill on first use (sentinel 65535 = unfilled)
		if(sqrttable[nR] == 65535) sqrttable[nR] = (int)sqrt(nR);
		if(sqrttable[nG] == 65535) sqrttable[nG] = (int)sqrt(nG);
		if(sqrttable[nB] == 65535) sqrttable[nB] = (int)sqrt(nB);
		sbase[0] = sqrttable[nR]<<6;
		sbase[1] = sqrttable[nG]<<6;
		sbase[2] = sqrttable[nB]<<6;
		sbase += 3;
	}
}

/*
 * GhostBustGM -- in-place ghost suppression, green/magenta style pairing
 * (name suggests Green/Magenta anaglyph -- TODO confirm): R and B each
 * leak against G by ileakL, while G leaks against the mean of (R,B) by
 * ileakR.  Same 10-bit-square / lazy-sqrt-table arithmetic as GhostBust().
 */
void GhostBustGM(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
	int x,R,G,B;
	int nR,nG,nB;
	int max = 1024*1024-1;
	unsigned short *sqrttable = decoder->sqrttable;

	ileakL>>=6;
	ileakR>>=6;

	if(sqrttable == NULL) return;

	for(x=0;x<width;x++)
	{
		R = sbase[0]>>6;
		G = sbase[1]>>6; //10-bit
		B = sbase[2]>>6;
		R*=R; G*=G; //20-bit
		B*=B;

		nR = R*(1023-ileakL) + ileakL*max - G*ileakL;
		nG = G*(1023-ileakR) + ileakR*max - ((R+B)>>1)*ileakR;
		nB = B*(1023-ileakL) + ileakL*max - G*ileakL;
		nR >>= 10; //20-bit
		nG >>= 10;
		nB >>= 10;
		if(nR>max) nR=max; if(nR<0) nR=0;
		if(nG>max) nG=max; if(nG<0) nG=0;
		if(nB>max) nB=max; if(nB<0) nB=0;
		// lazy sqrt lookup: fill on first use (sentinel 65535 = unfilled)
		if(sqrttable[nR] == 65535) sqrttable[nR] = (int)sqrt(nR);
		if(sqrttable[nG] == 65535) sqrttable[nG] =
(int)sqrt(nG); if(sqrttable[nB] == 65535) sqrttable[nB] = (int)sqrt(nB); sbase[0] = sqrttable[nR]<<6; sbase[1] = sqrttable[nG]<<6; sbase[2] = sqrttable[nB]<<6; sbase += 3; } } void ProcessLine3D(DECODER *decoder, uint8_t *buffer, int bufferremain, uint8_t *output, int pitch, uint8_t *source_buffer, int source_pitch, int channel_offset, int y, int blank) { uint16_t *scratchline,*scratchline2,*scratchline3; uint16_t *sptr; uint16_t *srclineA,*srclineB; uint16_t *dstlineA,*dstlineB; int x,y2; int width = decoder->frame.width; int height = decoder->frame.height; int skip = 3; int sskip = 3; uint8_t *bptr1; uint8_t *bptr2; uint8_t *baseptr1; uint8_t *baseptr2; float windowMaskL = decoder->cfhddata.channel[0].FloatingWindowMaskL; float windowMaskR = decoder->cfhddata.channel[0].FloatingWindowMaskR; float frameTilt = decoder->cfhddata.channel[0].FrameTilt; float horizOffset = decoder->cfhddata.channel[1].HorizontalOffset; float horizOffsetR = decoder->cfhddata.channel[2].HorizontalOffset; float rotOffset = decoder->cfhddata.channel[1].RotationOffset; float rotOffsetR = decoder->cfhddata.channel[2].RotationOffset; float horizOffsetStep = 0; float horizOffsetStepR = 0; int flip1=0,flip2=0; int channel_flip = decoder->cfhddata.channel_flip; int source_pitch1 = source_pitch; int source_pitch2 = source_pitch; uint8_t *outputline = output+y*pitch; uint8_t *outputline2 = NULL; float horizOffsetBase; float rotOffsetBase; float horizOffsetBaseR; float rotOffsetBaseR; int formatdone = 0; float xmin = decoder->cfhddata.channel[0].FrameMask.topLftX; float xmax = decoder->cfhddata.channel[0].FrameMask.topRgtX; //float ymin = decoder->cfhddata.channel[0].FrameMask.topLftY; float ymax = decoder->cfhddata.channel[0].FrameMask.botLftY; float zoom; float zoomR; float frameZoom1 = decoder->cfhddata.channel[1].FrameZoom; float frameZoom2 = decoder->cfhddata.channel[2].FrameZoom; float frameAutoZoom = decoder->cfhddata.channel[0].FrameAutoZoom; float frameDiffZoom1 = 
decoder->cfhddata.channel[1].FrameDiffZoom; float frameDiffZoom2 = decoder->cfhddata.channel[2].FrameDiffZoom; float frameHDynamic = decoder->cfhddata.FrameHDynamic; float frameHDynCenter = decoder->cfhddata.FrameHDynCenter; float frameHDynWidth = decoder->cfhddata.FrameHDynWidth; float frameHScale = decoder->cfhddata.FrameHScale; int alphachannel = 0; int whitepoint = 16; float blursharpenL = decoder->cfhddata.channel[1].user_blur_sharpen; float blursharpenR = decoder->cfhddata.channel[2].user_blur_sharpen; float vignette = decoder->cfhddata.channel[0].user_vignette_start; int flip_LR = 0; float vig_r1; float vig_r2; float vig_gain; if(blank) // blankline, no shifts required { windowMaskL = 0; windowMaskR = 0; frameTilt = 0; horizOffset = 0; horizOffsetR = 0; rotOffset = 0; rotOffsetR = 0; frameZoom1 = 1.0; frameZoom2 = 1.0; frameAutoZoom = 1.0; frameDiffZoom1 = 1.0; frameDiffZoom2 = 1.0; frameHScale = 1.0; frameHDynamic = 1.0; frameHDynCenter = 0.5; frameHDynWidth = 0.0; } if( decoder->StereoBufferFormat == DECODED_FORMAT_RG64 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A || decoder->StereoBufferFormat == DECODED_FORMAT_RGB32) alphachannel = 1; if(xmax == 0.0) xmax = 1.0; if(ymax == 0.0) ymax = 1.0; if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) { width *= 2; } if(decoder->source_channels < 2) // 2D { channel_flip &= 0x3; channel_flip |= channel_flip<<2; decoder->cfhddata.channel_flip = channel_flip; } if(!(decoder->cfhddata.process_path_flags & PROCESSING_COLORMATRIX) || decoder->frame.resolution == DECODED_RESOLUTION_QUARTER || decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY || decoder->frame.resolution == DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED) { blursharpenL = 0.0; blursharpenR = 0.0; } if(!(decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION)) { horizOffset = rotOffset = 0; horizOffsetR = rotOffsetR = 0; frameTilt = 0; frameAutoZoom = 1.0; frameDiffZoom1 = 1.0; frameDiffZoom2 = 1.0; } 
if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS)) { channel_flip = 0; } if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) { horizOffset += decoder->cfhddata.FrameOffsetX; horizOffsetR -= decoder->cfhddata.FrameOffsetX; frameZoom1 += frameHScale - 1.0f; frameZoom2 += frameHScale - 1.0f; if(frameHDynamic != 1.0) { frameZoom1 += 0.00001f; frameZoom2 += 0.00001f; } if(vignette != 0.0) { float vig_diag = sqrtf(1.0f + ((float)decoder->frame.height / (float) decoder->frame.width) * ((float)decoder->frame.height / (float) decoder->frame.width)); vig_r1 = (vignette+1.0f); vig_r2 = (decoder->cfhddata.channel[0].user_vignette_end+1.0f); vig_gain = decoder->cfhddata.channel[0].user_vignette_gain; vig_r1 *= vig_diag; vig_r2 *= vig_diag; } } else { frameZoom1 = 1.0f; frameZoom2 = 1.0f; vignette = 0; } zoom = frameZoom1 * frameAutoZoom * frameDiffZoom1; if(frameDiffZoom2 != 0.0) zoomR = frameZoom2 * frameAutoZoom / frameDiffZoom2; else zoomR = 0.0; if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) { if(decoder->cfhddata.InvertOffset) { rotOffset = -rotOffset; rotOffsetR = -rotOffsetR; rotOffset -= decoder->cfhddata.FrameOffsetR; rotOffsetR -= -decoder->cfhddata.FrameOffsetR; } else { rotOffset += decoder->cfhddata.FrameOffsetR; rotOffsetR += -decoder->cfhddata.FrameOffsetR; } } rotOffsetBase = rotOffset; horizOffsetBase = horizOffset; rotOffsetBaseR = rotOffsetR; horizOffsetBaseR = horizOffsetR; horizOffset -= rotOffset * 0.5f; horizOffsetStep = rotOffset / (float)height; horizOffsetR -= rotOffsetR * 0.5f; horizOffsetStepR = rotOffsetR / (float)height; horizOffset += horizOffsetStep * y; horizOffsetR += horizOffsetStepR * y; assert(bufferremain >= width * 8 * 2 * 2); baseptr1 = source_buffer; baseptr2 = source_buffer + channel_offset; if(channel_flip & 0xf) { if(channel_flip & 1) { flip1 = 1; } if(channel_flip & 4) { flip2 = 1; } } if(source_pitch1 < 0) flip_LR = 1; decoder->sharpen_flip = 0; if(channel_flip & 2) //ProcessLine3D { 
if(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1) // right channel only (stored in baseptr1) { } else { baseptr1 += source_pitch1*(height-1); source_pitch1 = -source_pitch1; decoder->sharpen_flip = 1; } } if(channel_flip & 8) { if(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1) // right channel only (stored in baseptr1) { baseptr1 += source_pitch1*(height-1); source_pitch1 = -source_pitch1; decoder->sharpen_flip = 1; } else { baseptr2 += source_pitch2*(height-1); source_pitch2 = -source_pitch2; } } bptr1 = baseptr1 + y*source_pitch1; bptr2 = baseptr2 + y*source_pitch2; y2 = y; if(decoder->channel_blend_type == BLEND_FREEVIEW) //FreeView { if(y2 < height/4) { blank = 1; y2 = 0; } else { y2 -= height/4; y2 *= 2; if(y2 >= height-1) { blank = 1; y2 = height - 2; } } bptr1 = baseptr1 + y2*source_pitch1; bptr2 = baseptr2 + y2*source_pitch2; } srclineA = (uint16_t *)bptr1; srclineB = (uint16_t *)bptr2; scratchline = (uint16_t *)buffer; scratchline2 = (uint16_t *)(buffer + width * 6 + width) /* as we pad the line */ ;; scratchline3 = (uint16_t *)(buffer + width * 6*2 + width*2) /* as we pad the line */ ; if(alphachannel) { scratchline = (uint16_t *)buffer; scratchline2 = (uint16_t *)(buffer + width * 8 + width) /* as we pad the line */ ;; scratchline3 = (uint16_t *)(buffer + width * 8*2 + width*2) /* as we pad the line */ ; } dstlineA = sptr = scratchline; dstlineB = scratchline3; switch(decoder->StereoBufferFormat) { case DECODED_FORMAT_RG64: whitepoint = 16; skip = 8; sskip = 4; break; case DECODED_FORMAT_W13A: whitepoint = 13; skip = 8; sskip = 4; break; case DECODED_FORMAT_WP13: whitepoint = 13; skip = 6; sskip = 3; break; case DECODED_FORMAT_RG48: skip = 6; sskip = 3; break; case DECODED_FORMAT_RGB32: skip = 4; break; case DECODED_FORMAT_RGB24: skip = 3; break; case DECODED_FORMAT_YUYV: skip = 2; break; } if(blank) { if(srclineA) memset(srclineA, 0, width*skip); if(srclineB && decoder->channel_decodes > 1) 
memset(srclineB, 0, width*skip); } if(blursharpenL != 0.0 || blursharpenR != 0.0) { if(decoder->channel_blend_type == BLEND_FREEVIEW || decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC || decoder->channel_blend_type == BLEND_LINE_INTERLEAVED ) { decoder->doVerticalFilter = 0; } else { decoder->doVerticalFilter = 1; } } { switch(decoder->channel_blend_type) { case BLEND_FREEVIEW: case BLEND_SIDEBYSIDE_ANAMORPHIC: //side by side if(!blank) { if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL || decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) { dstlineA = srclineA; sptr = dstlineA; if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(!alphachannel) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGB48HoriShift(decoder, srclineA, scratchline2, width/2, -horizOffset, flip1); RGB48HoriShift(decoder, srclineB, scratchline2, width/2, horizOffsetR, flip2); } else { RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width/2, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width/2, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGBA64HoriShift(decoder, srclineA, scratchline2, width/2, -horizOffset, flip1); RGBA64HoriShift(decoder, srclineB, scratchline2, width/2, horizOffsetR, flip2); } else { RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width/2, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width/2, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } if(vignette != 0.0) { int cwidth= width/2; if(decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC) cwidth= width; FastVignetteInplaceWP13(decoder, width/2, cwidth, height, y, vig_r1, vig_r2, vig_gain, (int16_t *)srclineA, 
decoder->frame.resolution, skip); FastVignetteInplaceWP13(decoder, width/2, cwidth, height, y, vig_r1, vig_r2, vig_gain, (int16_t *)srclineB, decoder->frame.resolution, skip); } if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width/2, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip); if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width/2, (int16_t *)srclineB, blursharpenR, decoder->frame.resolution, skip); memcpy(dstlineA+sskip*(width/2), srclineB, width/2*sskip*2); } else { int16_t *ptr; int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(!alphachannel) { if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2); } else { RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } else { if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2); } else { RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } if(vignette != 0.0) { int cwidth= width/2; if(decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC) cwidth= width; FastVignetteInplaceWP13(decoder, width, cwidth, height, y, vig_r1, vig_r2, 
vig_gain, (int16_t *)srclineA, decoder->frame.resolution, skip); FastVignetteInplaceWP13(decoder, width, cwidth, height, y, vig_r1, vig_r2, vig_gain, (int16_t *)srclineB, decoder->frame.resolution, skip); } if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip); if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineB, blursharpenR, decoder->frame.resolution, skip); dstlineA = srclineA; ptr = (int16_t *)srclineA; for(x=0; x<width/2; x++) { *ptr++ = (ptr1[0]+ptr1[3])>>1; *ptr++ = (ptr1[1]+ptr1[4])>>1; *ptr++ = (ptr1[2]+ptr1[5])>>1 ; ptr1+=sskip*2; } for(; x<width; x++) { *ptr++ = (ptr2[0]+ptr2[3])>>1; *ptr++ = (ptr2[1]+ptr2[4])>>1; *ptr++ = (ptr2[2]+ptr2[5])>>1; ptr2+=sskip*2; } } if(windowMaskL || xmin) { float mask = windowMaskL > xmin ? windowMaskL : xmin; RGB48WindowMask(decoder, dstlineA, width/2, 0, mask); if(windowMaskL < 0) RGB48WindowMask(decoder, dstlineA, width/2, 0, windowMaskL); if(xmin) { RGB48WindowMask(decoder, dstlineA, width/2, 1, xmin); } } if(windowMaskR || (1.0-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? 
windowMaskR : (1.0f-xmax); RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 1, mask); if(windowMaskR < 0) RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 1, windowMaskR); if(xmin) { RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 0, xmin); } } if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST) { if(decoder->ghost_bust_left || decoder->ghost_bust_right) { GhostBust(decoder, dstlineA, dstlineA+width*sskip/2, width/2, decoder->ghost_bust_left, decoder->ghost_bust_right); } } if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { memcpy(scratchline2+width*sskip/2, dstlineA, width*sskip*2/2); memcpy(dstlineA, dstlineA+width*sskip/2, width*sskip*2/2); memcpy(dstlineA+width*sskip/2, scratchline2+width*sskip/2, width*sskip*2/2); } } break; case BLEND_STACKED_ANAMORPHIC: //stacked case BLEND_LINE_INTERLEAVED: //fields if((y & 1) == 1) return; if(!blank) { uint16_t *ptrA1 = (uint16_t *)srclineA; uint16_t *ptrA2 = (uint16_t *)srclineA + (source_pitch1>>1); uint16_t *ptrB1 = (uint16_t *)srclineB; uint16_t *ptrB2 = (uint16_t *)srclineB + (source_pitch2>>1); FastBlendWP13((short *)ptrA1, (short *)ptrA2, (short *)ptrA1/*output*/, width*skip); FastBlendWP13((short *)ptrB1, (short *)ptrB2, (short *)ptrB1/*output*/, width*skip); if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt) { if(!alphachannel) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2); } else { RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, 
flip1); RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2); } else { RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } if(vignette != 0.0) { FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineA, decoder->frame.resolution, skip); FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineB, decoder->frame.resolution, skip); } if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip); if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip); if(windowMaskL || xmin) { float mask = windowMaskL > xmin ? windowMaskL : xmin; RGB48WindowMask(decoder, srclineA, width, 0, mask); if(windowMaskL < 0) RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL); if(xmin) { RGB48WindowMask(decoder, srclineA, width, 1, xmin); } } if(windowMaskR || (1.0-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? 
windowMaskR : (1.0f-xmax); RGB48WindowMask(decoder, srclineB, width, 1, mask); if(windowMaskR < 0) RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR); if(xmin) { RGB48WindowMask(decoder, srclineB, width, 0, xmin); } } if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST) { if(decoder->ghost_bust_left || decoder->ghost_bust_right) { GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right); } } if(decoder->doVerticalFilter == 0) { if(decoder->channel_blend_type==BLEND_STACKED_ANAMORPHIC) //stacked { if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { outputline2 = output+(y>>1)*pitch; outputline = output+((y>>1)+(height/2))*pitch; } else { outputline = output+(y>>1)*pitch; outputline2 = output+((y>>1)+(height/2))*pitch; } } else //fields { if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { outputline = output+(y)*pitch; outputline2 = output+(y+1)*pitch; } else { outputline2 = output+(y)*pitch; outputline = output+(y+1)*pitch; } } if(flip_LR/*source_pitch1 < 0*/) // flip Left and Right { uint8_t *tmp = outputline2; outputline2 = outputline; outputline = tmp; } } else { if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { memcpy(scratchline2, srclineA, width*skip); memcpy(srclineA, srclineB, width*skip); memcpy(srclineB, scratchline2, width*skip); } } } break; case BLEND_ONION: //onion case BLEND_DIFFERENCE: //difference case BLEND_SPLITVIEW: //splitView if(!blank) { //dstlineA = source_buffer; //dstlineA += (source_pitch>>1) * y; sptr = dstlineA = srclineA; srclineA = (uint16_t *)bptr1; srclineB = (uint16_t *)bptr2; if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt) { if(!alphachannel) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2); } else { RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, 
-horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2); } else { RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } if(vignette != 0.0) { FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineA, decoder->frame.resolution, skip); FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineB, decoder->frame.resolution, skip); } if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip); if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip); if(windowMaskL || xmin) { float mask = windowMaskL > xmin ? windowMaskL : xmin; RGB48WindowMask(decoder, srclineA, width, 0, mask); if(windowMaskL < 0) RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL); if(xmin) { RGB48WindowMask(decoder, srclineA, width, 1, xmin); } } if(windowMaskR || (1.0-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? 
windowMaskR : (1.0f-xmax); RGB48WindowMask(decoder, srclineB, width, 1, mask); if(windowMaskR < 0) RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR); if(xmin) { RGB48WindowMask(decoder, srclineB, width, 0, xmin); } } x = 0; if(decoder->channel_blend_type == BLEND_SPLITVIEW) //split view { int xsplit = width * (decoder->cfhddata.split_pos_xy & 0xff) / 255; for(x = xsplit*sskip; x<width*sskip; x++) { srclineA[x] = srclineB[x]; } } else if(decoder->channel_blend_type == BLEND_ONION) //onion { FastBlendWP13((short *)srclineA, (short *)srclineB, (short *)dstlineA/*output*/, width*skip); } else if(decoder->channel_blend_type == BLEND_DIFFERENCE) //difference { #if XMMOPT int width8 = (width*sskip) & 0xfff8; __m128i mid_epi16; //int unaligned = ((int)sbase) & 15; //unaligned += ((int)in_rgb8) & 15; if(whitepoint == 13) mid_epi16 = _mm_set1_epi16(0x0fff); else mid_epi16 = _mm_set1_epi16(0x1fff); for(x=0; x<width8; x+=8) { __m128i rgb16A = _mm_load_si128((__m128i *)&srclineA[x]); __m128i rgb16B = _mm_load_si128((__m128i *)&srclineB[x]); // 0 to 0xffff if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { rgb16A = _mm_subs_epi16(rgb16B, rgb16A); // -3fff to 3fff } else { rgb16A = _mm_subs_epi16(rgb16A, rgb16B); } rgb16A = _mm_adds_epi16(rgb16A, mid_epi16); // -0x1fff to 0x5fff , avg 0x1fff _mm_store_si128((__m128i *)&dstlineA[x], rgb16A); } #endif for(; x<width*sskip; x++) { int val; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { val = (srclineB[x] - srclineA[x]) + 32768; } else { val = (srclineA[x] - srclineB[x]) + 32768; } if(val > 0x7fff) val = 0x7fff; if(val < 0) val = 0; dstlineA[x] = val; } } } break; case BLEND_ANAGLYPH_RC: case BLEND_ANAGLYPH_RC_BW: case BLEND_ANAGLYPH_AB: case BLEND_ANAGLYPH_AB_BW: case BLEND_ANAGLYPH_GM: case BLEND_ANAGLYPH_GM_BW: case BLEND_ANAGLYPH_DUBOIS: //Optimized { uint16_t *sptr1 = scratchline2; uint16_t *sptr2 = scratchline3; dstlineA = (uint16_t *)bptr1; // dstlineA += (source_pitch>>1) * y; sptr = dstlineA; sptr1 = 
srclineA = (uint16_t *)bptr1; sptr2 = srclineB = (uint16_t *)bptr2; if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt) { if(!alphachannel) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGB48HoriShift(decoder, srclineA, scratchline, width, -horizOffset, flip1); RGB48HoriShift(decoder, srclineB, scratchline, width, horizOffsetR, flip2); } else { RGB48HoriShiftZoom(decoder, srclineA, scratchline, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGB48HoriShiftZoom(decoder, srclineB, scratchline, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGBA64HoriShift(decoder, scratchline2, scratchline, width, -horizOffset, flip1); RGBA64HoriShift(decoder, scratchline3, scratchline, width, horizOffsetR, flip2); } else { RGBA64HoriShiftZoom(decoder, scratchline2, scratchline, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGBA64HoriShiftZoom(decoder, scratchline3, scratchline, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } if(vignette != 0.0) { FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineA, decoder->frame.resolution, skip); FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineB, decoder->frame.resolution, skip); } if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip); if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip); if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST) { if(decoder->ghost_bust_left || decoder->ghost_bust_right) { GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right); } } if(windowMaskL || xmin) { float mask = 
windowMaskL > xmin ? windowMaskL : xmin; RGB48WindowMask(decoder, srclineA, width, 0, mask); if(windowMaskL < 0) RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL); if(xmin) { RGB48WindowMask(decoder, srclineA, width, 1, xmin); } } if(windowMaskR || (1.0-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax); RGB48WindowMask(decoder, srclineB, width, 1, mask); if(windowMaskR < 0) RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR); if(xmin) { RGB48WindowMask(decoder, srclineB, width, 0, xmin); } } if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { uint16_t *tmp = srclineA; srclineA = srclineB; srclineB = tmp; } switch(decoder->channel_blend_type) { case BLEND_ANAGLYPH_RC: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { sptr[0] = ptr2[0]; sptr[1] = ptr1[1]; sptr[2] = ptr1[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { for(x=0; x<width; x++) { sptr[0] = ptr1[0]; sptr[1] = ptr2[1]; sptr[2] = ptr2[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_RC_BW: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y2; sptr[1] = y1; sptr[2] = y1; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y1; sptr[1] = y2; sptr[2] = y2; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_AB: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { sptr[0] = ptr2[0]; sptr[1] = ptr2[1]; sptr[2] = ptr1[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { 
for(x=0; x<width; x++) { sptr[0] = ptr1[0]; sptr[1] = ptr1[1]; sptr[2] = ptr2[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_AB_BW: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y2; sptr[1] = y2; sptr[2] = y1; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y1; sptr[1] = y1; sptr[2] = y2; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_GM: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { sptr[0] = ptr1[0]; sptr[1] = ptr2[1]; sptr[2] = ptr1[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { for(x=0; x<width; x++) { sptr[0] = ptr2[0]; sptr[1] = ptr1[1]; sptr[2] = ptr2[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_GM_BW: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y1; sptr[1] = y2; sptr[2] = y1; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y2; sptr[1] = y1; sptr[2] = y2; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_DUBOIS: //Optimized { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; int r,g,b; for(x=0; x<width; x++) { r =(ptr1[0]*456 + ptr1[1]*500 + ptr1[2]*176 + ptr2[0]*-43 + ptr2[1]*-88 + ptr2[2]*-2 ) / 1000; g =(ptr1[0]*-40 + 
ptr1[1]*-38 + ptr1[2]*-16 + ptr2[0]*378 + ptr2[1]*734 + ptr2[2]*-18 ) / 1000; b =(ptr1[0]*-15 + ptr1[1]*-21 + ptr1[2]*-5 + ptr2[0]*-72 + ptr2[1]*-113+ ptr2[2]*1226) / 1000; if(r<0) r=0; if(r>0x3fff) r=0x3fff; if(g<0) g=0; if(g>0x3fff) g=0x3fff; if(b<0) b=0; if(b>0x3fff) b=0x3fff; sptr[0] = r; sptr[1] = g; sptr[2] = b; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } break; } } break; case BLEND_NONE: default: if(decoder->channel_decodes == 1) // only one channel { if(skip == 8) { //the data is already in the correct format sptr = (unsigned short *)bptr1; // shift if needed. if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(decoder->channel_current == 0) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGBA64HoriShift(decoder, sptr, scratchline2, width, -horizOffset, flip1); else RGBA64HoriShiftZoom(decoder, sptr, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGBA64HoriShift(decoder, sptr, scratchline2, width, horizOffsetR, flip2); else RGBA64HoriShiftZoom(decoder, sptr, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } else if(skip == 6) { //the data is already in the correct format dstlineA = sptr = (unsigned short *)srclineA; // shift if needed. 
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(decoder->channel_current == 0) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); else RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGB48HoriShift(decoder, srclineA, scratchline2, width, horizOffsetR, flip2); else RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } if(vignette != 0.0) { FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (int16_t *)srclineA, decoder->frame.resolution, skip); } if(decoder->channel_current == 0) { if(blursharpenL != 0.0) { FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip); } } else { if(blursharpenR != 0.0) { FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenR, decoder->frame.resolution, skip); } } } if ((windowMaskL && decoder->channel_current == 0) || xmin) { float mask = windowMaskL > xmin ? windowMaskL : xmin; if(decoder->channel_current != 0) mask = xmin; if(windowMaskL < 0) RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL); RGB48WindowMask(decoder, srclineA, width, 0, mask); } if ((windowMaskR && decoder->channel_current == 1) || (1.0f-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? 
windowMaskR : (1.0f-xmax); if(decoder->channel_current != 1) mask = (1.0f-xmax); if(windowMaskR < 0) RGB48WindowMask(decoder, srclineA, width, 1, windowMaskR); RGB48WindowMask(decoder, srclineA, width, 1, mask); } } else { outputline2 = output+(y+height)*pitch; if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); else RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2); else RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } if(windowMaskL || xmin) { float mask = windowMaskL > xmin ? windowMaskL : xmin; RGB48WindowMask(decoder, srclineA, width, 0, mask); if(windowMaskL < 0) RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL); } if(windowMaskR || (1.0-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? 
windowMaskR : (1.0f-xmax); RGB48WindowMask(decoder, srclineB, width, 1, mask); if(windowMaskR < 0) RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR); } if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST) { if(decoder->ghost_bust_left || decoder->ghost_bust_right) { GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right); } } } break; } } if(!formatdone) { int flags = ACTIVEMETADATA_PRESATURATED; int whitebitdepth = 16; if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A) { flags = 0; whitebitdepth = 13; } if(outputline2) { // if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools) // HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG48, whitebitdepth); if(decoder->doVerticalFilter == 0) // No sharp stage so output now { if(alphachannel) Convert4444LinesToOutput(decoder, width, 1, y, srclineA, outputline, pitch, decoder->frame.format, whitebitdepth, flags); else ConvertLinesToOutput(decoder, width, 1, y, srclineA, outputline, pitch, decoder->frame.format, whitebitdepth, flags); //if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools) // HistogramLine(decoder, dstlineA, width, DECODED_FORMAT_RG48, whitebitdepth); if(alphachannel) Convert4444LinesToOutput(decoder, width, 1, y, srclineB, outputline2, pitch, decoder->frame.format, whitebitdepth, flags); else ConvertLinesToOutput(decoder, width, 1, y, srclineB, outputline2, pitch, decoder->frame.format, whitebitdepth, flags); } } else { //if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools) //{ // if(alphachannel) // HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG64, whitebitdepth); // else // HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG48, whitebitdepth); //} if(decoder->doVerticalFilter == 0) // No sharp stage so output now { if(alphachannel) Convert4444LinesToOutput(decoder, width, 1, y, srclineA, outputline, pitch, 
decoder->frame.format, whitebitdepth, flags);
				else
					ConvertLinesToOutput(decoder, width, 1, y, srclineA, outputline, pitch, decoder->frame.format, whitebitdepth, flags);
			}
		}
	}
}

/*
 * SharpenLine -- per-row vertical sharpen/blur pass followed by conversion of
 * the row to the final output pixel format.
 *
 * buffer        - per-thread scratch space; this thread uses width*skip bytes
 *                 at offset width*skip*thread_index.
 * output/pitch  - destination frame base pointer and row pitch in bytes.
 * local_output/local_pitch - intermediate (blended) image this pass reads.
 * channel_offset - byte offset to the second channel's rows (used by the
 *                 line-interleaved blend mode below).
 * y             - row index being produced; thread_index - worker thread id.
 */
void SharpenLine(DECODER *decoder, uint8_t *buffer, int bufferremain, uint8_t *output, int pitch,
	uint8_t *local_output, int local_pitch, int channel_offset, int y, int thread_index)
{
	uint16_t *sbase;//*sbase2 = NULL;
	int width = decoder->frame.width;
	int height = decoder->frame.height;
	int skip = 3;		// bytes per pixel of the intermediate buffer (set from StereoBufferFormat below)
	//int flip1=0;//flip2=0;
	int channel_flip = decoder->cfhddata.channel_flip;
	//int local_pitch1 = local_pitch;
	//int local_pitch2 = local_pitch;
	uint8_t *outputline = output+y*pitch;
	//uint8_t *outputline2 = NULL;
	short *scratch;
	//int formatdone = 0;
	//float xmin = decoder->cfhddata.channel[0].FrameMask.topLftX;
	//float xmax = decoder->cfhddata.channel[0].FrameMask.topRgtX;
	//float ymin = decoder->cfhddata.channel[0].FrameMask.topLftY;
	//float ymax = decoder->cfhddata.channel[0].FrameMask.botLftY;
	int alphachannel = 0;
	float blursharpen = 0;		// vertical sharpen/blur amount for this eye; 0 = pass-through
	int line_max = decoder->frame.height;
	int yy = y;

	// channel[1]/channel[2] hold the settings applied to channel 0/1 respectively.
	if(decoder->channel_current == 0)
		blursharpen = decoder->cfhddata.channel[1].user_blur_sharpen; // TODO LEFT and RIGHT separate vertical sharpen
	else
		blursharpen = decoder->cfhddata.channel[2].user_blur_sharpen; // TODO LEFT and RIGHT separate vertical sharpen

	// Sharpening is disabled when the color-matrix processing path is off or
	// at reduced decode resolutions.
	if(!(decoder->cfhddata.process_path_flags & PROCESSING_COLORMATRIX)||
		decoder->frame.resolution == DECODED_RESOLUTION_QUARTER ||
		decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY ||
		decoder->frame.resolution == DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED)
	{
		blursharpen = 0.0;
	}

	if(decoder->channel_mix_half_res == 1)
		line_max *= 2;

	if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
	{
		channel_flip = 0;
	}

	// Vertical flip: write this row to the mirrored output position.
	if(decoder->sharpen_flip) //SharpenLine
	{
		//if(!(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1)) // right channel only (stored in baseptr1)
		{
			yy = (line_max - 1 - y);
			outputline = output+yy*pitch;
		}
	}

	if( decoder->StereoBufferFormat == DECODED_FORMAT_RG64 ||
		decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
		decoder->StereoBufferFormat == DECODED_FORMAT_RGB32)
		alphachannel = 1;

	if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
	{
		width *= 2;
	}

	sbase = (uint16_t *)local_output;
	sbase += (local_pitch>>1) * y;	// local_pitch is in bytes; sbase indexes 16-bit components

	// Bytes per pixel for each intermediate buffer format.
	switch(decoder->StereoBufferFormat)
	{
	case DECODED_FORMAT_RG64:
	case DECODED_FORMAT_W13A:
		skip = 8;
		break;
	case DECODED_FORMAT_WP13:
		skip = 6;
		break;
	case DECODED_FORMAT_RG48:
		skip = 6;
		break;
	case DECODED_FORMAT_RGB32:
		skip = 4;
		break;
	case DECODED_FORMAT_RGB24:
		skip = 3;
		break;
	case DECODED_FORMAT_YUYV:
		skip = 2;
		break;
	}

	// Per-thread scratch row so worker threads do not collide.
	scratch = (short*)(buffer + width * skip * thread_index);

	{
		int flags = ACTIVEMETADATA_PRESATURATED;
		int whitebitdepth = 16;

		// Only the 13-bit white-point formats get the vertical filter.
		if((decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A))
		{
			int use_pitch = local_pitch;
			int edgeclose = 0;	// set near the top/bottom edges for the filter
			flags = 0;
			whitebitdepth = 13;

			if(blursharpen != 0.0 && local_pitch != 0)
			{
				// Five vertical taps (A..E), clamped at the frame edges.
				short *Aptr,*Bptr,*Cptr,*Dptr,*Eptr;

				switch(decoder->channel_blend_type)
				{
				case BLEND_STACKED_ANAMORPHIC:
					// Rows of one eye are two buffer lines apart.
					sbase = (uint16_t *)local_output;
					sbase += (local_pitch>>1) * y * 2;
					if(y<=4) edgeclose = 1;
					if(y>=2) Aptr = (short *)sbase - (local_pitch>>1) * 4;
					else Aptr = (short *)sbase;
					if(y>=1) Bptr = (short *)sbase - (local_pitch>>1) * 2;
					else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-1) Dptr = (short *)sbase + (local_pitch>>1) * 2;
					else Dptr = (short *)sbase;
					if(y<height-2) Eptr = (short *)sbase + (local_pitch>>1) * 4;
					else Eptr = (short *)sbase;
					if(y>=height-4) edgeclose = 1;
					use_pitch = local_pitch * 2;
					break;

				case BLEND_LINE_INTERLEAVED:
					sbase = (uint16_t *)local_output;
					if(y & 1)
					{
						// Odd rows: step back to the even row of the pair.
						y--;
						sbase += (local_pitch>>1) * y;
					}
					else
					{
						// Even rows read from the rows at channel_offset.
						// NOTE(review): presumably the other channel's lines --
						// confirm against the caller that fills local_output.
						sbase += (local_pitch>>1) * y;
						sbase += channel_offset>>1;
					}
					if(y<=8) edgeclose = 1;
					if(y>=4) Aptr = (short *)sbase - (local_pitch>>1) * 4;
					else Aptr = (short *)sbase;
					if(y>=2) Bptr = (short *)sbase - (local_pitch>>1) * 2;
					else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-2) Dptr = (short *)sbase + (local_pitch>>1) * 2;
					else Dptr = (short *)sbase;
					if(y<height-4) Eptr = (short *)sbase + (local_pitch>>1) * 4;
					else Eptr = (short *)sbase;
					if(y>=height-8) edgeclose = 1;
					use_pitch = local_pitch * 2;
					break;

				default:
					// Progressive layout: taps are adjacent buffer lines.
					if(y<=4) edgeclose = 1;
					if(y>=2) Aptr = (short *)sbase - (local_pitch>>1) * 2;
					else Aptr = (short *)sbase;
					if(y>=1) Bptr = (short *)sbase - (local_pitch>>1) * 1;
					else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-1) Dptr = (short *)sbase + (local_pitch>>1) * 1;
					else Dptr = (short *)sbase;
					if(y<height-2) Eptr = (short *)sbase + (local_pitch>>1) * 2;
					else Eptr = (short *)sbase;
					if(y>=height-4) edgeclose = 1;
					use_pitch = local_pitch;
					break;
				}

				// skip==8 is the alpha-carrying W13A layout; otherwise 3-component WP13.
				if(skip == 8)
				{
					FastSharpeningBlurVW13A(Aptr, Bptr, Cptr, Dptr, Eptr, use_pitch, edgeclose,
						scratch, width, blursharpen, decoder->frame.resolution, decoder->channel_blend_type);
				}
				else
				{
					FastSharpeningBlurVWP13(Aptr, Bptr, Cptr, Dptr, Eptr, use_pitch, edgeclose,
						scratch, width, blursharpen, decoder->frame.resolution, decoder->channel_blend_type);
				}
				sbase = (uint16_t *)scratch;	// conversion below reads the filtered row
			}
		}

		// Convert the (possibly filtered) row to the requested output format.
		if(alphachannel)
			Convert4444LinesToOutput(decoder, width, 1, y, sbase, outputline, pitch, decoder->frame.format, whitebitdepth, flags);
		else
			ConvertLinesToOutput(decoder, width, 1, y, sbase, outputline, pitch, decoder->frame.format, whitebitdepth, flags);
	}
}

#if _GRAPHICS
/*
 * PaintFrame -- draws the enabled burn-in overlays and analysis tools
 * (grid/histogram/waveform/vectorscope) on top of the decoded frame.
 * Returns immediately when no burn-in flags are set.
 */
void PaintFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
	int x,y,v,width, height;
	int maxR=0,maxG=0,maxB=0;

	width = decoder->frame.width;
	height = decoder->frame.height;

	if(decoder->cfhddata.BurninFlags == 0)
		return;

	// Lazily allocate the tools state when any compute flag beyond bit 0 is set.
	if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & ~1) // tools
	{
		if(decoder->tools == NULL)
		{
#if _ALLOCATOR
			decoder->tools = (ToolsHandle *)Alloc(decoder->allocator, sizeof(ToolsHandle));
#else
			decoder->tools = (ToolsHandle *)MEMORY_ALLOC(sizeof(ToolsHandle));
#endif
			if(decoder->tools)
			{
				memset(decoder->tools,
0, sizeof(ToolsHandle)); } else { return; } } } decoder->frame.output_format = output_format; #if _THREADED && 1 if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & ~1 && decoder->tools) // histogram/scopes/waveform { WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; int workunits; #if _DELAY_THREAD_START if(decoder->tools->histogram == 0 && decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif { int avgR=0,avgG=0,avgB=0; // Post a message to the mailbox mailbox->output = output; if(height >= 1080) { mailbox->pitch = pitch*4; // only read every 4th scan line workunits = height/4; // only read every 4th scan line } else if(height >= 540) { mailbox->pitch = pitch*2; // only read every 2th scan line workunits = height/2; // only read every 2th scan line } else { mailbox->pitch = pitch; // read every scan line workunits = height; // read every scan line } if(decoder->tools->histogram == 0) { mailbox->jobType = JOB_TYPE_HISTOGRAM; // histogram // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); } for(x=0;x<256;x++) { avgR += decoder->tools->histR[x]; avgG += decoder->tools->histG[x]; avgB += decoder->tools->histB[x]; //if(maxR < decoder->histR[x]) maxR = decoder->histR[x]; //if(maxG < decoder->histG[x]) maxG = decoder->histG[x]; //if(maxB < decoder->histB[x]) maxB = decoder->histB[x]; } avgR /= 256; avgG /= 256; avgB /= 256; //maxR++; //maxG++; //maxB++; decoder->tools->maxR = avgR*3;//maxR; decoder->tools->maxG = avgG*3;//maxG; 
decoder->tools->maxB = avgB*3;//maxB; } } #endif if(decoder->cfhddata.BurninFlags && DrawOpen(decoder)) { if(decoder->cfhddata.BurninFlags & 3) // overlays / tools { #if _THREADED //DrawInit(decoder); //DrawStartThreaded(decoder); if(decoder->draw_thread.pool.thread_count > 0) { DrawWaitThreaded(decoder); } else #endif { DrawInit(decoder); DrawMetadataObjects(decoder); } } else { DrawInit(decoder); } if(decoder->drawSafeMarkers) DrawSafeMarkers(decoder); if(decoder->cfhddata.BurninFlags & 2) // tools { if(decoder->tools) { if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 16) DrawGrid(decoder, 0/*decoder->MDPcurrent.parallax*/); if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 2) DrawHistogram(decoder, 0/*decoder->MDPcurrent.parallax*/); if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 4) DrawWaveform(decoder, 0/*decoder->MDPcurrent.parallax*/); if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 8) DrawVectorscope(decoder, 0/*decoder->MDPcurrent.parallax*/); } } DrawScreen(decoder, output, pitch, output_format); } #if 0 #if _THREADED && 1 if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & 2 && decoder->tools) // histogram { WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; int workunits; int targetW, targetH; if(width < 256 || height < 256) return; targetW = width / 4; targetH = height / 8; mailbox->output = output; mailbox->pitch = pitch; workunits = targetW; mailbox->jobType = JOB_TYPE_BURNINS; // burnin // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); } #else if(decoder->histogram == 0) { for(y=0; y<height; y+=4) { uint8_t *bptr = output; bptr += pitch * y; HistogramLine(decoder, (unsigned 
short *)bptr, width, output_format);   // (completes the HistogramLine() call begun on the previous line; inside #if 0)
                if(decoder->histogram == 0)
                    return; // don't know how to create Histogram for that format
            }
        }

        // Find the peak bucket of each channel's histogram (buckets 1..254,
        // ignoring the black/white extremes)
        for(x=1;x<255;x++)
        {
            if(maxR < decoder->histR[x]) maxR = decoder->histR[x];
            if(maxG < decoder->histG[x]) maxG = decoder->histG[x];
            if(maxB < decoder->histB[x]) maxB = decoder->histB[x];
        }
        maxR++; maxG++; maxB++;   // avoid divide-by-zero when rendering
        decoder->maxR = maxR;
        decoder->maxG = maxG;
        decoder->maxB = maxB;

        for(x=0; x<targetW; x++)
        {
            HistogramRender(decoder, output, pitch, output_format, x, targetW, targetH);
        }
#endif
#endif

    // Clear the tools scratch area so the next frame starts from empty histograms
    if(decoder->tools)
        memset(decoder->tools, 0, sizeof(ToolsHandle));
}
#endif

extern int geomesh_alloc_cache(void *gm);

// Degree <-> radian conversion helpers (PI is defined elsewhere in this file)
#define DEG2RAD(d) (PI*(d)/180.0f)
#define RAD2DEG(r) (180.0f*(r)/PI)

// Fuzzy integer equality used below to classify frame aspect ratios.
// Both values are quantized into coarse buckets (shift by 6/5/4 depending on
// the magnitude of y, i.e. ~64/32/16-unit buckets) and considered equal when
// the bucket indices match within +/-1.
bool approx_equal(int x, int y)
{
    if(y > 1080)
    {
        x >>= 6;
        y >>= 6;
    }
    else if(y > 540)
    {
        x >>= 5;
        y >>= 5;
    }
    else
    {
        x >>= 4;
        y >>= 4;
    }

    if(x == y || x+1 == y || x == y+1)
        return true;

    return false;
}

// Fuzzy float equality: true when y lies within +/-1% of x.
// NOTE(review): the bounds invert for negative x; callers in this file only
// pass positive aspect ratios, so that case does not arise here.
bool approx_equal_float(float x, float y)
{
    if (x*0.99 < y && y < x*1.01)
        return true;
    return false;
}

#if WARPSTUFF

// Apply the lens warp (fisheye correction, repointing, zoom, rotation,
// defish, etc.) to the decoded frame using the warplib geomesh.  The mesh
// is rebuilt only when one of the lens-related CFHDDATA parameters changed
// since the previous frame; the warp itself then runs on the worker-thread
// pool and the result is copied back over 'output'.
//
//   output        - frame buffer to warp in place
//   pitch         - row pitch of 'output' in bytes
//   output_format - COLOR_FORMAT_* of the buffer
void WarpFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
    int width, height;
    //int maxR = 0, maxG = 0, maxB = 0;
    int status = WARPLIB_SUCCESS;
    CFHDDATA *cfhddata = &decoder->cfhddata;
    int backgroundfill = cfhddata->lensFill;
    float sensorcrop = 1.0;
    float phi, theta, rho;
    int srcLens = HERO4;

    if (!cfhddata->doMesh)
        return;

    // Rebuild the warp mesh only if any lens parameter changed since the last frame
    if (decoder->lastLensOffsetX != cfhddata->LensOffsetX ||
        decoder->lastLensOffsetY != cfhddata->LensOffsetY ||
        decoder->lastLensOffsetZ != cfhddata->LensOffsetZ ||
        decoder->lastLensOffsetR != cfhddata->LensOffsetR ||
        decoder->lastLensZoom != cfhddata->LensZoom ||
        decoder->lastLensFishFOV != cfhddata->LensFishFOV ||
        decoder->lastLensGoPro != cfhddata->lensGoPro ||
        decoder->lastLensSphere != cfhddata->lensSphere ||
        decoder->lastLensFill != cfhddata->lensFill ||
        decoder->lastLensStyleSel != cfhddata->lensStyleSel ||
        memcmp(decoder->lastLensCustomSRC, cfhddata->lensCustomSRC, sizeof(cfhddata->lensCustomSRC)) ||
// (continuation of WarpFrame's lens-parameter change test begun on the previous line)
        memcmp(decoder->lastLensCustomDST, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST)) )
    {
        // Something changed: throw away the old mesh and build a new one
        if (decoder->mesh)
            geomesh_destroy(decoder->mesh);

        width = decoder->frame.width;
        height = decoder->frame.height;

        // Classify the frame aspect ratio to pick the source lens model, the
        // sensor-crop compensation, the mesh density and the offset scaling.
        if (approx_equal(width, height * 2)) // approx. 2:1
        {
            float outputaspect = 16.0f/9.0f;
            srcLens = EQUIRECT;
            sensorcrop = 1.00623f; // Fixes the slight calculation error difference between 16x9 with a 4x3, and 16x9 within a 2x1 image.

            if (cfhddata->lensCustomSRC[1])
            {
                outputaspect = cfhddata->lensCustomSRC[0] / cfhddata->lensCustomSRC[1];
                if (outputaspect >= 1.0f && outputaspect <= 3.0f)
                {
                    //float sourceratio = (float)width / (float)height;
                    if (approx_equal_float(outputaspect, 4.0f / 3.0f))
                        sensorcrop = sqrtf((float)(width*width + height*height)) / sqrtf((float)((width * 2 / 3)*(width * 2 / 3) + (height*height)));
                    if (approx_equal_float(outputaspect, 16.0f / 9.0f)) // 0.88;
                        sensorcrop = 1.00623f; // Fixes the slight calculation error difference between 16x9 with a 4x3, and 16x9 within a 2x1 image.
                }
            }

            // Mesh density scales with resolution (finer mesh for larger frames)
            if (width >= 2496)
                decoder->mesh = geomesh_create(199, 99);
            else if (width >= 1272)
                decoder->mesh = geomesh_create(99, 49);
            else
                decoder->mesh = geomesh_create(49, 25);

            phi = cfhddata->LensOffsetX * DEG2RAD(720.0f); // +-180deg HFOV for 2:1
            theta = cfhddata->LensOffsetY * DEG2RAD(720.0f); // +-180deg VFOV for 2:1
            rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
        }
        else if (approx_equal(width * 3, height * 4)) // approx. 4:3
        {
            srcLens = HERO4;
            sensorcrop = 1.0;

            if (width > 2880) // UHD
                decoder->mesh = geomesh_create(159, 119);
            else if (width >= 1920) //HD/2.7K
                decoder->mesh = geomesh_create(79, 59);
            else
                decoder->mesh = geomesh_create(39, 29);

            phi = cfhddata->LensOffsetX * DEG2RAD(120.0f); // +-60deg HFOV for 16:9
            theta = cfhddata->LensOffsetY * DEG2RAD(98.0f); // +-49deg VFOV for 16:9
            rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
        }
        else //if(approx_equal(width*9,height*16)) // approx. 16:9
        {
            srcLens = HERO4;
            sensorcrop = sqrtf(1920 * 1920 + 1080 * 1080) / sqrtf(2000 * 2000 + 1500 * 1500); // 3840x2160 from 4000x3000

            if (width > 2880) // UHD
                decoder->mesh = geomesh_create(159, 119);
            else if (width >= 1920) //HD/2.7K
                decoder->mesh = geomesh_create(79, 59);
            else
                decoder->mesh = geomesh_create(39, 29);

            phi = cfhddata->LensOffsetX * DEG2RAD(120.0f); // +-60.1deg HFOV for 16:9
            theta = cfhddata->LensOffsetY * DEG2RAD(70.0f); // +-34.75deg VFOV for 16:9
            rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
        }

        // Bind source/destination buffer geometry to the mesh for the active
        // pixel format (same format and geometry on both sides: warp in place)
        if ((output_format & 0x7fffffff) == COLOR_FORMAT_YUYV)
            status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_YUY2, width, height, pitch, WARPLIB_FORMAT_YUY2, backgroundfill);
        else if ((output_format & 0x7fffffff) == COLOR_FORMAT_RGB32)
            status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_32BGRA, width, height, pitch, WARPLIB_FORMAT_32BGRA, backgroundfill);
        else if ((output_format & 0x7fffffff) == COLOR_FORMAT_W13A)
            status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_W13A, width, height, pitch, WARPLIB_FORMAT_W13A, backgroundfill);
        else if ((output_format & 0x7fffffff) == COLOR_FORMAT_WP13)
            status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_WP13, width, height, pitch, WARPLIB_FORMAT_WP13, backgroundfill);
        else if ((output_format & 0x7fffffff) == COLOR_FORMAT_RG48)
            status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_RG48, width, height, pitch, WARPLIB_FORMAT_RG48, backgroundfill);
        else if ((output_format & 0x7fffffff) == COLOR_FORMAT_BGRA64)
            status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_64ARGB, width, height, pitch, WARPLIB_FORMAT_64ARGB, backgroundfill);
        else
            assert(0); // unsupported output format for the warp path

        if (cfhddata->lensSphere == 1)
        {
            if (cfhddata->lensGoPro != 2) // not outputting EQUIRECT
            {
                if (cfhddata->LensOffsetR != 0.0)
                {
                    //float angle = 360.0 * asinf(cfhddata->LensOffsetR * 1.7777777777) / (2.0 * 3.14159);
                    float angle = 360.0f * cfhddata->LensOffsetR * cfhddata->LensOffsetR * 2.1f;//asinf(cfhddata->LensOffsetR * 1.7777777777) / (2.0 * 3.14159);
                    if (cfhddata->LensOffsetR < 0.0)
                        angle = -angle;
                    geomesh_transform_rotate(decoder->mesh, angle);
                }
                if (cfhddata->LensZoom != 1.0)
                    geomesh_transform_scale(decoder->mesh, cfhddata->LensZoom, cfhddata->LensZoom);

                if (cfhddata->LensFishFOV != 0.0) // DeFish
                {
                    float fov = cfhddata->LensFishFOV;// *180.0;
                    if (fov > 89.9f) fov = 89.9f;   // clamp to just under +/-90deg
                    if (fov < -89.9f) fov = -89.9f;
                    if (fov)
                        status |= geomesh_transform_defish(decoder->mesh, fov);
                }
            }

            // Repoint from the source lens model to the requested destination
            // projection.  (No case 3 / default: other values leave the mesh
            // without a repoint transform.)
            switch (cfhddata->lensGoPro)
            {
            case 0:
                geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, RECTILINEAR);
                break;
            case 1:
                geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, HERO4);
                break;
            case 2:
                geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, EQUIRECT);
                break;
            case 4:
                geomesh_set_custom_lens(decoder->mesh, cfhddata->lensCustomSRC, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST));
                if (srcLens == EQUIRECT)
                    geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, EQUIRECT, CUSTOM_LENS);
                else
                    geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, CUSTOM_LENS, CUSTOM_LENS);
                break;
            }
        }
        else // old boring geometry
        {
            if (cfhddata->LensZoom != 1.0)
                geomesh_transform_scale(decoder->mesh, cfhddata->LensZoom, cfhddata->LensZoom);

            // basic orthographic moves
            if (cfhddata->LensOffsetX != 0.0 || cfhddata->LensOffsetY != 0.0)
                geomesh_transform_pan(decoder->mesh, cfhddata->LensOffsetX*(float)width, -cfhddata->LensOffsetY*(float)height);

            if (cfhddata->LensOffsetR != 0.0)
            {
                float angle = 360.0f * asinf(cfhddata->LensOffsetR * 1.7777777777f) / (2.0f * 3.14159f);
                geomesh_transform_rotate(decoder->mesh, angle);
            }

            if (cfhddata->lensGoPro == 0) //Rectilear
                status |= geomesh_transform_gopro_to_rectilinear(decoder->mesh, sensorcrop);
            //status |=
// geomesh_fisheye_gopro_adjustmesh(mesh, &correction_mode, WARPLIB_ALGORITHM_PRESERVE_EVERYTHING,//WARPLIB_ALGORITHM_BEST_FIT,
//     width, height, product, model, lens_type, fov, (int)decoder->frame.resolution);
        }

        geomesh_alloc_cache(decoder->mesh); // required for JOB_TYPE_WARP_CACHE

        // Allocate the warp destination buffer on first use (one full frame)
        if (status == WARPLIB_SUCCESS)
        {
            if (decoder->lens_correct_buffer == NULL)
            {
#if _ALLOCATOR
                decoder->lens_correct_buffer = (int *)Alloc(decoder->allocator, pitch * height);
#else
                decoder->lens_correct_buffer = (int *)MEMORY_ALLOC(pitch * height);
#endif
            }
        }
        else
        {
            return; // mesh setup failed -- leave the frame unwarped
        }

        /* need resources?
        {
            if(decoder->tools == NULL)
            {
        #if _ALLOCATOR
                decoder->tools = (ToolsHandle *)Alloc(decoder->allocator, sizeof(ToolsHandle));
        #else
                decoder->tools = (ToolsHandle *)MEMORY_ALLOC(sizeof(ToolsHandle));
        #endif
                if(decoder->tools)
                {
                    memset(decoder->tools, 0, sizeof(ToolsHandle));
                }
                else
                {
                    return;
                }
            }
        }
        */

#if _THREADED && 1
        {
            WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
            int workunits = decoder->frame.height;

#if _DELAY_THREAD_START
            if (decoder->worker_thread.pool.thread_count == 0)
            {
                CreateLock(&decoder->worker_thread.lock);
                // Initialize the pool of transform worker threads
                ThreadPoolCreate(&decoder->worker_thread.pool,
                                 decoder->thread_cntrl.capabilities >> 16,
                                 WorkerThreadProc,
                                 decoder);
            }
#endif
            {
                // Post a message to the mailbox: precompute the warp cache
                // (16 rows per work unit)
                mailbox->data = decoder->mesh;
                mailbox->output = output;
                mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
                mailbox->line_max = decoder->frame.height;
                mailbox->chunk_size = 16;
                workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
                mailbox->jobType = JOB_TYPE_WARP_CACHE;

                // Set the work count to the number of rows to process
                ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
                // Start the transform worker threads
                ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
                // Wait for all of the worker threads to finish
                ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
            }
        }
#endif

        //decoder->frame.output_format = output_format;

        // Remember the lens parameters used to build this mesh so the next
        // frame can skip the rebuild when nothing changed.
        decoder->lastLensOffsetX = cfhddata->LensOffsetX;
        decoder->lastLensOffsetY = cfhddata->LensOffsetY;
        decoder->lastLensOffsetZ = cfhddata->LensOffsetZ;
        decoder->lastLensOffsetR = cfhddata->LensOffsetR;
        decoder->lastLensZoom = cfhddata->LensZoom;
        decoder->lastLensFishFOV = cfhddata->LensFishFOV;
        decoder->lastLensGoPro = cfhddata->lensGoPro;
        decoder->lastLensSphere = cfhddata->lensSphere;
        decoder->lastLensFill = cfhddata->lensFill;
        decoder->lastLensStyleSel = cfhddata->lensStyleSel;
        memcpy(decoder->lastLensCustomSRC, cfhddata->lensCustomSRC, sizeof(cfhddata->lensCustomSRC));
        memcpy(decoder->lastLensCustomDST, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST));
    }

#if _THREADED && 1
    {
        WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
        int workunits = decoder->frame.height;

        // Run the actual warp on the worker pool: 'output' is the source and
        // 'lens_correct_buffer' the destination, 16 rows per work unit.
        mailbox->data = decoder->mesh;
        mailbox->output = output;
        mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
        mailbox->line_max = decoder->frame.height;
        mailbox->chunk_size = 16;
        workunits = (mailbox->line_max + mailbox->chunk_size-1)/mailbox->chunk_size;
        mailbox->jobType = JOB_TYPE_WARP;

        // Set the work count to the number of rows to process
        ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
        // Start the transform worker threads
        ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
        // Wait for all of the worker threads to finish
        ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

        if(backgroundfill) // may need to blur the filled in areas
        {
            // Second pass: vertical blur over the fill, one work unit per
            // 16-column chunk (line_max is the frame width here)
            mailbox->data = decoder->mesh;
            mailbox->output = (uint8_t *)decoder->lens_correct_buffer;
            mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
            mailbox->line_max = decoder->frame.width;
            mailbox->chunk_size = 16;
            mailbox->pitch = pitch;
            workunits = (mailbox->line_max + mailbox->chunk_size-1)/mailbox->chunk_size;
            mailbox->jobType = JOB_TYPE_WARP_BLURV;

            // Set the work count to the number of rows to process
            ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
            // Start the transform worker threads
            ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
            // Wait for all of the worker threads to finish
            ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
        }
    }
#else // not threading
    {
        //geomesh_cache_init_bilinear(decoder->mesh); //bad
        geomesh_cache_init_bilinear_range(decoder->mesh, 0, decoder->frame.height); //good
        geomesh_apply_bilinear(decoder->mesh, (unsigned char *)output, (unsigned char *)decoder->lens_correct_buffer, 0, decoder->frame.height);
    }
#endif

    // Copy the warped image back over the caller's buffer
    memcpy(output, decoder->lens_correct_buffer, pitch * decoder->frame.height);

    /*
    if(lens_correct_buffer)
#if _ALLOCATOR
        Free(decoder->allocator, lens_correct_buffer);
#else
        MEMORY_ALIGNED_FREE(lens_correct_buffer);
#endif
    geomesh_destroy(mesh);
    */
}

// Blank the regions outside the LensXmin/Xmax/Ymin/Ymax crop rectangle:
// zero bytes for RGB formats, a two-byte fill pattern for packed 8-bit YUV
// formats.  A rectangle of all zeros, or the full 0..1 range, disables
// masking.  X limits are expressed as fractions of the row pitch and are
// rounded down to a multiple of 4 bytes.
void MaskFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
    int x, y, width, height;
    int minY, maxY;
    int minX, maxX;
    CFHDDATA *cfhddata = &decoder->cfhddata;
    uint8_t *line = output;
    uint32_t fillA = 0;
    uint32_t fillB = 0;
    int bitsize = 8;

    if (!cfhddata->doMesh)
        return;

    width = decoder->frame.width;
    height = decoder->frame.height;

    // All zeros or the full 0..1 rectangle means "no mask"
    if (decoder->cfhddata.LensYmin == 0.0 && decoder->cfhddata.LensXmin == 0.0 &&
        decoder->cfhddata.LensYmax == 0.0 && decoder->cfhddata.LensXmax == 0.0)
        return;
    if (decoder->cfhddata.LensYmin == 0.0 && decoder->cfhddata.LensXmin == 0.0 &&
        decoder->cfhddata.LensYmax == 1.0 && decoder->cfhddata.LensXmax == 1.0)
        return;

    // Convert the normalized rectangle to row indices and byte offsets
    minY = (int)(decoder->cfhddata.LensYmin*(float)height);
    maxY = (int)(decoder->cfhddata.LensYmax*(float)height);
    minX = 0xfffc & (int)(decoder->cfhddata.LensXmin*(float)pitch);
    maxX = 0xfffc & (int)(decoder->cfhddata.LensXmax*(float)pitch);

    if (FORMATRGB(output_format))
    {
        line = output;

        // Top rows
        for (y = 0; y < minY; y++)
        {
            memset(line, 0, abs(pitch));
            line += pitch;
        }

        // Left and Right edges of middle rows
        if (maxX - minX != pitch)
        {
            for (; y < maxY; y++)
            {
                memset(line, 0, minX);
// (continuation of MaskFrame: right edge of the middle rows, then bottom rows)
                memset(line + maxX, 0, pitch - maxX);
                line += pitch;
            }
        }

        // Bottom rows
        y = maxY;
        line = output + y*pitch;
        for (; y < height; y++)
        {
            memset(line, 0, abs(pitch));
            line += pitch;
        }
    }
    else
    {
        // Pick the alternating byte pair that fills the packed YUV format
        switch (output_format & 0x7fffffff)
        {
        case COLOR_FORMAT_YVYU:
        case COLOR_FORMAT_YUYV:
            fillA = 0x10;   // Y first in these formats
            fillB = 0x80;
            break;
        case COLOR_FORMAT_UYVY:
        case COLOR_FORMAT_2VUY:
            fillA = 0x80;   // chroma first in these formats
            fillB = 0x10;
            break;
        case COLOR_FORMAT_YU64:
            fillA = 0x8000;
            fillB = 0x1000;
            bitsize = 16;   // NOTE(review): no 16-bit fill loop follows, so YU64 is effectively not masked here
            break;
        }
    }

    if (bitsize == 8)
    {
        line = output;

        // Top rows
        for (y = 0; y < minY; y++)
        {
            for (x = 0; x < pitch; x += 2)
            {
                line[x] = fillA;
                line[x + 1] = fillB;
            }
            line += pitch;
        }

        // Left and Right edges of middle rows
        if (maxX - minX != pitch)
        {
            for (; y < maxY; y++)
            {
                for (x = 0; x < minX; x += 2)
                {
                    line[x] = fillA;
                    line[x + 1] = fillB;
                }
                for (x = maxX; x < pitch; x += 2)
                {
                    line[x] = fillA;
                    line[x + 1] = fillB;
                }
                line += pitch;
            }
        }

        // Bottom rows
        y = maxY;
        line = output + y*pitch;
        for (; y < height; y++)
        {
            for (x = 0; x < pitch; x += 2)
            {
                line[x] = fillA;
                line[x + 1] = fillB;
            }
            line += pitch;
        }
    }
}
#endif //#if WARPSTUFF

// Convert the decoder's local working buffer into the caller's output buffer,
// applying the 3D/stereo channel processing (vertical offset, rotation,
// keystone, zoom, flips, sharpen) -- on the worker-thread pool when _THREADED.
//
//   output/pitch             - destination buffer and its row pitch in bytes
//   output_format            - destination COLOR_FORMAT_*
//   local_output/local_pitch - source (decoded) buffer and pitch
//   channel_offset           - byte offset between the two stereo channels;
//                              a negative value means the channels are swapped
void ConvertLocalToOutput(DECODER *decoder, uint8_t *output, int pitch, int output_format,
                          uint8_t *local_output, int local_pitch, int channel_offset)
{
    uint8_t *local_output_double = local_output;
    //Frame_Region emptyFrameMask = {0};

    if(decoder->StereoBuffer)
        local_output_double = local_output = (uint8_t *)decoder->StereoBuffer;

    if(channel_offset < 0) // channel swapped
    {
        channel_offset = -channel_offset;
    }

    // If source and destination differ in vertical orientation, walk the
    // source bottom-up: start at the last row and negate the pitch.
    if(INVERTEDFORMAT(decoder->frame.format) != INVERTEDFORMAT(output_format))
    {
        local_output += local_pitch*(decoder->frame.height-1);
        if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC)
            local_output_double += local_pitch*(decoder->frame.height*decoder->channel_decodes-1);
        else
            local_output_double = local_output;
        local_pitch = -local_pitch;
    }

    // Record whether vertical offsets must be inverted for this output format
    if(FLIPCOLORS(output_format) || output_format & 0x80000000)
    {
        decoder->cfhddata.InvertOffset = 1;
    }
    else
    {
        decoder->cfhddata.InvertOffset = 0;
    }
// (inside ConvertLocalToOutput: threaded vertical-3D pass)
    decoder->frame.format = output_format;
    //decoder->frame.colorspace = COLOR_SPACE_CG_601;

#if _THREADED
    {
        WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
        int workunits;

#if _DELAY_THREAD_START
        if(decoder->worker_thread.pool.thread_count == 0)
        {
            CreateLock(&decoder->worker_thread.lock);
            // Initialize the pool of transform worker threads
            ThreadPoolCreate(&decoder->worker_thread.pool,
                             decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                             WorkerThreadProc,
                             decoder);
        }
#endif

        // Run the vertical 3D pass only when some orientation or framing
        // parameter has a non-identity value.
        if( ((decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION) &&
             (decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
              decoder->cfhddata.channel[1].FrameKeyStone ||
              decoder->cfhddata.channel[1].VerticalOffset ||
              decoder->cfhddata.channel[1].RotationOffset ||
              decoder->cfhddata.channel[1].FrameTilt ||
              decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0 ||
              decoder->cfhddata.channel[2].FrameKeyStone ||
              decoder->cfhddata.channel[2].VerticalOffset ||
              decoder->cfhddata.channel[2].RotationOffset ||
              decoder->cfhddata.channel[2].FrameTilt)) ||
            ((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
             (decoder->cfhddata.FrameOffsetY ||
              decoder->cfhddata.FrameOffsetR ||
              // decoder->cfhddata.FrameOffsetX || ||
              decoder->cfhddata.FrameHScale != 1.0 ||
              decoder->cfhddata.FrameHDynamic != 1.0 ||
              decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
              decoder->cfhddata.channel[2].FrameZoom != 1.0) ))
        {
            //int x;
            int xbytes, xstep;
            //uint8_t *base = local_output;
            int width, height, chunk_size;
            int fine_vertical = 0;

            width = decoder->frame.width;
            height = decoder->frame.height;

            // Row length in bytes and the column step each work unit advances by
            switch(decoder->StereoBufferFormat)
            {
            case DECODED_FORMAT_RGB32:
                xbytes = width*4;
                xstep = 16;
                break;
            case DECODED_FORMAT_RGB24:
                xbytes = width*3;
                xstep = 16;
                break;
            case DECODED_FORMAT_YUYV:
                xbytes = width*2;
                xstep = 16;
                break;
            case DECODED_FORMAT_W13A:
            case DECODED_FORMAT_RG64:
                xbytes = width*8;
                xstep = 32;
                break;
            case DECODED_FORMAT_WP13:
            case DECODED_FORMAT_RG48:
                xbytes = width*6;
                xstep = 32;
                break;
            default:
                assert(0);
                break;
            }

            // Coarse chunks when there is no rotation/keystone work; with
            // rotation use single-column chunks, and when the rotation is
            // large enough switch to single-pixel steps (fine_vertical).
            if(!(decoder->cfhddata.process_path_flags & (PROCESSING_ORIENTATION|PROCESSING_FRAMING)) ||
               (decoder->cfhddata.channel[1].RotationOffset == 0.0 &&
                decoder->cfhddata.channel[1].FrameKeyStone == 0.0 &&
                decoder->cfhddata.channel[2].RotationOffset == 0.0 &&
                decoder->cfhddata.channel[2].FrameKeyStone == 0.0 &&
                decoder->cfhddata.FrameOffsetR == 0.0))
            {
                chunk_size = 8;
            }
            else
            {
                chunk_size = 1;
                if((fabs(decoder->cfhddata.channel[1].RotationOffset) +
                    fabs(decoder->cfhddata.channel[1].FrameKeyStone*0.2) +
                    fabs(decoder->cfhddata.FrameOffsetR)) > 0.015 ||
                   (fabs(decoder->cfhddata.channel[2].RotationOffset) +
                    fabs(decoder->cfhddata.channel[2].FrameKeyStone*0.2) +
                    fabs(decoder->cfhddata.FrameOffsetR)) > 0.015)
                {
                    // One pixel per step for the active pixel format
                    switch(decoder->StereoBufferFormat)
                    {
                    case DECODED_FORMAT_RGB32:
                        xstep = 4;
                        break;
                    case DECODED_FORMAT_RGB24:
                        xstep = 3;
                        break;
                    case DECODED_FORMAT_YUYV:
                        xstep = 4;
                        break;
                    case DECODED_FORMAT_W13A:
                    case DECODED_FORMAT_RG64:
                        xstep = 8;
                        break;
                    case DECODED_FORMAT_WP13:
                    case DECODED_FORMAT_RG48:
                    default:
                        xstep = 6;
                        break;
                    }
                    fine_vertical = 1;
                }
            }

            // Interlaced 4:2:2 sources are processed one field at a time
            // (double pitch, second field offset by one row)
            if( decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422 &&
                (decoder->frame.resolution == DECODED_RESOLUTION_FULL ||
                 decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) &&
                decoder->codec.progressive == false)
            {
                int interlaced_pitch = local_pitch * 2;
                uint8_t *field2_output = local_output + local_pitch;

                // Post a message to the mailbox (field 1)
                mailbox->local_output = local_output;
                mailbox->local_pitch = interlaced_pitch;
                mailbox->channel_offset = channel_offset;
                memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
                mailbox->info.height >>= 1;
                mailbox->line_max = (xbytes + xstep-1)/xstep;
                mailbox->chunk_size = chunk_size;
                mailbox->fine_vertical = fine_vertical;
                mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
                workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;

                // Set the work count to the number of rows to process
                ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
                // Start the transform worker threads
                ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
                // Wait for all of the worker threads to finish
                ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

                // Post a message to the mailbox (field 2)
                mailbox->local_output = field2_output;
                mailbox->local_pitch = interlaced_pitch;
                mailbox->channel_offset = channel_offset;
                memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
                mailbox->info.height >>= 1;
                mailbox->chunk_size = chunk_size;
                mailbox->line_max = (xbytes + xstep-1)/xstep;
                mailbox->fine_vertical = fine_vertical;
                mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
                workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;

                // Set the work count to the number of rows to process
                ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
                // Start the transform worker threads
                ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
                // Wait for all of the worker threads to finish
                ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
            }
            else
            {
                //TODO Lens corect here.
                //call JOB_TYPE_VERTICAL_3D then (or lens correction equivalent.)
                // JOB_TYPE_HORIZONTAL_3D
                //before doing any offset and rotation corrections.
// (inside ConvertLocalToOutput: progressive vertical pass, then horizontal pass)
                if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) //HACK //DAN20110129
                    width /= 2;

                // Post a message to the mailbox
                mailbox->local_output = local_output;
                mailbox->local_pitch = local_pitch;
                mailbox->channel_offset = channel_offset;
                memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
                mailbox->chunk_size = chunk_size;
                mailbox->line_max = (xbytes + xstep-1)/xstep;
                mailbox->fine_vertical = fine_vertical;
                mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical
                workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;

                // Set the work count to the number of rows to process
                ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
                // Start the transform worker threads
                ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
                // Wait for all of the worker threads to finish
                ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
            }
        }

        // Horizontal 3D pass (plus horizontal/vertical flips): always runs,
        // converting local_output into the caller's output buffer.
        // Post a message to the mailbox
        mailbox->output = output;
        mailbox->pitch = pitch;
        mailbox->local_output = local_output;
        mailbox->local_pitch = local_pitch;
        mailbox->channel_offset = channel_offset;
        memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
        mailbox->chunk_size = 16;
        mailbox->line_max = decoder->frame.height;
        if(decoder->channel_mix_half_res == 1)
            mailbox->line_max *= 2;
        workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
        decoder->doVerticalFilter = 0;  // the horizontal pass may set this to request a sharpen pass
        mailbox->jobType = JOB_TYPE_HORIZONAL_3D; // 3d work && horizontal and vertical flips

        // Set the work count to the number of rows to process
        ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
        // Start the transform worker threads
        ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
        // Wait for all of the worker threads to finish
        ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

        if(decoder->doVerticalFilter)
        {
            // Post a message to the mailbox (sharpen pass)
            mailbox->output = output;
            mailbox->pitch = pitch;
            mailbox->local_output = local_output_double;
            mailbox->local_pitch = local_pitch;
            mailbox->channel_offset = channel_offset;
            memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
            mailbox->chunk_size = 16;
            mailbox->line_max = decoder->frame.height;
            if(decoder->channel_decodes == 2 && decoder->channel_blend_type == 0)
                mailbox->line_max *= 2;
            if(decoder->channel_mix_half_res == 1)
                mailbox->line_max *= 2;
            workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
            mailbox->jobType = JOB_TYPE_SHARPEN; // 3d work && horizontal and vertical flips

            // Set the work count to the number of rows to process
            ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
            // Start the transform worker threads
            ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
            // Wait for all of the worker threads to finish
            ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
        }
    }
#else // single-threaded fallback: the same work done line by line
    {
        int y,width, height;
        uint8_t scratch[4096*16];
        int scratchremain = 4096*16;
        int ymin = 0, ymax;

        width = decoder->frame.width;
        height = decoder->frame.height;
        ymax = height;

        // A non-empty frame mask restricts the active rows to [ymin, ymax)
        if((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
           memcmp(&decoder->cfhddata.channel[0].FrameMask, &emptyFrameMask, 32))
        {
            ymin = (float)height * decoder->cfhddata.channel[0].FrameMask.topLftY;
            ymax = (float)height * decoder->cfhddata.channel[0].FrameMask.botLftY;
        }

        if( ((decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION) &&
             (decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
              decoder->cfhddata.channel[1].FrameKeyStone ||
              decoder->cfhddata.channel[1].VerticalOffset ||
              decoder->cfhddata.channel[1].RotationOffset ||
              decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0 ||
              decoder->cfhddata.channel[2].FrameKeyStone ||
              decoder->cfhddata.channel[2].VerticalOffset ||
              decoder->cfhddata.channel[2].RotationOffset)) ||
            ((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
             (decoder->cfhddata.FrameOffsetY ||
              decoder->cfhddata.FrameOffsetR ||
              decoder->cfhddata.FrameOffsetX ||
              decoder->cfhddata.FrameHScale != 1.0 ||
              decoder->cfhddata.FrameHDynamic != 1.0 ||
              decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
              decoder->cfhddata.channel[2].FrameZoom != 1.0)))
        {
            int x,xbytes, xstep;
            uint8_t *base = local_output;
            float voffsetstep;
            float voffset = decoder->cfhddata.channel[1].VerticalOffset;
            float roffset = decoder->cfhddata.channel[1].RotationOffset;
            float voffset1, voffset2;
            float voffsetstep1, voffsetstep2;
            int channel_flip = decoder->cfhddata.channel_flip;
            int aspectx,aspecty;
            float aspectfix;

            GetDisplayAspectRatio(decoder, &aspectx, &aspecty);
            aspectfix = (float)(aspectx*aspectx) / (float)(aspecty*aspecty);

            // Honor the process-path flags: disable the corrections whose
            // category is not enabled
            if(!(decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION))
            {
                voffset = roffset = 0;
            }
            if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
            {
                channel_flip = 0;
            }
            if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
                voffset += decoder->cfhddata.FrameOffsetY;

            if(decoder->cfhddata.InvertOffset)
            {
                voffset = -voffset;
                roffset = -roffset;
            }

            // Row length in bytes and the per-pass column step
            switch(decoder->StereoBufferFormat)
            {
            case DECODED_FORMAT_RGB32:
                xbytes = width*4;
                xstep = 16;
                break;
            case DECODED_FORMAT_RGB24:
                xbytes = width*3;
                xstep = 16;
                break;
            case DECODED_FORMAT_YUYV:
                xbytes = width*2;
                xstep = 16;
                break;
            case DECODED_FORMAT_WP13:
            case DECODED_FORMAT_RG48:
            default:
                xbytes = width*6;
                xstep = 32;
                break;
            }

            //DAN20100923 -- simplied
            //voffset += roffset * (float)(width*width) / (float)(height*height) * 0.5;
            //voffsetstep = -roffset * (float)(width*width) / (float)(height*height) / (float)(xbytes/xstep);
            voffset += roffset * aspectfix * 0.5;
            voffsetstep = -roffset * aspectfix / (float)(xbytes/xstep);

            if(roffset == 0.0)
                xstep = xbytes;  // no rotation: shift the whole row in one pass

            voffset1 = voffset2 = voffset;
            voffsetstep1 = voffsetstep2 = voffsetstep;

            // Mirror the offsets per channel according to the flip bits
            if(channel_flip & 0xf)
            {
                if(channel_flip & 2)
                {
                    voffset1 = -voffset1;
                    voffsetstep1 = -voffsetstep1;
                }
                if(channel_flip & 8)
                {
                    voffset2 = -voffset2;
                    voffsetstep2 = -voffsetstep2;
                }
                if(channel_flip & 1)
                {
                    voffset1 += voffsetstep1*(xbytes/xstep);
                    voffsetstep1 = -voffsetstep1;
                }
                if(channel_flip & 4)
                {
                    voffset2 += voffsetstep2*(xbytes/xstep);
                    voffsetstep2 = -voffsetstep2;
                }
            }

            // Column-by-column vertical shift implements offset + rotation
            for(x=0; x<xbytes; x+=xstep)
            {
                if(decoder->channel_decodes == 1 && decoder->channel_current == 1) // Right only
                {
                    RGB48VerticalShift(decoder, base, (unsigned short *)scratch, xstep, height, local_pitch, -voffset2);
                }
                else
                {
                    RGB48VerticalShift(decoder, base, (unsigned short *)scratch, xstep, height, local_pitch, voffset1);
                }

                if(decoder->channel_decodes == 2)
                {
                    uint8_t *bptr = base + channel_offset;
                    RGB48VerticalShift(decoder, bptr, (unsigned short *)scratch, xstep, height, local_pitch, -voffset2);
                }

                base += xstep;
                voffset1 += voffsetstep1;
                voffset2 += voffsetstep2;
            }
        }

        if(decoder->channel_mix_half_res == 1)
            height *= 2;

        if(ymin)
        {
            memset(local_output, 0, abs(local_pitch)); // zero one line;
        }

        // Rows outside [ymin, ymax) reuse the zeroed line (source pitch 0)
        for(y=0; y<ymin; y++)
        {
            ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, 0, channel_offset, y, 0);
        }
        for(; y<ymax; y++)
        {
            ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, local_pitch, channel_offset, y, 0);
        }
        for(; y<height; y++)
        {
            ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, 0, channel_offset, y, 0);
        }
    }
#endif
}

// Decode a sample from the input bitstream into the output frame buffer
bool DecodeSample(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch,
                  ColorParam *colorparams, CFHDDATA *cfhddata)
{
    //CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    //CODEC_STATE *codec = &decoder->codec;
    //int subband_wavelet_index[] = {5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 1, 1, 1, 0, 0, 0};

    int channel_decodes = 1; // 3D Work
    int channel_offset = 0;
    int channel_mask = 0;
    int channel_current = 0;
    //int wavelet_index;
    bool result = true;
    uint8_t *local_output = output;
    uint8_t *local_buffer = NULL;
    int local_pitch = pitch;
    int internal_format = decoder->frame.format;
int output_format = decoder->frame.output_format; bool use_local_buffer = false; DECODER *local_decoder = decoder; //Frame_Region emptyFrameMask = {0}; Frame_Region emptyFrameMask = FRAME_REGION_INITIALIZER; int orig_width = decoder->frame.width; int orig_height = decoder->frame.height; decoder->local_output = local_output; // used for NV12 decodes. decoder->sample_uncompressed = 0; // set if a uncompressed sample is found. decoder->image_dev_only = 0; if(decoder->flags & (1<<3)) // This is an image development only decode. { decoder->sample_uncompressed = 1; decoder->image_dev_only = 1; decoder->codec.encoded_format = ENCODED_FORMAT_RGB_444; decoder->codec.unique_framenumber = 0; //What should this be? decoder->frame.white_point = 16; // how to we pass this in? decoder->uncompressed_chunk = (uint32_t *)input->lpCurrentBuffer; switch(output_format & 0x7fffffff) { case COLOR_FORMAT_RGB24: decoder->uncompressed_size = orig_width * orig_height * 3; break; case COLOR_FORMAT_RGB32: decoder->uncompressed_size = orig_width * orig_height * 4; break; case COLOR_FORMAT_RG48: case COLOR_FORMAT_WP13: decoder->uncompressed_size = orig_width * orig_height * 6; break; default: decoder->uncompressed_size = orig_width * orig_height * 6; assert(0); break; } } decoder->frame.alpha_Companded = 0; // reset this state. if(decoder->parallelDecoder) decoder->parallelDecoder->sample_uncompressed = 0; decoder->error = CODEC_ERROR_OKAY; input->error = BITSTREAM_ERROR_OKAY; // first time through encoded_format is not initized. 
if(input->nWordsUsed > 4096 && decoder->image_dev_only == 0) // an I-frame is needed { SAMPLE_HEADER header; BITSTREAM input2; InitBitstreamBuffer(&input2, input->lpCurrentWord, input->nWordsUsed, BITSTREAM_ACCESS_READ); memset(&header, 0, sizeof(SAMPLE_HEADER)); header.find_lowpass_bands = 2; // help finding the uncompressed flag if(ParseSampleHeader(&input2, &header)) { decoder->codec.encoded_format = header.encoded_format; decoder->sample_uncompressed = header.hdr_uncompressed; if(decoder->parallelDecoder) decoder->parallelDecoder->sample_uncompressed = header.hdr_uncompressed; } } if((uintptr_t)input->lpCurrentBuffer & 0x3) { if(decoder->aligned_sample_buffer == NULL) { #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; decoder->aligned_sample_buffer = (uint8_t *)AllocAligned(allocator, (size_t)input->dwBlockLength, 16); #else decoder->aligned_sample_buffer = (uint8_t *)MEMORY_ALIGNED_ALLOC(input->dwBlockLength, 16); #endif memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength); decoder->aligned_sample_buffer_size = input->dwBlockLength; } else { if ((size_t)input->dwBlockLength <= decoder->aligned_sample_buffer_size) { memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength); } else { #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; FreeAligned(decoder->allocator, decoder->aligned_sample_buffer); decoder->aligned_sample_buffer = (uint8_t *)AllocAligned(allocator, input->dwBlockLength, 16); #else MEMORY_ALIGNED_FREE(decoder->aligned_sample_buffer); decoder->aligned_sample_buffer = (uint8_t *)MEMORY_ALIGNED_ALLOC(input->dwBlockLength, 16); #endif memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength); decoder->aligned_sample_buffer_size = input->dwBlockLength; } } input->lpCurrentBuffer = decoder->aligned_sample_buffer; input->lpCurrentWord = decoder->aligned_sample_buffer; } #if 0 // Test for missaligning the image data if(((int)input->lpCurrentBuffer&3) == 
0) { int i; uint8_t *ptr = (uint8_t *)input->lpCurrentBuffer; int missaligned = 1; //2 or 3 for(i=input->dwBlockLength-1; i>=0; i--) ptr[i+missaligned] = ptr[missaligned]; input->lpCurrentBuffer = (uint8_t *)&ptr[missaligned]; input->lpCurrentWord = (uint8_t *)&ptr[missaligned]; } #endif //HACK // Unfortunately I need color matrix data deep within the codec for RT playback. if(cfhddata && cfhddata->MagicNumber == CFHDDATA_MAGIC_NUMBER) // valid input { if(decoder->cfhddata.MagicNumber != CFHDDATA_MAGIC_NUMBER) { //int size = cfhddata->size; size_t size = cfhddata->size; memset(&decoder->cfhddata, 0, sizeof(CFHDDATA)); if (size > sizeof(CFHDDATA)) { // Limit the size to the known structure size = sizeof(CFHDDATA); } memcpy(&decoder->cfhddata, cfhddata, size); } } else { unsigned short value; if(decoder->cfhddata.MagicNumber != CFHDDATA_MAGIC_NUMBER || decoder->cfhddata.size != sizeof(CFHDDATA)) { memset(&decoder->cfhddata, 0, sizeof(CFHDDATA)); decoder->cfhddata.MagicNumber = CFHDDATA_MAGIC_NUMBER; decoder->cfhddata.size = sizeof(CFHDDATA); if(decoder->image_dev_only) // For baseband image only corrections, initize the decoder with defaults { decoder->cfhddata.cfhd_subtype = 2; //RGB decoder->cfhddata.num_channels = 3; } else if(GetTuplet(input->lpCurrentBuffer, input->nWordsUsed, CODEC_TAG_INPUT_FORMAT, &value)) { if(value == COLOR_FORMAT_RG48) { decoder->cfhddata.cfhd_subtype = 2; //RGB decoder->cfhddata.num_channels = 3; } else if(value == COLOR_FORMAT_RG64) { decoder->cfhddata.cfhd_subtype = 3; //RGBA decoder->cfhddata.num_channels = 4; } else if(value > COLOR_FORMAT_BAYER && value < COLOR_FORMAT_BAYER_END) { unsigned int format = BAYER_FORMAT_RED_GRN; decoder->cfhddata.cfhd_subtype = 1; //BAYER decoder->cfhddata.bayer_format = format; // default to Red-Grn decoder->cfhddata.version = CFHDDATA_VERSION; } } } } OverrideCFHDDATA(decoder, input->lpCurrentBuffer, input->nWordsUsed); if(decoder->image_dev_only) // HACK we need to support 3D also. 
decoder->source_channels = 1; else decoder->source_channels = decoder->real_channels = SkipVideoChannel(decoder, input, 0); if(!decoder->basic_only && (decoder->cfhddata.MSChannel_type_value || decoder->cfhddata.MSCTV_Override)) { //int channels = 0; int channel_blend_type = BLEND_NONE; int channel_swapped_flags = 0; if(decoder->cfhddata.MSCTV_Override) { channel_mask = decoder->cfhddata.MSCTV_Override&0xff; channel_blend_type = ((decoder->cfhddata.MSCTV_Override>>8) & 0xff); channel_swapped_flags = ((decoder->cfhddata.MSCTV_Override>>16) & 0xffff); } else { channel_mask = decoder->cfhddata.MSChannel_type_value&0xff; channel_blend_type = ((decoder->cfhddata.MSChannel_type_value>>8) & 0xff); channel_swapped_flags = ((decoder->cfhddata.MSChannel_type_value>>16) & 0xffff); } if(channel_mask != 3) { channel_blend_type = BLEND_NONE; channel_swapped_flags = 0; } //if(channels >= 2) // even "mono" files need to be displayed as Stereo if a 3D mode is selected //DAN20090302 { if(channel_mask == 1 && decoder->source_channels >= 2) // Decode Left only { if(decoder->cfhddata.FramingFlags & 2) // channel swap { SkipVideoChannel(decoder, input, 2); // 3D work } } else if(channel_mask == 2 && decoder->source_channels >= 2) // Decode Right only { if(decoder->cfhddata.FramingFlags & 2) // channel swap { SkipVideoChannel(decoder, input, 1); // 3D work } else { //assume second channel decode SkipVideoChannel(decoder, input, 2); // 3D work } channel_current = 1; channel_decodes = 1; channel_blend_type = BLEND_NONE; channel_swapped_flags = 0; } else if(channel_mask == 2 && decoder->source_channels <= 1) // Decode 2D as Right channel { channel_current = 1; channel_decodes = 1; channel_blend_type = BLEND_NONE; channel_swapped_flags = 0; } else if((channel_mask&3) == 3) // A+B 3d work { channel_decodes = 2; decoder->channel_mix_half_res = 0; if(channel_blend_type != BLEND_NONE) { if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format)) { 
//if(decoder->frame.format == DECODED_FORMAT_W13A) // { // decoder->frame.format = internal_format = DECODED_FORMAT_W13A; // } //else //{ // decoder->frame.format = internal_format = DECODED_FORMAT_RG64; // } decoder->frame.format = internal_format = DECODED_FORMAT_RGB32; local_pitch = decoder->frame.width * 4; } else { decoder->frame.format = internal_format = DECODED_FORMAT_RGB24; local_pitch = decoder->frame.width * 3; //RGB24 } /* if(decoder->frame.resolution == DECODED_RESOLUTION_FULL && (output_format == DECODED_FORMAT_YUYV || output_format == DECODED_FORMAT_UYVY)) { if( channel_blend_type == BLEND_FREEVIEW || ((channel_blend_type == BLEND_STACKED_ANAMORPHIC || channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC || channel_blend_type == BLEND_LINE_INTERLEAVED) && decoder->frame.width > 1280)) { decoder->frame.resolution = DECODED_RESOLUTION_HALF; decoder->channel_mix_half_res = 1; decoder->frame.width /= 2; decoder->frame.height /= 2; local_pitch = (decoder->frame.width) * 3; //RGB24 } } */ } /* if(channel_blend_type == BLEND_STEREO_YUY2inRGBA) //YUY2 in RGBA { decoder->frame.format = internal_format = DECODED_FORMAT_YUYV; local_pitch = decoder->frame.width * 2; //YUY2 channel_offset = local_pitch * (decoder->frame.height); use_local_buffer = true; }*/ /* DAN20120316 FLAG3D_HALFRES broken if(decoder->frame.resolution == DECODED_RESOLUTION_FULL && channel_swapped_flags & FLAG3D_HALFRES && output_format != DECODED_FORMAT_W13A) { decoder->frame.resolution = DECODED_RESOLUTION_HALF; decoder->channel_mix_half_res = 1; decoder->frame.width /= 2; decoder->frame.height /= 2; local_pitch /= 2; } */ if( decoder->frame.resolution == DECODED_RESOLUTION_FULL && (channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC || channel_blend_type == BLEND_FREEVIEW)) { if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER) { if(decoder->sample_uncompressed) { decoder->frame.resolution = DECODED_RESOLUTION_HALF; decoder->channel_mix_half_res = 1; decoder->frame.width /= 2; 
decoder->frame.height /= 2; local_pitch /= 2; } else { if(decoder->preformatted_3D_type > BLEND_NONE) { // leave as is. } else if(FORMAT8BIT(output_format)) { decoder->frame.resolution = DECODED_RESOLUTION_HALF_HORIZONTAL; decoder->frame.width /= 2; local_pitch /= 2; } } } else { if(FORMAT8BIT(output_format)) decoder->frame.resolution = DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER; } //TODO int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed; } if(channel_blend_type >= BLEND_STACKED_ANAMORPHIC && channel_blend_type < BLEND_ANAGLYPH_RC)// stacked, side-by-side, fields, Onion, YUY2 { channel_offset = local_pitch * (decoder->frame.height); } else if(channel_blend_type >= BLEND_ANAGLYPH_RC) { /* if(channel_blend_type & 1 && channel_blend_type <= 21) // B&W Anaglyph { //B&W using YUYV decoder->frame.format = internal_format = DECODED_FORMAT_YUYV; local_pitch = decoder->frame.width * 2; //YUY2 }*/ channel_offset = local_pitch * (decoder->frame.height); use_local_buffer = true; } else if(channel_blend_type == BLEND_NONE) // double high { channel_offset = pitch * decoder->frame.height; } else { channel_blend_type = BLEND_STACKED_ANAMORPHIC; channel_offset = pitch * (decoder->frame.height/2); } // fields, stacked, etc, only works on full or half res. if (channel_blend_type > BLEND_NONE && channel_blend_type <= BLEND_LINE_INTERLEAVED && decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY) //thumnbail. 
{ channel_decodes = 1; channel_blend_type = BLEND_NONE; channel_swapped_flags = 0; } if (channel_blend_type != BLEND_NONE && (output_format == DECODED_FORMAT_BYR1 || output_format == DECODED_FORMAT_BYR2 || output_format == DECODED_FORMAT_BYR3 || output_format == DECODED_FORMAT_BYR4 )) { channel_decodes = 1; channel_blend_type = BLEND_NONE; channel_swapped_flags = 0; } } } decoder->channel_decodes = channel_decodes; decoder->channel_blend_type = channel_blend_type; decoder->channel_swapped_flags = channel_swapped_flags; } else { decoder->channel_decodes = channel_decodes = 1; decoder->channel_blend_type = BLEND_NONE; decoder->channel_swapped_flags = 0; } if(cfhddata) // So the P-frames can know the bayerformat { //int size = cfhddata->size; size_t size = cfhddata->size; if (size > sizeof(CFHDDATA)) { size = sizeof(CFHDDATA); } memcpy(cfhddata, &decoder->cfhddata, size); } { bool doOrientation = true; bool doFraming = true; bool doBurins = true; bool doImageflips = true; bool doGhostBust = false; bool doPrimaries = true; int process_path_flags = decoder->cfhddata.process_path_flags; int process_path_flags_mask = decoder->cfhddata.process_path_flags_mask; if(decoder->basic_only) { doOrientation = false; doFraming = false; doBurins = false; doImageflips = false; doPrimaries = false; } else { if(decoder->cfhddata.process_path_flags_mask) { //DAN20101007 -- if(process_path_flags == 0) decoder->cfhddata.process_path_flags = process_path_flags = decoder->cfhddata.process_path_flags_mask; process_path_flags &= decoder->cfhddata.process_path_flags_mask; if(process_path_flags_mask & PROCESSING_ACTIVE2) { if(!(process_path_flags_mask & PROCESSING_ORIENTATION)) doOrientation = false; if(!(process_path_flags_mask & PROCESSING_FRAMING)) doFraming = false; if(!(process_path_flags_mask & PROCESSING_BURNINS)) doBurins = false; if(!(process_path_flags_mask & PROCESSING_IMAGEFLIPS)) doImageflips = false; } if(!(process_path_flags_mask & PROCESSING_COLORMATRIX)) doPrimaries = false; } 
if(process_path_flags & PROCESSING_ACTIVE2) { if(!(process_path_flags & PROCESSING_ORIENTATION)) doOrientation = false; if(!(process_path_flags & PROCESSING_FRAMING)) doFraming = false; if(!(process_path_flags & PROCESSING_BURNINS)) doBurins = false; if(!(process_path_flags & PROCESSING_IMAGEFLIPS)) doImageflips = false; if(!(process_path_flags & PROCESSING_COLORMATRIX)) doPrimaries = false; } } if(doOrientation) process_path_flags |= PROCESSING_ORIENTATION; if(doFraming) process_path_flags |= PROCESSING_FRAMING; if(doBurins) process_path_flags |= PROCESSING_BURNINS; if(doImageflips) process_path_flags |= PROCESSING_IMAGEFLIPS; if(doPrimaries) process_path_flags |= PROCESSING_COLORMATRIX; if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST) { if(decoder->ghost_bust_left || decoder->ghost_bust_right) { doGhostBust = true; } } decoder->cfhddata.process_path_flags = process_path_flags; if((!decoder->basic_only && (doOrientation && ( decoder->cfhddata.channel[0].FloatingWindowMaskL || decoder->cfhddata.channel[0].FloatingWindowMaskR || decoder->cfhddata.channel[0].FrameKeyStone || decoder->cfhddata.channel[0].FrameTilt || decoder->cfhddata.channel[0].HorizontalOffset || decoder->cfhddata.channel[0].VerticalOffset || decoder->cfhddata.channel[0].RotationOffset || decoder->cfhddata.channel[1].FloatingWindowMaskL || decoder->cfhddata.channel[1].FloatingWindowMaskR || decoder->cfhddata.channel[1].FrameKeyStone || decoder->cfhddata.channel[1].FrameTilt || decoder->cfhddata.channel[1].HorizontalOffset || decoder->cfhddata.channel[1].VerticalOffset || decoder->cfhddata.channel[1].RotationOffset || decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 || decoder->cfhddata.channel[2].FloatingWindowMaskL || decoder->cfhddata.channel[2].FloatingWindowMaskR || decoder->cfhddata.channel[2].FrameKeyStone || decoder->cfhddata.channel[2].FrameTilt || decoder->cfhddata.channel[2].HorizontalOffset || decoder->cfhddata.channel[2].VerticalOffset 
|| decoder->cfhddata.channel[2].RotationOffset || decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0))) || (doPrimaries && ( decoder->cfhddata.channel[0].user_blur_sharpen != 0.0 || decoder->cfhddata.channel[1].user_blur_sharpen != 0.0 || decoder->cfhddata.channel[2].user_blur_sharpen != 0.0)) || (doFraming && ( decoder->cfhddata.channel[0].user_vignette_start != 0.0 || decoder->cfhddata.channel[1].user_vignette_start != 0.0 || decoder->cfhddata.channel[2].user_vignette_start != 0.0)) || (doFraming && ( memcmp(&decoder->cfhddata.channel[0].FrameMask, &emptyFrameMask, 32) || decoder->cfhddata.FrameOffsetX || decoder->cfhddata.FrameOffsetY || decoder->cfhddata.FrameOffsetR || decoder->cfhddata.FrameHScale != 1.0 || decoder->cfhddata.FrameHDynamic != 1.0 || decoder->cfhddata.channel[1].FrameZoom != 1.0 || decoder->cfhddata.channel[2].FrameZoom != 1.0)) || (doGhostBust && (decoder->channel_blend_type == BLEND_NONE) && (channel_decodes == 2)) || (doImageflips && decoder->cfhddata.channel_flip) || (decoder->preformatted_3D_type == BLEND_STACKED_ANAMORPHIC) || (decoder->preformatted_3D_type == BLEND_SIDEBYSIDE_ANAMORPHIC) || (decoder->channel_blend_type && decoder->frame.resolution == DECODED_RESOLUTION_QUARTER) || // 3D mode generally don't work in quarter res -- this prevents crashes. 
( ((decoder->frame.width+7)/8)*8 != decoder->frame.width || (channel_decodes > 1 && decoder->channel_blend_type != BLEND_NONE) || decoder->sample_uncompressed) || (decoder->cfhddata.doMesh) ) { if( output_format == DECODED_FORMAT_BYR1 || output_format == DECODED_FORMAT_BYR2 || output_format == DECODED_FORMAT_BYR3 || output_format == DECODED_FORMAT_BYR4 ) { // no manipulation should be applied } else { use_local_buffer = true; local_pitch = ((decoder->frame.width+7)/8)*8 * 6; //RGB48 if(decoder->image_dev_only) { decoder->frame.white_point = 13; decoder->frame.format = internal_format = DECODED_FORMAT_WP13; } else if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format)) { decoder->frame.white_point = 13; decoder->frame.format = internal_format = DECODED_FORMAT_W13A; local_pitch = ((decoder->frame.width+7)/8)*8 * 8; } else { decoder->frame.white_point = 13; decoder->frame.format = internal_format = DECODED_FORMAT_WP13; } if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL || decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) { local_pitch *= 2; // need horizontal room to make 3D side by side frame } /* if(output_format == DECODED_FORMAT_WP13 || output_format == DECODED_FORMAT_W13A) { // preserve HDR decoder->frame.format = internal_format = output_format;//DECODED_FORMAT_WP13; // HDR output if(output_format == DECODED_FORMAT_W13A) local_pitch = decoder->frame.width * 8; } else { if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format)) { decoder->frame.format = internal_format = DECODED_FORMAT_RG64; local_pitch = decoder->frame.width * 8; } else { decoder->frame.format = internal_format = DECODED_FORMAT_RG48; } }*/ channel_offset = local_pitch * (decoder->frame.height); } } } if(output_format == DECODED_FORMAT_BYR4 && decoder->cfhddata.encode_curve_preset == 0) { if(decoder->BYR4LinearRestore == NULL) { int j,val; int encode_curve_type = 
decoder->cfhddata.encode_curve >> 16; //int encode_curve_neg = encode_curve_type & CURVE_TYPE_NEGATIVE; float encode_curvebase; if(encode_curve_type) //1 or 2 { if(encode_curve_type & CURVE_TYPE_EXTENDED) encode_curvebase = (float)(decoder->cfhddata.encode_curve & 0xffff); // use all 16-bits for larger log bases else encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff); } else { encode_curve_type = CURVE_TYPE_LOG; encode_curvebase = 90.0; } #if _ALLOCATOR decoder->BYR4LinearRestore = (unsigned short *)AllocAligned(decoder->allocator,16384*2, 16); #else decoder->BYR4LinearRestore = (unsigned short *)MEMORY_ALIGNED_ALLOC(16384*2, 16); #endif for(j=0; j<16384; j++) //0 to 1 { switch(encode_curve_type & CURVE_TYPE_MASK) { case CURVE_TYPE_LOG: val = (int)(CURVE_LOG2LIN((float)j/16384.0f, (float)encode_curvebase) * 65535.0f); break; case CURVE_TYPE_GAMMA: val = (int)(CURVE_GAM2LIN((float)j/16384.0f, (float)encode_curvebase) * 65535.0f); break; case CURVE_TYPE_CINEON: val = (int)(CURVE_CINEON2LIN((float)j/16384.0f, (float)encode_curvebase) * 65535.0f); break; case CURVE_TYPE_CINE985: val = (int)(CURVE_CINE9852LIN((float)j/16384.0f, (float)encode_curvebase) * 65535.0f); break; case CURVE_TYPE_PARA: val = (int)(CURVE_PARA2LIN((float)j/16384.0f, (int)((decoder->cfhddata.encode_curve >> 8) & 0xff), (int)(decoder->cfhddata.encode_curve & 0xff)) * 65535.0f); break; case CURVE_TYPE_CSTYLE: val = (int)(CURVE_CSTYLE2LIN((float)j/16384.0f, (int)((decoder->cfhddata.encode_curve >> 8) & 0xff)) * 65535.0f); break; case CURVE_TYPE_SLOG: val = (int)(CURVE_SLOG2LIN((float)j/16384.0f) * 65535.0f); break; case CURVE_TYPE_LOGC: val = (int)(CURVE_LOGC2LIN((float)j/16384.0f) * 65535.0f); break; case CURVE_TYPE_LINEAR: default: val = j; break; } if(val < 0) val = 0; if(val > 65535) val = 65535; decoder->BYR4LinearRestore[j] = val; } } } //DAN20120319 - removed /*if(decoder->channel_mix_half_res) //decoding half but scaling to 
double the output size { local_pitch *= 2; channel_offset = local_pitch * (decoder->frame.height*2); }*/ if(use_local_buffer == true) // need buffer for anaglyph and other 3D presentation formats { int stereoframesize = channel_offset * channel_decodes/*stacked frames*/; if(decoder->source_channels == 1 && decoder->preformatted_3D_type == BLEND_NONE) stereoframesize = channel_offset; if(channel_decodes == 1 && decoder->preformatted_3D_type != BLEND_NONE) stereoframesize = channel_offset * 2; if(channel_decodes == 2 && decoder->source_channels == 1 && decoder->channel_blend_type != BLEND_NONE) stereoframesize = channel_offset * 2; if(decoder->StereoBuffer==NULL || decoder->StereoBufferSize < stereoframesize) { #if _ALLOCATOR if(decoder->StereoBuffer) { FreeAligned(decoder->allocator, decoder->StereoBuffer); decoder->StereoBuffer = NULL; } decoder->StereoBuffer = (PIXEL16U *)AllocAligned(decoder->allocator, stereoframesize+256, 16); //DAN20130517 add 256, as 2.7K half we are write off the buffers end for zoom, don't know why yet. #else if(decoder->StereoBuffer) { MEMORY_ALIGNED_FREE(decoder->StereoBuffer); decoder->StereoBuffer = NULL; } decoder->StereoBuffer = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(stereoframesize+256, 16); //DAN20130517 add 256, as 2.7K half we are write off the buffers end for zoom, don't know why yet. #endif assert(decoder->StereoBuffer != NULL); if (! (decoder->StereoBuffer != NULL)) { return CODEC_ERROR_MEMORY_ALLOC; } decoder->StereoBufferSize = stereoframesize; } decoder->StereoBufferFormat = internal_format; local_buffer = (uint8_t *)decoder->StereoBuffer; local_output = local_buffer; } DecodeEntropyInit(decoder); //swapped -- Maybe useful for double height decodes. /* if(channel_decodes == 2 && channel_swapped_flags & FLAG3D_SWAPPED) { local_output += channel_offset; channel_offset = -channel_offset; }*/ decoder->use_local_buffer = use_local_buffer ? 
1 : 0; if(channel_decodes == 2 && decoder->parallelDecoder == NULL && decoder->source_channels > 1) { int encoded_width = decoder->frame.width; int encoded_height = decoder->frame.height; if (decoder->frame.resolution == DECODED_RESOLUTION_HALF) { // Compute the encoded dimensions from the frame dimensions encoded_width *= 2; encoded_height *= 2; } else if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER) { // Compute the encoded dimensions from the frame dimensions encoded_width *= 4; encoded_height *= 4; } else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) { // Compute the encoded dimensions from the frame dimensions encoded_width *= 2; } else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_VERTICAL) { // Compute the encoded dimensions from the frame dimensions encoded_height *= 2; } #if _ALLOCATOR decoder->parallelDecoder = (DECODER *)Alloc(decoder->allocator, sizeof(DECODER)); if(decoder->parallelDecoder) { memset(decoder->parallelDecoder, 0, sizeof(DECODER)); DecodeInit(decoder->allocator, decoder->parallelDecoder, encoded_width, encoded_height, internal_format, DECODED_RESOLUTION_FULL, NULL); } #else decoder->parallelDecoder = (DECODER *)MEMORY_ALLOC(sizeof(DECODER)); if(decoder->parallelDecoder) { memset(decoder->parallelDecoder, 0, sizeof(DECODER)); decoder->parallelDecoder->thread_cntrl = decoder->thread_cntrl; DecodeInit(decoder->parallelDecoder, encoded_width, encoded_height, internal_format, DECODED_RESOLUTION_FULL, NULL); } #endif } // Using the parallel decoder? 
if (decoder->parallelDecoder) { // Initialize the parallel decoder with parameters from the regular decoder memcpy(&decoder->parallelDecoder->cfhddata, &decoder->cfhddata, sizeof(CFHDDATA)); memcpy(decoder->parallelDecoder->licensekey,decoder->licensekey, 16); DecodeEntropyInit(decoder->parallelDecoder); DecodeOverrides(decoder->parallelDecoder, decoder->overrideData, decoder->overrideSize); decoder->parallelDecoder->channel_decodes = decoder->channel_decodes; decoder->parallelDecoder->channel_blend_type = decoder->channel_blend_type; decoder->parallelDecoder->flags = decoder->flags; decoder->parallelDecoder->frame = decoder->frame; decoder->parallelDecoder->use_local_buffer = use_local_buffer ? 1 : 0; decoder->parallelDecoder->codec.encoded_format = decoder->codec.encoded_format; if(decoder->parallelDecoder->decoder_thread.pool.thread_count == 0) { CreateLock(&decoder->parallelDecoder->decoder_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->parallelDecoder->decoder_thread.pool, 1, // ParallelThreadProc, decoder->parallelDecoder); } } if(channel_decodes == 2 && decoder->real_channels > 1 && decoder->parallelDecoder && decoder->parallelDecoder->decoder_thread.pool.thread_count) { // Second stream as a thread. BITSTREAM second_input = *input; if(decoder->cfhddata.FramingFlags & 2 && decoder->source_channels >= 2) // channel swap { BITSTREAM leftEye_input = *input; SkipVideoChannel(decoder, &leftEye_input, 2); // 3D work *input = leftEye_input; SkipVideoChannel(decoder, &second_input, 1); // 3D work } else SkipVideoChannel(decoder, &second_input, 2); // 3D work decoder->channel_current = 0; decoder->parallelDecoder->channel_current = 1; // Instead of reading the metadata databases again, use the ones in the main decoder OverrideCFHDDATAUsingParent(decoder->parallelDecoder, decoder, input->lpCurrentBuffer, input->nWordsUsed); // DAN20110404 Use left (first) eye metadata for both eyes (just in case right GUID is bad.) 
// OverrideCFHDDATA(decoder->parallelDecoder, input->lpCurrentBuffer, input->nWordsUsed); //OverrideCFHDDATA(decoder->parallelDecoder, second_input.lpCurrentWord, second_input.nWordsUsed); // Hack, this gets lost decoder->parallelDecoder->cfhddata.split_CC_position = decoder->cfhddata.split_CC_position; #if (_THREADED && _GRAPHICS) if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output) { if(decoder->cfhddata.BurninFlags & 3) // overlays / tools { DrawStartThreaded(decoder); } } #endif // Post a message to the mailbox decoder->parallelDecoder->decoder_thread.input = &second_input; if(use_local_buffer == false && (decoder->frame.format == DECODED_FORMAT_RGB32 || decoder->frame.format == DECODED_FORMAT_RGB24)) { decoder->parallelDecoder->decoder_thread.output = local_output; local_output += channel_offset; } else { decoder->parallelDecoder->decoder_thread.output = local_output + channel_offset; } decoder->parallelDecoder->decoder_thread.pitch = local_pitch; decoder->parallelDecoder->decoder_thread.colorparams = colorparams; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->parallelDecoder->decoder_thread.pool, 1); // Start the transform worker threads ThreadPoolSendMessage(&decoder->parallelDecoder->decoder_thread.pool, THREAD_MESSAGE_START); // do the first channel { TAGVALUE segment; int sample_type; #if _THREADED decoder->entropy_worker_new.next_queue_num = 0; decoder->entropy_worker_new.threads_used = 0; #endif // Get the type of sample segment = GetTagValue(input); //assert(segment.tuple.tag == CODEC_TAG_SAMPLE); if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) { decoder->error = CODEC_ERROR_BITSTREAM; STOP(tk_decompress); return false; } sample_type = segment.tuple.value; switch (sample_type) { case SAMPLE_TYPE_GROUP: // Group of frames (decode the first frame) result = DecodeSampleGroup(decoder, input, local_output, local_pitch, colorparams); break; case SAMPLE_TYPE_FRAME: // Decode the second or 
later frame in a group result = DecodeSampleFrame(decoder, input, local_output, local_pitch, colorparams); break; case SAMPLE_TYPE_IFRAME: // Decode a sample that represents an isolated frame result = DecodeSampleIntraFrame(decoder, input, local_output, local_pitch, colorparams); break; case SAMPLE_TYPE_SEQUENCE_HEADER: // The video sequence header is ignored result = true; break; default: // Need to fill the output frame //error = CODEC_ERROR_SAMPLE_TYPE; result = false; } } // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->parallelDecoder->decoder_thread.pool); } else { while(channel_decodes > 0) { TAGVALUE segment; int sample_type; local_decoder->channel_current = channel_current++; //OverrideCFHDDATA(local_decoder, input->lpCurrentBuffer, input->nWordsUsed); #if (_THREADED && _GRAPHICS) if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output) { if(decoder->cfhddata.BurninFlags & 3) //overlays / tools { DrawStartThreaded(decoder); } } #endif #if _THREADED local_decoder->entropy_worker_new.next_queue_num = 0; local_decoder->entropy_worker_new.threads_used = 0; #endif if(decoder->image_dev_only) { result = DecodeSampleIntraFrame(local_decoder, input, local_output, local_pitch, colorparams); } else { // Get the type of sample segment = GetTagValue(input); //assert(segment.tuple.tag == CODEC_TAG_SAMPLE); if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) { local_decoder->error = CODEC_ERROR_BITSTREAM; STOP(tk_decompress); return false; } sample_type = segment.tuple.value; switch (sample_type) { case SAMPLE_TYPE_GROUP: // Group of frames (decode the first frame) result = DecodeSampleGroup(local_decoder, input, local_output, local_pitch, colorparams); break; case SAMPLE_TYPE_FRAME: // Decode the second or later frame in a group result = DecodeSampleFrame(local_decoder, input, local_output, local_pitch, colorparams); break; case SAMPLE_TYPE_IFRAME: // Decode a sample that represents an isolated frame result = 
DecodeSampleIntraFrame(local_decoder, input, local_output, local_pitch, colorparams); break; case SAMPLE_TYPE_SEQUENCE_HEADER: // The video sequence header is ignored result = true; break; default: // Need to fill the output frame //error = CODEC_ERROR_SAMPLE_TYPE; result = false; } } if(ConvertPreformatted3D(decoder, use_local_buffer, internal_format, channel_mask, local_output, local_pitch, &channel_offset)) { channel_decodes = 0; } else { channel_decodes--; local_output += channel_offset; if(decoder->parallelDecoder) { local_decoder = decoder->parallelDecoder; } } } } if(use_local_buffer && output) { decoder->use_local_buffer = 0; #if WARPSTUFF WarpFrame(decoder, local_buffer, local_pitch, decoder->StereoBufferFormat); MaskFrame(decoder, local_buffer, local_pitch, decoder->StereoBufferFormat); #endif ConvertLocalToOutput(decoder, output, pitch, output_format, local_buffer, local_pitch, abs(channel_offset)); } else { #if WARPSTUFF WarpFrame(decoder, output, pitch, output_format); MaskFrame(decoder, output, pitch, output_format); #endif } if(decoder->channel_mix_half_res) //HACK { decoder->frame.resolution = DECODED_RESOLUTION_FULL; decoder->frame.width *= 2; decoder->frame.height *= 2; decoder->channel_mix_half_res = 0; } if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) //HACK { decoder->frame.resolution = DECODED_RESOLUTION_FULL; decoder->frame.width *= 2; } if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) //HACK { decoder->frame.resolution = DECODED_RESOLUTION_FULL; } #if _GRAPHICS if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output) { PaintFrame(decoder, output, pitch, output_format); } #endif STOP(tk_decompress); // Return indication of whether decoding succeeded or failed return result; } // Decode a sample that encoded a group of frames (return the first frame) bool DecodeSampleGroup(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams) { CODEC_ERROR error 
= CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif CODEC_STATE *codec = &decoder->codec; int32_t frame_size = decoder->frame.height * pitch; int resolution = decoder->frame.resolution; bool result = true; static int subband_wavelet_index[] = {5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 1, 1, 1, 0, 0, 0}; static int subband_band_index[] = {0, 1, 2, 3, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3}; int num_subbands = sizeof(subband_wavelet_index)/sizeof(subband_wavelet_index[0]); #if (0 && DEBUG) // Force quarter resolution decoding for debug that feature resolution = DECODED_RESOLUTION_QUARTER; #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoding sample group\n"); } #endif START(tk_decoding); // Initialize the codec state InitCodecState(&decoder->codec); // Allocate the transform data structure for the group of frames AllocDecoderGroup(decoder); // Initialize the tables for decoding the wavelet transforms InitWaveletDecoding(decoder, subband_wavelet_index, subband_band_index, num_subbands); // Clear the flags in the wavelet transforms ClearTransformFlags(decoder); // Process the tag value pairs until an encoded subband is found for (;;) { TAGVALUE segment; // Read the next tag value pair from the bitstream //segment = GetTagValue(input); segment = GetSegment(input); assert(input->error == BITSTREAM_ERROR_OKAY); if (input->error != BITSTREAM_ERROR_OKAY) { decoder->error = CODEC_ERROR_BITSTREAM; result = false; break; } // Update the codec state with the information in the tag value pair { TAGWORD tag = segment.tuple.tag; TAGWORD value = segment.tuple.value; // Use the tag value pair to update the codec state error = UpdateCodecState(decoder, input, codec, tag, value); //assert(error == CODEC_ERROR_OKAY); if (error != CODEC_ERROR_OKAY) { decoder->error = error; result = false; break; //NOTE: Consider moving the error code into the codec state } } // Check whether the group has been decoded if (codec->sample_done) break; // Skip the rest of the 
current channel? if (CanSkipChannel(decoder, resolution)) { if(codec->channel == 3 && (decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY)) { int channel = codec->channel; uint32_t channel_size = codec->channel_size[channel]; uint8_t *position = codec->channel_position + channel_size; // Advance the bitstream to the next channel SetBitstreamPosition(input, position); // Reset the decoded subband flags (otherwise this code will be executed again) codec->decoded_subband_flags = 0; codec->num_channels = 3; goto decoding_complete; } else if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY) { int channel = codec->channel; uint32_t channel_size = codec->channel_size[channel]; uint8_t *position = codec->channel_position + channel_size; // Advance the bitstream to the next channel SetBitstreamPosition(input, position); // Reset the decoded subband flags (otherwise this code will be executed again) codec->decoded_subband_flags = 0; } else { // Compute the bitstream position after the current channel int channel = codec->channel; uint32_t channel_size = codec->channel_size[channel]; uint8_t* position = codec->channel_position + channel_size; // Get the temporal wavelet int temporal_index = 2; TRANSFORM* transform = decoder->transform[channel]; IMAGE* wavelet = transform->wavelet[temporal_index]; if (wavelet == NULL) { decoder->error = CODEC_ERROR_BAD_FRAME; result = false; break; } #if (0 && DEBUG) if (IsBandValid(wavelet, HIGHPASS_BAND)) { int static count = 0; if (count < 20) { char label[_MAX_PATH]; sprintf(label, "Temporal-decode-%d-", count); DumpBandPGM(label, wavelet, HIGHPASS_BAND, NULL); } count++; } #endif #if _THREADED_DECODER // Ready to invert this wavelet to get the lowpass band in the lower wavelet? //if (DecodedBandsValid(wavelet, temporal_index)) if (resolution != DECODED_RESOLUTION_QUARTER || (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)) #else // Have all bands in the temporal wavelet been decoded? 
//if (wavelet && BANDS_ALL_VALID(wavelet)) if (AllBandsValid(wavelet)) #endif { //PIXEL *buffer = (PIXEL *)decoder->buffer; //size_t buffer_size = decoder->buffer_size; int precision = codec->precision; #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Reconstructing the lowpass bands in the first level wavelets\n"); } #endif #if _THREADED_DECODER // Add the temporal inverse transform to the processing queue if(decoder->entropy_worker_new.pool.thread_count) { ReconstructWaveletBand(decoder, transform, channel, wavelet, temporal_index, precision, &decoder->scratch, 1); QueueThreadedTransform(decoder, channel, temporal_index); } else #endif { // Reconstruct the lowpass bands in the first level wavelets //ReconstructWaveletBand(transform, channel, wavelet, temporal_index, precision, buffer, buffer_size); ReconstructWaveletBand(decoder, transform, channel, wavelet, temporal_index, precision, &decoder->scratch, 0 ); } // Advance the bitstream to the next channel SetBitstreamPosition(input, position); // Reset the decoded subband flags (otherwise this code will be executed again) codec->decoded_subband_flags = 0; // Note that the subband flags are also reset when the channel header is decoded } // Was the wavelet created? else if (wavelet == NULL) { // The temporal wavelet is not created during quarter resolution decoding // Advance the bitstream to the next channel SetBitstreamPosition(input, position); // Reset the decoded subband flags (otherwise this code will be executed again) codec->decoded_subband_flags = 0; } //TODO: Improve quarter resolution decoding so that the wavelet is created? 
} } } decoding_complete: STOP(tk_decoding); #if (0 && DEBUG) if (logfile) { char label[_MAX_PATH]; int channel; for (channel = 0; channel < codec->num_channels; channel++) { TRANSFORM *transform = decoder->transform[channel]; IMAGE *wavelet = transform->wavelet[2]; uint8_t *data = (uint8_t *)wavelet->band[HIGHPASS_BAND]; int height = wavelet->height; int pitch = wavelet->pitch; int size = height * pitch; int band; for (band = 0; band < wavelet->num_bands; band++) { sprintf(label, "Temporal channel: %d, band: %d", channel, band); DumpBandStatistics(label, wavelet, band, logfile); #if 0 sprintf(label, "Temporal-channel%d-band%d-", channel, band); DumpBandPGM(label, wavelet, band, NULL); #endif } assert(size > 0); ZeroMemory(data, size); } } #endif if (result) { // Two frames have been decoded decoder->gop_length = 2; decoder->frame_count += 2; #if (1 && DEBUG) if (logfile) { fprintf(logfile, "DecodeSampleGroup, decoder: 0x%p, GOP length: %d\n", decoder, decoder->gop_length); } #endif // Return the first frame in the group if (!decoder->no_output) { #if 0 // Decoding to quarter frame resolution at full frame rate? 
if (resolution == DECODED_RESOLUTION_QUARTER)
        {
            // NOTE(review): this branch is inside the #if 0 opened above, so it is compiled out
            int num_channels = codec->num_channels;
            FRAME_INFO *info = &decoder->frame;
            char *buffer = decoder->buffer;
            size_t buffer_size = decoder->buffer_size;
            uint8_t *frame1 = output;
            uint8_t *frame2 = decoder->output2;
            assert(frame2 != NULL);

            // Reconstruct two frames at quarter resolution
            ReconstructQuarterFrame(decoder, num_channels, frame1, frame2, pitch, info, buffer, buffer_size);
        }
        else
#endif
        // Finish computing the output frame
        ReconstructSampleFrameToBuffer(decoder, 0, output, pitch);
    }

    if (decoder->error != CODEC_ERROR_OKAY) {
        result = false;
    }

#if TIMING
    // Increment the count of bytes that have been decoded
    decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
}

if (!result)
{
    // Check that the frame can be cleared
    assert(frame_size > 0);
    if (frame_size > 0)
    {
        // Zero the frame so the caller never sees stale pixel data on failure
        memset(output, 0, frame_size);
    }
}

return result;
}

// Decode a sample that represents the second frame in a group
//
// Parses tag/value segments from the bitstream, updating the codec state for
// each pair, until the frame index tag (CODEC_TAG_FRAME_INDEX) terminates the
// frame header.  The pixel data itself was decoded with the group, so this
// routine only reconstructs the requested frame (second frame if the GOP has
// two frames, otherwise the first) into the caller's output buffer.  On any
// bitstream or codec-state error the output frame is zeroed and false is
// returned, with the error code recorded in decoder->error.
// NOTE(review): colorparams is not referenced in this body.
bool DecodeSampleFrame(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    int32_t frame_size = decoder->frame.height * pitch;
    bool result = true;

    START(tk_decoding);

    // Decode the tag value pairs in the frame sample
    for (;;)
    {
        TAGWORD tag;
        TAGWORD value;

        // Read the next tag value pair from the bitstream
        //TAGVALUE segment = GetTagValue(input);
        TAGVALUE segment = GetSegment(input);
        //assert(input->error == BITSTREAM_ERROR_OKAY);
        if (input->error != BITSTREAM_ERROR_OKAY) {
            decoder->error = CODEC_ERROR_BITSTREAM;
            result = false;
            break;
        }

        // Update the codec state with the information in the tag value pair
        tag = segment.tuple.tag;
        value = segment.tuple.value;

        // Use the tag value pair to update the codec state
        error = UpdateCodecState(decoder, input, codec, tag, value);
        assert(error == CODEC_ERROR_OKAY);
        if (error != CODEC_ERROR_OKAY) {
            decoder->error = error;
            result = false;
            break;
        }

        // End of the frame header?
        if (tag == CODEC_TAG_FRAME_INDEX) break;
    }

    STOP(tk_decoding);

#if (1 && DEBUG)
    if (logfile) {
        fprintf(logfile, "DecodeSampleFrame, decoder: 0x%p, GOP length: %d\n", decoder, decoder->gop_length);
    }
#endif

    if (result)
    {
        // Return the second frame in the group
        // assert(decoder->gop_length >= 2);
        if (decoder->gop_length >= 2)
        {
            int frame_index = 1;

            // Display the second frame in the group
            ReconstructSampleFrameToBuffer(decoder, frame_index, output, pitch);
            if (decoder->error != CODEC_ERROR_OKAY) {
                result = false;
            }
        }
        else if (decoder->gop_length > 0)
        {
            int frame_index = 0;

            // Fall back to the first frame when the group holds only one frame
            // Display the first frame in the group
            ReconstructSampleFrameToBuffer(decoder, frame_index, output, pitch);
            if (decoder->error != CODEC_ERROR_OKAY) {
                result = false;
            }
        }

#if TIMING
        // Increment the count of bytes that have been decoded
        decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
    }

    if (!result)
    {
        // Frame type that is not handled

        // Check that the frame can be cleared
        assert(frame_size > 0);
        if (frame_size > 0)
        {
            // Zero the frame
            memset(output, 0, frame_size);
        }
    }

    return result;
}

// Decode a sample that encodes an intra frame
//
// Intra (I-frame) samples use a three-level spatial pyramid, so the
// subband-to-wavelet lookup tables differ from the group (GOP) decoder.
// The routine walks tag/value segments, decoding subbands as they appear,
// and skips whole channels when CanSkipChannel says their data is not
// needed for the requested resolution.  On success exactly one frame is
// reconstructed into the output buffer; on failure the frame is zeroed.
// NOTE(review): colorparams is not referenced in this body.
bool DecodeSampleIntraFrame(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
    CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    int32_t frame_size = decoder->frame.height * pitch;
    int resolution = decoder->frame.resolution;
    bool result = true;
    int skipchan = 0;       // guards against an endless channel-skip loop (see below)

    // Map each subband to the wavelet that holds it and to the band within that wavelet
    static int subband_wavelet_index[] = {2, 2, 2, 2, 1, 1, 1, 0, 0, 0};
    static int subband_band_index[] = {0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3};
    int num_subbands = sizeof(subband_wavelet_index)/sizeof(subband_wavelet_index[0]);

    START(tk_decoding);

    if(decoder->image_dev_only) goto decoding_completeI;

    // Initialize the codec state
    InitCodecState(&decoder->codec);

    // Allocate the transform data structure for the group of frames
    AllocDecoderGroup(decoder);

    // Initialize the tables for decoding the wavelet transforms
    InitWaveletDecoding(decoder, subband_wavelet_index, subband_band_index, num_subbands);

    // Clear the flags in the wavelet transforms
    ClearTransformFlags(decoder);

    //Force V210 output for debugging ***DEBUG***
    //decoder->frame.format = DECODED_FORMAT_V210;

    // Process the tag value pairs until an encoded subband is found
    for (;;)
    {
        TAGVALUE segment;

        // Read the next tag value pair from the bitstream
        segment = GetSegment(input);
        //assert(input->error == BITSTREAM_ERROR_OKAY);
        if (input->error != BITSTREAM_ERROR_OKAY) {
            decoder->error = CODEC_ERROR_BITSTREAM;
            result = false;
            break;
        }

        {
            TAGWORD tag = segment.tuple.tag;
            TAGWORD value = segment.tuple.value;

            // Use the tag value pair to update the codec state
            error = UpdateCodecState(decoder, input, codec, tag, value);
            //assert(error == CODEC_ERROR_OKAY);
            if (error != CODEC_ERROR_OKAY) {
                decoder->error = error;
                result = false;
                break;
                //NOTE: Consider moving the error code into the codec state
            }
        }

        // Check whether the group has been decoded
        if (codec->sample_done) {
            break;
        }

        // Skip the rest of the current channel?
        if (CanSkipChannel(decoder, resolution))
        {
            skipchan++;
            if(skipchan > 5)
            {
                // More skips than channels can exist: the bitstream is corrupt
                decoder->error = CODEC_ERROR_BAD_FRAME;
                result = false;
                break;
            }
            if(codec->channel == 3 && (decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY))
            {
                // Alpha channel is not needed for YUV 4:2:2 output: stop after three channels
                int channel = codec->channel;
                uint32_t channel_size = codec->channel_size[channel];
                uint8_t *position = codec->channel_position + channel_size;

                // Advance the bitstream to the next channel
                SetBitstreamPosition(input, position);

                // Reset the decoded subband flags (otherwise this code will be executed again)
                codec->decoded_subband_flags = 0;

                codec->num_channels = 3;
                goto decoding_completeI;
            }
            else if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
            {
                int channel = codec->channel;
                uint32_t channel_size = codec->channel_size[channel];
                uint8_t *position = codec->channel_position + channel_size;

                // Advance the bitstream to the next channel
                SetBitstreamPosition(input, position);

                // Reset the decoded subband flags (otherwise this code will be executed again)
                codec->decoded_subband_flags = 0;
            }
            else
            {
                // Compute the bitstream position after the current channel
                int channel = codec->channel;
                uint32_t channel_size = codec->channel_size[channel];
                if (channel_size == 0)
                {
                    decoder->error = CODEC_ERROR_BAD_FRAME;
                    result = false;
                    break;
                }
                uint8_t *position = codec->channel_position + channel_size;

                // Get the highest wavelet in the pyramid
                int wavelet_index = 2;
                TRANSFORM *transform = decoder->transform[channel];
                IMAGE *wavelet = transform->wavelet[wavelet_index];
                if (wavelet == NULL)
                {
                    decoder->error = CODEC_ERROR_BAD_FRAME;
                    result = false;
                    break;
                }

#if _THREADED_DECODER
                // Ready to invert this wavelet to get the lowpass band in the lower wavelet?
                //if (DecodedBandsValid(wavelet, temporal_index))
                if (resolution != DECODED_RESOLUTION_QUARTER || (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER))
#else
                // Have all bands in the wavelet been decoded?
                if (AllBandsValid(wavelet))
#endif
                {
                    //PIXEL *buffer = (PIXEL *)decoder->buffer;
                    //size_t buffer_size = decoder->buffer_size;
                    int precision = codec->precision;

#if (0 && DEBUG)
                    if (logfile) {
                        char label[_MAX_PATH];
                        int band;
                        sprintf(label, "Channel: %d, index: %d", channel, wavelet_index);
                        DumpImageStatistics(label, wavelet, logfile);
#if 1
                        for (band = 1; band < wavelet->num_bands; band++) {
                            sprintf(label, "Channel: %d, index: %d, band: %d", channel, wavelet_index, band);
                            DumpBandStatistics(label, wavelet, band, logfile);
                        }
#endif
                    }
#endif

#if (0 & DEBUG)
                    if (logfile) {
                        fprintf(logfile, "Reconstructing the lowpass bands in the first level wavelets\n");
                    }
#endif

#if _THREADED_DECODER
                    // Add the inverse spatial transform to the processing queue
                    if(decoder->entropy_worker_new.pool.thread_count)
                    {
                        ReconstructWaveletBand(decoder, transform, channel, wavelet, wavelet_index, precision, &decoder->scratch, 1);
                        QueueThreadedTransform(decoder, channel, wavelet_index);
                    }
                    else
#endif
                    {
                        // Reconstruct the lowpass bands in the first level wavelets
                        //ReconstructWaveletBand(transform, channel, wavelet, temporal_index, precision, buffer, buffer_size);
                        ReconstructWaveletBand(decoder, transform, channel, wavelet, wavelet_index, precision, &decoder->scratch, 0);
                    }

                    // Advance the bitstream to the next channel
                    SetBitstreamPosition(input, position);

                    // Reset the decoded subband flags (otherwise this code will be executed again)
                    codec->decoded_subband_flags = 0;

                    // Note that the subband flags are also reset when the channel header is decoded
                }
                // Was the wavelet created?
                //else if (wavelet == NULL)
                else
                {
                    // The wavelet may not have been created during quarter resolution decoding

                    // The wavelet should have been created if all bands are valid
                    assert(wavelet != NULL);

                    // Advance the bitstream to the next channel
                    SetBitstreamPosition(input, position);

                    // Reset the decoded subband flags (otherwise this code will be executed again)
                    codec->decoded_subband_flags = 0;
                }

                //TODO: Improve quarter resolution decoding so that the wavelet is created?
            }
        }
    }

decoding_completeI:
    STOP(tk_decoding);

    if (result)
    {
        // One frame has been decoded
        decoder->gop_length = 1;
        decoder->frame_count += 1;

#if (0 && DEBUG)
        if (logfile) {
            fprintf(logfile, "DecodeSampleIntraFrame, decoder: 0x%p, GOP length: %d\n", decoder, decoder->gop_length);
        }
#endif

        // Return the first frame (the only frame that was decoded)
        if (!decoder->no_output)
        {
            int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;
            if ( !uncompressed && resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
            {
                //CODEC_STATE *codec = &decoder->codec;
                TRANSFORM **transform_array = decoder->transform;
                int num_channels = codec->num_channels;
                //int progressive = codec->progressive;
                FRAME_INFO *info = &decoder->frame;
                int precision = codec->precision;

#if _THREADED_DECODER
                // Wait until the transform thread has finished all pending transforms
                WaitForTransformThread(decoder);
#endif

                ConvertQuarterFrameToBuffer(decoder, transform_array, num_channels, output, pitch, info, precision);
            }
            else
            {
                // Finish computing the output frame
                ReconstructSampleFrameToBuffer(decoder, 0, output, pitch);
            }
        }

        if (decoder->error != CODEC_ERROR_OKAY) {
            result = false;
        }

#if TIMING
        // Increment the count of bytes that have been decoded
        decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
    }

    if (!result)
    {
        // Check that the frame can be cleared
        assert(frame_size > 0);
        if (frame_size > 0)
        {
            // Zero the frame
            memset(output, 0,
frame_size);
        }
    }

    return result;
}

// Decode a sample channel header
//
// Called when a channel header tag is found in the bitstream.  Advances the
// codec state to the next channel: decodes the remainder of the header,
// verifies the channel number matches (the decoder cannot skip channels),
// seeds the next channel's transform from the previous one, and resets the
// per-channel subband counter and decoded-subband flags.  Returns false with
// decoder->error set on a header or channel-number mismatch.
bool DecodeSampleChannelHeader(DECODER *decoder, BITSTREAM *input)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_ERROR error = CODEC_ERROR_OKAY;
    CODEC_STATE *codec = &decoder->codec;
    int channel = codec->channel;
    CHANNEL_HEADER header;
    TRANSFORM *transform = decoder->transform[channel];
    TRANSFORM *next_transform;

    // Advance to the next channel
    channel++;

    // Get the next transform for decoded information
    //TRANSFORM *next_transform = AllocGroupTransform(group, channel);

    // Decode the rest of the channel header
    error = DecodeChannelHeader(input, &header, SAMPLE_TYPE_CHANNEL);
    //assert(error == CODEC_ERROR_OKAY);
    decoder->error = error;
    if (error != CODEC_ERROR_OKAY)
        return false;

    // The decoder is not able to skip channels
    //assert(header.channel == channel);
    if (header.channel != channel)
    {
        decoder->error = CODEC_ERROR_BAD_FRAME;
        return false;
    }

    // Initialize the next transform using the previous one
    next_transform = decoder->transform[channel];
    InitChannelTransform(next_transform, transform);

    // Update the channel
    codec->channel = channel;

    // Reset the subband counter
    codec->band.subband = 0;

    // Reset the decoded subband flags
    codec->decoded_subband_flags = 0;

    // Loop back to decode the next channel
    //transform = next_transform;

    return true;
}

// Decode the coefficients in a subband
//
// Dispatches on the subband number: 0 is the lowpass band, 1..CODEC_MAX_SUBBANDS-1
// are highpass bands, and 255 is the empty (temporal highpass) band.  The target
// wavelet is (re)allocated as needed, the band is decoded, and when all bands of
// a wavelet are available the inverse transform is applied (or queued on the
// transform worker when threaded decoding is enabled).
bool DecodeSampleSubband(DECODER *decoder, BITSTREAM *input, int subband)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    int channel = codec->channel;
    TRANSFORM *transform = decoder->transform[channel];
    int *subband_wavelet_index = decoder->subband_wavelet_index;

    // Used for quarter resolution and threaded decoding
    int transform_type = transform->type;

    // Wavelet parameters
    int width;
    int height;
    int level;
    int type;
    int band;
    int threading = 1;

    // Wavelet containing the band to decode
    int index;
    IMAGE *wavelet = NULL;

    bool result;

    // NOTE(review): subbands 7-10 of a fieldplus transform are decoded without
    // threading — presumably to keep their transform ordering deterministic; confirm
    if(subband >= 7 &&
subband <= 10 && transform_type == TRANSFORM_TYPE_FIELDPLUS)
    threading = 0;

// Update the transform data structure from the codec state
UpdateCodecTransform(transform, codec);

// Is this an empty band?
if (subband == 255)
{
    // Decode an empty band

    // This wavelet is the temporal wavelet
    index = 2;
    wavelet = transform->wavelet[index];

    // Get the wavelet parameters decoded from the bitstream
    width = codec->band.width;
    height = codec->band.height;
    level = codec->highpass.wavelet_level;
    type = codec->highpass.wavelet_type;
    band = codec->band.number;

    // The empty band should be the highpass band in a temporal wavelet
    //assert(type == WAVELET_TYPE_TEMPORAL && band == 1);
    if (!(type == WAVELET_TYPE_TEMPORAL && band == 1))
    {
        decoder->error = CODEC_ERROR_BAD_FRAME;
        return false;
    }

#if _THREADED_DECODER
    // Allocate (or reallocate) the wavelet with thread safety
    wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
    // Allocate (or reallocate) the wavelet
#if _ALLOCATOR
    wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
    wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
    // Save this wavelet in the transform data structure
    transform->wavelet[index] = wavelet;
#endif

    // Set the wavelet parameters
    wavelet->pixel_type[band] = PIXEL_TYPE_16S;
    wavelet->num_bands = 2;

    result = DecodeSampleEmptyBand(decoder, input, wavelet, band);

    // Set the subband number for the next band expected in the bitstream
    codec->band.subband = 11;
}
// Is this a highpass band?
else if (subband > 0 && subband < CODEC_MAX_SUBBANDS)
{
    // Decode a highpass band

    // Get the wavelet that contains this subband
    index = subband_wavelet_index[subband];
    wavelet = transform->wavelet[index];

    // Get the wavelet parameters decoded from the bitstream
    width = codec->band.width;
    height = codec->band.height;
    level = codec->highpass.wavelet_level;
    type = codec->highpass.wavelet_type;
    band = codec->band.number;

#if _THREADED_DECODER
    // Allocate (or reallocate) the wavelet with thread safety
    wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
    // Allocate (or reallocate) the wavelet
#if _ALLOCATOR
    wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
    wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
    // Save this wavelet in the transform data structure
    transform->wavelet[index] = wavelet;
#endif

    result = DecodeSampleHighPassBand(decoder, input, wavelet, band, threading);
    if (result) {
        // Call thread safe routine to update the band valid flags
        UpdateWaveletBandStartedFlags(decoder, wavelet, band);
    }

    // Reset the default encoding method
    codec->band.encoding = BAND_ENCODING_RUNLENGTHS;

    // Set the subband number for the next band expected in the bitstream
    codec->band.subband = subband + 1;
}
else
{
    // Decode a lowpass band

    // Get the wavelet that contains this subband
    index = subband_wavelet_index[0];
    wavelet = transform->wavelet[index];

    // Get the wavelet parameters decoded from the bitstream
    width = codec->lowpass.width;
    height = codec->lowpass.height;
    level = codec->lowpass.level;
    type = codec->first_wavelet;
    //band = codec->band.number;
    band = 0;

#if _THREADED_DECODER
    // Allocate (or reallocate) the wavelet with thread safety
    wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
    // Allocate (or reallocate) the wavelet
#if _ALLOCATOR
    wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
    wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
    // Save this wavelet in the transform data structure
    transform->wavelet[index] = wavelet;
#endif

    // The lowpass data is always stored in wavelet band zero
    assert(band == 0);

    // The lowpass band must be subband zero
    assert(subband == 0);

    result = DecodeSampleLowPassBand(decoder, input, wavelet);
    if (result) {
        // Call thread safe routine to update the band valid flags
        UpdateWaveletBandValidFlags(decoder, wavelet, band);
    }

    // Set the subband number for the next band expected in the bitstream
    codec->band.subband = subband + 1;
}

// Was the subband successfully decoded?
if (result)
{
    // The transform will set the band valid flag if this is the temporal wavelet
    //if (index != 2)

    // Record that this subband has been decoded successfully
    if (0 <= subband && subband <= CODEC_MAX_SUBBAND)
        codec->decoded_subband_flags |= DECODED_SUBBAND_MASK(subband);

#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "Decoded subband: %d, wavelet: %d, channel: %d\n", subband, index, channel);
    }
#endif
}

#if _THREADED_DECODER
// Ready to queue a threaded transform to invert this wavelet?
if (BANDS_ALL_STARTED(wavelet))
{
    // Are frames being decoded to quarter resolution?
    if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
    {
        // Smallest spatial wavelet above the lowpass temporal band (fieldplus transform)
        int highest_index = 5;

        if (transform_type == TRANSFORM_TYPE_SPATIAL)
        {
            // Smallest wavelet in the spatial transform
            highest_index = 2;
        }

        // Only the smallest spatial wavelet must be reconstructed
        if (index != highest_index) {
            return result;
        }

        //TODO: Can we improve on the current scheme for quarter resolution decoding?
    }

    if ((transform->type == TRANSFORM_TYPE_SPATIAL && index > 0) || index >= 2)
    {
        if(decoder->entropy_worker_new.pool.thread_count && threading)
        {
            ReconstructWaveletBand(decoder, transform, codec->channel, wavelet, index, codec->precision, &decoder->scratch, 1);

            // Add the inverse wavelet transform to the processing queue
            QueueThreadedTransform(decoder, codec->channel, index);
        }
        else
        {
            // Apply the inverse wavelet transform to reconstruct the lower level wavelet
            ReconstructWaveletBand(decoder, transform, codec->channel, wavelet, index, codec->precision, &decoder->scratch, 0);
        }
    }
}
#else
// Ready to invert this wavelet to get the lowpass band in the lower wavelet?
if (BANDS_ALL_VALID(wavelet))
{
    int channel = codec->channel;
    //PIXEL *buffer = (PIXEL *)decoder->buffer;
    //size_t buffer_size = decoder->buffer_size;
    int precision = codec->precision;

#if (0 && DEBUG)
    if (logfile) {
        char label[_MAX_PATH];
        int band;
        sprintf(label, "Channel: %d, index: %d", channel, index);
        DumpImageStatistics(label, wavelet, logfile);
#if 1
        for (band = 1; band < wavelet->num_bands; band++) {
            sprintf(label, "Channel: %d, index: %d, band: %d", channel, index, band);
            DumpBandStatistics(label, wavelet, band, logfile);
        }
#endif
    }
#endif

    // Are frames being decoded to quarter resolution?
    if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER && (decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
    {
        // Smallest spatial wavelet above the lowpass temporal band (fieldplus transform)
        int highest_index = 5;

        if (transform_type == TRANSFORM_TYPE_SPATIAL)
        {
            // Smallest wavelet in the spatial transform
            highest_index = 2;
        }

        // Only the smallest spatial wavelet must be reconstructed
        if (index != highest_index) {
            return result;
        }

        //TODO: Can we improve on the current scheme for quarter resolution decoding?
}

    // Apply the inverse wavelet transform to reconstruct the lower level wavelet
    ReconstructWaveletBand(decoder, transform, channel, wavelet, index, precision, &decoder->scratch, 0);
}
#endif

return result;
}

// Decode the coefficients in a lowpass band
//
// Reads the lowpass (DC) band of the top-level wavelet into band zero of the
// wavelet.  Three encodings are handled: packed 16-bit pixels (with a
// solid-color shortcut signalled by a leading all-ones word), packed 8-bit
// pixels, and a generic GetBits path.  The per-channel offsets added below are
// empirically tuned corrections (see the DAN-dated comments); the wavelet
// scale factor is set from the encoder quantization before returning.
bool DecodeSampleLowPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    int channel = codec->channel;
    bool result = true;

    int lowpass_width;      // Lowpass band dimensions
    int lowpass_height;
    int lowpass_pitch;
    PIXEL *pLowPassRow;     // Pointer into the lowpass band

    //int wavelet_width;    // Dimensions of the wavelet image
    //int wavelet_height;

    int bits_per_pixel;
    int quantization;
    int offset;
    //int pixel_divisor = (1 << (2 * codec->lowpass.level));

    int row, column;
    int32_t solid_color = -1;

    const int gain = 128;
    const int colorshift = 0;
    // int channelgain[4];

    //int waterrow=19, watercol=214;
    //int cspace = decoder->frame.colorspace;

    // Lowpass image dimensions may be smaller than the wavelet dimensions
    // because the encoder may have transmitted an image without the border
    lowpass_width = codec->lowpass.width;
    lowpass_height = codec->lowpass.height;
    lowpass_pitch = wavelet->pitch/sizeof(PIXEL);
    pLowPassRow = wavelet->band[0];

    // Get the parameters for quantization performed by the encoder
    quantization = codec->lowpass.quantization;
    offset = codec->lowpass.pixel_offset;
    bits_per_pixel = codec->lowpass.bits_per_pixel;

#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "Decode lowpass subband\n");
    }
#endif

    // Fast path: 16-bit pixels, word-aligned bitstream, even width
    if (bits_per_pixel == 16 && stream->nBitsFree == BITSTREAM_BUFFER_SIZE && !(lowpass_width&1))
    {
        int32_t *lpCurrentLong = (int32_t *)stream->lpCurrentWord;
        //int signval = 0;
        //int channel3stats = 0;
        int channeloffset = 0;

        if(decoder->codec.precision == 8)
        {
            channeloffset = (codec->num_frames==2 ? 64 : 32);
        }
        else if(decoder->codec.precision == 10)
        {
            switch(decoder->frame.format)
            {
            case DECODED_FORMAT_YU64:
            case DECODED_FORMAT_YR16:
            case DECODED_FORMAT_V210:
                channeloffset = codec->num_frames==2 ? 14 : 4;//DAN20090601, recal I-frame DAN20110301
                break;
            default:
                channeloffset = codec->num_frames==2 ? 48 : 24;//DAN20090601
            }

            if(decoder->sample_uncompressed) //DAN20110301 was testing the GOP length for this (why?)
                channeloffset = 0; //DAN20100822 -- Prevent offset between uncompressed V210 and compressed frames
        }
        else if(decoder->codec.precision == 12)
        {
            switch(decoder->frame.format)
            {
            case DECODED_FORMAT_RGB24:
            case DECODED_FORMAT_RGB24_INVERTED:
            case DECODED_FORMAT_RGB32:
            case DECODED_FORMAT_RGB32_INVERTED:
                channeloffset = 8; //DAN200906010
                break;

            // 16-bit precision:
            case DECODED_FORMAT_RG48:
            case DECODED_FORMAT_RG64:
            case DECODED_FORMAT_B64A:
            case DECODED_FORMAT_WP13:
            case DECODED_FORMAT_W13A:
                channeloffset = 0;
                break;

            case DECODED_FORMAT_RG30:
            case DECODED_FORMAT_R210:
            case DECODED_FORMAT_DPX0:
            case DECODED_FORMAT_AR10:
            case DECODED_FORMAT_AB10:
                channeloffset = 6; //DAN200906010 //DAN20100822 -- prefect for uncompressed to compressed.
                break;

            default:
                channeloffset = 0;
                break;
            }
        }

        if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) //DAN20090728 -- Prevent offset between uncompressed and compressed RAW frames
            channeloffset = 0;

#define DUMPLL 0
#if (_DEBUG && DUMPLL)
        FILE *fp;
        if(channel == 0)
        {
            static int inc = 1;
            char name[256];
            sprintf(name,"C:\\Cedoc\\LLdec%03d.pgm", inc++);
            fp = fopen(name,"w");
            fprintf(fp, "P2\n# CREATOR: DAN\n%d %d\n255\n", lowpass_width, lowpass_height);
        }
#endif

#if LOSSLESS
        channeloffset = 0; //LOSSLESS
#endif

        // A leading all-ones word flags a solid-color band: the color value and
        // the band dimensions follow, and no per-pixel data is stored
        //if(lpCurrentLong[0] == 0xffffffff)
        if(lpCurrentLong[0] == (int32_t)UINT32_MAX)
        {
            if(SwapInt32BtoN(lpCurrentLong[2]) == (uint32_t)lowpass_width)
            {
                if(SwapInt32BtoN(lpCurrentLong[3]) == (uint32_t)lowpass_height)
                {
                    solid_color = SwapInt32BtoN(lpCurrentLong[1]);
                    solid_color |= (solid_color<<16);
                    lpCurrentLong += 4;
                }
            }
        }

        // Decode each row in the lowpass image
        for (row = 0; row < lowpass_height; row++)
        {
            int pixels;

            // Start at the first column
            column = 0;

            // Process the rest of the row
            {
                for (; column < lowpass_width; column++)
                {
                    int pixel_value;
                    //int i;

                    // Perform inverse quantization
                    // Two 16-bit pixels are packed per 32-bit word: even columns
                    // fetch a word, odd columns reuse its low half
                    if(column & 1)
                    {
                        pixel_value = pixels;
                    }
                    else
                    {
                        //pixels = _bswap(*(lpCurrentLong++));
                        if(solid_color == -1)
                            pixels = SwapInt32BtoN(*(lpCurrentLong++));
                        else
                            pixels = solid_color;
                        pixel_value = (pixels>>16);
                        pixels <<= 16;
                        pixels >>= 16;
                    }

                    // Store the pixel in the lowpass band of the wavelet
                    pixel_value += channeloffset;
//                  pixel_value -= 64;
//                  pixel_value += ((rand() & 0x7fff) - 0x4000);
//                  if(pixel_value < 0) pixel_value = 0;
                    if(pixel_value > 0x7fff) pixel_value = 0x7fff;
                    pLowPassRow[column] = pixel_value;

#if (_DEBUG && DUMPLL)
                    if(channel==0 && fp)
                        fprintf(fp, "%d\n", pixel_value>>7);
#endif
                }
            }

            // Advance to the next row in the lowpass image
            pLowPassRow += lowpass_pitch;
        }

#if (_DEBUG && DUMPLL)
        if(channel == 0 && fp)
            fclose(fp);
#endif

#if ERROR_TOLERANT
        // Update the count of bytes used
        stream->nWordsUsed -= (int)(((intptr_t)lpCurrentLong - (intptr_t)stream->lpCurrentWord));
#endif
        // Update the bitstream
        stream->lpCurrentWord = (uint8_t *)lpCurrentLong;
    }
    else if (bits_per_pixel == 8 && stream->nBitsFree == BITSTREAM_BUFFER_SIZE)
    {
        uint8_t *lpCurrentByte = (uint8_t *)stream->lpCurrentWord;
        //int signval = 0;

        // Decode each row in the lowpass image
        for (row = 0; row < lowpass_height; row++)
        {
            // Start at the first column
            column = 0;

            // Process the rest of the row
            for (; column < lowpass_width; column++)
            {
                int pixel_value = *(lpCurrentByte++);

                // Perform inverse quantization
#if _ENCODE_CHROMA_ZERO
                if (channel == 0)
                    pixel_value = (quantization * pixel_value) + offset;
                else
                    pixel_value = (pixel_value - offset) * quantization;
#else
                pixel_value = (quantization * pixel_value) + offset;// + colorshift;
#endif
                // Apply gain around the mid-level (gain/128 with rounding via shift)
                pixel_value -= 128 * quantization;
                pixel_value *= gain;
                pixel_value >>= 7;
                pixel_value += 128 * quantization;
                pixel_value += colorshift;

                // Store the pixel in the lowpass band of the wavelet
                // Multiply by 16 to turn 8-bit into the new 16-bit format
                pLowPassRow[column] = pixel_value * 16;
            }

            // Advance to the next row in the lowpass image
            pLowPassRow += lowpass_pitch;
        }

#if ERROR_TOLERANT
        // Update the count of bytes used
        stream->nWordsUsed -= (int)(((intptr_t)lpCurrentByte - (intptr_t)stream->lpCurrentWord));
#endif
        // Update the bitstream
        stream->lpCurrentWord = (uint8_t *)lpCurrentByte;
    }
    else
    {
        int channeloffset = 0;

        if(decoder->codec.precision == 8)
        {
            channeloffset = (codec->num_frames==2 ? 64 : 32);
        }
        else if(decoder->codec.precision == 10)
        {
            channeloffset = (codec->num_frames==2 ? 10 : 5);
        }
        else if(decoder->codec.precision == 12)
        {
        //  channeloffset = (codec->num_frames==2 ? 4 : 2); // Seems to result in less shift using the viper images
        }

        //DAN20050923 no longer trying to compensate for YUV to RGB issues.
if(decoder->frame.format == DECODED_FORMAT_RGB24 || decoder->frame.format == DECODED_FORMAT_RGB32) { if(decoder->codec.precision == 8) { switch(channel) { case 0: channeloffset += 8; break; // fixed rounding error introduced by YUV->RGB case 1: channeloffset += 16; break; case 2: channeloffset += 10; break; } } else if(decoder->codec.precision == 10) { switch(channel) { case 0: channeloffset += -8; break; // fixed rounding error introduced by YUV->RGB case 1: channeloffset += -4; break; case 2: channeloffset += -4; break; } } else if(decoder->codec.precision == 12) { switch(channel) { case 0: channeloffset += 0; break; // fixed rounding error introduced by YUV->RGB case 1: channeloffset += 0; break; case 2: channeloffset += 0; break; } } } if(bits_per_pixel != 16) channeloffset = 0; for (row = 0; row < lowpass_height; row++) { for (column = 0; column < lowpass_width; column++) { int pixel_value = GetBits(stream, bits_per_pixel); // Perform inverse quantization #if _ENCODE_CHROMA_ZERO if (channel == 0) pixel_value = (quantization * pixel_value) + offset; else pixel_value = (pixel_value - offset) * quantization; #else pixel_value = (quantization * pixel_value) + offset;// + colorshift; #endif // Store the pixel in the lowpass band of the wavelet pLowPassRow[column] = SATURATE(pixel_value + channeloffset); // DAN20050926 added chromaoffet to match the normal path -- this code will be used for SD (720) encodes } stream->nWordsUsed -= lowpass_width*(bits_per_pixel>>3); // Advance to the next row in the lowpass image pLowPassRow += lowpass_pitch; } } // Set the wavelet scale factor wavelet->scale[0] = quantization; // Align the bitstream to the next tag value pair AlignBitsTag(stream); // Return indication of lowpass decoding success return result; } // Decode the coefficients in a highpass band bool DecodeSampleHighPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band, int threading) { CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile 
= decoder->logfile; #endif CODEC_STATE *codec = &decoder->codec; //int channel = codec->channel; //int subband = codec->band.subband; //int index = codec->highpass.wavelet_number; int width; int height; int quantization; // The encoder may not have used variable-length coding int method = codec->band.encoding; bool result = true; // Check that the band index is in range //assert(0 <= band && band <= codec->max_subband); if (!(0 <= band && band <= codec->max_subband)) { decoder->error = CODEC_ERROR_BAD_FRAME; return false; } // Encoded coefficients start on a tag boundary AlignBitsTag(stream); #if (0 && DEBUG) // Dump the band header to the logfile if (logfile) { fprintf(logfile, "Band header marker: 0x%04X, subband: %d, width: %d, height: %d, encoding: %d\n", header->marker, header->subband, header->width, header->height, header->encoding); } #endif // Copy the scale factors used by the encoder into the wavelet band // (Zero means that the encoder did not supply this parameter) if (codec->band.scale > 0) { wavelet->scale[band] = codec->band.scale; } // Get the quantization factor that was used to encode the band coefficients quantization = codec->band.quantization; // Copy the quantization into the wavelet wavelet->quantization[band] = quantization; #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decode highpass subband: %d, quantization: %d\n", subband, quantization); } #endif // Get the highpass band dimensions width = codec->band.width; height = codec->band.height; // Is this a special band for the temporal high pass thumbnail? 
	if (method == BAND_ENCODING_LOSSLESS)
	{
		//lossless temporal subband //DAN20060701
		result = DecodeBand16sLossless(decoder, stream, wavelet, band, width, height);
		assert(result);
		if (result) {
			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, wavelet, band);
		}
	}
	else if (method == BAND_ENCODING_16BIT)
	{
		//lossless temporal subband //DAN20060701
		result = DecodeBand16s(decoder, stream, wavelet, band, width, height);
		assert(result);
		if (result) {
			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, wavelet, band);
		}
	}
	else
	{
		// Must use the runlength encoding method
		//assert(codec->band.encoding == BAND_ENCODING_RUNLENGTHS);
		if (codec->band.encoding != BAND_ENCODING_RUNLENGTHS) {
			decoder->error = CODEC_ERROR_BAD_FRAME;
			return false;
		}

#if 0
		// This code attempts to not decode various subbands for 1/4 res decodes.
		// Unfortunately playback would stop after 5 seconds with this code (but not in debug mode.)
		if (subband >= 4 && subband <= 6)
		{
			TAGVALUE segment;
			AlignBitsTag(stream);
			do {
				segment = GetTagValue(stream);
			} while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
			stream->lpCurrentWord -= 4;
			stream->nWordsUsed += 4;
		}
		else
#elif 0
		// Is this subband required for decoding the frame?
		if (CanSkipSubband(decoder, subband))
		{
			// Skip past the end of this subband
			SkipSubband(stream);
		}
#endif

		// Decode this subband
		result = DecodeFastRunsFSM16s(decoder, stream, wavelet, band, width, height, threading);
	}

	// Return failure if a problem was encountered while reading the band coefficients
	if (!result) return result;

	// The encoded band coefficients end on a bitstream word boundary
	// to avoid interference with the marker for the coefficient band trailer
	AlignBits(stream);

	// Decode the band trailer
	error = DecodeBandTrailer(stream, NULL);
	decoder->error = error;
	//assert(error == CODEC_ERROR_OKAY);
	if (error != CODEC_ERROR_OKAY) {
#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile, "Error in band %d trailer: %d\n", band, error);
		}
#endif
		return false;
	}

	return result;
}

// Decode an empty band
// The band carries no coefficient data; only the scale/quantization state and
// the band trailer are consumed from the bitstream.  Returns false (and sets
// decoder->error) if the band trailer is invalid.
bool DecodeSampleEmptyBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int quantization;

	// Check that the band is in range
	assert(0 <= band && band <= CODEC_MAX_HIGHBANDS);

	// Check that the highpass band is 16 bits
	assert(wavelet->pixel_type[1] == PIXEL_TYPE_16S);

#if (0 && DEBUG)
	//TODO: Change format string to handle 64-bit pointers
	if (logfile) {
		fprintf(logfile, "Start decoding an empty band, stream: 0x%p\n", stream->lpCurrentWord);
	}
#endif

	// Encoded coefficients must start on a word boundary
	AlignBits(stream);

	// Copy the scale factors used by the encoder into the wavelet band
	// (Zero means that the encoder did not supply the parameter)
	if (codec->band.scale > 0)
		wavelet->scale[band] = codec->band.scale;

	// Set the quantization used to encode the band coefficients
	quantization = codec->band.quantization;
	wavelet->quantization[band] = quantization;

#if (0 && DEBUG)
	if (logfile) {
		DumpBits(stream, logfile);
	}
#endif

	// Decode the band trailer
	error = DecodeBandTrailer(stream, NULL);
	decoder->error =
error;
	assert(error == CODEC_ERROR_OKAY);
	if (error != CODEC_ERROR_OKAY) {
#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile, "Error in band: %d, error: %d\n", band, error);
		}
#endif
		return false;
	}

	// The encoded band coefficients end on a bitstream word boundary
	// to avoid interference with the marker for the coefficient band trailer
	AlignBits(stream);

#if (0 && DEBUG)
	// Dump the band trailer to the logfile
	if (logfile) {
		fprintf(logfile, "Band trailer marker: 0x%04X\n", trailer->marker);
	}
#endif

#if (0 && DEBUG)
	if (logfile) {
		//TODO: Change format string to handle 64-bit pointers
		fprintf(logfile, "End decode empty band, stream: 0x%X\n", stream->lpCurrentWord);
	}
#endif

	return true;
}

// Decode a band of 16-bit signed coefficients stored as raw words
// Reads width x height words per row from the bitstream into the wavelet band,
// multiplying by the dequantization factor stored for the band (the factor-1
// case takes a faster copy path).
bool DecodeBand16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height)
{
	PIXEL *rowptr = wavelet->band[band_index];
	int pitch = wavelet->pitch;
	int row,dequant = wavelet->quantization[band_index];

	// Convert the pitch from bytes to pixels
	pitch /= sizeof(PIXEL);

	//BAND_ENCODING_16BIT
	if(dequant == 1)
	{
		for (row = 0; row < height; row++)
		{
			int column;
#if 0
			for (column = 0; column < width; column++)
			{
				int value = GetWord16s(stream);
				rowptr[column] = value;
			}
#else
			// Mild speedup (2.5% overall half-res decode improvement.)
			// NOTE(review): copies the two bytes of each word in swapped order,
			// presumably matching what GetWord16s does for the stream byte order -- confirm
			char *sptr = (char *)stream->lpCurrentWord;
			char *dptr = (char *)rowptr;
			for (column = 0; column < width; column++)
			{
				*(dptr+1) = *sptr++;
				*dptr = *sptr++;
				dptr+=2;
			}
			stream->lpCurrentWord += width*2;
			stream->nWordsUsed += width*2;
#endif
			rowptr += pitch;
		}
	}
	else
	{
		for (row = 0; row < height; row++)
		{
			int column;
			for (column = 0; column < width; column++)
			{
				int value = GetWord16s(stream);
				rowptr[column] = value*dequant;
			}
			rowptr += pitch;
		}
	}

#if (0 && DEBUG)
	{
		int static count = 0;
		if (count < 20)
		{
			char label[_MAX_PATH];
			sprintf(label, "Hightemp-decode-%d-", count);
			DumpBandPGM(label, wavelet, band_index, NULL);
		}
		count++;
	}
#endif

	return true;
}

// Decode a band of 16-bit signed coefficients that was entropy coded losslessly
// Uses the decoder's active codebook FSM (DecodeBandFSM16sNoGap2Pass) to decode
// the whole band, then applies the band quantization as a post-multiply
// (a left shift of 5 when the quantizer is 32).  Resets the active codebook
// and difference-coding state for the next subband.  Returns false (and sets
// decoder->error) on any decode failure.
bool DecodeBand16sLossless(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height)
{
	//CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	int result = true;
	int quant = wavelet->quantization[band_index];

	// Get the pointer to the finite state machine
	FSM *fsm = &decoder->fsm[decoder->codec.active_codebook];
	int size;
	PIXEL *rowptr;
	//int row = 0;
	int pitch;
	//CODEC_STATE *codec = &decoder->codec;
	//int channel = codec->channel;
	//int subband = codec->band.subband;
	//int num_subbands = codec->num_subbands;
	//int pixel_type = wavelet->pixel_type[band_index];
	//int difference_coding = decoder->codec.difference_coding;
	//int localquant = 1;
	//int threading = 0;

	decoder->codec.active_codebook = 0; // reset CODEC state
	decoder->codec.difference_coding = 0; //reset state for next subband

	// Must have a valid wavelet
	assert(wavelet != NULL);
	if (! (wavelet != NULL)) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	//Must have a valid FSM
	assert(fsm != NULL);
	if (!
(fsm != NULL)) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } // All rows are treated as one int32_t row that covers the entire band size = fsm->table.num_states; assert(size > 0); if (size == 0) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } // Check if the band is intended for 8-bit pixels assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_16S); rowptr = (PIXEL *)wavelet->band[band_index]; pitch = wavelet->pitch; assert(rowptr != NULL && pitch != 0); if (! (rowptr != NULL && pitch != 0)) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } DeQuantFSM(fsm, 1); // can;t use this to dequant as we split the cooefficients into high and low bytes. if (!DecodeBandFSM16sNoGap2Pass(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, quant)) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } if(quant) { int x,y; PIXEL *line = rowptr; if(quant == 32) { for(y=0;y<height;y++) { for(x=0;x<width;x++) { line[x] <<= 5; } line += pitch/2; } } else { for(y=0;y<height;y++) { for(x=0;x<width;x++) { line[x] *= quant; } line += pitch/2; } } } /* if(once <= 60) { char name[200]; FILE *fp; sprintf(name,"C:/Cedoc/DUMP/Decoder/dump%02d.raw", once); fp = fopen(name,"wb"); fwrite(rowptr,width*height,1,fp); fclose(fp); once++; }*/ assert(result == true); if (! (result == true)) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } return true; } // Invert the wavelet to reconstruct the lower wavelet in the transform void ReconstructWaveletBand(DECODER *decoder, TRANSFORM *transform, int channel, IMAGE *wavelet, int index, int precision, const SCRATCH *scratch, int allocations_only) { int transform_type = transform->type; int width = wavelet->width; int height = wavelet->height; int level = wavelet->level; PIXEL *buffer = (PIXEL *)scratch->free_ptr; size_t buffer_size = scratch->free_size; // Is the current wavelet a spatial wavelet? 
	if (transform_type == TRANSFORM_TYPE_SPATIAL && index > 0)
	{
		// Reconstruct the lowpass band in the lower wavelet
		int lowpass_index = index - 1;
		IMAGE *lowpass = transform->wavelet[lowpass_index];
		int lowpass_width = 2 * width;
		int lowpass_height = 2 * height;
		int lowpass_level = level - 1;
		int lowpass_type = (lowpass_index == 0) ? WAVELET_TYPE_FRAME : WAVELET_TYPE_SPATIAL;
		//const int prescale = 1;
		const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
		int prescale = transform->prescale[index];

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		lowpass = GetWaveletThreadSafe(decoder, transform, lowpass_index, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
		// Allocate the wavelet if not already allocated
#if _ALLOCATOR
		lowpass = ReallocWaveletEx(decoder->allocator, lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
		lowpass = ReallocWaveletEx(lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#endif
		transform->wavelet[lowpass_index] = lowpass;
#endif

		// Check that the lowpass band has not already been reconstructed
		//assert((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0);

		if(!allocations_only)
		{
			// Check that all of the wavelet bands have been decoded
			//assert(BANDS_ALL_VALID(wavelet));
			if (!BANDS_ALL_VALID(wavelet)) {
				decoder->error = CODEC_ERROR_BAD_FRAME;
				return;
			}

			// Has this wavelet already been reconstructed?
			if ((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0)
			{
				// Perform the inverse spatial transform before decoding the next wavelet
				STOP(tk_decoding);
				START(tk_inverse);
				//TransformInverseSpatialQuantLowpass(wavelet, lowpass, buffer, buffer_size, prescale, inverse_prescale);
				TransformInverseSpatialQuantLowpass(wavelet, lowpass, scratch, prescale, inverse_prescale);
				STOP(tk_inverse);
				START(tk_decoding);

				// Call thread safe routine to update the band valid flags
				UpdateWaveletBandValidFlags(decoder, lowpass, 0);

#if TIMING
				// Increment the count of spatial transforms performed during decoding
				spatial_decoding_count++;
#endif
			}
		}
	}
	// Is the current wavelet a spatial wavelet above the temporal lowpass band?
	else if (index > 3)
	{
		// Reconstruct the lowpass band in the lower wavelet
		const int temporal_wavelet_index = 2;
		int lowpass_index = (index > 4) ? index - 1 : index - 2;
		IMAGE *lowpass = transform->wavelet[lowpass_index];
		int lowpass_width = 2 * width;
		int lowpass_height = 2 * height;
		int lowpass_level = level - 1;
		int lowpass_type = ((lowpass_index == temporal_wavelet_index) ?
							WAVELET_TYPE_TEMPORAL : WAVELET_TYPE_SPATIAL);
		//const int prescale = 2;
		const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
		int prescale = transform->prescale[index];

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		lowpass = GetWaveletThreadSafe(decoder, transform, lowpass_index, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
		// Allocate the wavelet if not already allocated
#if _ALLOCATOR
		lowpass = ReallocWaveletEx(decoder->allocator, lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
		lowpass = ReallocWaveletEx(lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#endif
		transform->wavelet[lowpass_index] = lowpass;
#endif

		if(!allocations_only)
		{
			// Check that the lowpass band has not already been reconstructed
			assert((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0);

			// Check that all of the wavelet bands have been decoded
			//assert(BANDS_ALL_VALID(wavelet));
			if (!BANDS_ALL_VALID(wavelet)) {
				decoder->error = CODEC_ERROR_BAD_FRAME;
				return;
			}

			// Perform the inverse spatial transform before decoding the next wavelet
			STOP(tk_decoding);
			START(tk_inverse);
			//TransformInverseSpatialQuantLowpass(wavelet, lowpass, buffer, buffer_size, prescale, inverse_prescale);
			TransformInverseSpatialQuantLowpass(wavelet, lowpass, scratch, prescale, inverse_prescale);
			STOP(tk_inverse);
			START(tk_decoding);

			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, lowpass, 0);

#if TIMING
			// Increment the count of spatial transforms performed during decoding
			spatial_decoding_count++;
#endif
		}
	}
	// Is the current wavelet the spatial wavelet above the temporal highpass band?
	else if (index == 3)
	{
		// Reconstruct the highpass band in the temporal wavelet
		const int temporal_wavelet_index = 2;
		int highpass_index = index - 1;
		IMAGE *highpass = transform->wavelet[highpass_index];
		int highpass_width = 2 * width;
		int highpass_height = 2 * height;
		int highpass_level = level - 1;
		int highpass_type = ((highpass_index == temporal_wavelet_index) ?
							 WAVELET_TYPE_TEMPORAL : WAVELET_TYPE_SPATIAL);
		const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
		int prescale = inverse_prescale ? transform->prescale[index] : 0;

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		highpass = GetWaveletThreadSafe(decoder, transform, highpass_index, highpass_width, highpass_height, highpass_level, highpass_type);
#else
		// Allocate the wavelet if not already allocated
#if _ALLOCATOR
		highpass = ReallocWaveletEx(decoder->allocator, highpass , highpass_width, highpass_height, highpass_level, highpass_type);
#else
		highpass = ReallocWaveletEx(highpass , highpass_width, highpass_height, highpass_level, highpass_type);
#endif
		transform->wavelet[highpass_index] = highpass;
#endif

		if(!allocations_only)
		{
			// Check that the highpass band has not already been reconstructed
			assert((highpass->band_valid_flags & BAND_VALID_MASK(1)) == 0);

			// Check that all of the wavelet bands have been decoded
			//assert(BANDS_ALL_VALID(wavelet));
			if (!BANDS_ALL_VALID(wavelet)) {
				decoder->error = CODEC_ERROR_BAD_FRAME;
				return;
			}

			// Perform the inverse spatial transform before decoding the next wavelet
			STOP(tk_decoding);
			START(tk_inverse);
			TransformInverseSpatialQuantHighpass(wavelet, highpass, buffer, buffer_size, prescale);
			STOP(tk_inverse);
			START(tk_decoding);

			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, highpass, 1);

#if TIMING
			// Increment the count of spatial transforms performed during decoding
			spatial_decoding_count++;
#endif
		}
	}
	// Is the current wavelet the temporal wavelet?
	else if (index == 2)
	{
		// Get the temporal wavelet
		IMAGE *temporal = wavelet;

		// Set the frame wavelet parameters
		int frame_level = 1;
		int frame_type = WAVELET_TYPE_FRAME;

		// Get the two frame wavelets
		IMAGE *frame[2];
		frame[0] = transform->wavelet[0];
		frame[1] = transform->wavelet[1];

		// Check that the temporal wavelet is valid
		//assert(temporal->num_bands == 2 && temporal->wavelet_type == WAVELET_TYPE_TEMPORAL);
		if (!(temporal->num_bands == 2 && temporal->wavelet_type == WAVELET_TYPE_TEMPORAL)) {
			decoder->error = CODEC_ERROR_BAD_FRAME;
			return;
		}

#if _THREADED_DECODER
		// Allocate (or reallocate) the frame wavelets with thread safety
		frame[0] = GetWaveletThreadSafe(decoder, transform, 0, width, height, frame_level, frame_type);
		frame[1] = GetWaveletThreadSafe(decoder, transform, 1, width, height, frame_level, frame_type);
#else
		// Allocate the frame wavelets if not already allocated
#if _ALLOCATOR
		frame[0] = ReallocWaveletEx(decoder->allocator, frame[0], width, height, frame_level, frame_type);
		frame[1] = ReallocWaveletEx(decoder->allocator, frame[1], width, height, frame_level, frame_type);
#else
		frame[0] = ReallocWaveletEx(frame[0], width, height, frame_level, frame_type);
		frame[1] = ReallocWaveletEx(frame[1], width, height, frame_level, frame_type);
#endif
		transform->wavelet[0] = frame[0];
		transform->wavelet[1] = frame[1];
#endif

#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile, "Before inverse temporal transform");
			DumpArray16s("Temporal Lowpass", temporal->band[0], temporal->width, temporal->height, temporal->pitch, logfile);
			DumpArray16s("Temporal Highpass", temporal->band[1], temporal->width, temporal->height, temporal->pitch, logfile);
		}
#endif

		if(!allocations_only)
		{
			// Check that the lowpass bands have not already been reconstructed
			assert((frame[0]->band_valid_flags & BAND_VALID_MASK(0)) == 0);
			assert((frame[1]->band_valid_flags & BAND_VALID_MASK(0)) == 0);

			// Check that all of the wavelet bands have been decoded
			assert(BANDS_ALL_VALID(temporal));

			// Invert the temporal transform between the frame wavelets
			STOP(tk_decoding);
			START(tk_inverse);
			TransformInverseTemporalQuant(temporal, frame[0], frame[1], buffer, buffer_size, precision);
			STOP(tk_inverse);
			START(tk_decoding);

#if (0 && DEBUG)
			if (logfile) {
				IMAGE *wavelet = quad[0];
				fprintf(logfile, "After inverse temporal transform\n");
				DumpArray16s("Temporal Lowpass", temporal->band[0], temporal->width, temporal->height, temporal->pitch, logfile);
				DumpArray16s("Temporal Highpass", temporal->band[1], temporal->width, temporal->height, temporal->pitch, logfile);
				DumpArray16s("First frame wavelet, band 0", wavelet->band[0], wavelet->width, wavelet->height, wavelet->pitch, logfile);
			}
#endif

			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, frame[0], 0);
			UpdateWaveletBandValidFlags(decoder, frame[1], 0);

#if TIMING
			// Increment the number of temporal transforms performed outside of decoding
			temporal_decoding_count++;
#endif
		}
	}
}

// Compute the dimensions of the output buffer
// Determines the decoded frame dimensions for the current resolution setting
// from the wavelet that would be used for decoding at that resolution.
// The outputs are cleared to zero first, so the caller receives zero
// dimensions if the transforms are not fully decoded.
void ComputeOutputDimensions(DECODER *decoder, int frame, int *decoded_width_out, int *decoded_height_out)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	FRAME_INFO *info = &decoder->frame;
	//int progressive = codec->progressive;
	TRANSFORM **transform_array = decoder->transform;
	//IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
	IMAGE *wavelet = NULL;
	int wavelet_width;
	int wavelet_height;
	int decoded_width;
	int decoded_height;
	int resolution = info->resolution;
	//int chroma_offset = decoder->codec.chroma_offset;
	int decoded_scale = 0;

	if (decoded_width_out == NULL || decoded_height_out == NULL) {
		return;
	}

	// Clear the return values in case this routine terminates early
	*decoded_width_out = 0;
	*decoded_height_out = 0;

	// Get the decoding scale
	switch(resolution)
	{
	case DECODED_RESOLUTION_FULL:
	case DECODED_RESOLUTION_HALF_HORIZONTAL:
		//assert(AllTransformBandsValid(transform_array, num_channels, frame));
		if (!AllTransformBandsValid(transform_array, num_channels, frame)) {
			decoder->error = CODEC_ERROR_BAD_FRAME;
			return;
		}
		decoded_scale = 2;
		wavelet = transform_array[0]->wavelet[0];
		break;

	case DECODED_RESOLUTION_HALF:
		//assert(AllTransformBandsValid(transform_array, num_channels, frame));
		if (!AllTransformBandsValid(transform_array, num_channels, frame)) {
			decoder->error = CODEC_ERROR_BAD_FRAME;
			return;
		}
		decoded_scale = 1;
		wavelet = transform_array[0]->wavelet[0];
		break;

	case DECODED_RESOLUTION_QUARTER:
		if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
		{
			//assert(AllTransformBandsValid(transform_array, num_channels, frame));
			if (!AllTransformBandsValid(transform_array, num_channels, frame)) {
				decoder->error = CODEC_ERROR_BAD_FRAME;
				return;
			}
			decoded_scale = 1;
			wavelet = transform_array[0]->wavelet[0];
		}
		else
		{
			decoded_scale = 1;
			wavelet = transform_array[0]->wavelet[3];
		}
		break;

	case DECODED_RESOLUTION_LOWPASS_ONLY:
		decoded_scale = 1;
		wavelet = transform_array[0]->wavelet[5];
		if(wavelet == NULL) // therefore Intra Frame compressed
			wavelet = transform_array[0]->wavelet[2];
		break;

	default:
		assert(0);
		break;
	}

	// Get the decoded frame dimensions
	assert(wavelet != NULL);
	wavelet_width = wavelet->width;
	wavelet_height = wavelet->height;

	if(resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
		decoded_width = wavelet_width;
	else
		decoded_width = decoded_scale * wavelet_width;
	decoded_height = decoded_scale * wavelet_height;

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Decoded scale: %d, decoded width: %d, wavelet width: %d\n", decoded_scale, decoded_width, wavelet_width);
	}
#endif

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Decoded width: %d, height: %d, frame width: %d, height: %d, output pitch: %d\n", decoded_width, decoded_height, info->width, info->height, pitch);
	}
#endif

	// Return the decoded width and height
	*decoded_width_out = decoded_width;
	*decoded_height_out = decoded_height;
}
#define DEBUG_ROW16U 0 void ReconstructSampleFrameToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch) { FRAME_INFO local_info; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif FRAME_INFO *info = &local_info; CODEC_STATE *codec = &decoder->codec; int num_channels = codec->num_channels; int progressive = codec->progressive; TRANSFORM **transform_array = decoder->transform; IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS]; IMAGE *wavelet; int wavelet_width; int wavelet_height; int decoded_width; int decoded_height; int resolution = decoder->frame.resolution; int chroma_offset = decoder->codec.chroma_offset; int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed; //TODO: Change this routine to return the codec error code CODEC_ERROR error = CODEC_ERROR_OKAY; //if(decoder->cfhddata.calibration) // LoadTweak(); //TODO: Change this routine to return an error code if (decoder == NULL) { return; } decoder->gop_frame_num = frame; #if _THREADED_DECODER // Wait until the transform thread has finished all pending transforms WaitForTransformThread(decoder); #endif //return; // copy frame info in a changable local structure memcpy(info, &decoder->frame, sizeof(FRAME_INFO)); // Use the old code for reconstructing the frame #if (0 && DEBUG) // Force quarter resolution decoding for debugging that feature resolution = DECODED_RESOLUTION_QUARTER; #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Inverting last wavelet, frame: %d\n", frame); } #endif // The decoder can decode a video sample without returning a frame if (output == NULL || pitch == 0) return; #if (1 && DEBUG_ROW16U) // Force decoding to 16-bit pixels for debugging info->format = DECODED_FORMAT_YR16; #endif #if 0 if (info->format == DECODED_FORMAT_YR16) { // Force interlaced or progressive decoding for debugging //progressive = false; progressive = true; } #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoder flags: 0x%p\n", 
decoder->flags); } #endif // Does this frame have to be reconstructed? if ((decoder->flags & DECODER_FLAGS_RENDER) == 0) { #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoder discarding frame: %d\n", frame); } #endif return; } // Check that the requested frame is within the limits of the group of frames assert(0 <= frame && frame < decoder->gop_length); // Check that the frame resolution is valid assert(IsValidFrameResolution(resolution)); if (!IsValidFrameResolution(resolution)) { decoder->error = CODEC_ERROR_RESOLUTION; return; } #if (0 && TIMING) //(0 && DEBUG) // Override progressive flag read from the bitstream for debugging //progressive = 0; // Use the inverse frame transform progressive = 1; // Use the inverse spatial transform #endif // Build the 3D LUTs if needed ComputeCube(decoder); //HACK DAN20110131 -- some formats will not directly decode so need to use the AM route { if( decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422 && resolution == DECODED_RESOLUTION_HALF) { if( decoder->frame.format == COLOR_FORMAT_R408 || decoder->frame.format == COLOR_FORMAT_V408) { decoder->use_active_metadata_decoder = true; decoder->apply_color_active_metadata = true; } } if( decoder->frame.format == COLOR_FORMAT_NV12) { decoder->use_active_metadata_decoder = true; decoder->apply_color_active_metadata = true; // TODO, make it work with this. 
} if (decoder->codec.progressive == false && decoder->frame.format == COLOR_FORMAT_RGB24) { decoder->use_active_metadata_decoder = true; decoder->apply_color_active_metadata = true; } } // Get the decoding scale if(!uncompressed) { switch(resolution) { case DECODED_RESOLUTION_FULL: case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER: //assert(AllTransformBandsValid(transform_array, num_channels, frame)); if (!AllTransformBandsValid(transform_array, num_channels, frame)) { decoder->error = CODEC_ERROR_BAD_FRAME; return; } wavelet = transform_array[0]->wavelet[0]; // Get the decoded frame dimensions //assert(wavelet != NULL); if (wavelet == NULL) { decoder->error = CODEC_ERROR_RESOLUTION; return; } wavelet_width = wavelet->width; wavelet_height = wavelet->height; decoded_width = 2 * wavelet_width; decoded_height = 2 * wavelet_height; break; case DECODED_RESOLUTION_HALF: //assert(AllTransformBandsValid(transform_array, num_channels, frame)); if (!AllTransformBandsValid(transform_array, num_channels, frame)) { decoder->error = CODEC_ERROR_BAD_FRAME; return; } wavelet = transform_array[0]->wavelet[0]; // Get the decoded frame dimensions //assert(wavelet != NULL); if (wavelet == NULL) { decoder->error = CODEC_ERROR_RESOLUTION; return; } wavelet_width = wavelet->width; wavelet_height = wavelet->height; decoded_width = wavelet_width; decoded_height = wavelet_height; break; case DECODED_RESOLUTION_HALF_HORIZONTAL: //assert(AllTransformBandsValid(transform_array, num_channels, frame)); if (!AllTransformBandsValid(transform_array, num_channels, frame)) { decoder->error = CODEC_ERROR_BAD_FRAME; return; } wavelet = transform_array[0]->wavelet[0]; // Get the decoded frame dimensions //assert(wavelet != NULL); if (wavelet == NULL) { decoder->error = CODEC_ERROR_RESOLUTION; return; } wavelet_width = wavelet->width; wavelet_height = wavelet->height; decoded_width = wavelet_width; decoded_height = 2 * wavelet_height; break; case DECODED_RESOLUTION_QUARTER: 
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { //assert(AllTransformBandsValid(transform_array, num_channels, frame)); if (!AllTransformBandsValid(transform_array, num_channels, frame)) { decoder->error = CODEC_ERROR_BAD_FRAME; return; } wavelet = transform_array[0]->wavelet[0]; } else { wavelet = transform_array[0]->wavelet[3]; } // Get the decoded frame dimensions //assert(wavelet != NULL); if(wavelet == NULL) { decoder->error = CODEC_ERROR_RESOLUTION; return; } wavelet_width = wavelet->width; wavelet_height = wavelet->height; decoded_width = wavelet_width; decoded_height = wavelet_height; break; case DECODED_RESOLUTION_LOWPASS_ONLY: wavelet = transform_array[0]->wavelet[5]; if(wavelet == NULL) // there Intra Frame compressed wavelet = transform_array[0]->wavelet[2]; // Get the decoded frame dimensions //assert(wavelet != NULL); if (wavelet == NULL) { decoder->error = CODEC_ERROR_RESOLUTION; return; } wavelet_width = wavelet->width; wavelet_height = wavelet->height; decoded_width = wavelet_width; decoded_height = wavelet_height; break; default: assert(0); break; } } else { if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { decoded_width = info->width/2; decoded_height = info->height/2; } else { decoded_width = info->width; decoded_height = info->height; } } if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { if(resolution == DECODED_RESOLUTION_FULL) { if(decoded_width*2 == info->width) { info->width /= 2; info->height /= 2; info->resolution = resolution = DECODED_RESOLUTION_FULL_DEBAYER; } } else if(resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) { if(decoded_width*2 == info->width) { info->width /= 2; info->height /= 2; } } else if(resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) { if(decoded_width*2 == info->width) { info->height /= 2; info->resolution = resolution = DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER; } } else if(decoder->frame.format == DECODED_FORMAT_BYR2 || decoder->frame.format == DECODED_FORMAT_BYR4) { 
if(decoded_width*2 == info->width) { info->width /= 2; info->height /= 2; info->resolution = resolution = DECODED_RESOLUTION_HALF_NODEBAYER; } } else { if(resolution == DECODED_RESOLUTION_HALF) { if(decoded_width*2 == info->width) { decoded_width *= 2; decoded_height *= 2; info->resolution = resolution = DECODED_RESOLUTION_FULL; } } else if(resolution == DECODED_RESOLUTION_QUARTER) { if(uncompressed) { decoded_width *= 2; decoded_height *= 2; info->resolution = resolution = DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED; } else { if(decoded_width == info->width) { info->resolution = resolution = DECODED_RESOLUTION_HALF; } } } } } if(uncompressed) { // Call the appropriate routine for the encoded format switch (decoder->codec.encoded_format) { case ENCODED_FORMAT_YUVA_4444: // Four planes of YUVA 4:4:4:4 // Not implemented assert(0); error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; case ENCODED_FORMAT_BAYER: // Bayer encoded data // Add new code here for the final steps in decoding the Bayer format error = UncompressedSampleFrameBayerToBuffer(decoder, info, frame, output, pitch); break; case ENCODED_FORMAT_YUV_422: // Original encoding scheme for YUV 4:2:2 (always v210) error = UncompressedSampleFrameYUVToBuffer(decoder, info, frame, output, pitch);//CODEC_ERROR_UNSUPPORTED_FORMAT; break; case ENCODED_FORMAT_RGB_444: // Original encoding scheme for RGB 444 (always DPX0) error = UncompressedSampleFrameRGBToBuffer(decoder, info, frame, output, pitch);//CODEC_ERROR_UNSUPPORTED_FORMAT; break; default: // Fall through into the old code for reconstructing frames error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; } } else { if (decoder->codec.num_channels < 3 || decoder->codec.num_channels > 4) { decoder->error = CODEC_ERROR_BAD_FRAME; return; } // Call the appropriate routine for the encoded format switch (decoder->codec.encoded_format) { case ENCODED_FORMAT_RGB_444: // channels = decoder->codec.num_channels; planes of RGB 4:4:4 case ENCODED_FORMAT_RGBA_4444: // Four planes of 
ARGB 4:4:4:4 error = ReconstructSampleFrameRGB444ToBuffer(decoder, frame, output, pitch); break; case ENCODED_FORMAT_YUVA_4444: // Four planes of YUVA 4:4:4:4 // Not implemented assert(0); //error = ReconstructSampleFrameYUVA4444ToBuffer(decoder, frame, output, pitch); break; case ENCODED_FORMAT_BAYER: // Bayer encoded data // Add new code here for the final steps in decoding the Bayer format error = ReconstructSampleFrameBayerToBuffer(decoder, info, frame, output, pitch); break; case ENCODED_FORMAT_YUV_422: // Original encoding scheme for YUV 4:2:2 // Add new code here for the final steps in decoding the original YUV 4:2:2 format error = ReconstructSampleFrameYUV422ToBuffer(decoder, frame, output, pitch); break; default: // Fall through into the old code for reconstructing frames error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; } } // Was the newer code able to successfully reconstruct the frame? if (error != CODEC_ERROR_UNSUPPORTED_FORMAT) { // Save the codec error code in the decoder state and return decoder->error = error; return; } #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoded scale: %d, decoded width: %d, wavelet width: %d\n", decoded_scale, decoded_width, wavelet_width); } #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoded width: %d, height: %d, frame width: %d, height: %d, output pitch: %d\n", decoded_width, decoded_height, info->width, info->height, pitch); } #endif #if (0 && DEBUG) if (logfile) { IMAGE *wavelet = transform[0]->wavelet[frame]; int band = 0; fprintf(logfile, "Luminance wavelet, frame: %d, band: %d\n", frame, band); DumpArray16s("Lowpass Band", wavelet->band[band], wavelet->width, wavelet->height, wavelet->pitch, logfile); } #endif // Check that the requested frame is large enough to hold the decoded frame #if (0 && DEBUG) //if (! 
(info->width >= decoded_width)) { if (logfile) { //fprintf(logfile, "Requested frame not large enough to hold decoded frame: %d < %d\n", info->width, decoded_width); fprintf(logfile, "Output frame width: %d, decoded frame width: %d\n", info->width, decoded_width); } } #endif assert(info->width >= decoded_width); assert((info->height+7)/8 >= (decoded_height+7)/8); if (!(info->width >= decoded_width && (info->height+7)/8 >= (decoded_height+7)/8)) { decoder->error = CODEC_ERROR_FRAMESIZE; return; } #if (0 && DEBUG) if (logfile) { //SUBIMAGE subimage = SUBIMAGE_UPPER_LEFT(16, 16); SUBIMAGE subimage = SUBIMAGE_UPPER_RIGHT(16, 16); // Adjust the subimage to be at the middle of the right border //subimage.row += wavelet_height/2 - 8; DumpBand("SIF Image", wavelet, 0, &subimage, logfile); } #endif START(tk_inverse); if (resolution == DECODED_RESOLUTION_QUARTER) { int precision = codec->precision; // Reconstruct the frame to quarter resolution ReconstructQuarterFrame(decoder, num_channels, frame, output, pitch, info, &decoder->scratch, precision); } else // Was the first transform a frame transform (used for interlaced frames)? if (!progressive) { // Can the inverse frame transform and output byte packing be done in one pass? 
if ((resolution == DECODED_RESOLUTION_FULL) && (info->format == DECODED_FORMAT_YUYV || info->format == DECODED_FORMAT_UYVY)) { // Apply the inverse frame transform and pack the results into the output buffer int precision = codec->precision; #if (0 && DEBUG) DumpWaveletBandsPGM(wavelet, frame, num_channels); #endif #if _INTERLACED_WORKER_THREADS StartInterlaceWorkerThreads(decoder); //TODO: support new threading // Send the upper and lower rows of the transforms to the worker threads TransformInverseFrameThreadedToYUV(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision); #else // Transform the wavelets for each channel to the output image (not threaded) TransformInverseFrameToYUV(transform_array, frame, num_channels, output, pitch, info, &decoder->scratch, chroma_offset, precision); #endif } //#if BUILD_PROSPECT else if (resolution == DECODED_RESOLUTION_FULL && info->format == DECODED_FORMAT_YR16) { // Apply the inverse frame transform and output rows of luma and chroma //DWORD dwThreadID1; //DWORD dwThreadID2; //HANDLE thread1; //HANDLE thread2; int precision = codec->precision; #if _INTERLACED_WORKER_THREADS StartInterlaceWorkerThreads(decoder); //TODO: support new threading // Send the upper and lower rows of the transforms to the worker threads TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels, (PIXEL16U *)output, pitch, info, chroma_offset, precision); #else // Transform the wavelets for each channel to the output image (not threaded) TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels, (PIXEL16U *)output, pitch, info, &decoder->scratch, chroma_offset, precision); #endif } //#endif else { // Reconstruct the frame as separate planes and combine the planes into a packed output image int channel; if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY) { int scale = 13; for (channel = 0; channel < num_channels; channel++) { lowpass_images[channel] = transform_array[channel]->wavelet[5]; 
if(lowpass_images[channel] == NULL) // therefore IntreFrame compressed. { scale = 12; lowpass_images[channel] = transform_array[channel]->wavelet[2]; } } STOP(tk_inverse); CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset, scale, decoder->codec.encoded_format, decoder->frame.white_point); START(tk_inverse); } else // In SIF resolution, no need to reconstruct the bottom-level wavelet transforms // Just copy the lowpass images directly into output frame if (resolution == DECODED_RESOLUTION_HALF) { int precision = codec->precision; for (channel = 0; channel < num_channels; channel++) { lowpass_images[channel] = transform_array[channel]->wavelet[frame]; } STOP(tk_inverse); CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset, precision, decoder->codec.encoded_format, decoder->frame.white_point); START(tk_inverse); } // In full resolution, reconstruct the frame wavelet and // convert the YUYV output to the specified color format else { int precision = codec->precision; TransformInverseFrameToBuffer(transform_array, frame, num_channels, output, pitch, info, &decoder->scratch, chroma_offset, precision); } } } else // The first transform was a spatial transform (used for progressive frames) { // Can the inverse frame transform and output byte packing be done in one pass? 
if ((resolution == DECODED_RESOLUTION_FULL) && (info->format == DECODED_FORMAT_YUYV || info->format == DECODED_FORMAT_UYVY) && // Output YUV decoder->thread_cntrl.capabilities & _CPU_FEATURE_SSE2) { int precision = codec->precision; //DWORD dwThreadID1; //DWORD dwThreadID2; //HANDLE thread1; //HANDLE thread2; // Apply the inverse frame transform and pack the results into the output buffer #if _THREADED if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { uint8_t *pixoutput = output; if(decoder->use_active_metadata_decoder) //WIP { TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, pixoutput, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sBayerThruLUT); } else { TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, pixoutput, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sToBayerYUV); } } else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sRGB2YUV); } else { TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sToYUV); } #else //TODO : Accelerated BAYER for single thread decoding. 
assert(0); // Transform the wavelets for each channel to the output image (not threaded) //TransformInverseSpatialToYUV(decoder, transform_array, frame, num_channels, output, pitch, info, // &decoder->scratch, chroma_offset, precision); #endif } else if ((resolution == DECODED_RESOLUTION_FULL) && decoder->codec.encoded_format == ENCODED_FORMAT_BAYER && (info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32) && // Output RGB decoder->thread_cntrl.capabilities & _CPU_FEATURE_SSE2 && decoder->use_active_metadata_decoder) { int precision = codec->precision; //DWORD dwThreadID1; //DWORD dwThreadID2; //HANDLE thread1; //HANDLE thread2; // Apply the inverse frame transform and pack the results into the output buffer #if _THREADED { uint8_t *pixoutput = output; if(info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32) { pixoutput += (info->height-1)*pitch; pitch = -pitch; } TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, pixoutput, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sBayerThruLUT); } #endif } //#if BUILD_PROSPECT else if (resolution == DECODED_RESOLUTION_FULL && info->format == DECODED_FORMAT_YR16) { // Apply the inverse frame transform and output rows of luma and chroma int precision = codec->precision; #if _THREADED TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels, (uint8_t *)output, pitch, info, chroma_offset, precision); #else // Transform the wavelets for each channel to the output image (not threaded) TransformInverseSpatialToRow16u(transform_array, frame, num_channels, (PIXEL16U *)output, pitch, info, &decoder->scratch, chroma_offset, precision); #endif } //#endif else { // Reconstruct the frame as separate planes and combine the planes into a packed output image int channel; if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY) { //int precision = codec->precision; int scale = 13; //DAN20081203 -- fix for 444 decodes in AE32-bit float 
decoder->frame.white_point = 16; //decoder->frame.signed_pixels = 0; for (channel = 0; channel < num_channels; channel++) { lowpass_images[channel] = transform_array[channel]->wavelet[5]; if(lowpass_images[channel] == NULL) // therefore IntreFrame compressed. { scale = 12; lowpass_images[channel] = transform_array[channel]->wavelet[2]; } } STOP(tk_inverse); CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset, scale, decoder->codec.encoded_format, decoder->frame.white_point); START(tk_inverse); } else // In SIF resolution, no need to reconstruct the bottom-level wavelet transforms // Just copy the lowpass images directly into output frame if (resolution == DECODED_RESOLUTION_HALF || resolution == DECODED_RESOLUTION_HALF_NODEBAYER)// || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) { int precision = codec->precision; for (channel = 0; channel < num_channels; channel++) { lowpass_images[channel] = transform_array[channel]->wavelet[frame]; #if (0 && DEBUG) if (logfile) { char label[_MAX_PATH]; char *format = decoded_format_string[info->format]; sprintf(label, "Output, channel: %d, format: %s", channel, format); DumpImageStatistics(label, lowpass_images[channel], logfile); } #endif } STOP(tk_inverse); #if 1 //|| BAYER_SUPPORT if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { #if _THREADED WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; // Set the work count to the number of rows to process 
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); #else //unsigned short scanline[4096*3],*sptr; //unsigned short scanline2[4096*3],*sptr2; unsigned short *scanline,*sptr; unsigned short *scanline2,*sptr2; char *buffer = decoder->scratch.free_ptr; size_t buffer_size = decoder->scratch.free_size; IMAGE *g_image = lowpass_images[0]; IMAGE *rg_image = lowpass_images[1]; IMAGE *bg_image = lowpass_images[2]; IMAGE *gd_image = lowpass_images[3]; uint8_t *outyuv,*line = output; PIXEL *bayer_line, *bayerptr; PIXEL *G,*RG,*BG,*GD; int x,y; int bayer_pitch = info->width*4; int format = info->format; bool inverted = false; int maxbound = 4095; //10-bit source int midpoint = 32768>>3; int shift = 4; if(precision == 12) { maxbound = 16383; midpoint = 32768>>1; shift = 2; } if(buffer_size < info->width * 2 * 3 * 2) assert(0); // not enough memory if (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB32) { inverted = true; line += (info->height-1)*pitch; pitch = -pitch; } scanline = (unsigned short *)buffer; buffer += info->width * 2 * 3; scanline2 = (unsigned short *)buffer; G = g_image->band[0]; RG = rg_image->band[0]; BG = bg_image->band[0]; for(y=0; y<info->height; y++) { uint8_t *newline = line; PIXEL *newG=G,*newRG=RG,*newBG=BG; PIXEL *gptr,*rgptr,*bgptr,*gdptr; int r,g,b,rg,bg,y1,y2,u,v; int r1,g1,b1; int i; newline += pitch*y; newG += y * (g_image->pitch / sizeof(PIXEL)); newRG += y * (rg_image->pitch / sizeof(PIXEL)); newBG += y * (bg_image->pitch / sizeof(PIXEL)); gptr = newG; rgptr = newRG; bgptr = newBG; sptr = scanline; for(x=0; x<info->width; x++) { g = (*gptr++); if(g > maxbound) g = maxbound; rg = (*rgptr++); bg = (*bgptr++); r = (rg<<1) - midpoint + g; b = (bg<<1) - midpoint + g; if(r > maxbound) r = maxbound; if(b > 
maxbound) b = maxbound; if(r < 0) r = 0; if(g < 0) g = 0; if(b < 0) b = 0; *sptr++ = r<<shift; *sptr++ = g<<shift; *sptr++ = b<<shift; } { int flags = 0; int whitebitdepth = 16; sptr = scanline; if(decoder->apply_color_active_metadata) sptr = ApplyActiveMetaData(decoder, info->width, 1, y, scanline, scanline2, info->format, &whitebitdepth, &flags); ConvertLinesToOutput(decoder, info->width, 1, sptr, newline, y, pitch, info->format, whitebitdepth, flags); } } #endif } else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { IMAGE *g_image = lowpass_images[0]; IMAGE *rg_image = lowpass_images[1]; IMAGE *bg_image = lowpass_images[2]; uint8_t *line = output; unsigned char *rgb8; PIXEL *G,*RG,*BG; int x,y; G = g_image->band[0]; RG = rg_image->band[0]; BG = bg_image->band[0]; if(info->format == DECODED_FORMAT_RGB32) { line = output; line += (info->height-1) * pitch; for(y=0; y<info->height; y++) { PIXEL *gptr,*rgptr,*bgptr; int r,g,b; int i,noisearray[32]; for(i=0; i<32; i++) { noisearray[i] = (rand() & 63); } gptr = G; rgptr = RG; bgptr = BG; rgb8 = (unsigned char *)line; for(x=0; x<info->width; x++) { int rnd = noisearray[x&31]; g = ((*gptr++) + rnd) >> 6; r = ((*rgptr++) + rnd) >> 6; b = ((*bgptr++) + rnd) >> 6; if(r < 0) r=0; if(r > 255) r=255; if(g < 0) g=0; if(g > 255) g=255; if(b < 0) b=0; if(b > 255) b=255; *rgb8++ = b; *rgb8++ = g; *rgb8++ = r; *rgb8++ = 255; } line -= pitch; G += g_image->pitch / sizeof(PIXEL); RG += rg_image->pitch / sizeof(PIXEL); BG += bg_image->pitch / sizeof(PIXEL); } } else if(info->format == DECODED_FORMAT_RGB24) { line = output; line += (info->height-1) * pitch; for(y=0; y<info->height; y++) { PIXEL *gptr,*rgptr,*bgptr; int r,g,b; int i,noisearray[32]; for(i=0; i<32; i++) { noisearray[i] = (rand() & 63); } gptr = G; rgptr = RG; bgptr = BG; rgb8 = (unsigned char *)line; for(x=0; x<info->width; x++) { int rnd = noisearray[x&31]; g = ((*gptr++) + rnd) >> 6; r = 
((*rgptr++) + rnd) >> 6; b = ((*bgptr++) + rnd) >> 6; if(r < 0) r=0; if(r > 255) r=255; if(g < 0) g=0; if(g > 255) g=255; if(b < 0) b=0; if(b > 255) b=255; *rgb8++ = b; *rgb8++ = g; *rgb8++ = r; } line -= pitch; G += g_image->pitch / sizeof(PIXEL); RG += rg_image->pitch / sizeof(PIXEL); BG += bg_image->pitch / sizeof(PIXEL); } } } else #endif { CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset, precision, decoder->codec.encoded_format, decoder->frame.white_point); } START(tk_inverse); #if (0 && DEBUG) if (logfile) { char label[_MAX_PATH]; int width = info->width; int height = info->height; sprintf(label, "Output"); DumpBufferStatistics(label, output, width, height, pitch, logfile); } #endif } // In full resolution, reconstruct the frame wavelet and // convert the YUYV output to the specified color format else { // Handle inversion of the output image in this routine FRAME_INFO info2; int format; bool inverted = false; int precision = codec->precision; memcpy(&info2, info, sizeof(FRAME_INFO)); format = info2.format; if (format == DECODED_FORMAT_RGB24) { format = DECODED_FORMAT_RGB24_INVERTED; info2.format = format; inverted = true; } else if (format == DECODED_FORMAT_RGB32) { format = DECODED_FORMAT_RGB32_INVERTED; info2.format = format; inverted = true; } // Have the output location and pitch been inverted? if (inverted && pitch > 0) { int height = info->height; if(resolution == DECODED_RESOLUTION_FULL_DEBAYER) height *= 2; output += (height - 1) * pitch; // Start at the bottom row pitch = NEG(pitch); // Negate the pitch to go up } //#if BUILD_PROSPECT // Output the frame in V210 foramt? 
if( (format == DECODED_FORMAT_V210 || format == DECODED_FORMAT_YU64) && decoder->codec.encoded_format != ENCODED_FORMAT_BAYER ) { //char *buffer = decoder->buffer; //size_t buffer_size = decoder->buffer_size; int precision = codec->precision; // The output buffer is an array of 10-bit pixels packed into double words #if 0 TransformInverseSpatialToV210(transform_array, frame, num_channels, output, pitch, &info2, buffer, buffer_size, chroma_offset, decoder->codec.precision); #else TransformInverseSpatialToV210(transform_array, frame, num_channels, output, pitch, &info2, &decoder->scratch, chroma_offset, precision); #endif } else //#endif // Decoding a full resolution progressive frame to a Bayer output format? if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { //char *buffer = decoder->buffer; //size_t buffer_size = decoder->buffer_size; int precision = codec->precision; // PIXEL16U *RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*sizeof(PIXEL), 16); if(decoder->RawBayer16 == NULL) { #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; size_t size = info->width*decoded_height*4*sizeof(PIXEL); decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, size, 16); #else decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*sizeof(PIXEL), 16); #endif decoder->RawBayerSize = info->width*decoded_height*4*sizeof(PIXEL); } //TODO: Replace this memory allocation with a scratch buffer allocation //#ifdef SHARPENING if(decoder->RGBFilterBuffer16 == NULL) { #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; size_t size = info->width*decoded_height*4*3*sizeof(PIXEL); decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, size, 16); #else decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*3*sizeof(PIXEL), 16); #endif decoder->RGBFilterBufferSize = info->width*decoded_height*4*3*sizeof(PIXEL); } //#endif if(decoder->RawBayer16 == NULL || 
decoder->RGBFilterBuffer16 == NULL) { decoder->error = CODEC_ERROR_MEMORY_ALLOC; return; } if(decoder->RawBayer16) { uint8_t *line; PIXEL16U *bayer_line, *bayerptr, *outA16, *outB16; PIXEL16U *G,*RG,*BG,*GD; int x,y; int bayer_pitch = info->width*4; //float scale = 256.0; //int matrix_non_unity = 0; //int wb_non_unity = 0; //float curve2lin[2048]; //float lin2curve[2048+512+2]; #if 0 static float rgb2yuv[3][4] = { {0.183f, 0.614f, 0.062f, 16.0f/256.0f}, {-0.101f,-0.338f, 0.439f, 0.5f}, {0.439f,-0.399f,-0.040f, 0.5f} }; float mtrx[3][4] = { {1.0f, 0, 0, 0}, {0, 1.0f, 0, 0}, {0, 0, 1.0f, 0} }; float whitebalance[3] = { 1.0f, 1.0f, 1.0f }; #endif #if 0 // Matrix disabled as it can only be correct handled by the 3D LUT due to the required linear conversions /* if(decoder->cfhddata.MagicNumber == CFHDDATA_MAGIC_NUMBER && decoder->cfhddata.version >= 2) { float fval = 0.0; int i; for(i=0; i<12; i++) { mtrx[i>>2][i&3] = fval = decoder->cfhddata.colormatrix[i>>2][i&3]; if((i>>2) == (i&3)) { if(fval != 1.0) { matrix_non_unity = 1; } } else { if(fval != 0.0) { matrix_non_unity = 1; } } } // not active as VFW isn't yet support the 3D LUTs if(decoder->cfhddata.version >= 5) { int j; float encode_curvebase = 90.0; float decode_curvebase = 90.0; int encode_curve_type = decoder->cfhddata.encode_curve >> 16; int decode_curve_type = decoder->cfhddata.decode_curve >> 16; if(decoder->cfhddata.user_white_balance[0] > 0.0) { wb_non_unity = 1; whitebalance[0] = decoder->cfhddata.user_white_balance[0]; whitebalance[1] = (decoder->cfhddata.user_white_balance[1]+decoder->cfhddata.user_white_balance[2])/2.0; whitebalance[2] = decoder->cfhddata.user_white_balance[3]; } if(encode_curve_type) //1 or 2 encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff); else { encode_curve_type = 1; encode_curvebase = 90.0; } if(decode_curve_type) //1 or 2 decode_curvebase = (float)((decoder->cfhddata.decode_curve >> 8) & 0xff) / 
(float)(decoder->cfhddata.decode_curve & 0xff); else { decode_curve_type = 1; decode_curvebase = 90.0; } for(j=0; j<2048; j++) { if(encode_curve_type == 1) curve2lin[j] = CURVE_LOG2LIN((float)j/2047.0,encode_curvebase); else curve2lin[j] = CURVE_GAM2LIN((float)j/2047.0,encode_curvebase); } for(j=-512; j<=2048; j++) // -1 to +4 { if(encode_curve_type == CURVE_TYPE_LOG) lin2curve[j+512] = CURVE_LIN2LOG((float)j/512.0,encode_curvebase); else lin2curve[j+512] = CURVE_LIN2GAM((float)j/512.0,encode_curvebase); } } }*/ #endif #if _THREADED TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels, (uint8_t *)decoder->RawBayer16, bayer_pitch*sizeof(PIXEL), info, chroma_offset, precision); #else // Decode that last transform to rows of Bayer data (one row per channel) TransformInverseSpatialToRow16u(transform_array, frame, num_channels, decoder->RawBayer16, bayer_pitch*sizeof(PIXEL), info, &decoder->scratch, chroma_offset, precision); #endif if(resolution == DECODED_RESOLUTION_FULL_DEBAYER && (info->format < DECODED_FORMAT_BYR1 || info->format > DECODED_FORMAT_BYR4)) { #if _THREADED //DemosaicRAW WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); #else 
assert(0) // old code disabled /* int bayer_format = decoder->cfhddata.bayer_format; unsigned char *outA8, *outB8; unsigned short *lineStartA16, *lineStartB16; unsigned short *lineA16, *lineB16; // int stats1=0, stats2=0, statsd=0; // double dstats1=0, dstats2=0, dstatsd=0; line = output; bayer_line = decoder->RawBayer16; for(y=0; y<info->height+DEMOSAIC_DELAYLINES; y++) { bayer_line = decoder->RawBayer16; bayer_line += bayer_pitch * y; if(y<info->height) { ColorDifference2Bayer(info->width, bayer_line, bayer_pitch, bayer_format); } if(y>=3+DEMOSAIC_DELAYLINES && y<info->height-3+DEMOSAIC_DELAYLINES) //middle scanline { unsigned short *delayptr = decoder->RawBayer16; delayptr += bayer_pitch * (y-DEMOSAIC_DELAYLINES); BayerRippleFilter(info->width, delayptr, bayer_pitch, bayer_format, decoder->RawBayer16); } if(y>=DEMOSAIC_DELAYLINES) { int delay_y = y - DEMOSAIC_DELAYLINES; unsigned short *sptr, scanline[8192*3]; outA8 = line; line += pitch; outB8 = line; line += pitch; sptr = scanline; DebayerLine(info->width*2, info->height*2, delay_y*2, decoder->RawBayer16, bayer_format, sptr, sharpening); for(x=0; x<info->width*2; x++) { outA8[2] = *sptr++>>8; outA8[1] = *sptr++>>8; outA8[0] = *sptr++>>8; outA8+=3; } for(x=0; x<info->width*2; x++) { outB8[2] = *sptr++>>8; outB8[1] = *sptr++>>8; outB8[0] = *sptr++>>8; outB8+=3; } } }*/ #endif // _THREADED } else if(format == DECODED_FORMAT_BYR2 || format == DECODED_FORMAT_BYR4) { #if _THREADED WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; // Set the 
work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); #else assert(0) // old code disabled /* { int bayer_format = decoder->cfhddata.bayer_format; // int stats1=0, stats2=0, statsd=0; // double dstats1=0, dstats2=0, dstatsd=0; line = output; bayer_line = decoder->RawBayer16; for(y=0; y<info->height; y++) { outA16 = (PIXEL16U *)line; line += pitch; outB16 = (PIXEL16U *)line; line += pitch; bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; GD = BG + bayer_pitch/4; for(x=0; x<info->width; x++) { int r,g,b,rg,bg,gd,g1,g2,y1,y2,u,v,dither; g = (*G++); rg = (*RG++); bg = (*BG++); gd = (*GD++) - 32768; r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; g1 = g + gd; g2 = g - gd; //TODO: Is there a DC offset to gd (causes a check in output ) // stats1+=g1; // stats2+=g2; // statsd+=gd; if(r < 0) r = 0; if(g1 < 0) g1 = 0; if(g2 < 0) g2 = 0; if(b < 0) b = 0; if(r > 0xffff) r = 0xffff; if(g1 > 0xffff) g1 = 0xffff; if(g2 > 0xffff) g2 = 0xffff; if(b > 0xffff) b = 0xffff; switch(bayer_format) { case BAYER_FORMAT_RED_GRN: //Red-grn phase *outA16++ = r; *outA16++ = g1; *outB16++ = g2; *outB16++ = b; break; case BAYER_FORMAT_GRN_RED:// grn-red *outA16++ = g1; *outA16++ = r; *outB16++ = b; *outB16++ = g2; break; case BAYER_FORMAT_GRN_BLU: *outA16++ = g1; *outA16++ = b; *outB16++ = r; *outB16++ = g2; break; case BAYER_FORMAT_BLU_GRN: *outA16++ = b; *outA16++ = g1; *outB16++ = g2; *outB16++ = r; break; } } bayer_line += bayer_pitch; } if(decoder->flags & DECODER_FLAGS_HIGH_QUALITY) { int bayer_format = decoder->cfhddata.bayer_format; for(y=2; y<info->height-3; y++) { int offset = pitch>>1; line = output; //0 line += pitch * y * 2; // If on a red line, move to a blue line 
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_RED_GRN) line -= pitch; { int offset = pitch>>1; outA16 = (PIXEL16U *)line; outA16++; //g //for BAYER_FORMAT_RED_GRN input outA16++; //b outA16++; //g outA16++; //b //point to green pixel with *outA16 if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_GRN_BLU) outA16++; for(x=2; x<info->width-2; x++) { int mn,mx,g; int range = 8*256; //1<<11 int shift = 11; int delta; int alpha; g = *outA16; // lines below do not need to be tested for a corrected value mn = mx = outA16[offset+1]; if(mn > outA16[offset-1]) mn = outA16[offset-1]; if(mx < outA16[offset-1]) mx = outA16[offset-1]; if((outA16[-offset-1] & 1)==0) { if(mn > outA16[-offset-1]) mn = outA16[-offset-1]; if(mx < outA16[-offset-1]) mx = outA16[-offset-1]; } if((outA16[-offset+1] & 1)==0) { if(mn > outA16[-offset+1]) mn = outA16[-offset+1]; if(mx < outA16[-offset+1]) mx = outA16[-offset+1]; } delta = mx - mn; if(delta < range && ((mn-range < g && g < mn) || (mx+range > g && g > mx))) { int gmn,gmx; gmn = gmx = g; if((outA16[-2*offset-2] & 1)==0) { if(gmn > outA16[-2*offset-2]) gmn = outA16[-2*offset-2]; if(gmx < outA16[-2*offset-2]) gmx = outA16[-2*offset-2]; } if((outA16[-2*offset] & 1)==0) { if(gmn > outA16[-2*offset]) gmn = outA16[-2*offset]; if(gmx < outA16[-2*offset]) gmx = outA16[-2*offset]; } if((outA16[-2*offset+2] & 1)==0) { if(gmn > outA16[-2*offset+2]) gmn = outA16[-2*offset+2]; if(gmx < outA16[-2*offset+2]) gmx = outA16[-2*offset+2]; } if((outA16[-2] & 1)==0) { if(gmn > outA16[-2]) gmn = outA16[-2]; if(gmx < outA16[-2]) gmx = outA16[-2]; } // lines below do not need to be tested for a corrected value if(gmn > outA16[2*offset-2]) gmn = outA16[2*offset-2]; if(gmx < outA16[2*offset-2]) gmx = outA16[2*offset-2]; if(gmn > outA16[2*offset]) gmn = outA16[2*offset]; if(gmx < outA16[2*offset]) gmx = outA16[2*offset]; if(gmn > outA16[2*offset+2]) gmn = outA16[2*offset+2]; if(gmx < outA16[2*offset+2]) gmx = 
outA16[2*offset+2]; if(gmn > outA16[2]) gmn = outA16[2]; if(gmx < outA16[2]) gmx = outA16[2]; if((gmx - gmn) < range) { alpha = range;//delta; if(g > mx) { alpha *= (g-mx); //max range alpha >>= shift; } else // g < mn { alpha *= (mn-g); //max range alpha >>= shift; } alpha *= alpha; alpha >>= shift; // avg = (outA16[-offset-1] + outA16[offset-1] + outA16[-offset+1] + outA16[offset+1] + 2) >> 2; // *outA16 = avg; //good // *outA16 = mn; //spotty if( (abs(outA16[offset] - outA16[-offset]) < range) && ((abs(outA16[1] - outA16[-1]) < range))) { int val = (alpha*g + (range - alpha)*((mn+mx)>>1))>>shift; if(val > 0xffff) val = 0xffff; if(val < 0) val = 0; val |= 1; *outA16 = val; // *outA16 = ((mn+mx)>>1) | 1; // like avg but less compute } } } outA16++; //g outA16++; //b } } } } }*/ #endif } // Pack the rows of Bayer data (full resolution progressive) into BYR3 format? else if (format == DECODED_FORMAT_BYR3) { PIXEL16U *outR, *outG1, *outG2, *outB; // int stats1=0, stats2=0, statsd=0; // double dstats1=0, dstats2=0, dstatsd=0; // #pragma omp parallel for for(y=0; y<info->height; y++) { uint8_t *line = output; PIXEL *bayerptr = (PIXEL *)decoder->RawBayer16; line += pitch*2*y; bayerptr += bayer_pitch * y; outR = (PIXEL16U *)line; outG1 = outR + (pitch/4); outG2 = outR + (pitch/4)*2; outB = outR + (pitch/4)*3; G = (PIXEL16U *)bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; GD = BG + bayer_pitch/4; // Pack the rows of Bayer components into the BYR3 pattern #if (1 && XMMOPT) { __m128i *G_128 = (__m128i *)G; __m128i *RG_128 = (__m128i *)RG; __m128i *BG_128 = (__m128i *)BG; __m128i *GD_128 = (__m128i *)GD; __m128i *outR_128 = (__m128i *)outR; __m128i *outG1_128 = (__m128i *)outG1; __m128i *outG2_128 = (__m128i *)outG2; __m128i *outB_128 = (__m128i *)outB; __m128i limiter = _mm_set1_epi16(0x7fff - 0x3ff); __m128i midpoint1 = _mm_set1_epi16(32768>>6); __m128i midpoint2 = _mm_set1_epi16(32768>>5); int column_step = 8; int post_column = (info->width) - ((info->width) % 
column_step); for (x=0; x < post_column; x += column_step) { __m128i r_128; __m128i g1_128; __m128i g2_128; __m128i b_128; __m128i g_128; __m128i rg_128; __m128i bg_128; __m128i gd_128; g_128 = _mm_load_si128(G_128++); rg_128 = _mm_load_si128(RG_128++); bg_128 = _mm_load_si128(BG_128++); gd_128 = _mm_load_si128(GD_128++); g_128 = _mm_srli_epi16(g_128, 6); rg_128 = _mm_srli_epi16(rg_128, 5); bg_128 = _mm_srli_epi16(bg_128, 5); gd_128 = _mm_srli_epi16(gd_128, 6); gd_128 = _mm_subs_epi16(gd_128, midpoint1); rg_128 = _mm_subs_epi16(rg_128, midpoint2); bg_128 = _mm_subs_epi16(bg_128, midpoint2); r_128 = _mm_adds_epi16(rg_128, g_128); b_128 = _mm_adds_epi16(bg_128, g_128); g1_128 = _mm_adds_epi16(g_128, gd_128); g2_128 = _mm_subs_epi16(g_128, gd_128); r_128 = _mm_adds_epi16(r_128, limiter); r_128 = _mm_subs_epu16(r_128, limiter); g1_128 = _mm_adds_epi16(g1_128, limiter); g1_128 = _mm_subs_epu16(g1_128, limiter); g2_128 = _mm_adds_epi16(g2_128, limiter); g2_128 = _mm_subs_epu16(g2_128, limiter); b_128 = _mm_adds_epi16(b_128, limiter); b_128 = _mm_subs_epu16(b_128, limiter); _mm_store_si128(outR_128++, r_128); _mm_store_si128(outG1_128++, g1_128); _mm_store_si128(outG2_128++, g2_128); _mm_store_si128(outB_128++, b_128); } G = (PIXEL16U *)G_128; RG = (PIXEL16U *)RG_128; BG = (PIXEL16U *)BG_128; GD = (PIXEL16U *)GD_128; outR = (PIXEL16U *)outR_128; outG1 = (PIXEL16U *)outG1_128; outG2 = (PIXEL16U *)outG2_128; outB = (PIXEL16U *)outB_128; } #endif for(; x<info->width; x++) { int r,g,b,rg,bg,gd,g1,g2; g = (*G++); rg = (*RG++); bg = (*BG++); gd = (*GD++) - 32768; r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; g1 = g + gd; g2 = g - gd; //TODO: Is there a DC offset to gd (causes a check in output ) if(r < 0) r = 0; if(g1 < 0) g1 = 0; if(g2 < 0) g2 = 0; if(b < 0) b = 0; if(r > 0xffff) r = 0xffff; if(g1 > 0xffff) g1 = 0xffff; if(g2 > 0xffff) g2 = 0xffff; if(b > 0xffff) b = 0xffff; //Red-grn phase *outR++ = r>>6; *outG1++ = g1>>6; *outG2++ = g2>>6; *outB++ = b>>6; } } } // 
Pack the rows of Bayer data (full resolution progressive) into BYR4 format? else if (format == DECODED_FORMAT_BYR4) { int bayer_format = decoder->cfhddata.bayer_format; line = output; bayer_line = decoder->RawBayer16; for(y=0; y<info->height; y++) { outA16 = (PIXEL16U *)line; line += pitch; outB16 = (PIXEL16U *)line; line += pitch; bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; GD = BG + bayer_pitch/4; for(x=0; x<info->width; x++) { //int r,g,b,rg,bg,gd,g1,g2,y1,y2,u,v,dither; int32_t r, g, b, rg, bg, gd, g1, g2; // The output of the inverse transform is unsigned 16-bit integers const int midpoint = 32768; g = (*G++); rg = (*RG++); bg = (*BG++); gd = (*GD++) - midpoint; r = ((rg - midpoint)<<1) + g; b = ((bg - midpoint)<<1) + g; g1 = g + gd; g2 = g - gd; r = SATURATE_16U(r); g1 = SATURATE_16U(g1); g2 = SATURATE_16U(g2); b = SATURATE_16U(b); // stats1+=g1; // stats2+=g2; // statsd+=gd; switch(bayer_format) { case BAYER_FORMAT_RED_GRN: //Red-grn phase *outA16++ = r; *outA16++ = g1; *outB16++ = g2; *outB16++ = b; break; case BAYER_FORMAT_GRN_RED:// grn-red *outA16++ = g1; *outA16++ = r; *outB16++ = b; *outB16++ = g2; break; case BAYER_FORMAT_GRN_BLU: *outA16++ = g1; *outA16++ = b; *outB16++ = r; *outB16++ = g2; break; case BAYER_FORMAT_BLU_GRN: *outA16++ = b; *outA16++ = g1; *outB16++ = g2; *outB16++ = r; break; default: // Unsupported Bayer format assert(0); *outA16++ = 0; *outA16++ = 0; *outB16++ = 0; *outB16++ = 0; break; } } bayer_line += bayer_pitch; } if(decoder->flags & DECODER_FLAGS_HIGH_QUALITY) { for(y=2; y<info->height-3; y++) { //int offset = pitch>>1; line = output; //0 line += pitch * y * 2; // If on a red line, move to a blue line if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_RED_GRN) line -= pitch; { int offset = pitch>>1; outA16 = (PIXEL16U *)line; outA16++; //g //for BAYER_FORMAT_RED_GRN input outA16++; //b outA16++; //g outA16++; //b //point to green pixel with *outA16 if(bayer_format 
== BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_GRN_BLU) outA16++; for(x=2; x<info->width-2; x++) { int mn,mx,g; int range = 8*256; //1<<11 int shift = 11; int delta; int alpha; g = *outA16; // lines below do not need to be tested for a corrected value mn = mx = outA16[offset+1]; if(mn > outA16[offset-1]) mn = outA16[offset-1]; if(mx < outA16[offset-1]) mx = outA16[offset-1]; if((outA16[-offset-1] & 1)==0) { if(mn > outA16[-offset-1]) mn = outA16[-offset-1]; if(mx < outA16[-offset-1]) mx = outA16[-offset-1]; } if((outA16[-offset+1] & 1)==0) { if(mn > outA16[-offset+1]) mn = outA16[-offset+1]; if(mx < outA16[-offset+1]) mx = outA16[-offset+1]; } delta = mx - mn; if(delta < range && ((mn-range < g && g < mn) || (mx+range > g && g > mx))) { int gmn,gmx; gmn = gmx = g; if((outA16[-2*offset-2] & 1)==0) { if(gmn > outA16[-2*offset-2]) gmn = outA16[-2*offset-2]; if(gmx < outA16[-2*offset-2]) gmx = outA16[-2*offset-2]; } if((outA16[-2*offset] & 1)==0) { if(gmn > outA16[-2*offset]) gmn = outA16[-2*offset]; if(gmx < outA16[-2*offset]) gmx = outA16[-2*offset]; } if((outA16[-2*offset+2] & 1)==0) { if(gmn > outA16[-2*offset+2]) gmn = outA16[-2*offset+2]; if(gmx < outA16[-2*offset+2]) gmx = outA16[-2*offset+2]; } if((outA16[-2] & 1)==0) { if(gmn > outA16[-2]) gmn = outA16[-2]; if(gmx < outA16[-2]) gmx = outA16[-2]; } // lines below do not need to be tested for a corrected value if(gmn > outA16[2*offset-2]) gmn = outA16[2*offset-2]; if(gmx < outA16[2*offset-2]) gmx = outA16[2*offset-2]; if(gmn > outA16[2*offset]) gmn = outA16[2*offset]; if(gmx < outA16[2*offset]) gmx = outA16[2*offset]; if(gmn > outA16[2*offset+2]) gmn = outA16[2*offset+2]; if(gmx < outA16[2*offset+2]) gmx = outA16[2*offset+2]; if(gmn > outA16[2]) gmn = outA16[2]; if(gmx < outA16[2]) gmx = outA16[2]; if((gmx - gmn) < range) { alpha = range;//delta; if(g > mx) { alpha *= (g-mx); //max range alpha >>= shift; } else // g < mn { alpha *= (mn-g); //max range alpha >>= shift; } alpha *= alpha; alpha >>= shift; 
// avg = (outA16[-offset-1] + outA16[offset-1] + outA16[-offset+1] + outA16[offset+1] + 2) >> 2; // *outA16 = avg; //good // *outA16 = mn; //spotty if( (abs(outA16[offset] - outA16[-offset]) < range) && ((abs(outA16[1] - outA16[-1]) < range))) { int val = (alpha*g + (range - alpha)*((mn+mx)>>1))>>shift; if(val > 0xffff) val = 0xffff; if(val < 0) val = 0; val |= 1; *outA16 = val; // *outA16 = ((mn+mx)>>1) | 1; // like avg but less compute } } } outA16++; //g outA16++; //b } } } } // Linear restore { unsigned short *buff = (unsigned short *)output; //static int pos = 0; for(y=0; y<info->height*2; y++) { for(x=0; x<info->width*2; x++) { float val = (float)buff[y*info->width*2 + x]/65535.0f; float encode_curvebase = 90.0; int encode_curve_type = CURVE_TYPE_LOG; int encode_curve_neg; if((decoder->cfhddata.encode_curve)>>16) //1 or 2 { encode_curve_type = (decoder->cfhddata.encode_curve)>>16; if(encode_curve_type & CURVE_TYPE_EXTENDED) encode_curvebase = (float)(decoder->cfhddata.encode_curve & 0xffff); // use all 16-bits for larger log bases else encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff); } if(encode_curvebase == 1.0 && encode_curve_type <= CURVE_TYPE_LINEAR) encode_curve_type = CURVE_TYPE_LINEAR; encode_curve_neg = encode_curve_type & CURVE_TYPE_NEGATIVE; switch(encode_curve_type & CURVE_TYPE_MASK) { case CURVE_TYPE_LOG: val = CURVE_LOG2LIN(val,encode_curvebase); break; case CURVE_TYPE_GAMMA: val = CURVE_GAM2LIN(val,encode_curvebase); break; case CURVE_TYPE_CINEON: val = CURVE_CINEON2LIN(val,encode_curvebase); break; case CURVE_TYPE_CINE985: val = CURVE_CINE9852LIN(val,encode_curvebase); break; case CURVE_TYPE_PARA: val = CURVE_PARA2LIN(val,(int)((decoder->cfhddata.encode_curve >> 8) & 0xff), (int)(decoder->cfhddata.encode_curve & 0xff)); break; case CURVE_TYPE_CSTYLE: val = CURVE_CSTYLE2LIN((float)val,(int)((decoder->cfhddata.encode_curve >> 8) & 0xff)); break; case CURVE_TYPE_SLOG: val = 
CURVE_SLOG2LIN((float)val); break; case CURVE_TYPE_LOGC: val = CURVE_LOGC2LIN((float)val); break; case CURVE_TYPE_LINEAR: default: break; } buff[y*info->width*2 + x] = (int)(val*4095.0); } } } } else { #if _THREADED WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); #else //unsigned short scanline[8192*3],*sptr; //unsigned short scanline2[8192*3],*sptr2; unsigned short *scanline,*sptr; unsigned short *scanline2,*sptr2; char *buffer = decoder->scratch.free_ptr; size_t buffer_size = decoder->scratch.free_size; uint8_t *outyuv,*line = output; PIXEL *bayerptr; int x,y; if(buffer_size < info->width * 2 * 3 * 2) assert(0); // not enough memory scanline = (unsigned short *)buffer; buffer += info->width * 2 * 3; scanline2 = (unsigned short *)buffer; line = output; bayer_line = decoder->RawBayer16; for(y=0; y<info->height; y++) { int r,g,b,rg,bg,y1,y2,u,v; int r1,g1,b1; int i; __m128i gggggggg,ggggggg2,rgrgrgrg,bgbgbgbg; __m128i rrrrrrrr,bbbbbbbb; __m128i mid8192 = _mm_set1_epi16(8192); __m128i mid16384 = _mm_set1_epi16(16384); __m128i mid32768 = _mm_set1_epi16(32768); __m128i overflowprotectRGB_epi16 = _mm_set1_epi16(0x7fff-0x3fff); int sse2width = info->width & 0xfff8; 
bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; GD = BG + bayer_pitch/4; sptr = scanline; x = 0; for(; x<sse2width; x+=8) { gggggggg = _mm_loadu_si128((__m128i *)G); G+=8; rgrgrgrg = _mm_loadu_si128((__m128i *)RG); RG+=8; bgbgbgbg = _mm_loadu_si128((__m128i *)BG); BG+=8; ggggggg2 = _mm_srli_epi16(gggggggg, 2);// 0-16383 14bit unsigned rgrgrgrg = _mm_srli_epi16(rgrgrgrg, 2);// 14bit unsigned bgbgbgbg = _mm_srli_epi16(bgbgbgbg, 2);// 14bit unsigned rrrrrrrr = _mm_subs_epi16(rgrgrgrg, mid8192);// -8191 to 8191 14bit signed rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 1); // -16382 to 16382 15bit signed rrrrrrrr = _mm_adds_epi16(rrrrrrrr, ggggggg2); // -16382 to 32767 bbbbbbbb = _mm_subs_epi16(bgbgbgbg, mid8192);// -8191 to 8191 14bit signed bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 1); // -16382 to 16382 15bit signed bbbbbbbb = _mm_adds_epi16(bbbbbbbb, ggggggg2); // -16382 to 32767 //limit to 0 to 16383 rrrrrrrr = _mm_adds_epi16(rrrrrrrr, overflowprotectRGB_epi16); rrrrrrrr = _mm_subs_epu16(rrrrrrrr, overflowprotectRGB_epi16); //limit to 0 to 16383 bbbbbbbb = _mm_adds_epi16(bbbbbbbb, overflowprotectRGB_epi16); bbbbbbbb = _mm_subs_epu16(bbbbbbbb, overflowprotectRGB_epi16); rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 2); // restore to 0 to 65535 bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 2); // restore to 0 to 65535 *sptr++ = _mm_extract_epi16(rrrrrrrr, 0); *sptr++ = _mm_extract_epi16(gggggggg, 0); *sptr++ = _mm_extract_epi16(bbbbbbbb, 0); *sptr++ = _mm_extract_epi16(rrrrrrrr, 1); *sptr++ = _mm_extract_epi16(gggggggg, 1); *sptr++ = _mm_extract_epi16(bbbbbbbb, 1); *sptr++ = _mm_extract_epi16(rrrrrrrr, 2); *sptr++ = _mm_extract_epi16(gggggggg, 2); *sptr++ = _mm_extract_epi16(bbbbbbbb, 2); *sptr++ = _mm_extract_epi16(rrrrrrrr, 3); *sptr++ = _mm_extract_epi16(gggggggg, 3); *sptr++ = _mm_extract_epi16(bbbbbbbb, 3); *sptr++ = _mm_extract_epi16(rrrrrrrr, 4); *sptr++ = _mm_extract_epi16(gggggggg, 4); *sptr++ = _mm_extract_epi16(bbbbbbbb, 4); *sptr++ = 
_mm_extract_epi16(rrrrrrrr, 5); *sptr++ = _mm_extract_epi16(gggggggg, 5); *sptr++ = _mm_extract_epi16(bbbbbbbb, 5); *sptr++ = _mm_extract_epi16(rrrrrrrr, 6); *sptr++ = _mm_extract_epi16(gggggggg, 6); *sptr++ = _mm_extract_epi16(bbbbbbbb, 6); *sptr++ = _mm_extract_epi16(rrrrrrrr, 7); *sptr++ = _mm_extract_epi16(gggggggg, 7); *sptr++ = _mm_extract_epi16(bbbbbbbb, 7); } for(; x<info->width; x++) { g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; if(r < 0) r = 0; if(r > 0xffff) r = 0xffff; if(g < 0) g = 0; if(g > 0xffff) g = 0xffff; if(b < 0) b = 0; if(b > 0xffff) b = 0xffff; *sptr++ = r; *sptr++ = g; *sptr++ = b; } { int flags = 0; int whitebitdepth = 16; sptr = scanline; if(decoder->apply_color_active_metadata) sptr = ApplyActiveMetaData(decoder, info->width, 1, y, scanline, scanline2, info->format, &whitebitdepth, &flags); ConvertLinesToOutput(decoder, info->width, 1, sptr, line, pitch, info->format, whitebitdepth, flags); } line += pitch; bayer_line += bayer_pitch; } #endif } /* // switch to using the ApplyActiveMetaData() and ConvertLinesToOutput() calls - DAN20071201 // Pack the rows of Bayer data (full resolution progressive) into BYR2 format? 
else if (format == DECODED_FORMAT_YUYV) { line = output; bayer_line = decoder->RawBayer16; scale = 256.0; y_rmult = ((rgb2yuv[0][0]) * scale); y_gmult = ((rgb2yuv[0][1]) * scale); y_bmult = ((rgb2yuv[0][2]) * scale); y_offset= ((rgb2yuv[0][3]) * scale); u_rmult = ((rgb2yuv[1][0]) * scale); u_gmult = ((rgb2yuv[1][1]) * scale); u_bmult = ((rgb2yuv[1][2]) * scale); u_offset= ((rgb2yuv[1][3]) * scale); v_rmult = ((rgb2yuv[2][0]) * scale); v_gmult = ((rgb2yuv[2][1]) * scale); v_bmult = ((rgb2yuv[2][2]) * scale); v_offset= ((rgb2yuv[2][3]) * scale); r_rmult= (mtrx[0][0] * scale * whitebalance[0]); r_gmult= (mtrx[0][1] * scale * whitebalance[1]); r_bmult= (mtrx[0][2] * scale * whitebalance[2]); r_offset= (mtrx[0][3] * scale); g_rmult= (mtrx[1][0] * scale * whitebalance[0]); g_gmult= (mtrx[1][1] * scale * whitebalance[1]); g_bmult= (mtrx[1][2] * scale * whitebalance[2]); g_offset= (mtrx[1][3] * scale); b_rmult= (mtrx[2][0] * scale * whitebalance[0]); b_gmult= (mtrx[2][1] * scale * whitebalance[1]); b_bmult= (mtrx[2][2] * scale * whitebalance[2]); b_offset= (mtrx[2][3] * scale); for(y=0; y<info->height; y++) { outyuv = line; bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; for(x=0; x<info->width; x+=2) { int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v,dither; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; // dither = (rand() & 65535)<<1; if(matrix_non_unity) { //TODO : need on convert to linear first. r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>8); g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>8); b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>8); //TODO : need on convert back to log/display curve. 
if(r1 < 0) r1 = 0; if(r1 > 65535) r1 = 65535; if(g1 < 0) g1 = 0; if(g1 > 65535) g1 = 65535; if(b1 < 0) b1 = 0; if(b1 > 65535) b1 = 65535; } else { r1 = r; g1 = g; b1 = b; } y1= ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16; u = (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16; v = ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; // dither = (rand() & 65535)<<1; if(matrix_non_unity) { //TODO : need on convert to linear first. r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>8); g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>8); b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>8); //TODO : need on convert back to log/display curve. if(r1 < 0) r1 = 0; if(r1 > 65535) r1 = 65535; if(g1 < 0) g1 = 0; if(g1 > 65535) g1 = 65535; if(b1 < 0) b1 = 0; if(b1 > 65535) b1 = 65535; } else { r1 = r; g1 = g; b1 = b; } y2 = ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16; u += (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16; v += ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16; u >>= 1; v >>= 1; y1 += y_offset; y2 += y_offset; u += u_offset; v += v_offset; if(y1 < 0) y1 = 0; if(y1 > 255) y1 = 255; if(y2 < 0) y2 = 0; if(y2 > 255) y2 = 255; if(u < 0) u = 0; if(u > 255) u = 255; if(v < 0) v = 0; if(v > 255) v = 255; *outyuv++ = y1; *outyuv++ = u; *outyuv++ = y2; *outyuv++ = v; } line += pitch; bayer_line += bayer_pitch; } } else if (format == DECODED_FORMAT_YU64) { int shift = 14; PIXEL16U *outyuv64; line = output; bayer_line = decoder->RawBayer16; scale = 16384.0; //_mm_empty(); // Clear the mmx register state y_rmult = ((rgb2yuv[0][0]) * scale); y_gmult = ((rgb2yuv[0][1]) * scale); y_bmult = ((rgb2yuv[0][2]) * scale); y_offset= ((rgb2yuv[0][3]) * scale * 4.0); u_rmult = ((rgb2yuv[1][0]) * scale); u_gmult = ((rgb2yuv[1][1]) * scale); u_bmult = ((rgb2yuv[1][2]) * scale); u_offset= ((rgb2yuv[1][3]) * 
scale * 4.0); v_rmult = ((rgb2yuv[2][0]) * scale); v_gmult = ((rgb2yuv[2][1]) * scale); v_bmult = ((rgb2yuv[2][2]) * scale); v_offset= ((rgb2yuv[2][3]) * scale * 4.0); scale = 4096.0; r_rmult= (mtrx[0][0] * scale * whitebalance[0]); r_gmult= (mtrx[0][1] * scale * whitebalance[1]); r_bmult= (mtrx[0][2] * scale * whitebalance[2]); r_offset= (mtrx[0][3] * scale); g_rmult= (mtrx[1][0] * scale * whitebalance[0]); g_gmult= (mtrx[1][1] * scale * whitebalance[1]); g_bmult= (mtrx[1][2] * scale * whitebalance[2]); g_offset= (mtrx[1][3] * scale); b_rmult= (mtrx[2][0] * scale * whitebalance[0]); b_gmult= (mtrx[2][1] * scale * whitebalance[1]); b_bmult= (mtrx[2][2] * scale * whitebalance[2]); b_offset= (mtrx[2][3] * scale); y_offset += 26; u_offset += 26; v_offset += 26; for(y=0; y<info->height; y++) { outyuv64 = (PIXEL16U *)line; bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; for(x=0; x<info->width; x+=2) { int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v,dither; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; // dither = (rand() & 65535)<<1; if(matrix_non_unity) { //TODO : need on convert to linear first. r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>12); g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>12); b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>12); //TODO : need on convert back to log/display curve. if(r1 < 0) r1 = 0; if(r1 > 65535) r1 = 65535; if(g1 < 0) g1 = 0; if(g1 > 65535) g1 = 65535; if(b1 < 0) b1 = 0; if(b1 > 65535) b1 = 65535; } else { r1 = r; g1 = g; b1 = b; } y1= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset; u = (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift); v = (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift); g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; // dither = (rand() & 65535)<<1; if(matrix_non_unity) { //TODO : need on convert to linear first. 
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>12); g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>12); b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>12); //TODO : need on convert back to log/display curve. if(r1 < 0) r1 = 0; if(r1 > 65535) r1 = 65535; if(g1 < 0) g1 = 0; if(g1 > 65535) g1 = 65535; if(b1 < 0) b1 = 0; if(b1 > 65535) b1 = 65535; } else { r1 = r; g1 = g; b1 = b; } y2= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset; u+= (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift); v+= (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift); u >>= 1; v >>= 1; u += u_offset; v += v_offset; if(y1 < 0) y1 = 0; if(y1 > 65535) y1 = 65535; if(y2 < 0) y2 = 0; if(y2 > 65535) y2 = 65535; if(u < 0) u = 0; if(u > 65535) u = 65535; if(v < 0) v = 0; if(v > 65535) v = 65535; *outyuv64++ = y1; *outyuv64++ = v; *outyuv64++ = y2; *outyuv64++ = u; } line += pitch; bayer_line += bayer_pitch; } } else //RGBs { line = output; bayer_line = decoder->RawBayer16; scale = 256.0; r_rmult = (mtrx[0][0]) * scale * whitebalance[0]; r_gmult = (mtrx[0][1]) * scale * whitebalance[1]; r_bmult = (mtrx[0][2]) * scale * whitebalance[2]; r_offset= (mtrx[0][3]) * scale; g_rmult = (mtrx[1][0]) * scale * whitebalance[0]; g_gmult = (mtrx[1][1]) * scale * whitebalance[1]; g_bmult = (mtrx[1][2]) * scale * whitebalance[2]; g_offset= (mtrx[1][3]) * scale; b_rmult = (mtrx[2][0]) * scale * whitebalance[0]; b_gmult = (mtrx[2][1]) * scale * whitebalance[1]; b_bmult = (mtrx[2][2]) * scale * whitebalance[2]; b_offset= (mtrx[2][3]) * scale; for(y=0; y<info->height; y++) { int i,noisearray[32]; outyuv = line; bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; GD = RG + bayer_pitch/4; for(i=0; i<32; i++) { noisearray[i] = (rand() & 127); } if(info->format == DECODED_FORMAT_RGB32) { for(x=0; x<info->width; x++) { int R1,G1,B1; int rnd = noisearray[x&31]; // *ptr++ = *bayerptr++ >> 8; // *ptr++ = 0x80; // *ptr++ = 
*bayerptr++ >> 8; // *ptr++ = 0x80; int r,g,b,g1,g2,gdiff,y1,y2,u,v; // g = (g1+g2)>>1; // *g_row_ptr++ = g; // *rg_row_ptr++ = (r-g+256)>>1; // *bg_row_ptr++ = (b-g+256)>>1; // *gdiff_row_ptr++ = (g1-g2+256)>>1; g = ((*G++)>>1); r = ((*RG++ + 64)>>0)-(256<<7)+g; b = ((*BG++ + 64)>>0)-(256<<7)+g; // gdiff = ((*GD++ + 64)>>7)-256+g; if(matrix_non_unity) { //TODO : need on convert to linear first. R1 = ((r*r_rmult + g*r_gmult + b*r_bmult + r_offset)>>8) + rnd; G1 = ((r*g_rmult + g*g_gmult + b*g_bmult + g_offset)>>8) + rnd; B1 = ((r*b_rmult + g*b_gmult + b*b_bmult + b_offset)>>8) + rnd; //TODO : need on convert back to log/display curve. } else { R1 = r + rnd; G1 = g + rnd; B1 = b + rnd; } R1 >>= 7; G1 >>= 7; B1 >>= 7; if(R1 < 0) R1 = 0; if(R1 > 255) R1 = 255; if(G1 < 0) G1 = 0; if(G1 > 255) G1 = 255; if(B1 < 0) B1 = 0; if(B1 > 255) B1 = 255; *outyuv++ = B1; *outyuv++ = G1; *outyuv++ = R1; *outyuv++ = 255; } } else { for(x=0; x<info->width; x++) { int R1,G1,B1; int rnd = noisearray[x&31]; // *ptr++ = *bayerptr++ >> 8; // *ptr++ = 0x80; // *ptr++ = *bayerptr++ >> 8; // *ptr++ = 0x80; int r,g,b,g1,g2,gdiff,y1,y2,u,v; //g = (g1+g2)>>1; // *g_row_ptr++ = g; // *rg_row_ptr++ = (r-g+256)>>1; // *bg_row_ptr++ = (b-g+256)>>1; // *gdiff_row_ptr++ = (g1-g2+256)>>1; g = ((*G++)>>1); r = ((*RG++ + 64)>>0)-(256<<7)+g; b = ((*BG++ + 64)>>0)-(256<<7)+g; // gdiff = ((*GD++ + 64)>>7)-256+g; if(matrix_non_unity) { //TODO: Need to convert to linear first. R1 = ((r*r_rmult + g*r_gmult + b*r_bmult + r_offset)>>8) + rnd; G1 = ((r*g_rmult + g*g_gmult + b*g_bmult + g_offset)>>8) + rnd; B1 = ((r*b_rmult + g*b_gmult + b*b_bmult + b_offset)>>8) + rnd; //TODO: Need to convert back to log/display curve. 
} else { R1 = r + rnd; G1 = g + rnd; B1 = b + rnd; } R1 >>= 7; G1 >>= 7; B1 >>= 7; if(R1 < 0) R1 = 0; if(R1 > 255) R1 = 255; if(G1 < 0) G1 = 0; if(G1 > 255) G1 = 255; if(B1 < 0) B1 = 0; if(B1 > 255) B1 = 255; *outyuv++ = B1; *outyuv++ = G1; *outyuv++ = R1; } } line += pitch; bayer_line += bayer_pitch; } } */ //MEMORY_ALIGNED_FREE(RawBayer16); } } else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { int precision = codec->precision; if(decoder->RawBayer16 == NULL) { #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; size_t size = info->width*info->height*num_channels*sizeof(PIXEL); decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, size, 16); #else decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*info->height*num_channels*sizeof(PIXEL), 16); #endif decoder->RawBayerSize = info->width*info->height*num_channels*sizeof(PIXEL); } //#ifdef SHARPENING if(decoder->RGBFilterBuffer16 == NULL) { int frame_size = info->width*decoded_height*4*3*sizeof(PIXEL); if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format)) frame_size = info->width*decoded_height*4*4*sizeof(PIXEL); #if _ALLOCATOR { ALLOCATOR *allocator = decoder->allocator; decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16); } #else decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16); #endif decoder->RGBFilterBufferSize = frame_size; } //#endif if(decoder->RawBayer16 == NULL || decoder->RGBFilterBuffer16 == NULL) { decoder->error = CODEC_ERROR_MEMORY_ALLOC; return; } //TODO: Replace this memory allocation with a scratch buffer allocation if(decoder->RawBayer16) { uint8_t *outyuv,*line, *source_line; PIXEL16U *bayerptr; PIXEL16U *G,*RG,*BG; int x,y; int src_pitch = info->width*num_channels*sizeof(PIXEL); int y_rmult,y_gmult,y_bmult,y_offset;//shift=8; int u_rmult,u_gmult,u_bmult,u_offset; int 
v_rmult,v_gmult,v_bmult,v_offset; float scale = 256.0; //int matrix_non_unity = 0; //int wb_non_unity = 0; //float curve2lin[2048]; //float lin2curve[2048+512+2]; static float rgb2yuv[3][4] = { {0.183f, 0.614f, 0.062f, 16.0f/256.0f}, {-0.101f,-0.338f, 0.439f, 0.5f}, {0.439f,-0.399f,-0.040f, 0.5} }; #if _THREADED TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels, (uint8_t *)decoder->RawBayer16, src_pitch, info, chroma_offset, precision); #else TransformInverseSpatialToRow16u(transform_array, frame, num_channels, decoder->RawBayer16, src_pitch, info, &decoder->scratch, chroma_offset, precision); #endif if (format == DECODED_FORMAT_YUYV) { line = output; source_line = (unsigned char *)decoder->RawBayer16; scale = 256.0; y_rmult = (int)((rgb2yuv[0][0])); y_gmult = (int)((rgb2yuv[0][1])); y_bmult = (int)((rgb2yuv[0][2])); y_offset= (int)((rgb2yuv[0][3])); u_rmult = (int)((rgb2yuv[1][0])); u_gmult = (int)((rgb2yuv[1][1])); u_bmult = (int)((rgb2yuv[1][2])); u_offset= (int)((rgb2yuv[1][3])); v_rmult = (int)((rgb2yuv[2][0])); v_gmult = (int)((rgb2yuv[2][1])); v_bmult = (int)((rgb2yuv[2][2])); v_offset= (int)((rgb2yuv[2][3])); for(y=0; y<info->height; y++) { outyuv = line; bayerptr = (PIXEL16U *)source_line; G = bayerptr; RG = G + src_pitch/(2*num_channels); BG = RG + src_pitch/(2*num_channels); for(x=0; x<info->width; x+=2) { int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; r1 = r; g1 = g; b1 = b; y1= ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16; u = (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16; v = ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; r1 = r; g1 = g; b1 = b; y2 = ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16; u += (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16; v += ( v_rmult * r1 - v_gmult * g1 - 
v_bmult * b1 + 32768)>>16; u >>= 1; v >>= 1; y1 += y_offset; y2 += y_offset; u += u_offset; v += v_offset; if(y1 < 0) y1 = 0; if(y1 > 255) y1 = 255; if(y2 < 0) y2 = 0; if(y2 > 255) y2 = 255; if(u < 0) u = 0; if(u > 255) u = 255; if(v < 0) v = 0; if(v > 255) v = 255; *outyuv++ = y1; *outyuv++ = u; *outyuv++ = y2; *outyuv++ = v; } line += pitch; source_line += src_pitch; } } else if (format == DECODED_FORMAT_YU64) { int shift = 14; PIXEL16U *outyuv64; line = output; source_line = (unsigned char *)decoder->RawBayer16; scale = 16384.0; y_rmult = (int)((rgb2yuv[0][0]) * scale); y_gmult = (int)((rgb2yuv[0][1]) * scale); y_bmult = (int)((rgb2yuv[0][2]) * scale); y_offset= (int)((rgb2yuv[0][3]) * scale * 4.0f); u_rmult = (int)((rgb2yuv[1][0]) * scale); u_gmult = (int)((rgb2yuv[1][1]) * scale); u_bmult = (int)((rgb2yuv[1][2]) * scale); u_offset= (int)((rgb2yuv[1][3]) * scale * 4.0f); v_rmult = (int)((rgb2yuv[2][0]) * scale); v_gmult = (int)((rgb2yuv[2][1]) * scale); v_bmult = (int)((rgb2yuv[2][2]) * scale); v_offset= (int)((rgb2yuv[2][3]) * scale * 4.0f); scale = 4096.0; y_offset += 26; u_offset += 26; v_offset += 26; for(y=0; y<info->height; y++) { outyuv64 = (PIXEL16U *)line; bayerptr = (PIXEL16U *)source_line; G = bayerptr; RG = G + src_pitch/(2*num_channels); BG = RG + src_pitch/(2*num_channels); for(x=0; x<info->width; x+=2) { int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; r1 = r; g1 = g; b1 = b; y1= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset; u = (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift); v = (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift); g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; r1 = r; g1 = g; b1 = b; y2= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset; u+= (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift); v+= (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift); 
u >>= 1; v >>= 1; u += u_offset; v += v_offset; if(y1 < 0) y1 = 0; if(y1 > 65535) y1 = 65535; if(y2 < 0) y2 = 0; if(y2 > 65535) y2 = 65535; if(u < 0) u = 0; if(u > 65535) u = 65535; if(v < 0) v = 0; if(v > 65535) v = 65535; *outyuv64++ = y1; *outyuv64++ = v; *outyuv64++ = y2; *outyuv64++ = u; } line += pitch; source_line += src_pitch; } } else //RGBs { line = output; source_line = (unsigned char *)decoder->RawBayer16; for(y=0; y<info->height; y++) { int i,noisearray[32]; unsigned short *rgb16 = (unsigned short *)line; outyuv = line; bayerptr = (PIXEL16U *)source_line; G = bayerptr; RG = G + src_pitch/(2*num_channels); BG = RG + src_pitch/(2*num_channels); for(i=0; i<32; i++) { noisearray[i] = (rand() & 255); } if(info->format == DECODED_FORMAT_RGB32) { for(x=0; x<info->width; x++) { int R1,G1,B1; int rnd = noisearray[x&31]; #if 0 G1 = (*G++) + rnd; R1 = ((*RG++<<1) - (128<<9)) + G1; B1 = ((*BG++<<1) - (128<<9)) + G1; #else G1 = (*G++) + rnd; R1 = (*RG++) + rnd; B1 = (*BG++) + rnd; #endif R1 >>= 8; G1 >>= 8; B1 >>= 8; if(R1 < 0) R1 = 0; if(R1 > 255) R1 = 255; if(G1 < 0) G1 = 0; if(G1 > 255) G1 = 255; if(B1 < 0) B1 = 0; if(B1 > 255) B1 = 255; *outyuv++ = B1; *outyuv++ = G1; *outyuv++ = R1; *outyuv++ = 255; } } else if(info->format == DECODED_FORMAT_RGB24) { for(x=0; x<info->width; x++) { int R1,G1,B1; int rnd = noisearray[x&31]; #if 0 G1 = (*G++) + rnd; R1 = ((*RG++<<1) - (128<<9)) + G1; B1 = ((*BG++<<1) - (128<<9)) + G1; #else G1 = (*G++) + rnd; R1 = (*RG++) + rnd; B1 = (*BG++) + rnd; #endif R1 >>= 8; G1 >>= 8; B1 >>= 8; if(R1 < 0) R1 = 0; if(R1 > 255) R1 = 255; if(G1 < 0) G1 = 0; if(G1 > 255) G1 = 255; if(B1 < 0) B1 = 0; if(B1 > 255) B1 = 255; *outyuv++ = B1; *outyuv++ = G1; *outyuv++ = R1; } } else if(info->format == DECODED_FORMAT_RG48) { for(x=0; x<info->width; x++) { int R1,G1,B1; G1 = (*G++); R1 = (*RG++); B1 = (*BG++); *rgb16++ = R1; *rgb16++ = G1; *rgb16++ = B1; } } line += pitch; source_line += src_pitch; } } //MEMORY_ALIGNED_FREE(RawBayer16); } } else // 
Output the frame in one of the RGB 8-bit formats { //char *buffer = decoder->buffer; //size_t buffer_size = decoder->buffer_size; // Invert the bottom wavelet and convert the output to the requested color format #if _THREADED TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sYUVtoRGB); #else TransformInverseSpatialToBuffer(decoder, transform_array, frame, num_channels, output, pitch, &info2, &decoder->scratch, chroma_offset, precision); #endif } } } #if TIMING // Count the number of progressive frames that were decoded progressive_decode_count++; #endif } STOP(tk_inverse); #ifdef ADOBE_MEMORY_FUNCTIONS if((decoder->RawBayer16 && decoder->RawBayerSize > 2048*1152*2) || (decoder->RGBFilterBuffer16 && decoder->RGBFilterBufferSize > 2048*1152*2)) { #if _ALLOCATOR if(decoder->RawBayer16) { FreeAligned(decoder->allocator, decoder->RawBayer16); decoder->RawBayer16 = NULL; decoder->RawBayerSize = NULL; } if(decoder->RGBFilterBuffer16) { FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16); decoder->RGBFilterBuffer16 = NULL; decoder->RGBFilterBufferSize = NULL; } #else if(decoder->RawBayer16) { MEMORY_ALIGNED_FREE(decoder->RawBayer16); decoder->RawBayer16 = NULL; decoder->RawBayerSize = NULL; } if(decoder->RGBFilterBuffer16) { MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16); decoder->RGBFilterBuffer16 = NULL; decoder->RGBFilterBufferSize = NULL; } #endif } #endif #if (0 && DEBUG) if (logfile) { //uint8_t *subimage = output; uint8_t *subimage = output + (2 * info->width) - 16; DumpArray8u("YUV Image", subimage, 16, 16, pitch, logfile); } #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Exit ReconstructFrameToBuffer\n"); } #endif #if (0 && DEBUG && _WIN32) _CrtCheckMemory(); #endif } #if 0 // Reconstruct the frame to quarter resolution at full frame rate void ReconstructQuarterFrame(DECODER *decoder, int num_channels, uint8_t *frame1, uint8_t *frame2, int 
output_pitch, FRAME_INFO *info, char *buffer, size_t buffer_size)
{
	// Legacy quarter-resolution, full-frame-rate reconstruction (disabled by the
	// surrounding #if 0; the active replacement follows in the #else branch).
	// NOTE(review): this variant reads `channel_row_ptr` and `output`, neither of
	// which is declared locally or passed as a parameter here (the #else variant
	// declares both), so it cannot compile if re-enabled — confirm before
	// resurrecting this code.
	TRANSFORM **transform_array = decoder->transform;
	int output_width = info->width;
	int output_height = info->height;
	PIXEL *low_row_ptr[CODEC_MAX_CHANNELS];
	PIXEL *high_row_ptr[CODEC_MAX_CHANNELS];
	PIXEL *out1_row_ptr[CODEC_MAX_CHANNELS];	// NOTE(review): declared but never used below
	PIXEL *out2_row_ptr[CODEC_MAX_CHANNELS];	// NOTE(review): declared but never used below
	PIXEL *bufptr = (PIXEL *)buffer;		// carves per-channel row scratch out of `buffer`
	uint8_t *output_row_ptr = output;		// NOTE(review): `output` is not in scope here — see note above
	int low_pitch[CODEC_MAX_CHANNELS];
	int high_pitch[CODEC_MAX_CHANNELS];
	int channel;
	int row;

	// Check that there is enough space for the intermediate results from each channel
	assert(output_width * sizeof(PIXEL) < buffer_size);

	// Get pointers into the wavelets for each channel
	for (channel = 0; channel < num_channels; channel++)
	{
		// Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet
		IMAGE *low_wavelet = transform_array[channel]->wavelet[3];
		IMAGE *high_wavelet = transform_array[channel]->wavelet[2];

		// Get the pointers to the first row in each lowpass band
		low_row_ptr[channel] = low_wavelet->band[0];
		high_row_ptr[channel] = high_wavelet->band[0];

		// Convert the byte pitches to element (PIXEL) strides
		low_pitch[channel] = low_wavelet->pitch / sizeof(PIXEL);
		high_pitch[channel] = high_wavelet->pitch / sizeof(PIXEL);

		// Allocate space for one row of results for this channel
		channel_row_ptr[channel] = bufptr;
		bufptr += low_wavelet->width;
	}

	// Invert the temporal transform row by row and pack each row into the output
	for (row = 0; row < output_height; row++)
	{
		char *bufptr = buffer;	// NOTE(review): shadows the outer `bufptr` and is unused in this loop

		for (channel = 0; channel < num_channels; channel++)
		{
			// Invert the temporal transform at quarter resolution
			InvertTemporalQuarterRow16s(low_row_ptr[channel], high_row_ptr[channel], channel_row_ptr[channel]);

			// Advance to the next row in each band for the temporal transform
			low_row_ptr[channel] += low_pitch[channel];
			high_row_ptr[channel] += high_pitch[channel];
		}

		// Pack the intermediate results into the output row
		ConvertUnpacked16sRowToPacked8u(channel_row_ptr, num_channels, output_row_ptr, output_width);

		// Advance the output row pointer
		output_row_ptr += output_pitch;
	}
}
#else

// Reconstruct the frame to quarter resolution at full frame rate
//
// decoder       - decoder state (transforms, scratch space, output options)
// num_channels  - number of color channels to process
// frame_index   - 0 selects the even temporal output row, 1 the odd row
// output        - output frame buffer
// output_pitch  - output row pitch in bytes (negated below for inverted RGB)
// info          - output frame dimensions and color format
// scratch       - scratch space used for one row of intermediate results per channel
// precision     - encoded precision of the coefficients
void ReconstructQuarterFrame(DECODER *decoder, int num_channels, int frame_index,
							 uint8_t *output, int output_pitch, FRAME_INFO *info,
							 const SCRATCH *scratch, int precision)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	TRANSFORM **transform_array = decoder->transform;
	int output_width = info->width;
	int output_height = info->height;
	PIXEL *low_row_ptr[CODEC_MAX_CHANNELS];
	PIXEL *high_row_ptr[CODEC_MAX_CHANNELS];
	uint8_t *output_row_ptr = output;
	int low_pitch[CODEC_MAX_CHANNELS];
	int high_pitch[CODEC_MAX_CHANNELS];
	int channel;
	int row;

	// Value used for filling the fourth channel in ARGB output
	int alpha = 255;

	int format = COLORFORMAT(info);
	int color_space = COLORSPACE(info);
	int decoded_format = DECODEDFORMAT(info);
	//bool inverted = false;

	// The pixels are descaled in the inverse temporal transform
	//const int descale = 0;

	// Shift the intermediate results to 16-bit pixels
	const int shift_yu64 = 8;

	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
#if DEBUG
	size_t buffer_size = scratch->free_size;
#endif

	// Initialize a pointer for allocating space in the buffer
	PIXEL *bufptr = (PIXEL *)buffer;

	// Array of pointers to the start of each channel in the intermediate results
	PIXEL *channel_row_ptr[CODEC_MAX_CHANNELS];

	// Check that there is enough space for the intermediate results from each channel
#if DEBUG
	assert(output_width * sizeof(PIXEL) < buffer_size);
#endif

	ComputeCube(decoder);

	// Get pointers into the wavelets for each channel
	for (channel = 0; channel < num_channels; channel++)
	{
		// Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet
		IMAGE *low_wavelet = transform_array[channel]->wavelet[4];
		IMAGE *high_wavelet = transform_array[channel]->wavelet[3];

		// Get the pointers to the first row in each lowpass band
		low_row_ptr[channel] = low_wavelet->band[0];
		high_row_ptr[channel] = high_wavelet->band[0];
		low_pitch[channel] = low_wavelet->pitch / sizeof(PIXEL);
		high_pitch[channel] = high_wavelet->pitch / sizeof(PIXEL);

		// Force the row of intermediate results to be properly aligned
		bufptr = (PIXEL *)ALIGN16(bufptr);

		// Allocate space for one row of results for this channel
		channel_row_ptr[channel] = bufptr;
		bufptr += low_wavelet->width;

		// Check that the row of intermediate results is properly aligned
		assert(ISALIGNED16(channel_row_ptr[channel]));
	}

	// Invert the image if required (RGB24/RGB32 output is written bottom-up:
	// start at the last row and walk backwards with a negated pitch)
	switch (decoded_format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB32:
		output_row_ptr += (output_height - 1) * output_pitch;
		output_pitch = NEG(output_pitch);
	}

	//HACK: Seems to work, I don't know why. //DAN20070304
	if (precision == 12) precision = 8;

	// Apply the inverse temporal transform to the lowpass and highpass rows
	for (row = 0; row < output_height; row++)
	{
		// Most of the color conversion routines use zero descaling
		int descale = 0;
		//char *bufptr = buffer;

		for (channel = 0; channel < num_channels; channel++)
		{
			if (frame_index == 0)
			{
				// Invert the temporal transform at quarter resolution to get the even row
				InvertTemporalQuarterEvenRow16s(low_row_ptr[channel], high_row_ptr[channel],
												channel_row_ptr[channel], output_width, precision);
			}
			else
			{
				assert(frame_index == 1);

				// Invert the temporal transform at quarter resolution to get the odd row
				InvertTemporalQuarterOddRow16s(low_row_ptr[channel], high_row_ptr[channel],
											   channel_row_ptr[channel], output_width, precision);
			}

			// Advance to the next row in each band for the temporal transform
			low_row_ptr[channel] += low_pitch[channel];
			high_row_ptr[channel] += high_pitch[channel];
		}

		if(decoder->use_active_metadata_decoder)
		{
			uint8_t *channeldata[TRANSFORM_MAX_CHANNELS];	// used in quarter res decodes
			int channelpitch[TRANSFORM_MAX_CHANNELS];		// used in quarter res decodes
			int i;
			FRAME_INFO info2;

			memcpy(&info2, info, sizeof(FRAME_INFO));
			info2.height = 1;

			for(i=0;i<num_channels;i++)
			{
				channeldata[i] = (uint8_t *)channel_row_ptr[i];
				channelpitch[i] = 0;
			}

#if 1
			{
				// Saturating add/subtract clamps each 16-bit value to [0, 0x0fff],
				// then the shift left by 4 rescales to 16-bit range
				__m128i *Y = (__m128i *)channeldata[0];
				__m128i *U = (__m128i *)channeldata[1];
				__m128i *V = (__m128i *)channeldata[2];
				__m128i v;
				int x;
				__m128i rgb_limit_epi16 = _mm_set1_epi16(0x7fff - 0x0fff);

				for(x=0;x<info->width;x+=8)
				{
					v = _mm_load_si128(Y);
					v = _mm_adds_epi16(v, rgb_limit_epi16);
					v = _mm_subs_epu16(v, rgb_limit_epi16);
					v = _mm_slli_epi16(v, 4);
					_mm_store_si128(Y++, v);
				}
				for(x=0;x<info->width/2;x+=8)
				{
					v = _mm_load_si128(U);
					v = _mm_adds_epi16(v, rgb_limit_epi16);
					v = _mm_subs_epu16(v, rgb_limit_epi16);
					v = _mm_slli_epi16(v, 4);
					_mm_store_si128(U++, v);
				}
				for(x=0;x<info->width/2;x+=8)
				{
					v = _mm_load_si128(V);
					v = _mm_adds_epi16(v, rgb_limit_epi16);
					v = _mm_subs_epu16(v, rgb_limit_epi16);
					v = _mm_slli_epi16(v, 4);
					_mm_store_si128(V++, v);
				}
			}
#else //non SSE2
			// NOTE(review): this disabled branch references gptr/src/scanline2
			// which are not declared in this function — confirm before enabling
			for(x=0;x<info->width*2;x++)
			{
				int val = *gptr++;
				if(val < 0) val = 0;
				if(val > 4095) val = 4095;
				val <<= 4;
				*src++ = val;
			}
			src = scanline2;
#endif

			Row16uQuarter2OutputFormat(decoder, &info2, 0, output_row_ptr, output_pitch,
									   decoder->gop_frame_num/*0 frame*/,
									   scratch->free_ptr, scratch->free_size,
									   false, channeldata, channelpitch);
		}
		else
		{
			//DAN20081203 -- fix for 444 decodes in AE32-bit float
			decoder->frame.white_point = 16;
			//decoder->frame.signed_pixels = 0;

			// Convert the rows of luma and chroma into the output format
			switch(format)
			{
			case COLOR_FORMAT_YUYV:
			case COLOR_FORMAT_UYVY:
				// Pack the intermediate results into the output row
				if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
				{
					assert(0);//need quarter res BAYER To YUV decoder
				}
				else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
					(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
				{
					// assert(0);//need quarter res RGB To YUV decoder
					ConvertRGB2YUV(channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2],
								   output_width, output_width, output_width,
								   output_row_ptr, output_pitch,
								   info->width, 1, 10, info->colorspace, format);
				}
				else
				{
					ConvertUnpacked16sRowToPacked8u(channel_row_ptr, num_channels,
													output_row_ptr, output_width, format);
				}
				break;

			case COLOR_FORMAT_RGB24:
				if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
					(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
				{
					ConvertRGB48toRGB24(channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2],
										output_width, output_width, output_width,
										output_row_ptr, output_pitch,
										info->width, 1, 10, 0);
				}
				else
				{
					// Convert the intermediate results into a row of RGB24
					ConvertUnpacked16sRowToRGB24(channel_row_ptr, num_channels, output_row_ptr,
												 output_width, descale, format, color_space);
				}
				break;

			case COLOR_FORMAT_RGB32:
				if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
					(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
				{
					ConvertRGBA48toRGB32(channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2], NULL,
										 output_width, output_row_ptr, output_pitch,
										 info->width, 1, 10, 0, 3/*only 3 chhanel not 4 for alpha*/);
				}
				else
				{
					// Convert the intermediate results into a row of RGBA32
					ConvertUnpacked16sRowToRGB32(channel_row_ptr, num_channels, output_row_ptr,
												 output_width, descale, format, color_space, alpha);
				}
				break;

			case COLOR_FORMAT_YU64:
			case COLOR_FORMAT_V210:
				// Convert the intermediate results into a row of YU64
				ConvertUnpacked16sRowToYU64(channel_row_ptr, num_channels, output_row_ptr,
											output_width, shift_yu64, precision, format);
				break;

			case COLOR_FORMAT_B64A:
				if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
					(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
				{
					// Convert the intermediate results into a row of RGBA with 16 bits per component
					descale = 2;
					ConvertUnpacked16sRowToB64A(channel_row_ptr, num_channels, output_row_ptr,
												output_width, descale, precision);
				}
				else
				{
					ConvertUnpackedYUV16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr,
													output_width, descale, precision,
													COLOR_FORMAT_B64A, color_space);
				}
				break;

			case COLOR_FORMAT_R210:
			case COLOR_FORMAT_DPX0:
			case COLOR_FORMAT_RG30:
			case COLOR_FORMAT_AR10:
			case COLOR_FORMAT_AB10:
				if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
					(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
				{
					// Convert the intermediate results into a row of RGBA with 16 bits per component
					descale = 2;
					ConvertUnpacked16sRowToRGB30(channel_row_ptr, num_channels, output_row_ptr,
												 output_width, descale, precision, format, color_space);
				}
				else
				{
					ConvertUnpackedYUV16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr,
													output_width, descale, precision, format, color_space);
				}
				break;

			case COLOR_FORMAT_RG48:
				// Convert the intermediate results into a row of RGBA with 16 bits per component
				descale = 2;
				ConvertUnpacked16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr,
											 output_width, descale, precision);
				break;

			case COLOR_FORMAT_RG64:
				// Convert the intermediate results into a row of RGBA with 16 bits per component
				descale = 2;
				ConvertUnpacked16sRowToRGBA64(channel_row_ptr, num_channels, output_row_ptr,
											  output_width, descale, precision);
				break;

			default:
#if (1 && DEBUG)
				if (logfile) {
					fprintf(logfile, "ReconstructQuarterFrame bad color format: %d\n", format);
				}
#endif
				assert(0);
				break;
			}
		}

		// Advance the output row pointer
		output_row_ptr += output_pitch;
	}
}
#endif


#if 0
// Copy the quarter resolution lowpass channels from the spatial transform
void CopyQuarterFrameToBuffer(TRANSFORM **transform_array, int num_channels,
							  uint8_t *output, int output_pitch,
							  FRAME_INFO *info, int precision)
{
	int output_width = info->width;
	int output_height = info->height;
	PIXEL *input_row_ptr[CODEC_MAX_CHANNELS];
	uint8_t *output_row_ptr = output;
	int input_pitch[CODEC_MAX_CHANNELS];
	int channel;
	int row;

	// Get pointers into the wavelets for each channel
	for (channel = 0; channel < num_channels; channel++)
	{
		// Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet
		IMAGE *wavelet = transform_array[channel]->wavelet[1];

		// Get the pointers to the first row in each lowpass band
		input_row_ptr[channel] = wavelet->band[0];
		input_pitch[channel] = wavelet->pitch / sizeof(PIXEL);
	}

	for (row = 0; row < output_height; row++)
	{
		// Descale and pack the pixels in each output row
		CopyQuarterRowToBuffer(input_row_ptr, num_channels, output_row_ptr, output_width, precision);

		// Advance the input row pointers
		for (channel = 0; channel < num_channels; channel++)
		{
			input_row_ptr[channel] += input_pitch[channel];
		}

		// Advance the output row pointer
		output_row_ptr += output_pitch;
	}
}
#endif


// Convert the quarter resolution lowpass channels to the specified output format
void ConvertQuarterFrameToBuffer(DECODER *decoder, TRANSFORM **transform_array, int num_channels,
								 uint8_t *output, int output_pitch,
								 FRAME_INFO *info, int precision)
{
	int output_width = info->width;
	int output_height = info->height;
	PIXEL *input_row_ptr[CODEC_MAX_CHANNELS];
	uint8_t *output_row_ptr = output;
	int input_pitch[CODEC_MAX_CHANNELS];
	int channel;
	int row;

	// Value used for filling the fourth channel in ARGB output
	int alpha = 255;

	int format = COLORFORMAT(info);
	int color_space = COLORSPACE(info);
	int decoded_format = DECODEDFORMAT(info);
	//bool inverted = false;

	// Get pointers into the wavelets for each channel
	for (channel = 0; channel < num_channels; channel++)
	{
		// Get the lowpass bands from the wavelets with quarter resolution
		const int wavelet_index = 1;
		IMAGE *wavelet = transform_array[channel]->wavelet[wavelet_index];

		// The wavelet should have been reconstructed
		//assert(wavelet != NULL);
		if (wavelet == NULL) {
			decoder->error = CODEC_ERROR_BAD_FRAME;
			return;
		}

		// The lowpass band should be valid
		//assert((wavelet->band_valid_flags & BAND_VALID_MASK(0)) != 0);
		if((wavelet->band_valid_flags & BAND_VALID_MASK(0)) == 0) {
			decoder->error = CODEC_ERROR_BAD_FRAME;
			return;
		}

		// Get the pointers to the first row in each lowpass band
		input_row_ptr[channel] = wavelet->band[0];
		input_pitch[channel] = wavelet->pitch / sizeof(PIXEL);
	}

	// Invert the image if required
// NOTE(review): continuation of ConvertQuarterFrameToBuffer()
	// RGB24/RGB32 output is written bottom-up: start at the last row and
	// walk backwards with a negated pitch
	switch (decoded_format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB32:
		output_row_ptr += (output_height - 1) * output_pitch;
		output_pitch = NEG(output_pitch);
	}

	ComputeCube(decoder);

	//HACK DAN20110122 -- some formats will not directly decode so need to use the AM route
	{
		if( format == COLOR_FORMAT_YU64 ||
			format == COLOR_FORMAT_V210 ||
			format == COLOR_FORMAT_R408 ||
			format == COLOR_FORMAT_V408)
		{
			if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
				(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
			{
				decoder->use_active_metadata_decoder = true;
				decoder->apply_color_active_metadata = true;
			}
		}
	}

	if(decoder->use_active_metadata_decoder)
	{
#if _THREADED
		{
			WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
			if(decoder->worker_thread.pool.thread_count == 0)
			{
				CreateLock(&decoder->worker_thread.lock);
				// Initialize the pool of transform worker threads
				ThreadPoolCreate(&decoder->worker_thread.pool,
								 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
								 WorkerThreadProc, decoder);
			}
#endif
			// Post a message to the mailbox
			mailbox->output = output_row_ptr;
			mailbox->pitch = output_pitch;
			mailbox->framenum = 0;
			for(channel = 0; channel < num_channels; channel++)
			{
				mailbox->channeldata[channel] = (uint8_t *)input_row_ptr[channel];
				mailbox->channelpitch[channel] = input_pitch[channel]*sizeof(PIXEL);
			}
			memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
			mailbox->jobType = JOB_TYPE_OUTPUT;

			decoder->RGBFilterBufferPhase = 1;

			// Set the work count to the number of rows to process
			ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);

			// Start the transform worker threads
			ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

			// Wait for all of the worker threads to finish
			ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

			decoder->RGBFilterBufferPhase = 0;
		}
#endif
	}
	else
	{
		//DAN20081203 -- fix for 444 decodes in AE32-bit float
		decoder->frame.white_point = 16;
		//decoder->frame.signed_pixels = 0;

		// Convert each row to the specified output format
		for (row = 0; row < output_height; row++)
		{
			// Right shift for converting lowpass coefficients to pixels
			int descale = 4;

			switch(format & 0x7fffffff)
			{
			case COLOR_FORMAT_YUYV:
			case COLOR_FORMAT_UYVY:
				if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
					(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
				{
					// assert(0);//need quarter res RGB To YUV decoder
					ConvertRGB2YUV(input_row_ptr[1], input_row_ptr[0], input_row_ptr[2],
								   output_width, output_width, output_width,
								   output_row_ptr, output_pitch,
								   info->width, 1, 14, info->colorspace, format);
				}
				else
				{
					// Descale and pack the pixels in each output row
					CopyQuarterRowToBuffer(input_row_ptr, num_channels, output_row_ptr,
										   output_width, precision, format);
				}
				break;

			case COLOR_FORMAT_RGB24:
				if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
					(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
				{
					ConvertRGB48toRGB24(input_row_ptr[1], input_row_ptr[0], input_row_ptr[2],
										output_width, output_width, output_width,
										output_row_ptr, output_pitch,
										info->width, 1, 14, 0);
				}
				else
				{
					// Convert the intermediate results into a row of RGB24
					ConvertUnpacked16sRowToRGB24(input_row_ptr, num_channels, output_row_ptr,
												 output_width, descale, format, color_space);
				}
				break;

			case COLOR_FORMAT_RGB32:
			case COLOR_FORMAT_RGB32_INVERTED:
				if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
					(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
				{
					ConvertRGBA48toRGB32(input_row_ptr[1], input_row_ptr[0], input_row_ptr[2], input_row_ptr[3],
										 output_width, output_row_ptr, output_pitch,
										 info->width, 1, 14, 0, num_channels);
				}
				else
				{
					// Convert the intermediate results into a row of RGBA32
					ConvertUnpacked16sRowToRGB32(input_row_ptr, num_channels, output_row_ptr,
												 output_width, descale, format, color_space, alpha);
				}
				break;

			case COLOR_FORMAT_YU64:
			case COLOR_FORMAT_V210:
				if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) ||
					(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444))
				{
					//TODO RGB to YUV Quarter RES DAN20110120 - handle above with HACK DAN20110122
					//
				}
				else
				{
					// Convert the intermediate results into a row of YU64
					ConvertUnpacked16sRowToYU64(input_row_ptr, num_channels, output_row_ptr,
												output_width, descale, precision, format);
				}
				break;

			case COLOR_FORMAT_B64A:
				// Convert the intermediate results to a row of ARGB with 16 bits per pixel
				descale = 2;
				ConvertUnpacked16sRowToB64A(input_row_ptr, num_channels, output_row_ptr,
											output_width, descale, precision);
				break;

			case COLOR_FORMAT_R210:
			case COLOR_FORMAT_DPX0:
			case COLOR_FORMAT_RG30:
			case COLOR_FORMAT_AR10:
			case COLOR_FORMAT_AB10:
				// Convert the intermediate results to a row of ARGB with 16 bits per pixel
				descale = 2;
				ConvertUnpacked16sRowToRGB30(input_row_ptr, num_channels, output_row_ptr,
											 output_width, descale, precision, format, color_space);
				break;

			case COLOR_FORMAT_RG48:
				// Convert the intermediate results into a row of RGBA with 16 bits per component
				descale = 2;
				ConvertUnpacked16sRowToRGB48(input_row_ptr, num_channels, output_row_ptr,
											 output_width, descale, precision);
				break;

			case COLOR_FORMAT_RG64:
				// Convert the intermediate results into a row of RGBA with 16 bits per component
				descale = 2;
				ConvertUnpacked16sRowToRGBA64(input_row_ptr, num_channels, output_row_ptr,
											  output_width, descale, precision);
				break;

			default:
				assert(0);
				break;
			}

			// Advance the input row pointers
			for (channel = 0; channel < num_channels; channel++)
			{
				input_row_ptr[channel] += input_pitch[channel];
			}

			// Advance the output row pointer
			output_row_ptr += output_pitch;
		}
	}
}


// Release all resources allocated by the decoder
void DecodeRelease(DECODER *decoder, TRANSFORM *transform[], int num_transforms)
{
#if _TIMING && 0
	FILE *logfile = decoder->logfile;
	uint32_t frame_count = decoder->frame_count;

	if (logfile != NULL && frame_count > 0)\
	{
#ifdef _WIN32
		PrintStatistics(logfile, frame_count, NULL, TIMING_CSV_FILENAME);
#else
// NOTE(review): continuation of the DecodeRelease() timing report (disabled by "#if _TIMING && 0")
		PrintStatistics(logfile, frame_count, NULL, NULL);
#endif
	}
#endif

	// Free the data structures allocated for decoding
	ClearDecoder(decoder);
}

// Force the metadata to be refreshed on the next decode
// (also applied to the parallel decoder when one exists)
void DecodeForceMetadataRefresh(DECODER *decoder)
{
	CFHDDATA *cfhddata = &decoder->cfhddata;
	cfhddata->force_metadata_refresh = true;

	if (decoder->parallelDecoder)
	{
		cfhddata = &decoder->parallelDecoder->cfhddata;
		cfhddata->force_metadata_refresh = true;
	}
}

// Set the decoder processing flags
void SetDecoderFlags(DECODER *decoder, uint32_t flags)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	// Set the decoder flags
	decoder->flags = flags;

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Decoder flags: 0x%p\n", decoder->flags);
	}
#endif
}

// Set the output dimensions, pixel format, and decoded resolution.
// WP13/W13A formats use a 13-bit white point; everything else uses 16 bits.
void SetDecoderFormat(DECODER *decoder, int width, int height, int format, int resolution)
{
	// Need to modify the codec to use the decoding format
	decoder->frame.width = width;
	decoder->frame.height = height;
	if(format == DECODED_FORMAT_WP13)
	{
		decoder->frame.output_format = format;
		//decoder->frame.format = DECODED_FORMAT_RG48; //TODO Why is this needed with W13A work natively.
		decoder->frame.format = format;
		//decoder->frame.signed_pixels = 1;
		decoder->frame.white_point = 13;
	}
	else if(format == DECODED_FORMAT_W13A)
	{
		decoder->frame.output_format = format;
		// decoder->frame.format = DECODED_FORMAT_W13A; // TODO eventually this might be DECODED_FORMAT_RG64
		decoder->frame.format = format;
		//decoder->frame.signed_pixels = 1;
		decoder->frame.white_point = 13;
	}
	else
	{
		decoder->frame.output_format = format;
		decoder->frame.format = format;
		//decoder->frame.signed_pixels = 0;
		decoder->frame.white_point = 16;
	}
	decoder->frame.resolution = resolution;
	decoder->frame.pixel_size = PixelSize(decoder->frame.format);
}

// Set the processor capability flags and the usable processor count
// (the count is packed into the upper 16 bits of the capabilities word)
void SetDecoderCapabilities(DECODER *decoder)
{
	int processor_count;
#ifdef _WIN32
	int limit_cpus = 32;
#else
	int limit_cpus = 32;	// AJA spins off too many
#endif

	// Set the capabilities that are most likely supported by the Intel Mac
	decoder->thread_cntrl.capabilities = (_CPU_FEATURE_MMX | _CPU_FEATURE_SSE | _CPU_FEATURE_SSE2);

	if (decoder->thread_cntrl.limit)
	{
		limit_cpus = decoder->thread_cntrl.limit;
	}
	else if (decoder->thread_cntrl.affinity)
	{
		// Count the processors enabled in the affinity mask
		int i;
		const int max_cpu_count = 32;
		limit_cpus = 0;
		for (i = 0; i < max_cpu_count; i++)
		{
			if (decoder->thread_cntrl.affinity & (1<<i)) {
				limit_cpus++;
			}
		}
	}

	// Set the number of processors
	processor_count = GetProcessorCount();
	if(processor_count > limit_cpus)
		processor_count = limit_cpus;

#if (0 && DEBUG)
	// Set the number of processors (for debugging)
	//processor_count = 8;
	processor_count = 1;
	fprintf(stderr, "Limit processors to %d\n", processor_count);
#endif

	decoder->thread_cntrl.capabilities |= (processor_count << 16);
}

// Return the decoder capability flags (processor count in the upper 16 bits)
int GetDecoderCapabilities(DECODER *decoder)
{
	return decoder->thread_cntrl.capabilities;
}

// Set the color space flags used for decoding; return true if the flags were valid
bool SetDecoderColorFlags(DECODER *decoder, uint32_t color_flags)
{
	if (/*MIN_DECODED_COLOR_SPACE <= color_flags && */color_flags <= MAX_DECODED_COLOR_SPACE)
	{
		decoder->frame.colorspace = color_flags;

		// Indicate that the color flags were set as specified
		return true;
	}

	// The specified color flags were not valid
	return false;
}

// Compute the resolution corresponding to the specified combination of input and output dimensions
int DecodedResolution(int input_width, int input_height, int output_width, int output_height)
{
	int decoded_width;
	int decoded_height;

	// Output height can be negative for inverted RGB
	output_height = abs(output_height);

	if (output_width == input_width && output_height == input_height) {
		return DECODED_RESOLUTION_FULL;
	}

	// Compute the dimensions for half resolution decoding
	decoded_width = input_width / 2;
	decoded_height = input_height / 2;

	// Do the output dimensions correspond to half resolution decoding?
	if (output_width == decoded_width && output_height == decoded_height) {
		return DECODED_RESOLUTION_HALF;
	}

	// Compute the dimensions for quarter resolution decoding
	decoded_width /= 2;
	decoded_height /= 2;

	// Do the output dimensions correspond to quarter resolution decoding?
	if (output_width == decoded_width && output_height == decoded_height) {
		return DECODED_RESOLUTION_QUARTER;
	}

	return DECODED_RESOLUTION_UNSUPPORTED;
}

// Compute the decoded resolution that is closest to the output dimensions
int DecodedScale(int input_width, int input_height, int output_width, int output_height)
{
	int decoded_width = input_width;
	int decoded_height = input_height;

	static int decodedResolution[] = {
		DECODED_RESOLUTION_FULL,
		DECODED_RESOLUTION_HALF,
		DECODED_RESOLUTION_QUARTER
	};

	int reduction = 0;
	int max_reduction = 2;

	// Output height can be negative for inverted RGB
	output_height = abs(output_height);

#if 1
	// Always decode to the next larger size
	while (decoded_width > output_width &&
		   decoded_height > output_height &&
		   reduction < max_reduction)
	{
		// Decode to a frame size that is larger than the output image
		int reduced_width = decoded_width / 2;
		int reduced_height = decoded_height / 2;

		if (reduced_width >= output_width && reduced_height >= output_height)
		{
			decoded_width = reduced_width;
			decoded_height = reduced_height;
			reduction++;
		}
		else
		{
			break;
		}
	}
#else
	while (decoded_width*4 > output_width*5 &&
		   decoded_height*4 > output_height*5 &&
		   reduction < max_reduction)
	{
#if 0
		// Decode to a frame size that is larger than the output image
		int reduced_width = decoded_width / 2;
		int reduced_height = decoded_height / 2;

		if (reduced_width >= output_width && reduced_height >= output_height)
		{
			decoded_width = reduced_width;
			decoded_height = reduced_height;
			reduction++;
		}
		else
		{
			break;
		}
#else
		// Better to scale up a smaller image than scale down a larger image
		decoded_width /= 2;
		decoded_height /= 2;
		reduction++;
#endif
	}
#endif

	// Check that the decoded resolution is valid
	assert(0 <= reduction && reduction <= max_reduction);

	return decodedResolution[reduction];
}

// Compute the decoded frame dimensions for the specified resolution
void ComputeDecodedDimensions(int encoded_width, int encoded_height, int decoded_resolution,
							  int *decoded_width_out, int *decoded_height_out)
{
	switch (decoded_resolution)
	{
	default:
		assert(0);
		// fall through and return the full resolution dimensions

	case DECODED_RESOLUTION_FULL:
		*decoded_width_out = encoded_width;
		*decoded_height_out = encoded_height;
		break;

	case DECODED_RESOLUTION_HALF:
		*decoded_width_out = encoded_width / 2;
		*decoded_height_out = encoded_height / 2;
		break;

	case DECODED_RESOLUTION_QUARTER:
		*decoded_width_out = encoded_width / 4;
		*decoded_height_out = encoded_height / 4;
		break;

	case DECODED_RESOLUTION_LOWPASS_ONLY:
		//TODO: Check that the lowpass dimensions are correct
		*decoded_width_out = encoded_width / 8;
		*decoded_height_out = encoded_height / 8;
		break;
	}
}

// Return true if the specified resolution is supported
bool IsDecodedResolution(int resolution)
{
	if (resolution == DECODED_RESOLUTION_QUARTER) {
		return true;
	}

	return (resolution == DECODED_RESOLUTION_FULL ||
			resolution == DECODED_RESOLUTION_HALF);
}

// Return true if the encoded sample is a key frame
bool IsSampleKeyFrame(uint8_t *sample, size_t size)
{
	bool key_frame_flag = false;

	// Search the first twenty tags for the sample type
	const int num_tags = 20;
	int i;

	BITSTREAM bitstream;
InitBitstreamBuffer(&bitstream, sample, size, BITSTREAM_ACCESS_READ); for (i = 0; i < num_tags && size > 0; i++, size -= sizeof(TAGVALUE)) { TAGVALUE segment = GetSegment(&bitstream); if (segment.tuple.tag == CODEC_TAG_SAMPLE) { switch (segment.tuple.value) { case SAMPLE_TYPE_GROUP: case SAMPLE_TYPE_FIRST: case SAMPLE_TYPE_IFRAME: key_frame_flag = true; break; case SAMPLE_TYPE_SEQUENCE_HEADER: case SAMPLE_TYPE_FRAME: case SAMPLE_TYPE_SECOND: case SAMPLE_TYPE_PFRAME: default: key_frame_flag = false; break; case SAMPLE_TYPE_GROUP_TRAILER: case SAMPLE_TYPE_NONE: case SAMPLE_TYPE_ERROR: case SAMPLE_TYPE_CHANNEL: assert(0); // Unexpected situation key_frame_flag = false; // Report the sample as a non-key frame break; } break; // Found the sample type } } return key_frame_flag; } // Return the number of the more recent decoded frame uint32_t DecodedFrameNumber(DECODER *decoder) { CODEC_STATE *codec = &decoder->codec; if (decoder == NULL) return 0; return codec->frame_number; } /***** Start of the new code for the finite state machine (FSM) decoder *****/ #if _PROCESSOR_DISPATCH __declspec(cpu_dispatch(Pentium_4,Generic)) static inline void ZeroHighPassRow(PIXEL *rowptr, int length) { // Stub routine for processor specific dispatch } #endif #if _PROCESSOR_GENERIC #if _PROCESSOR_DISPATCH __declspec(cpu_specific(Generic)) #endif // This version assumes that the row is a multiple of 8 bytes static inline void ZeroHighPassRow(PIXEL *rowptr, int length) { int count; // Check that the row starts on a 16-byte boundary //assert(ISALIGNED(rowptr, 16)); // Check that the row length (in bytes) is a multiple of 8 byte blocks assert(ISALIGNED(length, 8)); // Convert the length from pixels to 8-byte blocks count = (length >> 3); // This code assumes that at least one 8-byte block will be zeroed assert(count > 0); __asm { pxor mm0, mm0 // Zero a 16 byte register mov eax, rowptr // Load the pointer to the memory block mov ebx, count // Load the count of 8-byte blocks loop: movq [eax], 
mm0 // Write 8 bytes of zeros add eax, 8 // Advance to the next 8 byte block sub ebx, 1 // Decrement the number of blocks jg loop } //_mm_empty(); } #endif #if _PROCESSOR_PENTIUM_4 #if _PROCESSOR_DISPATCH __declspec(cpu_specific(Pentium_4)) #endif #ifndef _WIN64 // This version assumes that the row is a multiple of 16 bytes static inline void ZeroHighPassRow(PIXEL *rowptr, int length) { int count; // Check that the row starts on a 16-byte boundary assert(ISALIGNED(rowptr, 16)); // Check that the row length (in bytes) is a multiple of 16 byte blocks assert(ISALIGNED(length, 16)); // Convert the length from pixels to 16-byte blocks count = (length >> 4); // This code assumes that at least one 16-byte block will be zeroed assert(count > 0); #if 1 //DANREMOVE memset(rowptr, 0, length); #else __asm { pxor xmm0, xmm0 // Zero a 16 byte register mov eax, rowptr // Load the pointer to the memory block mov ebx, count // Load the count of 16-byte blocks loop: movdqa [eax], xmm0 // Write 16 bytes of zeros add eax, 16 // Advance to the next 16 byte block sub ebx, 1 // Decrement the number of blocks jg loop } #endif } #else // This version assumes that the row is a multiple of 16 bytes static inline void ZeroHighPassRow(PIXEL *rowptr, int length) { // Check that the row starts on a 16-byte boundary assert(ISALIGNED(rowptr, 16)); // Check that the row length (in bytes) is a multiple of 16 byte blocks assert(ISALIGNED(length, 16)); memset(rowptr, 0, length); } #endif #endif #if (0 && _DEBUG) // Functions for the finite state machine decoder (debug version) static FSMENTRY *GetFSMTableEntry(FSM *fsm, int index) { // Return the address of the next table entry in the finite state machine return &fsm->next_state[index]; } static void ResetFSM(FSM *fsm) { // Reset the state to the beginning of the finite state machine entries fsm->next_state = fsm->entries; } static void UpdateFSM(FSM *fsm, int next) { // Change the state pointer to the next block of table entries fsm->next_state = 
fsm->entries + (next << FSM_INDEX_SIZE);
}

#else

// Macros for the finite state machine decoder

#if _INDIVIDUAL_LUT
#define GetFSMTableEntry(fsm, index)	(FSMENTRY *)fsm->next_state+index
#define ResetFSM(fsm)					fsm->next_state = fsm->table.entries[0]
#define UpdateFSM(fsm, next)			fsm->next_state = fsm->table.entries[next]

#define GetFSMTableEntryIndividual(fsm, index)	(FSMENTRY *)fsm->table.entries_ind[(fsm->next_state_index << FSM_INDEX_SIZE) | index]
#define ResetFSMIndividual(fsm)					fsm->next_state_index = 0
#define UpdateFSMIndividual(fsm, next)			fsm->next_state_index = next
#else
#define GetFSMTableEntry(fsm, index)	(FSMENTRY *)fsm->next_state+index
#define ResetFSM(fsm)					fsm->next_state = fsm->table.entries
#define UpdateFSM(fsm, next)			fsm->next_state = fsm->table.entries+((int)next << FSM_INDEX_SIZE)
#endif

#endif


#if _DEBUG

// Unpack an FSM table entry into its fields (for setting debugger breakpoints)
static void DebugOutputFSMEntry(FSM *fsm, int index, FSMENTRY *entry)
{
	int pre_skip = (entry->pre_post_skip & 0xFFF);
	int post_skip = (entry->pre_post_skip >> 12);

	// Remove companding
	int value0 = entry->value0 / 32;
	int value1 = entry->value1 / 32;

	// Convert the index to start at the beginning of the table
	index += (int)(fsm->next_state - fsm->table.entries[0]);
}

// Same as DebugOutputFSMEntry for the packed (fast) table entry layout
static void DebugOutputFSMEntryFast(FSM *fsm, int index, FSMENTRYFAST *entry)
{
	int pre_skip = (entry->pre_post_skip & 0xFFF);
	int post_skip = (entry->pre_post_skip >> 12);

	// Remove companding
	int value0 = (entry->values >> 16) / 32;
	int value1 = (entry->values & 0xFFFF) / 32;

	// Convert the index to start at the beginning of the table
	index += (int)(fsm->next_state - fsm->table.entries[0]);
}

// Walk the first block of FSM table entries (for inspection in the debugger)
static void DebugOutputFSM(FSM *fsm)
{
	int num_entries = FSM_INDEX_ENTRIES;
	int i;

	for (i = 0; i < num_entries; i++)
	{
		FSMENTRY *entry = &fsm->table.entries[0][i];
		int pre_skip = (entry->pre_post_skip & 0xFFF);
		int post_skip = (entry->pre_post_skip >> 12);
	}
}

// Print the decoded fields of an FSM table entry to the logfile
static void PrintFSMEntry(FSM *fsm, int index, FSMENTRY *entry, FILE *logfile)
{
	int pre_skip = (entry->pre_post_skip & 0xFFF);
	int post_skip = (entry->pre_post_skip >> 12);

	// Remove companding
	int value0 = entry->value0 / 32;
	int value1 = entry->value1 / 32;

	// Convert the index to start at the beginning of the table
	index += (int)(fsm->next_state - fsm->table.entries[0]);

	if (logfile) {
		fprintf(logfile, "%d, %d, %d, %d, %d\n", index, value0, value1, pre_skip, post_skip);
	}
}

// Print the decoded fields of a packed (fast) FSM table entry to the logfile
static void PrintFSMEntryFast(FSM *fsm, int index, FSMENTRYFAST *entry, FILE *logfile)
{
	int pre_skip = (entry->pre_post_skip & 0xFFF);
	int post_skip = (entry->pre_post_skip >> 12);

	// Remove companding
	int value0 = (entry->values >> 16) / 32;
	int value1 = (entry->values & 0xFFFF) / 32;

	// Convert the index to start at the beginning of the table
	index += (int)(fsm->next_state - fsm->table.entries[0]);

	if (logfile) {
		fprintf(logfile, "%d, %d, %d, %d, %d\n", index, value0, value1, pre_skip, post_skip);
	}
}

#endif

// Read the next byte from the bitstream without the checks done by GetByte
static inline int GetFastByte(BITSTREAM *stream)
{
	// Inline of the third case of GetByte
	uint8_t *lpCurrentWord = stream->lpCurrentWord;

	// Get the next byte from the bitstream
	int byte = (uint32_t )(*(lpCurrentWord++));

	// Update the state of the bitstream
	stream->lpCurrentWord = lpCurrentWord;

#if ERROR_TOLERANT
	// Update the count of bytes used
	stream->nWordsUsed--;
#endif

	// Check that the high bits are zero
	assert((byte & ~BITMASK(8)) == 0);

	return byte;
}

#if 0
static inline int GetFastShort(BITSTREAM *stream)
{
	// Adaptation of the code in GetByte
	uint8_t *lpCurrentWord = stream->lpCurrentWord;

	// Get the next byte from the bitstream
	int byte = (uint32_t )(lpCurrentWord[0]);
	int word = (byte << 8) | (uint32_t )(lpCurrentWord[1]);

	// Update the state of the bitstream
	stream->lpCurrentWord = lpCurrentWord+2;

	// Check that the high bits are zero
	assert((word & ~BITMASK(16)) == 0);

	return word;
}
#endif

// Must declare the byte swap function even though it is an intrinsic
//int _bswap(int);

#if 0
static inline int GetFastLong(BITSTREAM *stream)
{
	uint32_t *lpCurrentWord = (uint32_t *)stream->lpCurrentWord;
	int word = *(lpCurrentWord)++;

	//word = _bswap(word);
	word = SwapInt32BtoN(word);

	stream->lpCurrentWord = (uint8_t *)lpCurrentWord;

	return word;
}
#endif

#if 0	//DAN20041030 not used
// Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps
// Original version that does not use a separate buffer for decoding
bool DecodeBandFSM(FSM *fsm, BITSTREAM *stream, PIXEL *image, int width, int height, int pitch, int quantization)
{
	int index, byte;
	FSMENTRY *entry;
	PIXEL *rowptr = image;
	int column = 0;
	int32_t value;
	size_t bytes_row_size = width * sizeof(PIXEL);
	PIXEL *maxptr;
	int length = width * sizeof(PIXEL);
	//ROI roi = {width, 1};

	// This version of Huffman decoder assumes that one byte
	// is processed as two 4-bit chunks
	assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
	assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

	// Convert the pitch to units of pixels
	pitch /= sizeof(PIXEL);

	// Compute the address of the row after the last row in the band
	maxptr = rowptr + height * pitch;

	// Round up the row length (in bytes) to a multiple of 16 bytes
	length = ALIGN16(length);

#if (0 && DEBUG)
	zerorow_count = 0;
#endif

	ZeroHighPassRow(rowptr, length);

	// Decode runs and magnitude values until the band end trailer is decoded
	for (;;)
	{
		// Read a byte from the bitstream
		byte = GetFastByte(stream);

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// Return when the entire band is decoded
		if (entry->value0 == BAND_END_TRAILER)
		{
			// Zero out the whole subband from here on
			rowptr += pitch;
			while(rowptr < maxptr)
			{
				ZeroHighPassRow(rowptr, length);
				rowptr += pitch;
			}
			ResetFSM(fsm);
			return true;
		}

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// If no magnitude value is decoded
		if (entry->value0 == 0)
		{
			column += entry->pre_skip;

			// The run length scan can go past the end of the row if the row ends
			// with a run of zeros and the next row begins with a run of zeros

			// Did the scan go beyond the end of the row?
			while (column >= width)
			{
				// Compute the starting column for the next row
				column -= width;

				// Advance to the next row
				rowptr += pitch;
				if(rowptr < maxptr)
					ZeroHighPassRow(rowptr, length);
			}
		}
		// If there is only one decoded magnitude value
		else if(entry->value1 == 0)
		{
			// Undo quantization and scaling
			value = quantization * entry->value0;

			column += entry->pre_skip;

			// The run length scan can go past the end of the row if the row ends
			// with a run of zeros and the next row begins with a run of zeros

			// Did the scan go beyond the end of the row?
			while (column >= width)
			{
				// Compute the starting column for the next row
				column -= width;

				// Advance to the next row
				rowptr += pitch;
				if(rowptr < maxptr)
					ZeroHighPassRow(rowptr, length);
			}

			// Fill in the decoded magnitude

			// Check the column before storing the value
			//assert(index < width);
			assert(0 <= column && column < width);

			// Store the saturated value at the position found in the scan
			rowptr[column] = SATURATE(value);

			column += entry->post_skip;

			// Did the scan go beyond the end of the row?
			if (column >= width)
			{
				// Compute the starting column for the next row
				column -= width;

				// Advance to the next row
				rowptr += pitch;
				if(rowptr < maxptr)
					ZeroHighPassRow(rowptr, length);
			}
		}
		// If there are two decoded magnitude values
		else
		{
			// Check the column before storing values
			assert(0 <= column && column < width);

			if(column < width-1)
			{
				value = quantization * entry->value0;
				rowptr[column++] = SATURATE(value);
				value = quantization * entry->value1;
				rowptr[column++] = SATURATE(value);
			}
			else
			{
				value = quantization * entry->value0;
				rowptr[column] = SATURATE(value);
				value = quantization * entry->value1;
				rowptr += pitch;
				if(rowptr < maxptr)
					ZeroHighPassRow(rowptr, length);
				column = 0;
				rowptr[column++] = SATURATE(value);
			}
		}

		// decode the second 4-bit chunk
		index = byte & ((1<<FSM_INDEX_SIZE)-1);

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER)
		{
			// Zero out the whole subband from here on
			rowptr += pitch;
			while(rowptr < maxptr)
			{
				ZeroHighPassRow(rowptr, length);
				rowptr += pitch;
			}
			ResetFSM(fsm);
			return true;
		}

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// If no magnitude value is decoded
		if (entry->value0 == 0)
		{
			column += entry->pre_skip;

			// The run length scan can go past the end of the row if the row ends
			// with a run of zeros and the next row begins with a run of zeros

			// Did the scan go beyond the end of the row?
while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); } } // If there is only one decoded magnitude value else if (entry->value1 == 0) { // Undo quantization and scaling int32_t value = quantization * entry->value0; column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); } // Fill in the decoded magnitude // Check the column before storing the value //assert(index < width); assert(0 <= column && column < width); // Store the saturated value at the position found in the scan rowptr[column] = SATURATE(value); column += entry->post_skip; // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); } } // If there are two decoded magnitude values else { // Check the column before storing values assert(0 <= column && column < width); if(column < width-1) { value = quantization * entry->value0; rowptr[column++] = SATURATE(value); value = quantization * entry->value1; rowptr[column++] = SATURATE(value); } else { value = quantization * entry->value0; rowptr[column] = SATURATE(value); value = quantization * entry->value1; rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); column = 0; rowptr[column++] = SATURATE(value); } } } } #endif // Decode a subband of highpass coefficients using a finite state machine. // One byte is read from the bitstream each time and decoded in two steps. 
// New version that uses a buffer aligned to the cache for decoding.
//
// NOTE(review): everything from here down to DecodeBandFSM16sNoGap2Pass is
// compiled out with #if 0 and retained for reference only.

#if 0
// Zero a block of whole cache lines starting at ptrCacheLines.
// Preconditions: 64-byte cache lines, cache-line-aligned pointer, count > 0.
static inline void ZeroHighPassBuffer(PIXEL *ptrCacheLines, int numCacheLines)
{
	// This routine assume that the cache line size is 64 bytes
	assert(_CACHE_LINE_SIZE == 64);

	// This routine assumes that the input pointer is aligned to a cache line
	assert(ISALIGNED(ptrCacheLines, _CACHE_LINE_SIZE));

	// This routine assumes that at least one cache line will be written
	assert(numCacheLines > 0);

#if __GNUC__
	memset(ptrCacheLines, 0, numCacheLines * _CACHE_LINE_SIZE);
#else
	// MSVC-style inline x86 assembly (32-bit builds only)
	__asm
	{
		pxor xmm0, xmm0				// Zero a 16 byte register
		mov eax, ptrCacheLines		// Load the pointer to the memory block
		mov ebx, numCacheLines		// Load the count of the number of cache lines
loop:	movdqa [eax], xmm0			// Write 64 bytes of zeros using aligned stores
		movdqa [eax+16], xmm0
		movdqa [eax+32], xmm0
		movdqa [eax+48], xmm0
		add eax, 64					// Advance to the next cache line
		sub ebx, 1					// Decrement the number of cache lines
		jg loop
	}
#endif
	// The routine returns the pointer to the cache line after zeroing the block
}
#endif

#if 0
// Copy one decoded row from the cache-aligned scratch buffer to the image row.
// The length is in bytes and must be a multiple of 16.
static inline void CopyRowBuffer(char *rowptr, PIXEL *buffer, int length)
{
	// Note that the length is in units of bytes (not pixels)
	int count;		// Number of 16-byte blocks to copy

	// Check that the row length is an integer multiple of 16-byte blocks
	assert(ISALIGNED(length, 16));

	// Convert the row length to the number of 16-byte blocks to copy
	count = length >> 4;

	// This routine assumes that at least one 16 byte block will be copied
	assert(count > 0);

#if __GNUC__
	// Use standard memory copy
	memcpy(rowptr, buffer, length);
#else
	// Copy a multiple of 16 byte blocks
	__asm
	{
		mov eax, rowptr			// Load the pointer to the destination
		mov ebx, buffer			// Load the pointer to the source
		mov ecx, count			// Load the number of 16-byte blocks to copy
loop:	movdqa xmm0, [ebx]		// Load 16 bytes from the source
		movntdq [eax], xmm0		// Copy 16 bytes to the destination
		add eax, 16				// Advance to the group of 16 bytes
		add ebx, 16
		sub ecx, 1				// Decrement the number of blocks to copy
		jg loop
	}
#endif
}
#endif

// DecodeBandFSMBuffered is no longer used
#if 0	//dan20041030 not used
// Decode one highpass subband with the FSM Huffman decoder, assembling each
// row in a cache-aligned scratch buffer before copying it to the image.
// Each byte from the bitstream is decoded as two 4-bit chunks.
// Returns true when the band-end trailer has been decoded.
bool DecodeBandFSMBuffered(FSM *fsm, BITSTREAM *stream, PIXEL *image,
						   int width, int height, int pitch, int quantization,
						   char *decoding_buffer, size_t decoding_buffer_size)
{
	char *rowptr = (char *)image;				// Pointer to current row
	char *maxptr = rowptr + height * pitch;		// Address of row after the last row
	FSMENTRY *entry;
	int index;
	int byte;
	int column = 0;
	int32_t value;
	size_t row_size;
	size_t cache_row_size;		// Size of a row in bytes
	int cache_line_count;		// Size of the buffer in cache lines
	PIXEL *buffer;				// Pixel pointer to the buffer
	int length;					// Length of row in bytes

	// Check that the processing size allows two chunks per byte
	assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);

	// The bitstream buffer should be empty
	assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

	// Compute the number of cache lines used in the buffer
	row_size = width * sizeof(PIXEL);
	cache_row_size = ALIGN(row_size, _CACHE_LINE_SIZE);
	cache_line_count = (cache_row_size >> _CACHE_LINE_SHIFT);

	// Check that the buffer is large enough
	assert(decoding_buffer != NULL && decoding_buffer_size >= cache_row_size);

	// Check that the buffer starts on a cache line boundary
	assert(ISALIGNED(decoding_buffer, _CACHE_LINE_SIZE));

	// This routine assumes that the rows are contiguous and the pitch is a multiple of 16 bytes
	length = pitch;
	assert(length == ALIGN(row_size, 16));

	// Cast the buffer pointer for pixel access
	buffer = (PIXEL *)decoding_buffer;

	// Zero the decoding buffer
	ZeroHighPassBuffer(buffer, cache_line_count);

	// Decode runs and magnitude values until the band end trailer is decoded
	for (;;)
	{
		// Read a byte from the bitstream
		byte = GetFastByte(stream);

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// Return when the entire band is decoded
		if (entry->value0 == BAND_END_TRAILER)
		{
			// Copy the buffer to the row if not already beyond the band
			if (rowptr < maxptr) CopyRowBuffer(rowptr, buffer, length);

			// Advance to the next row
			rowptr += pitch;

			// Zero the remaining rows in the subband
			while (rowptr < maxptr)
			{
				ZeroHighPassRow((PIXEL *)rowptr, length);
				rowptr += pitch;
			}

			// Reset the finite state machine to the root node in the Huffman tree
			ResetFSM(fsm);

			// Return indication that the band was fully decoded
			return true;
		}

		// Set the finite state machine to the next state in the Huffman tree
		UpdateFSM(fsm, entry->next_state);

		// No magnitude values decoded?
		if (entry->value0 == 0)
		{
			// No magnitudes decoded so just advance the column pointer
			column += entry->pre_skip;

			// The run length scan can go past the end of the row if the row ends
			// with a run of zeros and the next row begins with a run of zeros

			// Did the scan go beyond the end of the row?
			while (column >= width)
			{
				// Compute the starting column for the next row
				column -= width;

				// Advance to the next row
				assert(rowptr < maxptr);
				CopyRowBuffer(rowptr, buffer, length);
				rowptr += pitch;

				// Zero the decoding buffer if there are more rows to process
				if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
			}
		}
		// Only one magnitude value decoded?
		else if (entry->value1 == 0)
		{
			// Process the magnitude value that was decoded

			// Undo quantization and scaling
			value = quantization * entry->value0;

			// Advance to the column where the value should be placed
			column += entry->pre_skip;

			// The run length scan can go past the end of the row if the row ends
			// with a run of zeros and the next row begins with a run of zeros

			// Did the scan go beyond the end of the row?
			while (column >= width)
			{
				// Compute the starting column for the next row
				column -= width;

				// Advance to the next row
				assert(rowptr < maxptr);
				CopyRowBuffer(rowptr, buffer, length);
				rowptr += pitch;

				// Zero the decoding buffer if there are more rows to process
				if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
			}

			// Fill in the decoded magnitude

			// Check the column before storing the value
			assert(0 <= column && column < width);

			// Store the saturated value at the position found in the scan
			buffer[column] = SATURATE(value);
			column += entry->post_skip;

			// Did the scan go beyond the end of the row?
			if (column >= width)
			{
				// Compute the starting column for the next row
				column -= width;

				// Advance to the next row
				assert(rowptr < maxptr);
				CopyRowBuffer(rowptr, buffer, length);
				rowptr += pitch;

				// Zero the decoding buffer if there are more rows to process
				if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
			}
		}
		else	// Two magnitude values were decoded
		{
			// Check the column before storing values
			assert(0 <= column && column < width);

			if (column < width - 1)
			{
				// Dequantize and store the first value
				value = quantization * entry->value0;
				buffer[column++] = SATURATE(value);

				// Dequantize and store the second value
				value = quantization * entry->value1;
				buffer[column++] = SATURATE(value);
			}
			else
			{
				// Dequantize and store the first value in the current row
				value = quantization * entry->value0;
				buffer[column] = SATURATE(value);

				// Dequantize the second value
				value = quantization * entry->value1;

				// Advance to the next row
				assert(rowptr < maxptr);
				CopyRowBuffer(rowptr, buffer, length);
				rowptr += pitch;

				// Zero the decoding buffer if there are more rows to process
				if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);

				// Reset the column to the beginning of the row
				column = 0;

				// Store the second value in the new row
				buffer[column++] = SATURATE(value);
			}
		}

		// Decode the second 4-bit chunk
		index = byte & FSM_INDEX_MASK;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER)
		{
			// Copy the buffer to the row if not already beyond the band
			if (rowptr < maxptr) CopyRowBuffer(rowptr, buffer, length);

			// Advance to the next row
			rowptr += pitch;

			// Zero the remaining rows in the subband
			while (rowptr < maxptr)
			{
				ZeroHighPassRow((PIXEL *)rowptr, length);
				rowptr += pitch;
			}

			// Reset the finite state machine to the root node in the Huffman tree
			ResetFSM(fsm);

			// Return indication that the band was fully decoded
			return true;
		}

		// Set the finite state machine to the next state in the Huffman tree
		UpdateFSM(fsm, (int)entry->next_state);

		// If no magnitude value is decoded
		if (entry->value0 == 0)
		{
			column += entry->pre_skip;

			// The run length scan can go past the end of the row if the row ends
			// with a run of zeros and the next row begins with a run of zeros

			// Did the scan go beyond the end of the row?
			while (column >= width)
			{
				// Compute the starting column for the next row
				column -= width;

				// Advance to the next row
				assert(rowptr < maxptr);
				CopyRowBuffer(rowptr, buffer, length);
				rowptr += pitch;

				// Zero the decoding buffer if there are more rows to process
				if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
			}
		}
		// If there is only one decoded magnitude value
		else if (entry->value1 == 0)
		{
			// Undo quantization and scaling
			int32_t value = quantization * entry->value0;

			column += entry->pre_skip;

			// The run length scan can go past the end of the row if the row ends
			// with a run of zeros and the next row begins with a run of zeros

			// Did the scan go beyond the end of the row?
			while (column >= width)
			{
				// Compute the starting column for the next row
				column -= width;

				// Advance to the next row
				assert(rowptr < maxptr);
				CopyRowBuffer(rowptr, buffer, length);
				rowptr += pitch;

				// Zero the decoding buffer if there are more rows to process
				if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
			}

			// Fill in the decoded magnitude

			// Check the column before storing the value
			//assert(index < width);
			assert(0 <= column && column < width);

			// Store the saturated value at the position found in the scan
			buffer[column] = SATURATE(value);
			column += entry->post_skip;

			// Did the scan go beyond the end of the row?
			if (column >= width)
			{
				// Compute the starting column for the next row
				column -= width;

				// Advance to the next row
				assert(rowptr < maxptr);
				CopyRowBuffer(rowptr, buffer, length);
				rowptr += pitch;

				// Zero the decoding buffer if there are more rows to process
				if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);
			}
		}
		// If there are two decoded magnitude values
		else
		{
			// Check the column before storing values
			assert(0 <= column && column < width);

			if (column < width-1)
			{
				value = quantization * entry->value0;
				buffer[column++] = SATURATE(value);
				value = quantization * entry->value1;
				buffer[column++] = SATURATE(value);
			}
			else
			{
				value = quantization * entry->value0;
				buffer[column] = SATURATE(value);
				value = quantization * entry->value1;

				// Advance to the next row
				assert(rowptr < maxptr);
				CopyRowBuffer(rowptr, buffer, length);
				rowptr += pitch;

				// Zero the decoding buffer if there are more rows to process
				if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count);

				// Reset the column to the beginning of the row
				column = 0;
				buffer[column++] = SATURATE(value);
			}
		}
	}
}
#endif

#if 0	//dan20041030 not used
// Decode a subband using FSM, combine the two results decoded from one byte
// Both table entries for a byte are fetched up front, then the skip/store
// logic is merged into one pass over the combinations of decoded magnitudes.
bool DecodeBandFSMCombined(FSM *fsm, BITSTREAM *stream, PIXEL *image,
						   int width, int height, int pitch, int quantization)
{
	int index, skip;
	uint8_t byte;
	FSMENTRY *entry1, *entry2;
	PIXEL *rowptr = image;
	int row = 0, column = 0;
	int32_t value,bytes_row_size = width*sizeof(PIXEL);
	PIXEL *maxptr = rowptr + height*pitch;

	// This Huffman decoder assumes each byte is processed as two 4-bit chunks
	assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);

	ZeroHighPassRow(rowptr, width);

	// Double check that the bitstream buffer is empty
	assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

	// Decode runs and magnitude values until the band end trailer is decoded
	for (;;)
	{
		// Read a byte from the bitstream
		//byte = GetBits(stream, BITSTREAM_WORD_SIZE);
#if 0
		byte = GetByte(stream);
		if (stream->error != BITSTREAM_ERROR_OKAY)
		{
			stream->error = VLC_ERROR_NOTFOUND;
			return false;
		}
#else
		// Inline of the third case of GetByte
		uint8_t *lpCurrentWord = stream->lpCurrentWord;

		// Get the next byte from the bitstream
		byte = (uint32_t )(*(lpCurrentWord++));

		// Update the state of the bitstream
		stream->lpCurrentWord = lpCurrentWord;

		// Check that the high bits are zero
		assert((byte & ~BITMASK(8)) == 0);
#endif

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;
		entry1 = GetFSMTableEntry(fsm, index);
		UpdateFSM(fsm, entry1->next_state);

		// decode the second 4-bit chunk
		index = byte & ((1<<FSM_INDEX_SIZE)-1);
		entry2 = GetFSMTableEntry(fsm, index);
		UpdateFSM(fsm, entry2->next_state);

		// Return when the subband is completely decoded
		if(entry1->value0 == BAND_END_TRAILER || entry2->value0 == BAND_END_TRAILER)
		{
			ResetFSM(fsm);
			return true;
		}

		// If no magnitude value is decoded at the first step
		if (entry1->value0 == 0)
		{
			// If no magnitude is decoded at the second step
			if(entry2->value0 == 0)
			{
				column += entry1->pre_skip+entry2->pre_skip;

				// Did the scan go beyond the end of the row?
				while (column >= width)
				{
					// Compute the starting column for the next row
					column -= width;

					// Advance to the next row
					rowptr += pitch;
					if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
				}
			}
			// If one magnitude is decoded at the second step
			else if(entry2->value1 == 0)
			{
				// Skip to the non-zero position
				column += entry1->pre_skip+entry2->pre_skip;

				// Did the scan go beyond the end of the row?
				while (column >= width)
				{
					// Compute the starting column for the next row
					column -= width;

					// Advance to the next row
					rowptr += pitch;
					if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
				}

				// Fill in the decoded magnitude

				// Undo quantization and scaling
				value = quantization * entry2->value0;

				// Check the column before storing the value
				//assert(index < width);
				assert(0 <= column && column < width);

				// Store the saturated value
				rowptr[column] = SATURATE(value);
				column += entry2->post_skip;

				// Did the scan go beyond the end of the row?
				if (column >= width)
				{
					// Compute the starting column for the next row
					column -= width;

					// Advance to the next row
					rowptr += pitch;
					if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
				}
			}
			// If two magnitudes are decoded at the second step
			else
			{
				column += entry1->pre_skip;

				// Did the scan go beyond the end of the row?
				while (column >= width)
				{
					// Compute the starting column for the next row
					column -= width;

					// Advance to the next row
					rowptr += pitch;
					if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
				}

				// Check the column before storing values
				assert(0 <= column && column < width);

				if(column < width-1)
				{
					value = quantization * entry2->value0;
					rowptr[column++] = SATURATE(value);
					value = quantization * entry2->value1;
					rowptr[column++] = SATURATE(value);
				}
				else
				{
					value = quantization * entry2->value0;
					rowptr[column] = SATURATE(value);
					value = quantization * entry2->value1;
					rowptr += pitch;
					if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
					column = 0;
					rowptr[column++] = SATURATE(value);
				}
			}
		}
		// If only one magnitude is decoded at the first step
		else if(entry1->value1 == 0)
		{
			// Undo quantization and scaling
			value = quantization * entry1->value0;

			column += entry1->pre_skip;

			// Did the scan go beyond the end of the row?
			while (column >= width)
			{
				// Compute the starting column for the next row
				column -= width;

				// Advance to the next row
				rowptr += pitch;
				if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
			}

			// Fill in the decoded magnitude

			// Check the column before storing the value
			//assert(index < width);
			assert(0 <= column && column < width);

			// Store the saturated value at the position found in the scan
			rowptr[column] = SATURATE(value);

			// If no magnitude is decoded at the second step
			if(entry2->value0 == 0)
			{
				column += entry1->post_skip+entry2->pre_skip;

				// Did the scan go beyond the end of the row?
				while (column >= width)
				{
					// Compute the starting column for the next row
					column -= width;

					// Advance to the next row
					rowptr += pitch;
					if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
				}
			}
			// If one magnitude is decoded at the second step
			else if (entry2->value1 == 0)
			{
				// Undo quantization and scaling
				value = quantization * entry2->value0;

				column += entry1->post_skip+entry2->pre_skip;

				// Did the scan go beyond the end of the row?
				while (column >= width)
				{
					// Compute the starting column for the next row
					column -= width;

					// Advance to the next row
					rowptr += pitch;
					if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
				}

				// Fill in the decoded magnitude

				// Check the column before storing the value
				assert(0 <= column && column < width);

				// Store the saturated value at the position found in the scan
				rowptr[column] = SATURATE(value);
				column += entry2->post_skip;

				// Did the scan go beyond the end of the row?
				if (column >= width)
				{
					// Compute the starting column for the next row
					column -= width;

					// Advance to the next row
					rowptr += pitch;
					if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
				}
			}
			// If two magnitudes are decoded at the second step
			else
			{
				column += entry1->post_skip;

				// Did the scan go beyond the end of the row?
				if (column >= width)
				{
					// Compute the starting column for the next row
					column -= width;

					// Advance to the next row
					rowptr += pitch;
					if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
				}

				// Check the column before storing values
				assert(0 <= column && column < width);

				if(column < width-1)
				{
					value = quantization * entry2->value0;
					rowptr[column++] = SATURATE(value);
					value = quantization * entry2->value1;
					rowptr[column++] = SATURATE(value);
				}
				else
				{
					value = quantization * entry2->value0;
					rowptr[column] = SATURATE(value);
					value = quantization * entry2->value1;
					rowptr += pitch;
					if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
					column = 0;
					rowptr[column++] = SATURATE(value);
				}
			}
		}
		// If two magnitudes are decoded at the first step
		else
		{
			// Check the column before storing values
			assert(0 <= column && column < width);

			if(column < width-1)
			{
				value = quantization * entry1->value0;
				rowptr[column++] = SATURATE(value);
				value = quantization * entry1->value1;
				rowptr[column++] = SATURATE(value);
			}
			else
			{
				value = quantization * entry1->value0;
				rowptr[column] = SATURATE(value);
				value = quantization * entry1->value1;
				rowptr += pitch;
				if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
				column = 0;
				rowptr[column++] = SATURATE(value);
			}

			// If two magnitudes are decoded at the first step
			// then at most one more magnitude can be decoded at the second step
			assert(entry2->value1 == 0);

			// If no magnitude is decoded at the second step
			if(entry2->value0 == 0)
			{
				column += entry2->pre_skip;	// entry2->pre_skip <=4 must be true

				// Did the scan go beyond the end of the row?
				if (column >= width)
				{
					// Compute the starting column for the next row
					column -= width;

					// Advance to the next row
					rowptr += pitch;
					if(rowptr < maxptr) ZeroHighPassRow(rowptr, width);
				}
			}
			// If one magnitude is decoded at the second step
			else
			{
				column += entry2->pre_skip;	// must be a small zero run

				// Did the scan go beyond the end of the row?
				if (column >= width)
				{
					// Compute the starting column for the next row
					column -= width;

					// Advance to the next row
					rowptr += pitch;
					if (rowptr < maxptr) ZeroHighPassRow(rowptr, width);
				}

				// Fill in the decoded magnitude

				// Undo quantization and scaling
				value = quantization * entry2->value0;

				// Check the column before storing the value
				assert(0 <= column && column < width);

				// Store the saturated value at the position found in the scan
				rowptr[column] = SATURATE(value);
				column += entry2->post_skip;

				// Did the scan go beyond the end of the row?
				if (column >= width)
				{
					// Compute the starting column for the next row
					column -= width;

					// Advance to the next row
					rowptr += pitch;
					if (rowptr < maxptr) ZeroHighPassRow(rowptr, width);
				}
			}
		}
	}
}
#endif

#if 0	//dan20041030 not used
// Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps
// Original version that does not use a separate buffer for decoding
// Writes 8-bit saturated coefficients (SATURATE8S) directly into the image.
bool DecodeBandFSM8s(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch)
{
	int index, byte;
	FSMENTRY *entry;
	PIXEL8S *rowptr = image;
	int column = 0;
	int32_t value;
	PIXEL8S *maxptr;
	int length = width * sizeof(PIXEL8S);
	//ROI roi = {width, 1};

	// This version of Huffman decoder assumes that one byte
	// is processed as two 4-bit chunks
	assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);

	assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

	// Convert the pitch to units of pixels
	pitch /= sizeof(PIXEL8S);

	// Compute the address of the row after the last row in the band
	maxptr = rowptr + height * pitch;

	// Round up the row length (in bytes) to a multiple of 16 bytes
	length = ALIGN16(length);

	ZeroHighPassRow((PIXEL *)rowptr, length);

	// Decode runs and magnitude values until the band end trailer is decoded
	for (;;)
	{
		// Read a byte from the bitstream
		byte = GetFastByte(stream);

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// Return when the entire band is decoded
		if (entry->value0 == BAND_END_TRAILER)
		{
			// Zero out the whole subband from here on
			rowptr += pitch;
			while(rowptr < maxptr)
			{
				ZeroHighPassRow((PIXEL *)rowptr, length);
				rowptr += pitch;
			}
			ResetFSM(fsm);
			return true;
		}

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// If no magnitude value is decoded
		if (entry->value0 == 0)
		{
			column += entry->pre_skip;

			// The run length scan can go past the end of the row if the row ends
			// with a run of zeros and the next row begins with a run of zeros

			// Did the scan go beyond the end of the row?
			while (column >= width)
			{
				// Compute the starting column for the next row
				column -= width;

				// Advance to the next row
				rowptr += pitch;
				if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
			}
		}
		// If there is only one decoded magnitude value
		else if(entry->value1 == 0)
		{
			value = entry->value0;
			column += entry->pre_skip;

			// The run length scan can go past the end of the row if the row ends
			// with a run of zeros and the next row begins with a run of zeros

			// Did the scan go beyond the end of the row?
			while (column >= width)
			{
				// Compute the starting column for the next row
				column -= width;

				// Advance to the next row
				rowptr += pitch;
				if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
			}

			// Fill in the decoded magnitude

			// Check the column before storing the value
			assert(0 <= column && column < width);

			// Store the saturated value at the position found in the scan
			rowptr[column] = SATURATE8S(value);
			column += entry->post_skip;

			// Did the scan go beyond the end of the row?
			if (column >= width)
			{
				// Compute the starting column for the next row
				column -= width;

				// Advance to the next row
				rowptr += pitch;
				if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
			}
		}
		// If there are two decoded magnitude values
		else
		{
			// Check the column before storing values
			assert(0 <= column && column < width);

			if(column < width-1)
			{
				value = entry->value0;
				rowptr[column++] = SATURATE8S(value);
				value = entry->value1;
				rowptr[column++] = SATURATE8S(value);
			}
			else
			{
				value = entry->value0;
				rowptr[column] = SATURATE8S(value);
				value = entry->value1;
				rowptr += pitch;
				if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
				column = 0;
				rowptr[column++] = SATURATE8S(value);
			}
		}

		// decode the second 4-bit chunk
		index = byte & ((1<<FSM_INDEX_SIZE)-1);

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER)
		{
			// Zero out the whole subband from here on
			rowptr += pitch;
			while(rowptr < maxptr)
			{
				ZeroHighPassRow((PIXEL *)rowptr, length);
				rowptr += pitch;
			}
			ResetFSM(fsm);
			return true;
		}

		// Set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// If no magnitude value is decoded
		if (entry->value0 == 0)
		{
			column += entry->pre_skip;

			// The run length scan can go past the end of the row if the row ends
			// with a run of zeros and the next row begins with a run of zeros

			// Did the scan go beyond the end of the row?
			while (column >= width)
			{
				// Compute the starting column for the next row
				column -= width;

				// Advance to the next row
				rowptr += pitch;
				if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
			}
		}
		// If there is only one decoded magnitude value
		else if (entry->value1 == 0)
		{
			value = entry->value0;
			column += entry->pre_skip;

			// The run length scan can go past the end of the row if the row ends
			// with a run of zeros and the next row begins with a run of zeros

			// Did the scan go beyond the end of the row?
			while (column >= width)
			{
				// Compute the starting column for the next row
				column -= width;

				// Advance to the next row
				rowptr += pitch;
				if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
			}

			// Fill in the decoded magnitude

			// Check the column before storing the value
			assert(0 <= column && column < width);

			// Store the saturated value at the position found in the scan
			rowptr[column] = SATURATE8S(value);
			column += entry->post_skip;

			// Did the scan go beyond the end of the row?
			if (column >= width)
			{
				// Compute the starting column for the next row
				column -= width;

				// Advance to the next row
				rowptr += pitch;
				if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
			}
		}
		// If there are two decoded magnitude values
		else
		{
			// Check the column before storing values
			assert(0 <= column && column < width);

			if(column < width-1)
			{
				value = entry->value0;
				rowptr[column++] = SATURATE8S(value);
				value = entry->value1;
				rowptr[column++] = SATURATE8S(value);
			}
			else
			{
				value = entry->value0;
				rowptr[column] = SATURATE8S(value);
				value = entry->value1;
				rowptr += pitch;
				if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length);
				column = 0;
				rowptr[column++] = SATURATE8S(value);
			}
		}
	}
}
#endif

// same as DecodeBandFSM8sNoGap but output to 16bit data
//
// NOTE(review): two-pass decoder — the first pass stores raw magnitudes and a
// second pass (after the SecondPass label) ORs additional magnitude bits into
// the high byte of each coefficient.  The declared pointer types mix PIXEL *
// and PIXEL16S * (rowptr vs. image/bandendptr) — presumably the same 16-bit
// type; verify.  The quant parameter does not appear to be referenced in the
// body — confirm whether dequantization happens elsewhere.
bool DecodeBandFSM16sNoGap2Pass(FSM *fsm, BITSTREAM *stream, PIXEL16S *image,
								int width, int height, int pitch, int quant)
{
	int index, byte;
	FSMENTRY *entry;
	PIXEL *rowptr = (PIXEL *)image;
	PIXEL16S *bandendptr;
	int value;

#if ERROR_TOLERANT
	uint8_t *startCurrentWord = stream->lpCurrentWord;
	int32_t startWordsUsed = stream->nWordsUsed;
#endif

#if _FSMBUFFER
	__declspec(align(32)) FSMENTRY buffer;
#endif

	if (image == NULL)
	{
		return false;
	}

	// Reset the decoder
	ResetFSM(fsm);

	pitch /= sizeof(PIXEL16S);

	// Zero out the entire subband
	ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S));

	// This Huffman decoder assumes each byte is processed as two 4-bit chunks
	assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);

	assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

	bandendptr = rowptr + height * pitch;

#if 0	// test for errors.
	// (disabled) randomly corrupt the bitstream to exercise the error handling
	{
		if((rand() % 10) == 1)
			stream->lpCurrentWord[rand()%50] ^= 1;
	}
#endif

	// First pass: store each decoded magnitude pair directly into the band
	// (the second pass below ORs more significant bits into the high byte).

	// Decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
	while((intptr_t)bandendptr - (intptr_t)rowptr >= 0)
#else
	for (;;)
#endif
	{
		// Read a byte from the bitstream
#if ERROR_TOLERANT
		if(stream->nWordsUsed)
		{
			byte = GetFastByte(stream);
		}
		else
		{
			break;
		}
#else
		byte = GetFastByte(stream);
#endif

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
		memcpy(&buffer, entry, sizeof(FSMENTRY));
		entry = &buffer;
#endif

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER)
		{
			assert(rowptr <= bandendptr);
			ResetFSM(fsm);
			goto SecondPass;
		}

		// Set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip & 0xfff];

		// Write down the first decoded magnitude
		value = entry->value0;
		rowptr[0] = value;//SATURATE(value);

		// Write down the second decoded magnitude
		value = entry->value1;
		rowptr[1] = value;//SATURATE(value);

		// Skip the appropriate distance
		rowptr = &rowptr[entry->pre_post_skip >> 12];

		// decode the second 4-bit chunk
		index = byte & ((1<<FSM_INDEX_SIZE)-1);

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
		memcpy(&buffer, entry, sizeof(FSMENTRY));
		entry = &buffer;
#endif

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER)
		{
			assert(rowptr <= bandendptr);
			ResetFSM(fsm);
			goto SecondPass;
		}

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip & 0xfff];

		// Write down the first decoded magnitude
		value = entry->value0;
		rowptr[0] = value;//SATURATE(value);

		// Write down the second decoded magnitude
		value = entry->value1;
		rowptr[1] = value;//SATURATE(value);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip >> 12];
	}

SecondPass:

	// Second pass: rewind to the start of the band and OR each decoded
	// magnitude into the high byte (value << 8) of the coefficients that
	// were stored by the first pass.
	rowptr = (PIXEL16S *)image;

	AlignBits(stream);
	AlignBitsTag(stream);

	// NOTE(review): skips four bytes after tag alignment — presumably a
	// marker between the two coding passes; verify against the encoder.
	stream->lpCurrentWord += 4;
	stream->nWordsUsed -= 4;

	// Decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
	while((intptr_t)bandendptr - (intptr_t)rowptr >= 0)
#else
	for (;;)
#endif
	{
		// Read a byte from the bitstream
#if ERROR_TOLERANT
		if(stream->nWordsUsed)
		{
			byte = GetFastByte(stream);
		}
		else
		{
			break;
		}
#else
		byte = GetFastByte(stream);
#endif

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
		memcpy(&buffer, entry, sizeof(FSMENTRY));
		entry = &buffer;
#endif

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER)
		{
			assert(rowptr <= bandendptr);
			ResetFSM(fsm);
			return true;
		}

		// Set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip & 0xfff];

		// Write down the first decoded magnitude
		value = entry->value0;
		rowptr[0] |= value << 8;

		// Write down the second decoded magnitude
		value = entry->value1;
		rowptr[1] |= value << 8;

		// Skip the appropriate distance
		rowptr = &rowptr[entry->pre_post_skip >> 12];

		// decode the second 4-bit chunk
		index = byte & ((1<<FSM_INDEX_SIZE)-1);

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
		memcpy(&buffer, entry, sizeof(FSMENTRY));
		entry = &buffer;
#endif

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER)
		{
			assert(rowptr <= bandendptr);
			ResetFSM(fsm);
			return true;
		}

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip & 0xfff];

		// Write down the first decoded magnitude
		value = entry->value0;
		rowptr[0] |= value << 8;

		// Write down the second decoded magnitude
		value = entry->value1;
		rowptr[1] |= value << 8;

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip >> 12];
	}

#if ERROR_TOLERANT
	// Error-recovery path: the loop exited without seeing the band trailer.

	// Reset the decoder
	ResetFSM(fsm);

	// Backup the bitstream to the beginning of the band
	stream->lpCurrentWord = startCurrentWord;
	stream->nWordsUsed = startWordsUsed;

#if 0
	AlignBitsTag(stream);

	// Read the debugging marker
	{
		TAGVALUE segment;
		do
		{
			segment = GetTagValue(stream);
		} while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);

		stream->lpCurrentWord -= 4;
		stream->nWordsUsed += 4;
	}
#else
	SkipSubband(stream);
#endif
#endif

	return true;
}

// Same as DecodeBandFSM8sNoGap but output to 16bit data
//
// NOTE(review): only the prologue and the fast first loop of this function
// are shown here; the body continues below.  The _DEBUG build adds a logfile
// parameter to the signature.
#if _DEBUG
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image,
						   int width, int height, int pitch, FILE *logfile)
#else
bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image,
						   int width, int height, int pitch)
#endif
{
	int index, byte;
	FSMENTRY *entry;
	FSMENTRYFAST *entryfast;
	PIXEL16S *rowptr = image;
	PIXEL16S *bandendptr;
	PIXEL16S *fastendptr;
	int32_t value;
	uint8_t *startCurrentWord = stream->lpCurrentWord;
	uint8_t *CurrentWord = stream->lpCurrentWord;
	int32_t startWordsUsed = stream->nWordsUsed;
	ptrdiff_t offset;

#if _FSMBUFFER
	__declspec(align(32)) FSMENTRY buffer;
#endif

#if (0 && DEBUG)
	DebugOutputBitstreamPosition(stream);
	DebugOutputBitstreamBytes(stream, 16);
#endif

	// Reset the decoder
	ResetFSM(fsm);

	// Sanity check on the FSM initialization flag (reject corrupted state)
	if (fsm->InitizedRestore != 1 && fsm->InitizedRestore != 0)
		return false;

#if (0 && DEBUG)
	DebugOutputFSM(fsm);
#endif

	pitch /= sizeof(PIXEL16S);

	// Zero out the entire subband
	ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S));
	//memset(rowptr, 0, pitch*height*sizeof(PIXEL16S));

	// This Huffman decoder assumes each byte is processed as two 4-bit chunks
	assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE);

	assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

	bandendptr = rowptr + height * pitch;

#if 0	// test for errors.
{ if((rand() % 10) == 1) stream->lpCurrentWord[rand()%50] ^= 1; } #endif fastendptr = bandendptr; fastendptr -= 644; // two 320 zero runs with 4 zeros after is the maximum step size per loop. // Decode runs and magnitude values until the entire band is decoded while(rowptr < fastendptr) { // Read a byte from the bitstream byte = *CurrentWord++; // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entryfast = (FSMENTRYFAST *)GetFSMTableEntry(fsm, index); #if (0 && DEBUG) //DebugOutputFSMEntryFast(fsm, index, entryfast); PrintFSMEntryFast(fsm, index, entryfast, logfile); #endif // Set the pointer to the next state UpdateFSM(fsm, (int)entryfast->next_state); // Skip the decoded zero runs rowptr = &rowptr[entryfast->pre_post_skip & 0x1ff]; // Write down the first decoded magnitude *((uint32_t *)rowptr) = entryfast->values; // Skip the appropriate distance rowptr = &rowptr[(entryfast->pre_post_skip >> 12) & 0x7]; // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state entryfast = (FSMENTRYFAST *)GetFSMTableEntry(fsm, index); #if (0 && DEBUG) //DebugOutputFSMEntryFast(fsm, index, entryfast); PrintFSMEntryFast(fsm, index, entryfast, logfile); #endif // set the pointer to the next state UpdateFSM(fsm, (int)entryfast->next_state); // Skip the decoded zero runs rowptr = &rowptr[entryfast->pre_post_skip & 0x1ff]; // Write down the first decoded magnitude *((uint32_t *)rowptr) = entryfast->values; // Skip the decoded zero runs rowptr = &rowptr[(entryfast->pre_post_skip >> 12) & 0x7]; } offset = CurrentWord - startCurrentWord; stream->lpCurrentWord += offset; stream->nWordsUsed -= (int)offset; // Decode runs and magnitude values until the entire band is decoded #if ERROR_TOLERANT while(bandendptr >= rowptr) #else for (;;) #endif { #if (0 && DEBUG) if (!(rowptr < bandendptr)) { return true; } #endif #if (0 && DEBUG) PrintBitstreamPosition(stream, logfile); #endif 
// Read a byte from the bitstream #if ERROR_TOLERANT if(stream->nWordsUsed > 0) { byte = GetFastByte(stream); } else { break; } #else byte = GetFastByte(stream); #endif // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); #if (0 && DEBUG) //DebugOutputFSMEntry(fsm, index, entry); PrintFSMEntry(fsm, index, entry, logfile); #endif #if _FSMBUFFER memcpy(&buffer, entry, sizeof(FSMENTRY)); entry = &buffer; #endif // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { assert(rowptr <= bandendptr); ResetFSM(fsm); return true; } // Set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_post_skip & 0x1ff]; // max zero run is 320 pre-skip // Write down the first decoded magnitude if ((value = entry->value0)) { #if ERROR_TOLERANT if (bandendptr > rowptr) #endif rowptr[0] = value;//SATURATE(value); } // Write down the second decoded magnitude if ((value = entry->value1)) { #if ERROR_TOLERANT if (bandendptr > rowptr+1) #endif rowptr[1] = value;//SATURATE(value); } // Skip the appropriate distance rowptr = &rowptr[(entry->pre_post_skip >> 12) & 0x7];// max zero post-skip 4 (nibble of zeros in the FSM) // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); #if (0 && DEBUG) //DebugOutputFSMEntry(fsm, index, entry); PrintFSMEntry(fsm, index, entry, logfile); #endif #if _FSMBUFFER memcpy(&buffer, entry, sizeof(FSMENTRY)); entry = &buffer; #endif // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { assert(rowptr <= bandendptr); ResetFSM(fsm); return true; } // set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_post_skip & 0x1ff]; // max zero run is 320 pre-skip // Write 
down the first decoded magnitude if ((value = entry->value0)) { #if ERROR_TOLERANT if (bandendptr > rowptr) #endif rowptr[0] = value;//SATURATE(value); } // Write down the second decoded magnitude if ((value = entry->value1)) { #if ERROR_TOLERANT if (bandendptr > rowptr+1) #endif rowptr[1] = value;//SATURATE(value); } // Skip the decoded zero runs rowptr = &rowptr[(entry->pre_post_skip >> 12) & 0x7];// max zero post-skip 4 (nibble of zeros in the FSM) } #if ERROR_TOLERANT // Reset the decoder ResetFSM(fsm); // Backup the bitstream to the beginning of the band stream->lpCurrentWord = startCurrentWord; stream->nWordsUsed = startWordsUsed; #if 0 AlignBitsTag(stream); // Read the debugging marker { TAGVALUE segment; do { segment = GetTagValue(stream); } while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER); stream->lpCurrentWord -= 4; stream->nWordsUsed += 4; } #else SkipSubband(stream); #endif #endif return true; } bool DecodeBandFSM16sNoGapWithPeaks(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, PIXEL *peaks, int level, int quant) { int index, byte; FSMENTRY *entry; PIXEL16S *rowptr = image; PIXEL16S *bandendptr; PIXEL16S *fastendptr; int32_t value; uint8_t *startCurrentWord = stream->lpCurrentWord; uint8_t *CurrentWord = stream->lpCurrentWord; int32_t startWordsUsed = stream->nWordsUsed; #if _FSMBUFFER __declspec(align(32)) FSMENTRY buffer; #endif // Reset the decoder ResetFSM(fsm); //This is been called with non-prequantized FSM if(quant>1) level /= quant; pitch /= sizeof(PIXEL16S); // Zero out the entire subband ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S)); // This Huffman decoder assumes each byte is processed as two 4-bit chunks assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE); assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); bandendptr = rowptr + height * pitch; #if 0 // test for errors. 
{ if((rand() % 10) == 1) stream->lpCurrentWord[rand()%50] ^= 1; }	// randomly corrupt the bitstream (error-tolerance testing only; compiled out)
#endif

	// Fast path stops short of the band end so the unchecked writes below
	// cannot overrun the subband buffer.
	// NOTE(review): the pre-skip mask below is 0xfff (up to 4095 pixels) but
	// the margin here is only 1000 -- confirm the encoder never emits a run
	// longer than this, or the fast loop can write past bandendptr.
	fastendptr = bandendptr;
	fastendptr -= 1000;

	// Decode runs and magnitude values until the entire band is decoded
	while(rowptr < fastendptr)
	{
		// Read a byte from the bitstream
		byte = *CurrentWord++;

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// Set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs (low 12 bits of the packed skip field)
		rowptr = &rowptr[entry->pre_post_skip & 0xfff];

		// Write down the first decoded magnitude; magnitudes above the peak
		// level are replaced by the next value from the peaks table
		value = entry->value0;
		if(abs(value) > level)
			rowptr[0] = *peaks++ / quant;
		else
			rowptr[0] = value;//SATURATE(value);

		value = entry->value1;
		rowptr[1] = value;//SATURATE(value);

		// Skip the appropriate distance (high bits of the packed skip field)
		rowptr = &rowptr[entry->pre_post_skip >> 12];

		// decode the second 4-bit chunk
		index = byte & ((1<<FSM_INDEX_SIZE)-1);

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip & 0xfff];

		// Write down the first decoded magnitude
		value = entry->value0;
		if(abs(value) > level)
			rowptr[0] = *peaks++ / quant;
		else
			rowptr[0] = value;//SATURATE(value);

		value = entry->value1;
		rowptr[1] = value;//SATURATE(value);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip >> 12];
	}

	// The fast path read bytes through CurrentWord directly; fold the
	// consumed byte count back into the bitstream state
	stream->lpCurrentWord += ((intptr_t)CurrentWord - (intptr_t)startCurrentWord);
	stream->nWordsUsed -= (int)(((intptr_t)CurrentWord - (intptr_t)startCurrentWord));

	// Decode runs and magnitude values until the entire band is decoded
#if ERROR_TOLERANT
	while(((intptr_t)bandendptr - (intptr_t)rowptr) >= 0)
#else
	for (;;)
#endif
	{
#if (0 && DEBUG)
		if (!(rowptr < bandendptr)) {
			return true;
		}
#endif

		// Read a byte from the bitstream
#if ERROR_TOLERANT
		if(stream->nWordsUsed)
		{
			byte = GetFastByte(stream);
		}
		else
		{
break;	// bitstream exhausted before the band-end trailer; recover below
		}
#else
		byte = GetFastByte(stream);
#endif

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
		// Copy the table entry into an aligned local buffer before use
		memcpy(&buffer, entry, sizeof(FSMENTRY));
		entry = &buffer;
#endif

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER) {
			assert(rowptr <= bandendptr);
			ResetFSM(fsm);
			return true;
		}

		// Set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs (low 12 bits of the packed skip field)
		rowptr = &rowptr[entry->pre_post_skip & 0xfff];

		// Write down the first decoded magnitude; magnitudes above the peak
		// level are replaced by the next value from the peaks table
		value = entry->value0;
		if(abs(value) > level)
			rowptr[0] = *peaks++ / quant;
		else
			rowptr[0] = value;//SATURATE(value);

		// Write down the second decoded magnitude
		value = entry->value1;
		rowptr[1] = value;//SATURATE(value);

		// Skip the appropriate distance
		rowptr = &rowptr[entry->pre_post_skip >> 12];

		// decode the second 4-bit chunk
		index = byte & ((1<<FSM_INDEX_SIZE)-1);

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
		memcpy(&buffer, entry, sizeof(FSMENTRY));
		entry = &buffer;
#endif

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER) {
			assert(rowptr <= bandendptr);
			ResetFSM(fsm);
			return true;
		}

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip & 0xfff];

		// Write down the first decoded magnitude
		value = entry->value0;
		if(abs(value) > level)
			rowptr[0] = *peaks++ / quant;
		else
			rowptr[0] = value;//SATURATE(value);

		// Write down the second decoded magnitude
		value = entry->value1;
		rowptr[1] = value;//SATURATE(value);

		// Skip the decoded zero runs
		rowptr = &rowptr[entry->pre_post_skip >> 12];
	}

#if ERROR_TOLERANT
	// Reached only when the loop above exited without decoding the band-end
	// trailer (bitstream exhausted or output filled): reset, rewind to the
	// start of the band, and skip over the damaged subband.

	// Reset the decoder
	ResetFSM(fsm);

	// Backup the bitstream to the beginning of the band
	stream->lpCurrentWord = startCurrentWord;
	stream->nWordsUsed = startWordsUsed;

#if 0
AlignBitsTag(stream); // Read the debugging marker { TAGVALUE segment; do { segment = GetTagValue(stream); } while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER); stream->lpCurrentWord -= 4; stream->nWordsUsed += 4; } #else SkipSubband(stream); #endif #endif return true; } // This version of DecodeBandFSM() assumes that the gap between width and pitch has been coded as // zero runs. Therefore decoded magnitude values can be written down without the need to check // if the end of a row has been reached. Hence the total number of conditionals in DecodeBandFSM // can be significantly reduced. // Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps // Original version that does not use a separate buffer for decoding #if !_INDIVIDUAL_ENTRY #if 0 //dan20041030 not used bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch) { int index, byte; FSMENTRY *entry; PIXEL8S *rowptr = image; PIXEL8S *bandendptr; int32_t value; #if _FSMBUFFER __declspec(align(32)) FSMENTRY buffer; #endif pitch /= sizeof(PIXEL8S); // Zero out the entire subband ZeroHighPassRow((PIXEL *)rowptr, pitch*height); // This version of Huffman decoder assumes that one byte // is processed as two 4-bit chunks assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2); assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); bandendptr = rowptr + height * pitch; // Decode runs and magnitude values until the entire band is decoded //while (rowptr < bandendptr) for (;;) { #if (0 && DEBUG) if (!(rowptr < bandendptr)) { return true; } #endif // Check that the decoder has not overrun the output array //assert(rowptr < bandendptr); // Read a byte from the bitstream byte = GetFastByte(stream); // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); #if _FSMBUFFER memcpy(&buffer, entry, sizeof(FSMENTRY)); entry = &buffer; #endif #if 1 // Return if the 
subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { assert(rowptr <= bandendptr); ResetFSM(fsm); return true; } #endif // Set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_skip]; // Write down the first decoded magnitude value = entry->value0; rowptr[0] = SATURATE(value); // Write down the second decoded magnitude value = entry->value1; rowptr[1] = SATURATE(value); // Skip the appropriate distance rowptr = &rowptr[entry->post_skip]; // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); #if _FSMBUFFER memcpy(&buffer, entry, sizeof(FSMENTRY)); entry = &buffer; #endif #if 1 // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { assert(rowptr <= bandendptr); ResetFSM(fsm); return true; } #endif // set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_skip]; // Write down the first decoded magnitude value = entry->value0; rowptr[0] = SATURATE(value); // Write down the second decoded magnitude value = entry->value1; rowptr[1] = SATURATE(value); // Skip the decoded zero runs rowptr = &rowptr[entry->post_skip]; } } #endif #elif _SINGLE_FSM_TABLE bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch) { int index, byte, i; FSMENTRY *entry,*firstentry = fsm->table->firstentry; PIXEL8S *rowptr = image; PIXEL8S *bandendptr; int32_t value; pitch /= sizeof(PIXEL8S); // Zero out the entire subband ZeroHighPassRow((PIXEL *)rowptr, pitch*height); // The Huffman decoder assumes each byte is processed as two 4-bit chunks assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE); assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); // Decode runs and magnitude values until the entire band is decoded for (;;) { // Check that the decoder 
has not overrun the output array //assert(rowptr < bandendptr); // Read a byte from the bitstream byte = GetFastByte(stream); // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state i = (fsm->next_state_index << FSM_INDEX_SIZE) | index;//DAN entry = firstentry+i; //DAN // Return if the subband is decoded completely if(entry->value0 == BAND_END_TRAILER) { assert(rowptr <= bandendptr); ResetFSMIndividual(fsm); return true; } // set the pointer to the next state UpdateFSMIndividual(fsm, (entry->next_state)); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_skip]; // Write down the first decoded magnitude value = entry->value0; rowptr[0] = SATURATE(value); // Write down the second decoded magnitude value = entry->value1; rowptr[1] = SATURATE(value); // Skip the appropriate distance rowptr = &rowptr[entry->post_skip]; // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state i = (fsm->next_state_index << FSM_INDEX_SIZE) | index;//DAN entry = firstentry+i; //DAN // Return if the subband is decoded completely if(entry->value0 == BAND_END_TRAILER) { assert(rowptr <= bandendptr); ResetFSMIndividual(fsm); return true; } // set the pointer to the next state UpdateFSMIndividual(fsm, (entry->next_state)); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_skip]; // Write down the first decoded magnitude value = entry->value0; rowptr[0] = SATURATE(value); // Write down the second decoded magnitude value = entry->value1; rowptr[1] = SATURATE(value); // Skip the decoded zero runs rowptr = &rowptr[entry->post_skip]; } } #else bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch) { int index, byte; FSMENTRY *entry; PIXEL8S *rowptr = image; PIXEL8S *bandendptr; int32_t value; #if 1 __declspec(align(4)) FSMENTRY buffer; #endif pitch /= sizeof(PIXEL8S); // zero out the entire subband ZeroHighPassRow((PIXEL 
*)rowptr, pitch*height); // The Huffman decoder assumes each byte is processed as two 4-bit chunks assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE); assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); bandendptr = rowptr + height * pitch; // Decode runs and magnitude values until the entire band is decoded for (;;) { #if (0 && DEBUG) if (!(rowptr < bandendptr)) { return true; } #endif // Read a byte from the bitstream byte = GetFastByte(stream); // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entry = GetFSMTableEntryIndividual(fsm, index); // Return if the subband is decoded completely if(entry == NULL) { assert(rowptr <= bandendptr); ResetFSMIndividual(fsm); return true; } // Set the pointer to the next state UpdateFSMIndividual(fsm, (entry->next_state)); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_skip]; // Write down the first decoded magnitude value = entry->value0; rowptr[0] = SATURATE(value); // Write down the second decoded magnitude value = entry->value1; rowptr[1] = SATURATE(value); // Skip the appropriate distance rowptr = &rowptr[entry->post_skip]; // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state entry = GetFSMTableEntryIndividual(fsm, index); // Return if the subband is decoded completely if (entry == NULL) { assert(rowptr <= bandendptr); ResetFSMIndividual(fsm); return true; } // Set the pointer to the next state UpdateFSMIndividual(fsm, (entry->next_state)); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_skip]; // Write down the first decoded magnitude value = entry->value0; rowptr[0] = SATURATE(value); // Write down the second decoded magnitude value = entry->value1; rowptr[1] = SATURATE(value); // Skip the decoded zero runs rowptr = &rowptr[entry->post_skip]; } } #endif // Decode the highpass band coefficients but do not write them out - used in SIF mode bool SkipBandFSM(FSM *fsm, BITSTREAM 
*stream, PIXEL8S *image, int width, int height, int pitch)
{
	// Step the FSM through the bitstream to consume the band without
	// writing any coefficients out (used in SIF mode).
	int index, byte;
	FSMENTRY *entry;

	pitch /= sizeof(PIXEL8S);	// NOTE(review): pitch (and image/width/height) are not used below -- confirm intentional

	// The Huffman decoder assumes each byte is processed as two 4-bit chunks
	assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
	assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

	// Decode runs and magnitude values until the entire band is decoded
	for (;;)
	{
		// Read a byte from the bitstream
		byte = GetFastByte(stream);

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER) {
			ResetFSM(fsm);
			return true;
		}

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// decode the second 4-bit chunk
		index = byte & ((1<<FSM_INDEX_SIZE)-1);

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER) {
			ResetFSM(fsm);
			return true;
		}

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);
	}
}

#if _TIMING
extern TIMER tk_fastruns;
#endif

#if 0 //dan20041030 not used
// New version of coefficient runs decoder that uses a finite state machine with a scaling factor
bool DecodeFastRunsFSM8s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet,
						 int band_index, int width, int height)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
	FILE *logfile = decoder->logfile;
	int result;

	// Get the pointer to the finite state machine
	FSM *fsm = &decoder->fsm[decoder->codec.active_codebook]; //DAN20041026

	// All rows are treated as one long row that covers the entire band
	int size = fsm->table.num_states;
	PIXEL *rowptr;
	int row = 0;
	int pitch;
	int pixel_type = wavelet->pixel_type[band_index];

	decoder->codec.active_codebook = 0; // reset CODEC state

	// Must have a valid wavelet
	assert(wavelet != NULL);
	if (wavelet == NULL)
		return false;

	//Must have a valid FSM
assert(fsm != NULL); if(fsm == NULL) return false; assert(size > 0); if (size == 0) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } // Check if the band is intended for 8-bit pixels assert(pixel_type == PIXEL_TYPE_8S); START(tk_fastruns); rowptr = (PIXEL *)wavelet->band[band_index]; pitch = wavelet->pitch8s; // Use the 8-bit pitch //pitch = wavelet->pitch; // The finite state machine does not support a marker at the end of rows #if RUNS_ROWEND_MARKER assert(0); #endif // Get one byte from the bitstream and decode 4 bits at a time result = DecodeBandFSM8sNoGap(fsm, stream, (PIXEL8S *)rowptr, width, height, pitch); assert(result == true); if (result != true) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } #if (0 && DEBUG && _WIN32) _CrtCheckMemory(); #endif #if (0 && DEBUG) if (logfile) DumpBand("Band", wavelet, band_index, NULL, logfile); #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "DecodeFastRunsFSM8s, band index: %d\n", band_index); DumpWaveletRow(wavelet, band_index, 0, logfile); } #endif end: STOP(tk_fastruns); return true; } #endif #if _DEQUANTIZE_IN_FSM void ReQuantFSM(FSM *fsm, int quant) { int count = 0; int i, j; short *restore = &fsm->restoreFSM[0]; #if !_INDIVIDUAL_ENTRY for (i = 0; i < fsm->table.num_states; i++) { FSMENTRY *entry = fsm->table.entries[i]; for (j = 0; j < (1 << FSM_INDEX_SIZE); j++) { entry[j].value0 = restore[count++]; entry[j].value1 = restore[count++]; } } #else for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++) { FSMENTRY *entry = fsm_table.entries_ind[i]; if(entry) { entry->value0 = restore[count++]; entry->value1 = restore[count++]; } } #endif } void DeQuantFSM(FSM *fsm, int quant) { int i, j; if(fsm->LastQuant > 1 && fsm->LastQuant != quant) { ReQuantFSM(fsm, fsm->LastQuant); } else if(fsm->LastQuant == quant) { return; } if(fsm->InitizedRestore == 0) { short *restore = &fsm->restoreFSM[0]; int count = 0; #if !_INDIVIDUAL_ENTRY for (i = 0; i < fsm->table.num_states; i++) { FSMENTRY 
*entry = fsm->table.entries[i]; for (j = 0; j < (1 << FSM_INDEX_SIZE); j++) { restore[count++] = entry[j].value0; restore[count++] = entry[j].value1; } } #else for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++) { FSMENTRY *entry = fsm->table.entries_ind[i]; if(entry) { restore[count++] = entry->value0; restore[count++] = entry->value1; } } #endif fsm->InitizedRestore = 1; } #if !_INDIVIDUAL_ENTRY for (i = 0; i < fsm->table.num_states; i++) { FSMENTRY *entry = fsm->table.entries[i]; for (j = 0; j < (1 << FSM_INDEX_SIZE); j++) { if(entry[j].value0 < 0x7ff0) // band end trailer entry[j].value0 *= quant; entry[j].value1 *= quant; } } #else for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++) { FSMENTRY *entry = fsm->table.entries_ind[i]; if(entry) { if(entry->value0 < 0x7ff0) // band end trailer etc entry->value0 *= quant; entry->value1 *= quant; } } #endif fsm->LastQuant = quant; } #endif // _DEQUANTIZE_IN_FSM // New version of coefficient runs decoder that uses a finite state machine with a scaling factor //dan 7-11-03 bool DecodeFastRunsFSM16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height, int threading) { //CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif int result = true; int quant = wavelet->quantization[band_index]; int active_codebook = decoder->codec.active_codebook; // Get the pointer to the finite state machine FSM *fsm = &decoder->fsm[active_codebook]; int size; PIXEL *rowptr; //int row = 0; int pitch; CODEC_STATE *codec = &decoder->codec; //int channel = codec->channel; //int subband = codec->band.subband; //int num_subbands = codec->num_subbands; //int pixel_type = wavelet->pixel_type[band_index]; int difference_coding = decoder->codec.difference_coding; //int localquant = 1; int peaklevel = 0; //int peaksize = 0; PIXEL *peakbase = NULL; #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Subband: %d, active_codebook: %d, difference_coding: 
%d\n", subband, decoder->codec.active_codebook, difference_coding); } #endif decoder->codec.active_codebook = 0; // reset CODEC state decoder->codec.difference_coding = 0; //reset state for next subband // Must have a valid wavelet //assert(wavelet != NULL); if (wavelet == NULL) return false; //Must have a valid FSM //assert(fsm != NULL); if(fsm == NULL) return false; if(width==0 || height == 0) return false; if (fsm->InitizedRestore != 0 && fsm->InitizedRestore != 1) return false; // All rows are treated as one long row that covers the entire band size = fsm->table.num_states; //assert(size > 0); if (size == 0) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } // Check if the band is intended for 8-bit pixels //assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_16S); if(wavelet->pixel_type[band_index] != PIXEL_TYPE_16S) { decoder->error = CODEC_ERROR_BAD_FRAME; return false; } START(tk_fastruns); rowptr = (PIXEL *)wavelet->band[band_index]; if (rowptr == NULL) { decoder->error = CODEC_ERROR_BAD_FRAME; return false; } //pitch = wavelet->pitch8s; // Use the 8-bit pitch pitch = wavelet->pitch; peaklevel = codec->peak_table.level; peakbase = codec->peak_table.base; #if _THREADED threading = decoder->entropy_worker_new.pool.thread_count > 1 ? 
threading : 0; if(threading) { decoder->entropy_worker_new.threads_used = 1; { //int start = stream->nWordsUsed; int end; struct entropy_data_new *data; int next_queue_num = decoder->entropy_worker_new.next_queue_num++; data = &decoder->entropy_worker_new.entropy_data[next_queue_num]; memcpy(&data->stream,stream, sizeof(BITSTREAM)); data->rowptr = rowptr; data->width = width; data->height = height; data->pitch = pitch; data->peaks = peakbase; data->level = peaklevel; data->quant = quant; data->wavelet = wavelet; data->band_index = band_index; data->active_codebook = active_codebook; data->difference_coding = difference_coding; // Start only a particular threadid if(next_queue_num == 0) { ThreadPoolSetWorkCount(&decoder->entropy_worker_new.pool, 1); #if _DELAYED_THREAD_START==0 ThreadPoolSendMessage(&decoder->entropy_worker_new.pool, THREAD_MESSAGE_START); #endif } else { // Set the work count to the number of rows to process ThreadPoolAddWorkCount(&decoder->entropy_worker_new.pool, 1); } { unsigned short tag = *(stream->lpCurrentWord-8) << 8; if(tag == (unsigned short)OPTIONALTAG(CODEC_TAG_SUBBAND_SIZE)) { int chunksize; int value = *(stream->lpCurrentWord-6) << 8; value |= *(stream->lpCurrentWord-5); tag |= *(stream->lpCurrentWord-7); tag = NEG(tag); chunksize = value; chunksize &= 0xffff; chunksize += ((tag&0xff)<<16); chunksize *= 4; chunksize -= 8; { uint32_t *ptr = (uint32_t *)stream->lpCurrentWord; ptr += (chunksize>>2); if(*ptr != 0x00003800) // bandend { goto continuesearch; } } stream->lpCurrentWord += chunksize; stream->nWordsUsed -= chunksize; end = stream->nWordsUsed; } else { continuesearch: while(*((uint32_t *)stream->lpCurrentWord) != 0x00003800) // bandend { stream->lpCurrentWord += 4; stream->nWordsUsed -= 4; } end = stream->nWordsUsed; } } } } else #endif // _THREADED { DeQuantFSM(fsm, quant); if (peaklevel && peakbase) { result = DecodeBandFSM16sNoGapWithPeaks(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, peakbase, peaklevel, 1); } else 
{ #if _DEBUG result = DecodeBandFSM16sNoGap(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, logfile); #else result = DecodeBandFSM16sNoGap(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch); #endif } if(difference_coding) { int x,y; PIXEL *line = rowptr; for(y=0;y<height;y++) { for(x=1;x<width;x++) { line[x] += line[x-1]; } line += pitch/2; } } if (result) { // Call thread safe routine to update the band valid flags UpdateWaveletBandValidFlags(decoder, wavelet, band_index); } } //assert(result == true); if (result != true) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } //end: STOP(tk_fastruns); return true; } bool SkipFastRunsFSM(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height) { //CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif int result; // Get the pointer to the finite state machine FSM *fsm = &decoder->fsm[decoder->codec.active_codebook]; //DAN20041026 // All rows are treated as one long row that covers the entire band int size = fsm->table.num_states; PIXEL *rowptr; //int row = 0; int pitch; //int pixel_type = wavelet->pixel_type[band_index]; decoder->codec.active_codebook = 0; // reset CODEC state // Must have a valid wavelet assert(wavelet != NULL); if (wavelet == NULL) return false; //Must have a valid FSM assert(fsm != NULL); if(fsm == NULL) return false; assert(size > 0); if (size == 0) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } // Check if the band is 8bit/pixel assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_8S); START(tk_fastruns); rowptr = (PIXEL *)wavelet->band[band_index]; pitch = wavelet->pitch8s; // Use the 8-bit pitch // The finite state machine does not support a marker at the end of rows #if RUNS_ROWEND_MARKER assert(0); #endif #if 1 // Get one byte from the bitstream and decode 4 bits at a time result = SkipBandFSM(fsm, stream, (PIXEL8S *)rowptr, width, height, pitch); assert(result == true); if (result != 
true) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } #endif #if (0 && DEBUG && _WIN32) _CrtCheckMemory(); #endif #if (0 && DEBUG) if (logfile) DumpBand("Band", wavelet, band_index, NULL, logfile); #endif //end: STOP(tk_fastruns); return true; } // The third version is also based on the finite state machine decoder with // gaps between rows encoded as zero runs, but dequantization is performed as // the highpass values are read from the bitstream and placed into a row buffer. // The highpass values are not written into the wavelet highpass band. // Eventually this routine will be merged into the routine DecodeTemporalBand8s // since this routine contains code specific to the inverse temporal transform // and DecodeTemporalBand8s has become a shell. #if 0 bool DecodeBandRunsFSM8s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height, IMAGE *frame0, IMAGE *frame1) { CODEC_ERROR error = CODEC_ERROR_OKAY; FILE *logfile = decoder->logfile; int result; // Get the pointer to the finite state machine FSM *fsm = &decoder->fsm; // All rows are treated as one long row that covers the entire band int size = fsm->table.num_states; PIXEL *lowpass = wavelet->band[0]; int lowpass_pitch = wavelet->pitch; //PIXEL8S *rowptr; int row = 0; int pitch; int row_width; // Width of the encoded row of highpass coefficients PIXEL *even = frame0->band[0]; PIXEL *odd = frame1->band[0]; int even_pitch = frame0->pitch; int odd_pitch = frame1->pitch; int pixel_type = wavelet->pixel_type[band_index]; int quantization = wavelet->quantization[band_index]; PIXEL *buffer; size_t buffer_size; int index, byte; FSMENTRY *entry; int column = 0; int32_t value; int buffer_row_size; PIXEL *highpass; // Check that the wavelet into which the band will be decoded is valid assert(wavelet != NULL); if (wavelet == NULL) return false; // Check that the finite state machine is valid assert(fsm != NULL); if (fsm == NULL) return false; assert(size > 0); if (size == 0) { 
decoder->error = CODEC_ERROR_RUN_DECODE; return false; } // Check that the band was encoded using 8-bit signed coefficients assert(pixel_type == PIXEL_TYPE_8S); pitch = wavelet->pitch8s; // Use the pitch for 8-bit packed rows // Get the buffer for storing one row of dequantized highpass coefficients buffer = (PIXEL *)decoder->buffer; buffer_size = decoder->buffer_size; // The finite state machine does not support a marker at the end of each row assert(RUNS_ROWEND_MARKER == 0); /***** Start of code included from DecodeBandFSM8s() *****/ // Check that one byte can be processes as two 4-bit nibbles assert(BITSTREAM_WORD_SIZE == (2 * FSM_INDEX_SIZE)); // Check that the bitstream buffer is empty assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); // Convert the pitch to units of pixels pitch /= sizeof(PIXEL8S); buffer_row_size = pitch * sizeof(PIXEL); lowpass_pitch /= sizeof(PIXEL); even_pitch /= sizeof(PIXEL); odd_pitch /= sizeof(PIXEL); // Compute the address of the row after the last row in the band //maxptr = rowptr + height * pitch; // Round up the row length (in bytes) to a multiple of 16 bytes //row_size = ALIGN16(row_size); // Check that the buffer is large enough to hold one row //assert(buffer_size >= row_size); assert(buffer_size >= buffer_row_size); // Use the buffer for the row or highpass coefficients highpass = buffer; #if 1 // The row spans the allocated width (pitch) of the band in no gap mode row_width = pitch; #else // For debugging row_width = wavelet->encoded_pitch/sizeof(PIXEL8S); #endif // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); // Decode zero runs and magnitude values (with appended sign bit) // until the marker for the band end trailer has been decoded for (;;) { // Read a byte from the bitstream byte = GetFastByte(stream); /***** Decode the first 4-bit nibble *****/ // Decode the first 4-bit nibble index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state 
entry = GetFSMTableEntry(fsm, index); // Return when the entire band is decoded if (entry->value0 == BAND_END_TRAILER) { // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Process the rest of the subband ZeroHighPassRow(highpass, buffer_row_size); while (++row < height) { // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; } ResetFSM(fsm); return true; } // set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // If no magnitude value is decoded if (entry->value0 == 0) { column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? 
while (column >= row_width) { // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Compute the starting column for the next row column -= row_width; // Advance to the next row row++; // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); } } // If there is only one decoded magnitude value else if (entry->value1 == 0) { value = entry->value0; column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= row_width) { // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Compute the starting column for the next row column -= row_width; // Advance to the next row row++; // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); } // Fill in the decoded magnitude // Check the column before storing the value assert(0 <= column && column < row_width); // Dequantize the value and store it in the highpass row buffer highpass[column] = quantization * value; column += entry->post_skip; // Did the scan go beyond the end of the row? 
if (column >= row_width) { // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Compute the starting column for the next row column -= row_width; // Advance to the next row row++; // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); } } // If there are two decoded magnitude values else { // Check the column before storing values assert(0 <= column && column < row_width); if (column < (row_width - 1)) { // Store both values in the current row highpass[column++] = quantization * entry->value0; highpass[column++] = quantization * entry->value1; } else { value = entry->value0; highpass[column] = quantization * value; value = entry->value1; // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Advance to the next row row++; // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); column = 0; highpass[column++] = quantization * value; } } /***** Decode the second 4-bit nibble *****/ // Decode the second 4-bit nibble index = byte & FSM_INDEX_MASK; // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the 
inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Process the rest of the subband ZeroHighPassRow(highpass, buffer_row_size); while (++row < height) { // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; } ResetFSM(fsm); return true; } // Set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // If no magnitude value is decoded if (entry->value0 == 0) { column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= row_width) { // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Compute the starting column for the next row column -= row_width; // Advance to the next row row++; // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); } } // If there is only one decoded magnitude value else if (entry->value1 == 0) { value = entry->value0; column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? 
while (column >= row_width) { // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Compute the starting column for the next row column -= row_width; // Advance to the next row row++; // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); } // Fill in the decoded magnitude // Check the column before storing the value //assert(index < width); assert(0 <= column && column < row_width); highpass[column] = quantization * value; column += entry->post_skip; // Did the scan go beyond the end of the row? if (column >= row_width) { // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Compute the starting column for the next row column -= row_width; // Advance to the next row row++; // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); } } // If there are two decoded magnitude values else { // Check the column before storing values assert(0 <= column && column < row_width); if (column < (row_width - 1)) { // Store both highpass values in the current row highpass[column++] = quantization * entry->value0; highpass[column++] = quantization * entry->value1; } else { highpass[column] = quantization * entry->value0; value = entry->value1; // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, 
highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Advance to the next row row++; // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); column = 0; highpass[column++] = quantization * value; } } } /***** End of the code included from DecodeBandFSM8s() *****/ #if 0 assert(result == true); if (result != true) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } #endif #if (0 && DEBUG && _WIN32) _CrtCheckMemory(); #endif #if (0 && DEBUG) if (logfile) DumpBand("Band", wavelet, band_index, NULL, logfile); #endif #if 0 end: return true; #endif } #endif /***** End of the code for the finite state machine decoder *****/ #if 1 // The second version applies the horizontal inverse filters row by row, so the // memory access pattern is more efficient. The lowpass and highpass temporal // coefficients for each row are inverted and packed into the output in one pass. 
// Apply the inverse horizontal-temporal transform and pack the output into a buffer
//
// For each pair of output rows (one interlaced field pair), this routine first
// inverts the horizontal transform on the temporal lowpass and highpass bands of
// every channel into per-channel scratch rows, then inverts the interlaced
// temporal transform across all channels, packing the result directly into the
// output frame in YUYV or UYVY order.
//
// Parameters:
//   transform     - per-channel transform state; wavelet[frame_index] supplies the four bands
//   frame_index   - which wavelet in each channel's transform to invert
//   num_channels  - number of channels (must be <= TRANSFORM_MAX_CHANNELS)
//   output        - packed 8-bit output frame (two field rows written per iteration)
//   output_pitch  - pitch of one output row in bytes
//   frame         - output dimensions and packed pixel format
//   scratch       - scratch space; used for 2 temporal rows per channel plus a line buffer
//   chroma_offset - offset applied to chroma during packing
//   precision     - CODEC_PRECISION_10BIT or CODEC_PRECISION_8BIT
void TransformInverseFrameToYUV(TRANSFORM *transform[], int frame_index, int num_channels,
								uint8_t *output, int output_pitch, FRAME_INFO *frame,
								const SCRATCH *scratch, int chroma_offset, int precision)
{
	// Pointers to the rows in the horizontal wavelet for each channel
	PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

	// Horizontal wavelet band width and pitch
	int horizontal_width[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];

	// Quantization factors
	int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
	int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
	int highlow_quantization[TRANSFORM_MAX_CHANNELS];
	int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

	// Pointers to the rows in the temporal wavelet for each channel
	PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
	PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];

	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
#if DEBUG
	size_t buffer_size = scratch->free_size;
#endif

	// Dimensions of the reconstructed frame
	int frame_width = frame->width;
	int frame_height = frame->height;
	// Each loop iteration emits two output rows (one per field)
	int half_height = frame_height / 2;

	size_t temporal_row_size = frame_width * sizeof(PIXEL);
	int field_pitch = 2 * output_pitch;
	int output_width;
	int channel;
	int row;

	// Round up the temporal row size to an integral number of cache lines
	temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

	// Check that the buffer starts on a cache line boundary
	assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

	// Check that the number of channels is reasonable
	assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

	// Check that the buffer is large enough
	// NOTE(review): this only covers the 2*num_channels temporal rows; the
	// line_buffer below starts at offset (2*num_channels + 2) rows and is not
	// covered by this assertion — confirm the scratch section is sized for it
#if DEBUG
	assert((2 * num_channels * temporal_row_size) <= buffer_size);
#endif

	// Allocate buffers for a single row of lowpass and highpass temporal coefficients
	// and initialize the arrays of row pointers into the horizontal transform bands
	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];

#if (0 && DEBUG)
		// Disabled debug code: dump the first 20 wavelets (and bands) as PGM images
		int static count = 0;
		if (count < 20)
		{
			char label[_MAX_PATH];
			int i;

			sprintf(label, "Frame%d-%d-", frame_index, count);
			DumpPGM(label, wavelet, NULL);

			for (i = 1; i < wavelet->num_bands; i++)
			{
				sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
				DumpBandPGM(label, wavelet, i, NULL);
			}
		}
		count++;
#endif

		// Initialize the row pointers into the horizontal bands
		horizontal_lowlow[channel] = wavelet->band[LL_BAND];
		horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
		horizontal_highlow[channel] = wavelet->band[HL_BAND];
		horizontal_highhigh[channel] = wavelet->band[HH_BAND];

		lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
		lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
		highlow_quantization[channel] = wavelet->quantization[HL_BAND];
		highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

		// Compute the pitch in units of pixels
		horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

		// Compute the 8-bit pitch in units of pixels
		horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
		//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);

		// Remember the width of the horizontal wavelet rows for this channel
		horizontal_width[channel] = wavelet->width;

		//TODO: Need to recode the buffer allocations using the scratch space API

		// Divide the buffer into temporal lowpass and highpass rows
		temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
		temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
	}

	// Process one row at a time from each channel
	for (row = 0; row < half_height; row++)
	{
		// Intermediate buffer for the inverse horizontal filters.
		// NOTE(review): the offset leaves a two-row gap after the temporal rows;
		// the threaded variant (TransformInverseFrameSectionToYUV) uses
		// 2 * num_channels with no gap — confirm which offset is intended.
		PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);

		// Invert the horizontal transform applied to the temporal bands in each channel
		for (channel = 0; channel < num_channels; channel++)
		{
			int pitch = horizontal_pitch[channel];
			//int pitch8s = horizontal_pitch8s[channel];

			// Invert the horizontal transform applied to the temporal lowpass row
			InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
												  (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
												  temporal_lowpass[channel], horizontal_width[channel],
												  (PIXEL *)line_buffer);

			// Invert the horizontal transform applied to the temporal highpass row
			//DAN20051004 -- possible reversiblity issue
			//InvertHorizontalRow8sBuffered //----------------------- Maybe bad
			InvertHorizontalRow16s8sTo16sBuffered(horizontal_highlow[channel], highlow_quantization[channel],
												  (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
												  temporal_highpass[channel], horizontal_width[channel],
												  (PIXEL *)line_buffer);

			// Advance to the next row in each horizontal band in this channel
			horizontal_lowlow[channel] += pitch;
			horizontal_lowhigh[channel] += pitch;
			horizontal_highlow[channel] += pitch;
			horizontal_highhigh[channel] += pitch;
		}

		// The output width is twice the width of the wavelet bands
		output_width = 2 * horizontal_width[0];

		// Adjust the frame width to fill to the end of each row
		//frame_width = output_pitch / 2;

		if (precision == CODEC_PRECISION_10BIT)
		{
			// Invert the temporal bands from all channels and pack output pixels
			switch (frame->format)
			{
			// Need to reduce the resolution from 10 bits to 8 bits during the inverse

			case DECODED_FORMAT_YUYV:
				InvertInterlacedRow16s10bitToYUV(temporal_lowpass, temporal_highpass, num_channels,
												 output, output_pitch, output_width, frame_width,
												 chroma_offset);
				break;

			case DECODED_FORMAT_UYVY:
				InvertInterlacedRow16s10bitToUYVY(temporal_lowpass, temporal_highpass, num_channels,
												  output, output_pitch, output_width, frame_width,
												  chroma_offset);
				break;

			default:
				assert(0);
				break;
			}
		}
		else	// Older code for 8-bit precision
		{
			// NOTE(review): this switch has no default case, so format is used
			// uninitialized if frame->format is neither YUYV nor UYVY — confirm
			// callers guarantee one of the two formats on this path.
			int format;

			assert(precision == CODEC_PRECISION_8BIT);

			switch (frame->format)
			{
			case DECODED_FORMAT_YUYV:
				format = COLOR_FORMAT_YUYV;
				break;
			case DECODED_FORMAT_UYVY:
				format = COLOR_FORMAT_UYVY;
				break;
			}

			// Invert the temporal bands from all channels and pack output pixels
			InvertInterlacedRow16sToYUV(temporal_lowpass, temporal_highpass, num_channels,
										output, output_pitch, output_width, frame_width,
										chroma_offset, format);
		}

		// Advance to the next row in the packed output image
		output += field_pitch;
	}
}
#endif


#if _INTERLACED_WORKER_THREADS

// Worker-thread variant of TransformInverseFrameToYUV: each thread claims output
// rows one at a time from a shared counter (guarded by a semaphore and critical
// section) and inverts the horizontal and interlaced temporal transforms for the
// rows it claims.  Row pointers are recomputed from the claimed row index, so
// rows can be processed in any order by any thread.
void TransformInverseFrameSectionToYUV(DECODER *decoder, int thread_index, int frame_index, int num_channels,
									   uint8_t *output, int output_pitch, FRAME_INFO *frame,
									   int chroma_offset, int precision)
{
	FILE *logfile = decoder->logfile;
	TRANSFORM **transform = decoder->transform;
	const SCRATCH *scratch = &decoder->scratch;

	// Pointers to the rows in the horizontal wavelet for each channel
	PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

	// Horizontal wavelet band width and pitch
	int horizontal_width[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];

	// Quantization factors
	int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
	int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
	int highlow_quantization[TRANSFORM_MAX_CHANNELS];
	int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

	// Pointers to the rows in the temporal wavelet for each channel
	PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
	PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];

	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
	size_t buffer_size = scratch->free_size;

	uint8_t *output_row_ptr = output;

	// Dimensions of the reconstructed frame
	int frame_width = frame->width;
	int frame_height = frame->height;
	int half_height = frame_height / 2;

	size_t temporal_row_size = frame_width * sizeof(PIXEL);
	int field_pitch = 2 * output_pitch;
	int output_width;
	int channel;
	int row;

	HANDLE row_semaphore = decoder->interlaced_worker.row_semaphore;
	int return_value;

	// Round up the temporal row size to an integral number of cache lines
	temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

	// Divide the buffer space between the four threads
	buffer_size /= 4;
	buffer += buffer_size * thread_index;

	// Check that the buffer starts on a cache line boundary
	assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

	// Check that the number of channels is reasonable
	assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

	// Check that the buffer is large enough
	assert((2 * num_channels * temporal_row_size) <= buffer_size);

	// Allocate buffers for a single row of lowpass and highpass temporal coefficients
	// and initialize the arrays of row pointers into the horizontal transform bands
	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];

#if (0 && DEBUG)
		// Disabled debug code: dump the first 20 wavelets (and bands) as PGM images
		int static count = 0;
		if (count < 20)
		{
			char label[_MAX_PATH];
			int i;

			sprintf(label, "Frame%d-%d-", frame_index, count);
			DumpPGM(label, wavelet, NULL);

			for (i = 1; i < wavelet->num_bands; i++)
			{
				sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
				DumpBandPGM(label, wavelet, i, NULL);
			}
		}
		count++;
#endif

		// Initialize the row pointers into the horizontal bands
		horizontal_lowlow[channel] = wavelet->band[LL_BAND];
		horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
		horizontal_highlow[channel] = wavelet->band[HL_BAND];
		horizontal_highhigh[channel] = wavelet->band[HH_BAND];

		lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
		lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
		highlow_quantization[channel] = wavelet->quantization[HL_BAND];
		highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

		// Compute the pitch in units of pixels
		horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

		// Compute the 8-bit pitch in units of pixels
		horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
		//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);

		// Remember the width of the horizontal wavelet rows for this channel
		horizontal_width[channel] = wavelet->width;

		//TODO: Need to recode the buffer allocations using the scratch space API

		// Divide the buffer into temporal lowpass and highpass rows
		temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
		temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
	}

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Output buffer: %d (0x%p)\n", output, output);
	}
#endif

	// Disabled code from an earlier scheme that statically split the rows
	// between two threads (top-down and bottom-up); superseded by the dynamic
	// row-claiming loop below.
	/*
	if (thread_index == 0)
	{
		row = 0;
		row_step = 1;
	}
	else if (thread_index == 1)
	{
		row = half_height - 1;
		row_step = -1;

		// Move to the bottom of the transform and process moving up
		for (channel = 0; channel < num_channels; channel++)
		{
			int offset = horizontal_pitch[channel] * (half_height - 1);

			horizontal_lowlow[channel] += offset;
			horizontal_lowhigh[channel] += offset;
			horizontal_highlow[channel] += offset;
			horizontal_highhigh[channel] += offset;

			horizontal_pitch[channel] = NEG(horizontal_pitch[channel]);
			horizontal_pitch8s[channel] = NEG(horizontal_pitch8s[channel]);
		}

		output += field_pitch * (half_height - 1);
		field_pitch = NEG(field_pitch);
	}
	else
	{
		assert(0);	// what about middle threads?
	}

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Thread index: %d, start row: %d, row step: %d, field_pitch: %d\n",
			thread_index, row, row_step, field_pitch);
	}
#endif
	*/

	// Loop until all of the rows have been processed
	for (;;)
	{
		// Wait for one row from each channel to invert the transform
		// (zero timeout: poll the semaphore; exit the loop when no row is available)
		return_value = WaitForSingleObject(row_semaphore, 0);

		// Determine the index of this worker thread
		if (return_value == WAIT_OBJECT_0)
		{
			// Claim the next row under the lock (when the lock was initialized)
			if(decoder->interlaced_worker.lock_init)
			{
				EnterCriticalSection(&decoder->interlaced_worker.lock);
			}
			row = decoder->interlaced_worker.current_row++;
			if(decoder->interlaced_worker.lock_init)
				LeaveCriticalSection(&decoder->interlaced_worker.lock);

			// Compute the output location for the claimed row pair
			output_row_ptr = output;
			output_row_ptr += row * 2 * output_pitch;

			// Recompute the band row pointers from the claimed row index
			for (channel = 0; channel < num_channels; channel++)
			{
				int pitch = horizontal_pitch[channel];
				IMAGE *wavelet = transform[channel]->wavelet[frame_index];

				horizontal_lowlow[channel] = wavelet->band[LL_BAND];
				horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
				horizontal_highlow[channel] = wavelet->band[HL_BAND];
				horizontal_highhigh[channel] = wavelet->band[HH_BAND];

				horizontal_lowlow[channel] += pitch*row;
				horizontal_lowhigh[channel] += pitch*row;
				horizontal_highlow[channel] += pitch*row;
				horizontal_highhigh[channel] += pitch*row;
			}
		}

		if (return_value == WAIT_OBJECT_0 && 0 <= row && row < half_height)
		{
			//PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);
			PIXEL *line_buffer = (PIXEL *)(buffer + 2 * num_channels * temporal_row_size);

			// assert(0 <= row && row < half_height);

#if (0 && DEBUG)
			// NOTE(review): dead code — the format string has four conversion
			// specifiers but only three arguments follow logfile; fix if ever enabled.
			if (logfile) {
				fprintf(logfile, "Processing row: %d, thread index: %d, output: %d (0x%p)\n",
					row, thread_index, output_row_ptr);
			}
#endif

			// Invert the horizontal transform applied to the temporal bands in each channel
			for (channel = 0; channel < num_channels; channel++)
			{
				int pitch = horizontal_pitch[channel];
				//int pitch8s = horizontal_pitch8s[channel];

#if (0 && DEBUG)
				// Invert the horizontal transform by duplicating the lowpass pixels
				InvertHorizontalRowDuplicated16s(horizontal_lowlow[channel], lowlow_quantization[channel],
												 (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
												 temporal_lowpass[channel], horizontal_width[channel],
												 (PIXEL *)line_buffer);
#else
				// Invert the horizontal transform applied to the temporal lowpass row
				InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
													  (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
													  temporal_lowpass[channel], horizontal_width[channel],
													  (PIXEL *)line_buffer);
#endif

				// Invert the horizontal transform applied to the temporal highpass row
				// NOTE(review): this uses InvertHorizontalRow8sBuffered while the
				// single-threaded version uses InvertHorizontalRow16s8sTo16sBuffered
				// (see the DAN20051004 reversibility note there) — confirm intended.
				InvertHorizontalRow8sBuffered((PIXEL8S *)horizontal_highlow[channel], highlow_quantization[channel],
											  (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
											  temporal_highpass[channel], horizontal_width[channel],
											  (PIXEL *)line_buffer);

				// Advance to the next row in each horizontal band in this channel
				// (not needed here: pointers are recomputed per claimed row above)
				//horizontal_lowlow[channel] += pitch;
				//horizontal_lowhigh[channel] += pitch;
				//horizontal_highlow[channel] += pitch;
				//horizontal_highhigh[channel] += pitch;
			}

			// The output width is twice the width of the wavelet bands
			output_width = 2 * horizontal_width[0];

			// Adjust the frame width to fill to the end of each row
			//frame_width = output_pitch / 2;

			if (precision == CODEC_PRECISION_10BIT)
			{
				// Invert the temporal bands from all channels and pack output pixels
				switch (frame->format)
				{
				// Need to reduce the resolution from 10 bits to 8 bits during the inverse

				case DECODED_FORMAT_YUYV:
					InvertInterlacedRow16s10bitToYUV(temporal_lowpass, temporal_highpass, num_channels,
													 output_row_ptr, output_pitch, output_width, frame_width,
													 chroma_offset);
					break;

				case DECODED_FORMAT_UYVY:
					InvertInterlacedRow16s10bitToUYVY(temporal_lowpass, temporal_highpass, num_channels,
													  output_row_ptr, output_pitch, output_width, frame_width,
													  chroma_offset);
					break;

				default:
					assert(0);
					break;
				}
			}
			else	// Older code for 8-bit precision
			{
				// NOTE(review): no default case — format is used uninitialized if
				// frame->format is neither YUYV nor UYVY; confirm callers guarantee it.
				int format;

				assert(precision == CODEC_PRECISION_8BIT);

				switch (frame->format)
				{
				case DECODED_FORMAT_YUYV:
					format = COLOR_FORMAT_YUYV;
					break;
				case DECODED_FORMAT_UYVY:
					format = COLOR_FORMAT_UYVY;
					break;
				}

				// Invert the temporal bands from all channels and pack output pixels
				InvertInterlacedRow16sToYUV(temporal_lowpass, temporal_highpass, num_channels,
											output_row_ptr, output_pitch, output_width, frame_width,
											chroma_offset, format);
			}

			// Advance to the next row in the input transforms
			//row += row_step;

			// Advance to the next row in the packed output image
			//output += field_pitch;
		}
		else
		{
			// No more rows to process
			break;
		}
	}

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Finished transform, thread index: %d\n", thread_index);
	}
#endif
}
#endif


//#if BUILD_PROSPECT
// Apply the inverse horizontal-temporal transform and output rows of luma and chroma
//
// Like TransformInverseFrameToYUV, but the output of each row pair is written as
// planar 16-bit luma and chroma rows (channel 0 gets luma_width pixels, the other
// channels chroma_width = luma_width/2 each, laid out consecutively per row).
// At half-horizontal resolution the horizontal inverse is bypassed entirely.
#if 0
void TransformInverseFrameToRow16u(TRANSFORM *transform[], int frame_index, int num_channels,
								   PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
								   char *buffer, size_t buffer_size,
								   int chroma_offset, int precision)
#else
void TransformInverseFrameToRow16u(DECODER *decoder,
								   TRANSFORM *transform[], int frame_index, int num_channels,
								   PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
								   const SCRATCH *scratch,
								   int chroma_offset, int precision)
#endif
{
	// Pointers to the rows in the horizontal wavelet for each channel
	PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

	// Horizontal wavelet band width and pitch
	int horizontal_width[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch[TRANSFORM_MAX_CHANNELS];

	// Quantization factors
	int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
	int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
	int highlow_quantization[TRANSFORM_MAX_CHANNELS];
	int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
#if DEBUG
	size_t buffer_size = scratch->free_size;
#endif

	// Buffers for the rows in the temporal wavelet (reused for each channel)
	PIXEL *temporal_lowpass;
	PIXEL *temporal_highpass;

	int output_row_width[TRANSFORM_MAX_CHANNELS];

	// Dimensions of the reconstructed frame
	int frame_width = frame->width;
	int frame_height = frame->height;
	int half_height = frame_height / 2;

	size_t temporal_row_size = frame_width * sizeof(PIXEL);
	int field_pitch = 2 * output_pitch;

	int luma_width = frame_width;
	int chroma_width = luma_width/2;

	int channel;
	int row;

#if (1 && DEBUG_ROW16U)
	PIXEL16U *output_buffer;
#endif

	// This routine should only be called to decode rows of 16-bit luma and chroma
	//assert(frame->format == DECODED_FORMAT_YR16);

	// Round up the temporal row size to an integral number of cache lines
	temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

	// Check that the buffer starts on a cache line boundary
	assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

	// Check that the number of channels is reasonable
	assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

	// Buffer must be large enough for two rows of temporal coefficients (lowpass and highpass)
	// plus the buffer used by the inverse horizontal transform for its intermediate results
#if DEBUG
	assert((2 * temporal_row_size) <= buffer_size);
#endif

	// Allocate buffers for one row of lowpass and highpass temporal coefficients
	temporal_lowpass = (PIXEL *)&buffer[0];
	temporal_highpass = (PIXEL *)&buffer[temporal_row_size];

#if (1 && DEBUG_ROW16U)
	output_buffer = (PIXEL16U *)&buffer[2 * temporal_row_size];
#endif

	// Initialize the arrays of row pointers into the horizontal transform bands
	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];

#if (0 && DEBUG)
		// Disabled debug code: dump the first 20 wavelets (and bands) as PGM images
		int static count = 0;
		if (count < 20)
		{
			char label[_MAX_PATH];
			int i;

			sprintf(label, "Frame%d-%d-", frame_index, count);
			DumpPGM(label, wavelet, NULL);

			for (i = 1; i < wavelet->num_bands; i++)
			{
				sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
				DumpBandPGM(label, wavelet, i, NULL);
			}
		}
		count++;
#endif

		// Initialize the row pointers into the horizontal bands
		horizontal_lowlow[channel] = wavelet->band[LL_BAND];
		horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
		horizontal_highlow[channel] = wavelet->band[HL_BAND];
		horizontal_highhigh[channel] = wavelet->band[HH_BAND];

		lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
		lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
		highlow_quantization[channel] = wavelet->quantization[HL_BAND];
		highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

		// Compute the pitch in units of pixels
		horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

		// Remember the width of the horizontal wavelet rows for this channel
		horizontal_width[channel] = wavelet->width;

		// Compute the width of each row of output pixels
		// (channel 0 is full width; the remaining channels are half width)
		output_row_width[channel] = (channel == 0) ? luma_width : chroma_width;
	}

	// Process one row at a time from each channel
	for (row = 0; row < half_height; row++)
	{
#if (1 && DEBUG_ROW16U)
		PIXEL16U *output_row_ptr = output_buffer;
		PIXEL16U *planar_output[TRANSFORM_MAX_CHANNELS];
		int planar_pitch[TRANSFORM_MAX_CHANNELS];
		ROI strip = {luma_width, 2};
		uint8_t *yuv_output = (uint8_t *)output;
		uint8_t *output1 = yuv_output;
		uint8_t *output2 = yuv_output + output_pitch;
#else
		PIXEL16U *output_row_ptr = output;
#endif

		// Invert the horizontal transform applied to the temporal bands in each channel
		for (channel = 0; channel < num_channels; channel++)
		{
			int pitch = horizontal_pitch[channel];

			if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
			{
				// Invert the horizontal transform applied to the temporal lowpass row
				BypassHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
									   temporal_lowpass, horizontal_width[channel]);

				// Invert the horizontal transform applied to the temporal highpass row
				BypassHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
									   temporal_highpass, horizontal_width[channel]);
			}
			else
			{
				// Invert the horizontal transform applied to the temporal lowpass row
				InvertHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
									   temporal_lowpass, horizontal_width[channel]);

				// Invert the horizontal transform applied to the temporal highpass row
				InvertHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
									   temporal_highpass, horizontal_width[channel]);
			}

			//***DEBUG***
			//ZeroMemory(temporal_highpass, temporal_row_size);
			//FillPixelMemory(temporal_highpass, temporal_row_size/sizeof(PIXEL), 50);

			// Advance to the next row in each horizontal band in this channel
			horizontal_lowlow[channel] += pitch;
			horizontal_lowhigh[channel] += pitch;
			horizontal_highlow[channel] += pitch;
			horizontal_highhigh[channel] += pitch;

#if (1 && DEBUG_ROW16U)
			// Write the rows of 16-bit pixels to a temporary buffer
			planar_output[channel] = output_row_ptr;
			planar_pitch[channel] = output_pitch * sizeof(PIXEL);

			// Invert the temporal transform and output two rows of luma or chroma
			InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
										   planar_output[channel], planar_pitch[channel],
										   output_row_width[channel], frame_width,
										   chroma_offset, precision);

			//if (channel > 0)
			if (0)
			{
				uint8_t *output3 = (uint8_t *)planar_output[channel];
				uint8_t *output4 = (uint8_t *)output3 + planar_pitch[channel];
				int output_size = output_row_width[channel] * sizeof(PIXEL);
				int fill_value = (128 << 8);
				//ZeroMemory(output3, output_size);
				//ZeroMemory(output4, output_size);
				FillPixelMemory((PIXEL *)output3, output_row_width[channel], fill_value);
				FillPixelMemory((PIXEL *)output4, output_row_width[channel], fill_value);
			}
#else
			// Invert the temporal transform and output two rows of luma or chroma
			InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
										   output_row_ptr, output_pitch,
										   output_row_width[channel], frame_width,
										   chroma_offset, precision);
#endif

			// Advance the output row pointer to the next channel
			output_row_ptr += output_row_width[channel];

			// Check the output row alignment
			assert(ISALIGNED16(output_row_ptr));
		}

		// Advance to the next group of rows in the output image
		output += field_pitch/sizeof(PIXEL16U);
	}
}
//#endif


#if _INTERLACED_WORKER_THREADS

// Worker-thread variant of TransformInverseFrameToRow16u (continues below)
void TransformInverseFrameSectionToRow16u(DECODER *decoder, int thread_index, int frame_index, int num_channels,
										  PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
										  int chroma_offset, int precision)
{
	FILE *logfile = decoder->logfile;
	TRANSFORM **transform = decoder->transform;
	const SCRATCH *scratch = &decoder->scratch;

	// Pointers to the rows in the horizontal wavelet for each channel
	PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

	// Horizontal wavelet band width and pitch
	int
horizontal_width[TRANSFORM_MAX_CHANNELS]; int horizontal_pitch[TRANSFORM_MAX_CHANNELS]; // Quantization factors int lowlow_quantization[TRANSFORM_MAX_CHANNELS]; int lowhigh_quantization[TRANSFORM_MAX_CHANNELS]; int highlow_quantization[TRANSFORM_MAX_CHANNELS]; int highhigh_quantization[TRANSFORM_MAX_CHANNELS]; // Push the scratch space state to allocate a new section char *buffer = scratch->free_ptr; size_t buffer_size = scratch->free_size; // Buffers for the rows in the temporal wavelet (reused for each channel) PIXEL *temporal_lowpass; PIXEL *temporal_highpass; int output_row_width[TRANSFORM_MAX_CHANNELS]; // Dimensions of the reconstructed frame int frame_width = frame->width; int frame_height = frame->height; int half_height = frame_height / 2; size_t temporal_row_size = frame_width * sizeof(PIXEL); int field_pitch = 2 * output_pitch; int luma_width = frame_width; int chroma_width = luma_width/2; int channel; int row; HANDLE row_semaphore = decoder->interlaced_worker.row_semaphore; int return_value; #if (1 && DEBUG_ROW16U) PIXEL16U *output_buffer; #endif // This routine should only be called to decode rows of 16-bit luma and chroma //assert(frame->format == DECODED_FORMAT_YR16); // Round up the temporal row size to an integral number of cache lines temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE); #if 0 if (thread_index == 1) { // Skip over the buffer space used by the other thread size_t buffer_usage = 2 * temporal_row_size; buffer += buffer_usage; buffer_size -= buffer_usage; } #else // Divide the buffer space between the two threads buffer_size /= 4; buffer += buffer_size * thread_index; #endif // Check that the buffer starts on a cache line boundary assert(ISALIGNED(buffer, _CACHE_LINE_SIZE)); // Check that the number of channels is reasonable assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS); // Buffer must be large enough for two rows of temporal coefficients (lowpass and highpass) // plus the buffer used by the inverse 
horizontal transform for its intermediate results assert((2 * temporal_row_size) <= buffer_size); // Allocate buffers for one row of lowpass and highpass temporal coefficients temporal_lowpass = (PIXEL *)&buffer[0]; temporal_highpass = (PIXEL *)&buffer[temporal_row_size]; #if (1 && DEBUG_ROW16U) output_buffer = (PIXEL16U *)&buffer[2 * temporal_row_size]; #endif // Initialize the arrays of row pointers into the horizontal transform bands for (channel = 0; channel < num_channels; channel++) { IMAGE *wavelet = transform[channel]->wavelet[frame_index]; #if (0 && DEBUG) int static count = 0; if (count < 20) { char label[_MAX_PATH]; int i; sprintf(label, "Frame%d-%d-", frame_index, count); DumpPGM(label, wavelet, NULL); for (i = 1; i < wavelet->num_bands; i++) { sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count); DumpBandPGM(label, wavelet, i, NULL); } } count++; #endif // Initialize the row pointers into the horizontal bands horizontal_lowlow[channel] = wavelet->band[LL_BAND]; horizontal_lowhigh[channel] = wavelet->band[LH_BAND]; horizontal_highlow[channel] = wavelet->band[HL_BAND]; horizontal_highhigh[channel] = wavelet->band[HH_BAND]; lowlow_quantization[channel] = wavelet->quantization[LL_BAND]; lowhigh_quantization[channel] = wavelet->quantization[LH_BAND]; highlow_quantization[channel] = wavelet->quantization[HL_BAND]; highhigh_quantization[channel] = wavelet->quantization[HH_BAND]; // Compute the pitch in units of pixels horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL); // Remember the width of the horizontal wavelet rows for this channel horizontal_width[channel] = wavelet->width; // Compute the width of each row of output pixels output_row_width[channel] = (channel == 0) ? 
luma_width : chroma_width; } #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Output buffer: %d (0x%p)\n", output, output); } #endif /* if (thread_index == 0) { row = 0; row_step = 1; } else if (thread_index == 1) { row = half_height - 1; row_step = -1; // Move to the bottom of the transform and process moving up for (channel = 0; channel < num_channels; channel++) { int offset = horizontal_pitch[channel] * (half_height - 1); horizontal_lowlow[channel] += offset; horizontal_lowhigh[channel] += offset; horizontal_highlow[channel] += offset; horizontal_highhigh[channel] += offset; horizontal_pitch[channel] = NEG(horizontal_pitch[channel]); //horizontal_pitch8s[channel] = NEG(horizontal_pitch8s[channel]); } //output += field_pitch * (half_height - 1); output += (frame_height - 1) * output_pitch/sizeof(PIXEL16U); output_pitch = NEG(output_pitch); field_pitch = NEG(field_pitch); } else { assert(0); // middle threads } */ #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Thread index: %d, start row: %d, row step: %d, field_pitch: %d\n", thread_index, row, row_step, field_pitch); } #endif // Loop until all of the rows have been processed for (;;) { PIXEL16U *output_row_ptr; // Wait for one row from each channel to invert the transform return_value = WaitForSingleObject(row_semaphore, 0); // Determine the index of this worker thread if (return_value == WAIT_OBJECT_0) { if(decoder->interlaced_worker.lock_init) { EnterCriticalSection(&decoder->interlaced_worker.lock); } row = decoder->interlaced_worker.current_row++; if(decoder->interlaced_worker.lock_init) LeaveCriticalSection(&decoder->interlaced_worker.lock); output_row_ptr = output; output_row_ptr += row * output_pitch; for (channel = 0; channel < num_channels; channel++) { int pitch = horizontal_pitch[channel]; IMAGE *wavelet = transform[channel]->wavelet[frame_index]; horizontal_lowlow[channel] = wavelet->band[LL_BAND]; horizontal_lowhigh[channel] = wavelet->band[LH_BAND]; horizontal_highlow[channel] = 
wavelet->band[HL_BAND]; horizontal_highhigh[channel] = wavelet->band[HH_BAND]; horizontal_lowlow[channel] += pitch*row; horizontal_lowhigh[channel] += pitch*row; horizontal_highlow[channel] += pitch*row; horizontal_highhigh[channel] += pitch*row; } } if (return_value == WAIT_OBJECT_0 && 0 <= row && row < half_height) { assert(0 <= row && row < half_height); if(decoder->frame.resolution == DECODED_RESOLUTION_FULL) { // Invert the horizontal transform applied to the temporal bands in each channel for (channel = 0; channel < num_channels; channel++) { int pitch = horizontal_pitch[channel]; // Invert the horizontal transform applied to the temporal lowpass row InvertHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel], temporal_lowpass, horizontal_width[channel]); // Invert the horizontal transform applied to the temporal highpass row InvertHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel], temporal_highpass, horizontal_width[channel]); // Invert the temporal transform and output two rows of luma or chroma InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass, output_row_ptr, output_pitch, output_row_width[channel], frame_width, chroma_offset, precision); // Advance the output row pointer to the next channel output_row_ptr += output_row_width[channel]; } } else if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) { // Invert the horizontal transform applied to the temporal bands in each channel for (channel = 0; channel < num_channels; channel++) { int pitch = horizontal_pitch[channel]; // Invert the horizontal transform applied to the temporal lowpass row BypassHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel], temporal_lowpass, horizontal_width[channel]); // Invert the horizontal transform applied to the temporal highpass row BypassHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel], temporal_highpass, horizontal_width[channel]); // Invert the 
temporal transform and output two rows of luma or chroma InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass, output_row_ptr, output_pitch, output_row_width[channel], frame_width, chroma_offset, precision); // Advance the output row pointer to the next channel output_row_ptr += output_row_width[channel]; } } } else { // No more rows to process break; } } #if (1 && DEBUG) if (logfile) { fprintf(logfile, "Finished transform, thread index: %d\n", thread_index); } #endif } #endif #if 0 DWORD WINAPI TransformInverseFrameToRow16utopThread(LPVOID param) { struct data { TRANSFORM *transform[3]; int frame_index; int num_channels; uint8_t *output; int output_pitch; FRAME_INFO *info; SCRATCH *scratch; int chroma_offset; int precision; } *dptr; dptr = (struct data *)param; TransformInverseFrameToRow16utop(dptr->transform, dptr->frame_index, dptr->num_channels, (PIXEL16U *)dptr->output, dptr->output_pitch, dptr->info, dptr->scratch, dptr->chroma_offset, dptr->precision); return 0; } DWORD WINAPI TransformInverseFrameToRow16ubottomThread(LPVOID param) { struct data { TRANSFORM *transform[3]; int frame_index; int num_channels; uint8_t *output; int output_pitch; FRAME_INFO *info; SCRATCH *scratch; int chroma_offset; int precision; } *dptr; dptr = (struct data *)param; TransformInverseFrameToRow16ubottom(dptr->transform, dptr->frame_index, dptr->num_channels, (PIXEL16U *)dptr->output, dptr->output_pitch, dptr->info, dptr->scratch, dptr->chroma_offset, dptr->precision); return 0; } #endif extern void fast_srand( int seed ); // Apply the inverse horizontal-temporal transform and pack the output into a buffer #if 0 void TransformInverseFrameToBuffer(TRANSFORM *transform[], int frame_index, int num_channels, uint8_t *output, int output_pitch, FRAME_INFO *frame, char *buffer, size_t buffer_size, int chroma_offset, int precision) #else void TransformInverseFrameToBuffer(TRANSFORM *transform[], int frame_index, int num_channels, uint8_t *output, int output_pitch, FRAME_INFO 
								   *frame, const SCRATCH *scratch,
								   int chroma_offset, int precision)
#endif
{
	// Pointers to the rows in the horizontal wavelet for each channel
	PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

	// Horizontal wavelet band width and pitch
	int horizontal_width[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
	//int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];

	// Quantization factors
	int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
	int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
	int highlow_quantization[TRANSFORM_MAX_CHANNELS];
	int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
	size_t buffer_size = scratch->free_size;

	// Pointers to the rows in the temporal wavelet for each channel
	PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
	PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];

	// Dimensions of the reconstructed frame
	int frame_width = frame->width;
	int frame_height = frame->height;
	int half_height = frame_height / 2;

	size_t temporal_row_size = frame_width * sizeof(PIXEL);
	size_t temporal_buffer_size = 2 * num_channels * temporal_row_size;

#if DEBUG
	size_t yuv_row_size = frame_width * 2;
#endif
	char *yuv_buffer;
	size_t yuv_buffer_size;

	int field_pitch = 2 * output_pitch;

	int format = frame->format;
	bool inverted = (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB32);

	int output_width;
	int channel;
	int row;

	// Round up the temporal row size to an integral number of cache lines
	temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

	// Check that the buffer starts on a cache line boundary
	assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

	// Check that the number of channels is reasonable
	assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

	// Check that the buffer is large enough
	assert((2 * num_channels * temporal_row_size) <= buffer_size);

	// Allocate buffers for a single row of lowpass and highpass temporal coefficients
	// and initialize the arrays of row pointers into the horizontal transform bands
	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];

		// Initialize the row pointers into the horizontal bands
		horizontal_lowlow[channel] = wavelet->band[LL_BAND];
		horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
		horizontal_highlow[channel] = wavelet->band[HL_BAND];
		horizontal_highhigh[channel] = wavelet->band[HH_BAND];

		lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
		lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
		highlow_quantization[channel] = wavelet->quantization[HL_BAND];
		highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

		// Compute the pitch in units of pixels
		horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

		// Compute the 8-bit pitch in units of pixels
		//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
		//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);

		// Remember the width of the horizontal wavelet rows for this channel
		horizontal_width[channel] = wavelet->width;

		// Divide the buffer into temporal lowpass and highpass rows
		temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
		temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
	}

	// Allocate buffer space for the intermediate YUV data
	// (the YUV scratch area follows the temporal coefficient rows)
	yuv_buffer = buffer + temporal_buffer_size;
	yuv_buffer_size = buffer_size - temporal_buffer_size;
#if DEBUG
	assert(yuv_buffer_size >= 2 * yuv_row_size);
#endif

	// RGB formats are stored bottom-up, so start at the last row and negate the pitches
	if (inverted) {
		output += (frame_height - 1) * output_pitch;
		output_pitch = (- output_pitch);
		field_pitch = (- field_pitch);
	}

	// Process one row at a time from each channel
	for (row = 0; row < half_height; row++)
	{
		// Scratch line used by the buffered inverse horizontal transforms below
		PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);

		// Invert the horizontal transform applied to the temporal bands in each channel
		for (channel = 0; channel < num_channels; channel++)
		{
			int pitch = horizontal_pitch[channel];
			//int pitch8s = horizontal_pitch8s[channel];

			// Invert the horizontal transform applied to the temporal lowpass row
			InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
												  (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
												  temporal_lowpass[channel], horizontal_width[channel],
												  (PIXEL *)line_buffer);

			// Invert the horizontal transform applied to the temporal highpass row
			InvertHorizontalRow8sBuffered((PIXEL8S *)horizontal_highlow[channel], highlow_quantization[channel],
										  (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
										  temporal_highpass[channel], horizontal_width[channel],
										  (PIXEL *)line_buffer);

			// Advance to the next row in each horizontal band in this channel
			horizontal_lowlow[channel] += pitch;
			horizontal_lowhigh[channel] += pitch;
			horizontal_highlow[channel] += pitch;
			horizontal_highhigh[channel] += pitch;
		}

		// The output width is twice the width of the wavelet bands
		output_width = 2 * horizontal_width[0];

		// Adjust the frame width to fill to the end of each row
		//frame_width = output_pitch / 2;

//#if BUILD_PROSPECT
		if (format == DECODED_FORMAT_V210 || format == DECODED_FORMAT_YU64)
		{
			// Invert the temporal bands from all channels and pack as V210 output
			InvertInterlacedRow16sToV210(temporal_lowpass, temporal_highpass, num_channels,
										 output, output_pitch, output_width, frame_width,
										 yuv_buffer, yuv_buffer_size, format,
										 chroma_offset, precision);
		}
		else
//#endif
		{
			// Invert the temporal bands from all channels and pack as 8-bit output
			InvertInterlacedRow16s(temporal_lowpass, temporal_highpass, num_channels,
								   output, output_pitch, output_width, frame_width,
								   yuv_buffer, yuv_buffer_size, format, frame->colorspace,
								   chroma_offset, precision, row);
		}

		// Advance to the next row in the packed output image
		output += field_pitch;
	}
}


// Convert a decoded image to the requested packed RGB/YUV format and copy it
// into the output buffer.  Unsupported formats produce a blank (chroma-zero) frame.
void CopyImageToBuffer(IMAGE *image, uint8_t *output_buffer, int32_t output_pitch, int format)
{
	bool inverted = false;
	size_t output_size;

	START(tk_convert);

	// Determine the type of conversion
	switch (format)
	{
	case DECODED_FORMAT_RGB24:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB24_INVERTED:
		ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB24, inverted);
		break;

	case DECODED_FORMAT_RGB32:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB32, inverted);
		break;

#if 0
	case DECODED_FORMAT_YUYV_INVERTED:
		inverted = true;
		// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
	case DECODED_FORMAT_YUYV:
		ConvertImageToYUV(image, output_buffer, output_pitch, COLOR_FORMAT_YUYV, inverted);
		break;

#if 0
	case DECODED_FORMAT_UYVY_INVERTED:
		inverted = true;
		// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
	case DECODED_FORMAT_UYVY:
		ConvertImageToYUV(image, output_buffer, output_pitch, COLOR_FORMAT_UYVY, inverted);
		break;

	default:
		// Unsupported format (return a blank frame)
		assert(0);
		output_size = image->height * output_pitch;
		memset(output_buffer, COLOR_CHROMA_ZERO, output_size);
		break;
	}

	STOP(tk_convert);
}


// Pack the three lowpass channels into YUYV output, averaging pairs of input
// pixels per output pixel and descaling by 5 bits (used for side-by-side 3D output)
void SideLowpass16s10bitToYUYV(IMAGE *images[], uint8_t *output_buffer, int output_width,
							   int output_height, int output_pitch, bool inverted)
{
	IMAGE *y_image = images[0];
	IMAGE *u_image = images[1];
	IMAGE *v_image = images[2];

	int width = y_image->width;
	int height = output_height;

	PIXEL *y_row_ptr = y_image->band[0];
	PIXEL *u_row_ptr = u_image->band[0];
	PIXEL *v_row_ptr = v_image->band[0];

	int y_pitch = y_image->pitch/sizeof(PIXEL);
	int u_pitch = u_image->pitch/sizeof(PIXEL);
	int v_pitch = v_image->pitch/sizeof(PIXEL);

	uint8_t *outrow =
output_buffer; uint8_t *outptr; int row, column; // Definitions for optimization //const int column_step = 2 * sizeof(__m64); // Column at which post processing must begin //int post_column = width - (width % column_step); // The output pitch should be a positive number before inversion assert(output_pitch > 0); // Should the image be inverted? if (inverted) { outrow += (height - 1) * output_pitch; // Start at the bottom row output_pitch = NEG(output_pitch); // Negate the pitch to go up } for (row = 0; row < height; row++) { outptr = outrow; // Fill the rest of the output row for (column = 0; column < width; column+=4) { int chroma_column = column>>1; *(outptr++) = SATURATE_8U((y_row_ptr[column]+y_row_ptr[column+1])>>5); *(outptr++) = SATURATE_8U((v_row_ptr[chroma_column]+v_row_ptr[chroma_column+1])>>5); *(outptr++) = SATURATE_8U((y_row_ptr[column+2]+y_row_ptr[column+3])>>5); *(outptr++) = SATURATE_8U((u_row_ptr[chroma_column]+u_row_ptr[chroma_column+1])>>5); } // Advance to the next rows in the input and output images y_row_ptr += y_pitch;// 3D Work u_row_ptr += u_pitch; v_row_ptr += v_pitch; outrow += output_pitch; } } // Convert 16-bit signed lowpass data into packed RGB/YUV and store it in the output buffer void CopyLowpass16sToBuffer(DECODER *decoder, IMAGE *images[], int num_channels, uint8_t *output_buffer, int32_t output_pitch, FRAME_INFO *info, int chroma_offset, int precision, int encode_format, int whitebitdepth) { //IMAGE *image = frame->channel[0]; bool inverted = false; int output_width = info->width; int output_height = info->height; int descale = precision - 8; // Get the color format from the decoded format int color_format = info->format & COLOR_FORMAT_MASK; // Must compile this routine with switches set for decoding to 8-bit unsigned pixels #if !defined(_DECODE_FRAME_8U) || (_DECODE_FRAME_8U == 0) assert(0); return; #endif START(tk_convert); #if 0 // Fill the output buffer with blank values EraseOutputBuffer(output_buffer, info->width, 
info->height, output_pitch, info->format);
#endif

	// Determine the type of conversion
	switch (info->format)
	{
	case DECODED_FORMAT_RGB24:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB24_INVERTED:
		if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGB48ToRGB(images, output_buffer, output_width, output_height,
										output_pitch, COLOR_FORMAT_RGB24, info->colorspace,
										inverted, descale, num_channels);
		}
		else
		{
			ConvertLowpass16sToRGBNoIPPFast(images, output_buffer, output_width, output_height,
											output_pitch, COLOR_FORMAT_RGB24, info->colorspace,
											inverted, descale);
		}
		break;

	case DECODED_FORMAT_RGB32:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB32_INVERTED:
		if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGB48ToRGB(images, output_buffer, output_width, output_height,
										output_pitch, COLOR_FORMAT_RGB32, info->colorspace,
										inverted, descale, num_channels);
		}
		else
		{
			ConvertLowpass16sToRGBNoIPPFast(images, output_buffer, output_width, output_height,
											output_pitch, COLOR_FORMAT_RGB32, info->colorspace,
											inverted, descale);
		}
		break;

	case DECODED_FORMAT_RG48:
		if(encode_format == ENCODED_FORMAT_BAYER)
		{
			ConvertLowpass16sBayerToRGB48(images, output_buffer, output_width, output_height,
										  output_pitch, 2, num_channels);
		}
		else if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			int scale = 1;
			// NOTE(review): this inner test repeats the enclosing else-if condition,
			// so scale is always 2 on this path
			if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
				scale = 2;
			ConvertLowpass16sRGB48ToRGB48(images, output_buffer, output_width, output_height,
										  output_pitch, scale, num_channels);
		}
		else
		{
			ConvertLowpass16sYUVtoRGB48(images, (uint8_t *)output_buffer, output_width, output_height,
										output_pitch, info->colorspace, inverted, descale,
										info->format, whitebitdepth);
		}
		break;

	case DECODED_FORMAT_RG64:
		if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGBA64ToRGBA64(images, output_buffer, output_width, output_height,
											output_pitch, descale, num_channels, info->format & 0xffff);
		}
		else
		{
			assert(0);
		}
		break;

	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
		if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGBA64ToRGBA64(images, output_buffer, output_width, output_height,
											output_pitch, descale, num_channels, info->format & 0xffff);
		}
		else
		{
			ConvertLowpass16sYUVtoRGB48(images, (uint8_t *)output_buffer, output_width, output_height,
										output_pitch, info->colorspace, inverted, descale,
										info->format, whitebitdepth);
		}
		break;

#if 0
	case DECODED_FORMAT_YUYV_INVERTED:
		inverted = true;
		// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
	case DECODED_FORMAT_YUYV:
	case DECODED_FORMAT_UYVY:
		if (precision == CODEC_PRECISION_10BIT)
		{
			int lineskip = 1;	// 3D Work
			int pitch = output_pitch;

			// Stacked or line-interleaved 3D output writes every other line
			if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV)
			{
				if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
				   decoder->channel_blend_type == BLEND_LINE_INTERLEAVED) // 3d Work
				{
					lineskip = 2;
					if(decoder->channel_blend_type == 3)
						pitch *= 2;
				}
			}

			if((decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC ||
				decoder->channel_blend_type == BLEND_FREEVIEW) &&
			   decoder->frame.format == DECODED_FORMAT_YUYV) //side by side
			{
				SideLowpass16s10bitToYUYV(images, output_buffer, output_width, output_height,
										  pitch, inverted);
			}
			else
			{
				//ConvertLowpass16s10bitToYUV(images, output_buffer, output_width, output_height, pitch, COLOR_FORMAT_YUYV, inverted, lineskip);
				ConvertLowpass16s10bitToYUV(images, output_buffer, output_width, output_height,
											pitch, color_format, inverted, lineskip);
			}
		}
		else
		{
			//ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YUYV, inverted);
			ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height,
								   output_pitch, color_format, inverted);
		}
		break;

#if 0
	case DECODED_FORMAT_UYVY_INVERTED:
		inverted = true;
		// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
#if 0
	case DECODED_FORMAT_UYVY:
		ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height,
							   output_pitch, COLOR_FORMAT_UYVY, inverted);
		break;
#endif

//#if BUILD_PROSPECT
	case DECODED_FORMAT_V210:
		if (precision == CODEC_PRECISION_10BIT)
		{
			ConvertLowpass16s10bitToV210(images, output_buffer, output_width, output_height,
										 output_pitch, COLOR_FORMAT_V210, inverted);
		}
		else
		{
			//ConvertLowpass16sToV210(images, output_buffer, output_width, output_pitch, COLOR_FORMAT_V210, inverted);
			assert(0);
		}
		break;
//#endif

	case DECODED_FORMAT_YU64:	// DAN04262004
		ConvertLowpass16sToYUV64(images, output_buffer, output_width, output_height,
								 output_pitch, COLOR_FORMAT_YU64, inverted, precision);
		break;

//#if BUILD_PROSPECT
	case DECODED_FORMAT_YR16:
		ConvertLowpass16sToYR16(images, output_buffer, output_width, output_height,
								output_pitch, COLOR_FORMAT_YR16, inverted, precision);
		break;
//#endif

	default:
		// Unsupported format (output a blank frame)
		assert(0);
		break;
	}

	STOP(tk_convert);
}


// Convert strips of planar YUV rows to the requested packed output format
void ConvertYUVStripPlanarToBuffer(uint8_t *planar_output[], int planar_pitch[], ROI roi,
								   uint8_t *output_buffer, int output_pitch,
								   int frame_width, int format, int colorspace)
{
	bool inverted = false;
	int output_width = roi.width;

#if !defined(_DECODE_FRAME_8U) || (_DECODE_FRAME_8U == 0)
#error Must set compile-time switches to decode to 8-bit pixels
#endif

	START(tk_convert);

#if _ENCODE_CHROMA_OFFSET
#error Cannot handle images encoded with a non-zero chroma offset
#endif

	// Determine the type of conversion
	switch(format)
	{
	case DECODED_FORMAT_RGB24:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB24_INVERTED:
		ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer,
							  output_width, output_pitch, COLOR_FORMAT_RGB24,
							  colorspace, inverted);
		break;

	case DECODED_FORMAT_RGB32:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer,
							  output_width, output_pitch, COLOR_FORMAT_RGB32,
							  colorspace, inverted);
		break;

#if 0
	case DECODED_FORMAT_YUYV_INVERTED:
		inverted = true;
		// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
	case DECODED_FORMAT_YUYV:
		ConvertYUVStripPlanarToPacked(planar_output, planar_pitch, roi, output_buffer,
									  output_pitch, frame_width, format);
		break;

#if 0
	case DECODED_FORMAT_UYVY_INVERTED:
		inverted = true;
		// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
	case DECODED_FORMAT_UYVY:
		ConvertPlanarYUVToUYVY(planar_output, planar_pitch, roi, output_buffer,
							   output_width, output_pitch, COLOR_FORMAT_UYVY,
							   colorspace, inverted);
		break;

	default:
		// Unsupported format (output a blank frame)
		assert(0);
		break;
	}

	STOP(tk_convert);
}


// Convert rows of 16-bit planar YUV to a dithered packed output format
void ConvertRow16uToDitheredBuffer(DECODER *decoder, uint8_t *planar_output[], int planar_pitch[],
								   ROI roi, uint8_t *output_buffer, int output_pitch,
								   int frame_width, int format, int colorspace)
{
	bool inverted = false;
	int output_width = roi.width;

	START(tk_convert);

	// Determine the type of conversion
	switch(format)
	{
	case DECODED_FORMAT_RGB24:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB24_INVERTED:
		//ConvertPlanarYUVToRGB
		ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer,
								   output_width, output_pitch, COLOR_FORMAT_RGB24,
								   colorspace, inverted);
		break;

	case DECODED_FORMAT_RGB32:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer,
								   output_width, output_pitch, COLOR_FORMAT_RGB32,
								   colorspace, inverted);
		break;

	case COLOR_FORMAT_WP13:
	case COLOR_FORMAT_B64A:
	case COLOR_FORMAT_RG48:
	case COLOR_FORMAT_R210:
	case COLOR_FORMAT_DPX0:
	case COLOR_FORMAT_RG30:
	case COLOR_FORMAT_AR10:
	case COLOR_FORMAT_AB10:
		ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi, output_buffer,
								 output_width, output_pitch, format, colorspace,
								 NULL, NULL);
		break;

	case DECODED_FORMAT_YUYV:
		assert(0);	// These routines are not yet updated for ROW16u inputs
		ConvertYUVStripPlanarToPacked(planar_output, planar_pitch, roi, output_buffer,
									  output_pitch, frame_width, format);
		break;

	case DECODED_FORMAT_UYVY:
		assert(0);	// These routines are not yet updated for ROW16u inputs
		ConvertPlanarYUVToUYVY(planar_output, planar_pitch, roi, output_buffer,
							   output_width, output_pitch, COLOR_FORMAT_UYVY,
							   colorspace, inverted);
		break;

	default:
		// Unsupported format (output a blank frame)
		assert(0);
		break;
	}

	STOP(tk_convert);
}


// Convert one row of packed YUYV to the specified color
void ConvertRowYUYV(uint8_t *input, uint8_t *output, int length, int format, int colorspace, int precision)
{
	size_t row_size = 2 * length;
	bool inverted = false;

	START(tk_convert);

	// Determine the type of color conversion
	switch (format)
	{
	case DECODED_FORMAT_RGB24:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB24_INVERTED:
		ConvertYUYVRowToRGB(input, output, length, COLOR_FORMAT_RGB24, colorspace, precision);
		break;

	case DECODED_FORMAT_RGB32:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertYUYVRowToRGB(input, output, length, COLOR_FORMAT_RGB32, colorspace, precision);
		break;

	case DECODED_FORMAT_YUYV:
		if(precision == 8)
			memcpy(output, input, row_size);
		else
		{
			//need to dither to 8-bit
			assert(0);
		}
		break;

	case DECODED_FORMAT_UYVY:
		if(precision == 8)
			ConvertYUYVRowToUYVY(input, output, length, COLOR_FORMAT_UYVY);
		else
		{
			//need to dither to 8-bit
			assert(0);
		}
		break;

//#if BUILD_PROSPECT
	case DECODED_FORMAT_V210:
		assert(0);	// should not get here with 8bit data.
		//ConvertYUYVRowToV210(input, output, length, COLOR_FORMAT_V210);
		break;

	case DECODED_FORMAT_YU64:
		assert(0);	// should not get here with 8bit data.
		//ConvertYUYVRowToYU64(input, output, length, COLOR_FORMAT_YU64);
		break;

	case DECODED_FORMAT_BYR3:
	case DECODED_FORMAT_BYR4:
		assert(0);	// should not get here with 8bit data.
		//ConvertYUYVRowToYU64(input, output, length, COLOR_FORMAT_YU64);
		break;
//#endif

	default:
		// Unsupported format (output a blank frame)
		assert(0);
		memset(output, 0, row_size);
		break;
	}

	STOP(tk_convert);
}

#if _THREADED_DECODER

// Reallocate (or allocate) a wavelet in the transform data structure while
// holding the entropy worker lock, so concurrent decoder threads see a
// consistent wavelet pointer
IMAGE *GetWaveletThreadSafe(DECODER *decoder, TRANSFORM *transform, int index,
							int width, int height, int level, int type)
{
	IMAGE *wavelet = transform->wavelet[index];

	assert(decoder != NULL && transform != NULL);
	if (decoder != NULL && transform != NULL)
	{
#if (1 && DEBUG)
		FILE *logfile = decoder->logfile;
#endif
		// Lock access to the wavelet data
#if _DELAYED_THREAD_START==0
		Lock(&decoder->entropy_worker_new.lock);
#endif

		// Get the wavelet from the transform data structure (thread safe)
		wavelet = transform->wavelet[index];

		// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
		wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
		wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif

		// Save this wavelet in the transform data structure
		transform->wavelet[index] = wavelet;

		// Unlock access to the wavelet data
#if _DELAYED_THREAD_START==0
		Unlock(&decoder->entropy_worker_new.lock);
#endif
	}

	return wavelet;
}

// Update the codec state with the information in a tag value pair
CODEC_ERROR UpdateCodecState(DECODER *decoder, BITSTREAM *input, CODEC_STATE *codec, TAGWORD tag, TAGWORD value)
{
	CODEC_ERROR
error = CODEC_ERROR_OKAY;

#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif

    bool optional = false;
    int chunksize = 0;
    bool result;

    // Is this an optional tag?  Optional tags are carried as negated tag values.
    if (tag < 0)
    {
        tag = NEG(tag);
        optional = true;
    }

#if (0 && DEBUG)
    if (logfile) {
        fprintf(logfile, "UpdateCodecState tag: %d, value: %d, optional: %d\n", tag, value, optional);
    }
#endif

    switch (tag)
    {
    case CODEC_TAG_ZERO:            // Used internally
        //assert(0);                // Should not occur in the bitstream
        error = CODEC_ERROR_INVALID_BITSTREAM;
        break;

    case CODEC_TAG_SAMPLE:          // Type of sample
        //assert(0);
        if (value == SAMPLE_TYPE_CHANNEL)
        {
            result = DecodeSampleChannelHeader(decoder, input);
            if (!result)
                error = CODEC_ERROR_DECODE_SAMPLE_CHANNEL_HEADER;
            else
                error = CODEC_ERROR_OKAY;
        }
        break;

    case CODEC_TAG_INDEX:           // Sample index table
        //assert(0);                // Need to figure out how to return the group index
        {
            uint32_t count = (uint32_t)value;
            if (count <= TRANSFORM_MAX_CHANNELS)
            {
                int i;
                uint32_t* index = (uint32_t*)(&codec->channel_size[0]);
                DecodeGroupIndex(input, index, count);
                // Reject any channel offset that lies beyond the end of the sample
                for (i = 0; i < (int)count; i++)
                {
                    if(index[i] > (uint32_t)input->dwBlockLength)
                        error = CODEC_ERROR_SAMPLE_INDEX;
                }
                codec->num_channels = count;
            }
            else
                error = CODEC_ERROR_SAMPLE_INDEX;
        }
        break;

    case CODEC_TAG_SUBBAND:         // Has the decoder encountered a subband?
        if(value>=0 && value < CODEC_MAX_SUBBANDS)
        {
            // This tag is obsolete and not used in modern streams
            int subband = value;

            // Check that the subband number makes sense
            //assert(0 <= subband && subband <= codec->max_subband);
            if (! (0 <= subband && subband <= codec->max_subband))
            {
                error = CODEC_ERROR_DECODING_SUBBAND;
                break;
            }

            // Decompress the subband
            result = DecodeSampleSubband(decoder, input, subband);
            if (!result)
                error = CODEC_ERROR_DECODING_SUBBAND;
            else
                error = CODEC_ERROR_OKAY;
        }
        else
            error = CODEC_ERROR_DECODING_SUBBAND;
        break;

    case CODEC_TAG_BAND_HEADER: //CODEC_TAG_BAND_DIVISOR:
        // Band divisor. this is last TAG before subband data so act.
        codec->band.divisor = value;    // This tag value pair encodes the band divisor which is obsolete
        {
            // This tag value pair marks the beginning of the encoded coefficients
            // The subband number has already been decoded
            int subband = codec->band.subband;
            result = DecodeSampleSubband(decoder, input, subband);
            if (!result)
                error = CODEC_ERROR_DECODING_SUBBAND;
            else
                error = CODEC_ERROR_OKAY;
        }
        break;

    case CODEC_TAG_ENTRY:           // Entry in sample index
        //assert(0);                // Need to figure out how to return the group index
        error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_MARKER:          // Bitstream marker
        {
            int marker = value;
            uint8_t *current_position;

            // Save the current bitstream position
            current_position = GetBitstreamPosition(input);
            current_position -= 4;  // Step back to before the GetSegment i.e. the TAG

            if (IsLowPassHeaderMarker(marker))
            {
                // Save the bitstream position for the start of the channel
                codec->channel_position = current_position;
            }
            else if (IsLowPassBandMarker(marker))
            {
                int subband = 0;
                result = DecodeSampleSubband(decoder, input, subband);
                if (!result)
                    error = CODEC_ERROR_DECODING_SUBBAND;
                else
                    error = CODEC_ERROR_OKAY;
            }
        }
        break;

    case CODEC_TAG_VERSION_MAJOR:   // Version
        //assert(0);
        error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_VERSION_MINOR:   // Minor version number
        //assert(0);
        error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_VERSION_REVISION: // Revision number
        //assert(0);
        error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_VERSION_EDIT:    // Edit number
        //assert(0);
        error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_SEQUENCE_FLAGS:  // Video sequence flags
        //assert(0);
        error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_TRANSFORM_TYPE:  // Type of transform
        //assert(TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST);
        if (TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST)
        {
            int i;
            codec->transform_type = value;
            // The prescale table depends on the transform type, so refresh it
            // for every channel transform that has been allocated
            for(i=0;i<TRANSFORM_MAX_CHANNELS;i++)
            {
                TRANSFORM *transform = decoder->transform[i];
                if(transform)
                {
                    GetTransformPrescale(transform, codec->transform_type, codec->precision);
                }
            }
        }
        else
            error = CODEC_ERROR_TRANSFORM_TYPE;
        break;

    case CODEC_TAG_NUM_FRAMES:      // Number of frames in the group
        //assert(0 <= value && value <= TRANSFORM_NUM_FRAMES);
        if (0 <= value && value <= TRANSFORM_NUM_FRAMES)
            codec->num_frames = value;
        else
            error = CODEC_ERROR_NUM_FRAMES;
        break;

    case CODEC_TAG_NUM_CHANNELS:    // Number of channels in the transform
        //assert(value <= CODEC_MAX_CHANNELS);
        if (value <= CODEC_MAX_CHANNELS)
            codec->num_channels = value;
        else
            error = CODEC_ERROR_NUM_CHANNELS;
        break;

    case CODEC_TAG_NUM_WAVELETS:    // Number of wavelets in the transform
        //assert(0 < value && value <= TRANSFORM_NUM_WAVELETS);
        if (0 < value && value <= TRANSFORM_NUM_WAVELETS)
            codec->num_wavelets = value;
        else
            error = CODEC_ERROR_NUM_WAVELETS;
        break;

    case CODEC_TAG_NUM_SUBBANDS:    // Number of encoded subbands
        //assert(0 < value && value <= TRANSFORM_NUM_SUBBANDS);
        if (0 < value && value <= TRANSFORM_NUM_SUBBANDS)
            codec->num_subbands = value;
        else
            error = CODEC_ERROR_NUM_SUBBANDS;
        break;

    case CODEC_TAG_NUM_SPATIAL:     // Number of spatial levels
        //assert(0 < value && value <= TRANSFORM_NUM_SPATIAL);
        if (0 < value && value <= TRANSFORM_NUM_SPATIAL)
            codec->num_spatial = value;
        else
            error = CODEC_ERROR_NUM_SPATIAL;
        break;

    case CODEC_TAG_FIRST_WAVELET:   // Type of the first wavelet
        //assert(value == TRANSFORM_FIRST_WAVELET);
        if (value == TRANSFORM_FIRST_WAVELET)
            codec->first_wavelet = value;
        else
            error = CODEC_ERROR_FIRST_WAVELET;
        break;

    case CODEC_TAG_CHANNEL_SIZE:    // Number of bytes in each channel
        //assert(0);
        error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_GROUP_TRAILER:   // Group trailer and checksum
        codec->sample_done = true;
        break;

    case CODEC_TAG_FRAME_TYPE:      // Type of frame marks the frame start
        codec->frame.type = value;
        break;

    case CODEC_TAG_FRAME_WIDTH:     // Width of the frame
        if (value > 0 && value <= 32768)
            codec->frame.width = value;
        else
            error = CODEC_ERROR_RESOLUTION;
        break;

    case
CODEC_TAG_FRAME_HEIGHT: // Height of the frame
        if (value > 0 && value <= 32768)
        {
            codec->frame.height = value;

            //DAN20080729 -- Initialize the default colorspace based on clip resolution
            if ((decoder->frame.colorspace & COLORSPACE_MASK) == COLOR_SPACE_UNDEFINED)
            {
                int internalheight = value;
                int internalwidth = codec->frame.width;

                // Bayer frames are encoded at half size in each dimension
                if (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
                {
                    internalwidth *= 2;
                    internalheight *= 2;
                }

                // Larger than SD implies 709 color primaries, otherwise 601
                if (internalheight > 576 || internalwidth > 720)
                    decoder->frame.colorspace |= COLOR_SPACE_CG_709;
                else
                    decoder->frame.colorspace |= COLOR_SPACE_CG_601;
            }

            //if(decoder->frame.colorspace_filedefault)
            //  decoder->frame.colorspace = decoder->frame.colorspace_filedefault;
            if (decoder->frame.colorspace_override)
                decoder->frame.colorspace = decoder->frame.colorspace_override;
        }
        else
            error = CODEC_ERROR_RESOLUTION;
        break;

    case CODEC_TAG_ENCODED_COLORSPACE: //DAN20080729
        if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
            value &= ~(COLOR_SPACE_BT_601|COLOR_SPACE_BT_709); // Bayer has no 601 vs 709,
                                                               //there was a bug in 3.9.4 that had bayer flagged as 601.
        if(decoder->frame.colorspace_override)
            decoder->frame.colorspace = decoder->frame.colorspace_override;
        else
        {
            if(decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422)
            {
                decoder->frame.colorspace &= ~(COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709);
                decoder->frame.colorspace |= (value & (COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709));
                //Let the VSRGB status be controllable by the calling application (e.g. Vegas)
            }
            else
            {
                decoder->frame.colorspace &= ~(COLOR_SPACE_VS_RGB);
                decoder->frame.colorspace |= (value & (COLOR_SPACE_VS_RGB));
            }
        }
        decoder->frame.colorspace_filedefault = value;
        break;

    case CODEC_TAG_FRAME_FORMAT:    // Format of the encoded pixels (GRAY, YUV, RGB, RGBA)
        //assert(0);
        error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_INPUT_FORMAT:    // Format of the original pixels
        codec->input_format = value;
        // Set the encoded format if it has not already been set
        // error = UpdateEncodedFormat(codec, (COLOR_FORMAT)value);
        break;

    case CODEC_TAG_ENCODED_FORMAT:  // Internal format of the encoded data
    case CODEC_TAG_OLD_ENCODED_FORMAT:
        if (value >= ENCODED_FORMAT_MINIMUM && value <= ENCODED_FORMAT_MAXIMUM)
        {
            codec->encoded_format = value;
            // A three channel RGBA 4:4:4:4 stream is really RGB 4:4:4
            if (codec->encoded_format == ENCODED_FORMAT_RGBA_4444 && codec->num_channels == 3)
                codec->encoded_format = ENCODED_FORMAT_RGB_444;
        }
        else
            error = CODEC_ERROR_BADFORMAT;
        break;

    case CODEC_TAG_FRAME_INDEX:     // Position of frame within the group
        codec->frame.group_index = value;
        break;

    case CODEC_TAG_FRAME_TRAILER:   // Frame trailer and checksum
        codec->sample_done = true;
        break;

    case CODEC_TAG_LOWPASS_SUBBAND: // Subband number of the lowpass band
        codec->lowpass.subband = value;
        error = SetDefaultEncodedFormat(codec);
        break;

    case CODEC_TAG_NUM_LEVELS:      // Number of wavelet levels
        if(value > 0 && value <= 4)
            codec->lowpass.level = value;
        else
            error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_LOWPASS_WIDTH:   // Width of the lowpass band
        if(value > 0 && value < codec->frame.width/4)
            codec->lowpass.width = value;
        else
            error = CODEC_ERROR_RESOLUTION;
        break;

    case CODEC_TAG_LOWPASS_HEIGHT:  // Height of the lowpass band
        if (value > 0 && value < codec->frame.height/4)
            codec->lowpass.height = value;
        else
            error = CODEC_ERROR_RESOLUTION;
        break;

    case CODEC_TAG_MARGIN_TOP:      // Margins that define the encoded subset
        codec->lowpass.margin.top = value;
        break;

    case CODEC_TAG_MARGIN_BOTTOM:
        codec->lowpass.margin.bottom = value;
        break;

    case
CODEC_TAG_MARGIN_LEFT:
        codec->lowpass.margin.left = value;
        break;

    case CODEC_TAG_MARGIN_RIGHT:
        codec->lowpass.margin.right = value;
        break;

    case CODEC_TAG_PIXEL_OFFSET:    // Quantization parameters
        codec->lowpass.pixel_offset = value;
        break;

    case CODEC_TAG_QUANTIZATION:    // Quantization divisor used during encoding
        if(value > 0)
            codec->lowpass.quantization = value;
        else
            error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_PIXEL_DEPTH:     // Number of bits per pixel
        if(value >=8 && value <= 16)
            codec->lowpass.bits_per_pixel = value;
        else
            error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_LOWPASS_TRAILER: // Lowpass trailer
        //assert(0);
        error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_WAVELET_TYPE:    // Type of wavelet
        if(value >= 1 && value <= WAVELET_TYPE_HIGHEST)
            codec->highpass.wavelet_type = value;
        else
            error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_WAVELET_NUMBER:  // Number of the wavelet in the transform
        if (value >= 0 && value <= 6)
            codec->highpass.wavelet_number = value;
        else
            error = CODEC_ERROR_NUM_WAVELETS;
        break;

    case CODEC_TAG_WAVELET_LEVEL:   // Level of the wavelet in the transform
        if (value >= 0 && value <= 4)
            codec->highpass.wavelet_level = value;
        else
            error = CODEC_ERROR_NUM_WAVELETS;
        break;

    case CODEC_TAG_NUM_BANDS:       // Number of wavelet bands
        if (value >= 0 && value <= 4)
            codec->highpass.num_bands = value;
        else
            error = CODEC_ERROR_NUM_SUBBANDS;
        break;

    case CODEC_TAG_HIGHPASS_WIDTH:  // Width of each highpass band
        if (value > 0 && value <= codec->frame.width / 2)
            codec->highpass.width = value;
        else
            error = CODEC_ERROR_RESOLUTION;
        break;

    case CODEC_TAG_HIGHPASS_HEIGHT: // Height of each highpass band
        if (value > 0 && value <= codec->frame.height / 2)
            codec->highpass.height = value;
        else
            error = CODEC_ERROR_RESOLUTION;
        break;

    case CODEC_TAG_LOWPASS_BORDER:  // Dimensions of lowpass border (obsolete)
        codec->highpass.lowpass_border = value;
        break;

    case CODEC_TAG_HIGHPASS_BORDER: // Dimensions of highpass border (obsolete)
        codec->highpass.highpass_border = value;
        break;

    case CODEC_TAG_LOWPASS_SCALE:   // Scale factor for lowpass band
        codec->highpass.lowpass_scale = value;
        break;

    case CODEC_TAG_LOWPASS_DIVISOR: // Divisor for the lowpass band
        codec->highpass.lowpass_divisor = value;
        break;

    case CODEC_TAG_HIGHPASS_TRAILER: // Highpass trailer
        //assert(0);
        error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_BAND_NUMBER:     // Identifying number of a wavelet band
        if (value < IMAGE_NUM_BANDS)
            codec->band.number = value;
        else
            error = CODEC_ERROR_BAND_NUMBER;
        break;

    case CODEC_TAG_BAND_WIDTH:      // Band data width
        if (value > 0 && (codec->frame.width / value) <= 16 &&
            (codec->frame.width / value) * value == codec->frame.width) // true for a 3 level wavelet (with 4:2:2 sampling)
            codec->band.width = value;
        else
            error = CODEC_ERROR_RESOLUTION;
        break;

    case CODEC_TAG_BAND_HEIGHT:     // Band data height
        if (value > 0 && (codec->frame.height / value) <= 16 &&
            (codec->frame.height / value) * value == codec->frame.height) // true for a 3 level wavelet (with 4:2:2 sampling)
            codec->band.height = value;
        else
            error = CODEC_ERROR_RESOLUTION;
        break;

    case CODEC_TAG_BAND_SUBBAND:    // Subband number of this wavelet band
        if (value == 0xff || (value >= 0 && value < CODEC_MAX_SUBBANDS))
            codec->band.subband = value;
        else
            error = CODEC_ERROR_BAND_NUMBER;
        break;

    case CODEC_TAG_BAND_ENCODING:   // Encoding method for this band
        if(value >= BAND_ENCODING_ZEROTREE && value <= BAND_ENCODING_LOSSLESS)
            codec->band.encoding = value;
        else
            error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_BAND_QUANTIZATION: // Quantization applied to band
        if (value >= 1)
            codec->band.quantization = value;
        else
            error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_BAND_SCALE:      // Band scale factor
        codec->band.scale = value;
        break;

    case CODEC_TAG_BAND_TRAILER:    // Band trailer
        //assert(0);
        error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_NUM_ZEROVALUES:  // Number of zero values
        //assert(0);
        error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_NUM_ZEROTREES:   // Number of zerotrees
        //assert(0);
        error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_NUM_POSITIVES:   // Number of positive values
        //assert(0);
        error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_NUM_NEGATIVES:   // Number of negative values
        //assert(0);
        error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_NUM_ZERONODES:   // Number of zerotree nodes
        //assert(0);
        error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_CHANNEL:         // Channel number
        //assert(0);
        error = CODEC_ERROR_BAD_FRAME;
        break;

    case CODEC_TAG_INTERLACED_FLAGS: // Interlaced structure of the video stream
        //assert(0);
        break;

    case CODEC_TAG_PROTECTION_FLAGS: // Copy protection bits
        //assert(0);
        break;

    case CODEC_TAG_PICTURE_ASPECT_X: // Numerator of the picture aspect ratio
        codec->picture_aspect_x = value;
        //assert(0);
        break;

    case CODEC_TAG_PICTURE_ASPECT_Y: // Denominator of the picture aspect ratio
        codec->picture_aspect_y = value;
        //assert(0);
        break;

    case CODEC_TAG_SAMPLE_FLAGS:    // Flag bits that control sample decoding
        // Progressive versus interlaced decoding is specified by the sample flags
        error = UpdateCodecFlags(codec, value);
        break;

    case CODEC_TAG_FRAME_NUMBER:    // Sequence number of the frame in the bitstream
        codec->frame_number = value;
        break;

    // This TAG is now support as part of the universal decoder.
    // Only Prospect HD builds can decode 10bit.
case CODEC_TAG_PRECISION:       // Number of bits in the video source
        if (value == CODEC_PRECISION_8BIT || value == CODEC_PRECISION_10BIT || value == CODEC_PRECISION_12BIT)
        {
            codec->precision = value;
            {
                int i;
                // The prescale table depends on the source precision, so refresh it
                // for every channel transform that has been allocated
                for (i = 0; i < TRANSFORM_MAX_CHANNELS; i++)
                {
                    TRANSFORM* transform = decoder->transform[i];
                    if (transform)
                    {
                        GetTransformPrescale(transform, codec->transform_type, codec->precision);
                    }
                }
            }
        }
        else
            error = CODEC_ERROR_INVALID_PRECICION;
        break;

    case CODEC_TAG_PRESCALE_TABLE:
        {
            int i;
            int prescale[TRANSFORM_MAX_WAVELETS] = {0};
            // Each wavelet's prescale shift is packed into two bits of the tag value
            for(i=0;i<TRANSFORM_MAX_WAVELETS;i++)
                prescale[i] = value >> (14-i*2) & 0x3;
            for(i=0;i<TRANSFORM_MAX_CHANNELS;i++)
            {
                TRANSFORM *transform = decoder->transform[i];
                if(transform)
                {
                    memcpy(transform->prescale, prescale, sizeof(prescale));
                }
            }
        }
        break;

    case CODEC_TAG_VERSION:         // Version number of the encoder used in each GOP.
        codec->version[0] = (value>>12) & 0xf;
        codec->version[1] = (value>>8) & 0xf;
        codec->version[2] = value & 0xff;
        break;

    case CODEC_TAG_QUALITY_L: // codec->encode_quality &= 0xffff0000;
        // NOTE(review): reconstructed from a line-mangled source; the masking
        // statement above appears commented out -- confirm against the original file
        codec->encode_quality |= value;
        break;

    case CODEC_TAG_QUALITY_H: // codec->encode_quality &= 0xffff;
        // NOTE(review): same reconstruction caveat as CODEC_TAG_QUALITY_L
        codec->encode_quality |= value<<16;
        break;

    case CODEC_TAG_BAND_CODING_FLAGS:
        codec->active_codebook = value & 0xf; // 0-15 valid code books
        if(codec->active_codebook > CODEC_NUM_CODESETS)
            error = CODEC_ERROR_BAD_FRAME;
        codec->difference_coding = (value>>4) & 1;
        break;

    // Peak table processing
    case CODEC_TAG_PEAK_TABLE_OFFSET_L:
        codec->peak_table.offset &= ~0xffff;
        codec->peak_table.offset |= (value & 0xffff);
        codec->peak_table.base = (PIXEL *)(input->lpCurrentWord);
        codec->peak_table.level = 0; // reset for the next subband
        break;

    case CODEC_TAG_PEAK_TABLE_OFFSET_H:
        codec->peak_table.offset &= 0xffff;
        codec->peak_table.offset |= (value & 0xffff)<<16;
        codec->peak_table.level = 0; // reset for the next subband
        break;

    case CODEC_TAG_PEAK_LEVEL:
        codec->peak_table.level = value;
        codec->peak_table.base += codec->peak_table.offset / sizeof(PIXEL);
        break;

    case CODEC_TAG_PEAK_TABLE: //this is the chunk header, so we have peak data
        codec->peak_table.level = 0; // reset for the next subband
        //Just skip as the data was read ahead
        chunksize = value;
        chunksize &= 0xffff;
        input->lpCurrentWord += chunksize*4;
        input->nWordsUsed -= chunksize*4;
        break;

#if (1 && DEBUG)
    case CODEC_TAG_SAMPLE_END:      // Marks the end of the sample (for debugging only)
        //assert(0);
        error = CODEC_ERROR_BAD_FRAME;
        break;
#endif

    default:                        // Unknown tag
        if(tag & 0x4000)
        {
            if(tag & 0x2000) // i.e. 0x6xxx = 24bit size.
            {
                chunksize = value;
                chunksize &= 0xffff;
                chunksize += ((tag&0xff)<<16);
            }
            else // 16bit size
            {
                chunksize = value;
                chunksize &= 0xffff;
            }
        }
        else if(tag & 0x2000) //24bit LONGs chunk size
        {
            optional = true; // Fixes a weird seneraio where the size fields in SizeTagPop() has not
                             // updated the size and turned the tag to optional. TODO : WHY
            chunksize = 0; // not not skip
            // chunksize = value + ((tag & 0xff)<<16); // do not skip an unknown but optional chunk
            // These are only use to size subbands, but the data within should not be skipped
            // unless
            if((tag & 0xff00) == CODEC_TAG_UNCOMPRESS)
            {
                optional = true;
                chunksize = value;
                chunksize &= 0xffff;
                chunksize += ((tag&0xff)<<16);
                decoder->uncompressed_chunk = (uint32_t *)input->lpCurrentWord;
                decoder->uncompressed_size = chunksize*4;
                decoder->sample_uncompressed = 1;
            }
        }

        //assert(optional);
        if(!optional)
        {
            error = CODEC_ERROR_UNKNOWN_REQUIRED_TAG;
        }
        else if(chunksize > 0) // skip this option chunk
        {
            input->lpCurrentWord += chunksize*4;
            input->nWordsUsed -= chunksize*4;
        }
        break;
    }

    return error;
}

// Mark the specified band of the wavelet as fully decoded, taking the entropy
// worker lock so the flag update does not race with the worker threads.
void UpdateWaveletBandValidFlags(DECODER *decoder, IMAGE *wavelet, int band)
{
    assert(decoder != NULL);
    assert(wavelet != NULL);
    if (decoder != NULL && wavelet != NULL)
    {
#if (1 && DEBUG)
        FILE *logfile = decoder->logfile;
#endif

#if _THREADED_DECODER
        // Lock access to the wavelet data
        if(decoder->entropy_worker_new.pool.thread_count)
            Lock(&decoder->entropy_worker_new.lock);
#endif

#if (0 && DEBUG)
        if
(logfile)
        {
            fprintf(logfile, "Changing band valid flags: 0x%04X, mask: 0x%04X\n",
                wavelet->band_valid_flags, BAND_VALID_MASK(band));
        }
#endif

        // Update the wavelet band flags
        wavelet->band_valid_flags |= BAND_VALID_MASK(band);
        wavelet->band_started_flags |= BAND_VALID_MASK(band);

#if _THREADED_DECODER
        // Unlock access to the wavelet data
        if(decoder->entropy_worker_new.pool.thread_count)
            Unlock(&decoder->entropy_worker_new.lock);
#endif
    }
}

// Mark the specified band of the wavelet as started (decoding in progress),
// serializing the flag update against the entropy worker threads.
void UpdateWaveletBandStartedFlags(DECODER *decoder, IMAGE *wavelet, int band)
{
    assert(decoder != NULL);
    assert(wavelet != NULL);
    if (decoder != NULL && wavelet != NULL)
    {
        // Update the wavelet band flags
#if _DELAYED_THREAD_START==0
        if(decoder->entropy_worker_new.pool.thread_count)
            Lock(&decoder->entropy_worker_new.lock);
#endif
        wavelet->band_started_flags |= BAND_VALID_MASK(band);
#if _DELAYED_THREAD_START==0
        if(decoder->entropy_worker_new.pool.thread_count)
            Unlock(&decoder->entropy_worker_new.lock);
#endif
    }
}

// Return true when every band that will NOT be produced by an earlier queued
// transform has already been decoded for the wavelet at the given index.
bool DecodedBandsValid(IMAGE *wavelet, int index, int transform_type)
{
    uint32_t threaded_band_mask;
    uint32_t wavelet_band_mask;
    uint32_t decoded_band_mask;
    bool decoded_bands_valid;

    // Has this wavelet been created?
    if (wavelet == NULL)
    {
        // Too soon to wait for the wavelet bands to be decoded
        return false;
    }

    // Is this a fieldplus transform?
    if (transform_type == TRANSFORM_TYPE_FIELDPLUS)
    {
        // Is this the temporal wavelet?
        if (index == 2)
        {
            assert(wavelet->wavelet_type == WAVELET_TYPE_TEMPORAL);
            assert(wavelet->num_bands == 2);

            // Earlier transforms in the queue will compute both wavelet bands
            return true;
        }

        // Is this wavelet at the end of a chain of transforms?
        if (index == 3 || index == 5)
        {
            // Must wait for all bands to be decoded
            threaded_band_mask = 0;
        }
        else
        {
            // The lowpass band will be computed by transforms earlier in the queue
            threaded_band_mask = BAND_VALID_MASK(0);
        }
    }
    // Is this a spatial transform?
    else if (transform_type == TRANSFORM_TYPE_SPATIAL)
    {
        // Is this wavelet at the top of the pyramid?
        if (index == 2)
        {
            // Must wait for all bands to be decoded
            threaded_band_mask = 0;
        }
#if 0
        // Is this wavelet at the bottom of the pyramid?
        else if (index == 0)
        {
            // Must wait for all bands to be decoded
            threaded_band_mask = 0;
        }
#endif
        else
        {
            // The lowpass band will be computed by transforms earlier in the queue
            threaded_band_mask = BAND_VALID_MASK(0);
        }
    }
    else
    {
        // Unknown type of transform
        assert(0);

        // Assume that the bands are not valid
        return false;
    }

    // Compute the mask for the bands in this wavelet
    decoded_band_mask = ((1 << wavelet->num_bands) - 1);

    // Clear the bit for the band computed by the threaded transform
    decoded_band_mask &= ~threaded_band_mask;

    // Compute the wavelet bands that have been decoded
    wavelet_band_mask = (wavelet->band_valid_flags & decoded_band_mask);

    // Have all of the bands not computed by the transform thread been decoded?
    decoded_bands_valid = (wavelet_band_mask == decoded_band_mask);

    return decoded_bands_valid;
}

// Append an inverse-transform request for the given channel and wavelet index
// to the decoder's transform queue.
void QueueThreadedTransform(DECODER *decoder, int channel, int index)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    CODEC_STATE *codec = &decoder->codec;
    TRANSFORM *transform = decoder->transform[channel];
    //IMAGE *wavelet = transform->wavelet[index];
    int precision = codec->precision;

    // The transform data structure must exist
    assert(transform != NULL);

    // The transform thread variables should have been created
    {
        int free_entry;

#if _DELAYED_THREAD_START==0
        // Lock access to the transform queue
        Lock(&decoder->entropy_worker_new.lock);
#endif

        // Copy the transform parameters into the next queue entry
        free_entry = decoder->transform_queue.free_entry;
        assert(0 <= free_entry && free_entry < DECODING_QUEUE_LENGTH);
        if (0 <= free_entry && free_entry < DECODING_QUEUE_LENGTH)
        {
            assert(transform != NULL);
            assert(0 <= channel && channel < TRANSFORM_MAX_CHANNELS);
            assert(0 <= index && index < TRANSFORM_MAX_WAVELETS);

            // Note: The wavelet may not exist when the transform is queued
            decoder->transform_queue.queue[free_entry].transform = transform;
            decoder->transform_queue.queue[free_entry].channel = channel;
            decoder->transform_queue.queue[free_entry].index = index;
            decoder->transform_queue.queue[free_entry].precision = precision;
            decoder->transform_queue.queue[free_entry].done = 0;

            // Update the transform request queue
            decoder->transform_queue.free_entry++;
            decoder->transform_queue.num_entries++;

#if (1 && DEBUG)
            if (logfile) {
                fprintf(logfile, "Queued transform, channel: %d, index: %d\n", channel, index);
            }
#endif
        }

#if _DELAYED_THREAD_START==0
        Unlock(&decoder->entropy_worker_new.lock);
#endif
    }
}

#if _THREADED_DECODER

// Block until the entropy worker pool has finished all queued work, then reset
// the transform queue so it can be reused for the next sample.
void WaitForTransformThread(DECODER *decoder)
{
    if(decoder->entropy_worker_new.pool.thread_count)
    {
#if _DELAYED_THREAD_START
        ThreadPoolSendMessage(&decoder->entropy_worker_new.pool, THREAD_MESSAGE_START);
#endif
        ThreadPoolWaitAllDone(&decoder->entropy_worker_new.pool);

        decoder->transform_queue.started = 0;
        decoder->transform_queue.num_entries = 0;
        decoder->transform_queue.next_entry = 0;
        decoder->transform_queue.free_entry = 0;
    }
}
#endif
#endif

#if _INTERLACED_WORKER_THREADS

// Run the interlaced inverse frame transform to YUV output by posting the
// parameters to the worker mailbox and waking the interlaced worker threads.
void TransformInverseFrameThreadedToYUV(DECODER *decoder, int frame_index, int num_channels,
                                        uint8_t *output, int pitch, FRAME_INFO *info,
                                        int chroma_offset, int precision)
{
    int32_t lPreviousCount,i;

    // There are half as many input rows as output rows
    int transform_height = (((info->height+7)/8)*8) / 2;
    int middle_row_count = transform_height;

    // Post a message to the mailbox
    struct interlace_data *mailbox = &decoder->interlaced_worker.interlace_data;
    mailbox->type = THREAD_TRANSFORM_FRAME_YUV;
    mailbox->frame = frame_index;
    mailbox->num_channels = num_channels;
    mailbox->output = output;
    mailbox->pitch = pitch;
    memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
    mailbox->chroma_offset = chroma_offset;
    mailbox->precision = precision;

    // Set the semaphore to the number of rows
    decoder->interlaced_worker.current_row = 0;
ReleaseSemaphore(decoder->interlaced_worker.row_semaphore, middle_row_count, &lPreviousCount);
    assert(lPreviousCount == 0);

    // Wake up both worker threads
    for(i=0; i<THREADS_IN_LAST_WAVELET; i++) {
        SetEvent(decoder->interlaced_worker.start_event[i]);
    }

    // Wait for both worker threads to finish
    WaitForMultipleObjects(THREADS_IN_LAST_WAVELET, decoder->interlaced_worker.done_event, true, INFINITE);
}

// Run the interlaced inverse frame transform to 16-bit rows by posting the
// parameters to the worker mailbox and waking the interlaced worker threads.
void TransformInverseFrameThreadedToRow16u(DECODER *decoder, int frame_index, int num_channels,
                                           PIXEL16U *output, int pitch, FRAME_INFO *info,
                                           int chroma_offset, int precision)
{
    int32_t lPreviousCount,i;

    // There are half as many input rows as output rows
    int transform_height = (((info->height+7)/8)*8) / 2;
    int middle_row_count = transform_height;

    // Post a message to the mailbox
    struct interlace_data *mailbox = &decoder->interlaced_worker.interlace_data;
    mailbox->type = THREAD_TRANSFORM_FRAME_ROW16U;
    mailbox->frame = frame_index;
    mailbox->num_channels = num_channels;
    mailbox->output = (uint8_t *)output;
    mailbox->pitch = pitch;
    memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
    mailbox->chroma_offset = chroma_offset;
    mailbox->precision = precision;

    // Set the semaphore to the number of rows
    decoder->interlaced_worker.current_row = 0;
    ReleaseSemaphore(decoder->interlaced_worker.row_semaphore, middle_row_count, &lPreviousCount);
    assert(lPreviousCount == 0);

    // Wake up both worker threads
    for(i=0; i<THREADS_IN_LAST_WAVELET; i++) {
        SetEvent(decoder->interlaced_worker.start_event[i]);
    }

    // Wait for both worker threads to finish
    WaitForMultipleObjects(THREADS_IN_LAST_WAVELET, decoder->interlaced_worker.done_event, true, INFINITE);
}

// Worker thread procedure for the interlaced inverse transform threads.
// Each thread waits for a start or stop event; on start it reads the shared
// mailbox (under the critical section) and performs its section of the
// inverse frame transform, signaling its done event when finished.
DWORD WINAPI InterlacedWorkerThreadProc(LPVOID lpParam)
{
    DECODER *decoder = (DECODER *)lpParam;
    FILE *logfile = decoder->logfile;
    struct interlace_data *data = &decoder->interlaced_worker.interlace_data;
    int thread_index;
    HANDLE hObjects[2];
    DWORD dwReturnValue;

    if(decoder->thread_cntrl.affinity)
    {
        HANDLE hCurrentThread = GetCurrentThread();
        SetThreadAffinityMask(hCurrentThread,decoder->thread_cntrl.affinity);
    }

    // Set the handler for system exceptions
#ifdef _WIN32
    SetDefaultExceptionHandler();
#endif

    // Determine the index of this worker thread
    if(decoder->interlaced_worker.lock_init)
    {
        EnterCriticalSection(&decoder->interlaced_worker.lock);
    }
    thread_index = decoder->interlaced_worker.thread_count++;
    if(decoder->interlaced_worker.lock_init)
        LeaveCriticalSection(&decoder->interlaced_worker.lock);

    // The transform worker variables should have been created
    assert(decoder->interlaced_worker.start_event[thread_index] != NULL);
    assert(decoder->interlaced_worker.row_semaphore != NULL);
    assert(decoder->interlaced_worker.done_event[thread_index] != NULL);
    assert(decoder->interlaced_worker.stop_event != NULL);
    if (!(decoder->interlaced_worker.start_event[thread_index] != NULL &&
          decoder->interlaced_worker.row_semaphore != NULL &&
          decoder->interlaced_worker.done_event[thread_index] != NULL &&
          decoder->interlaced_worker.stop_event != NULL))
    {
        return 1;
    }

    hObjects[0] = decoder->interlaced_worker.start_event[thread_index];
    hObjects[1] = decoder->interlaced_worker.stop_event;

    for (;;)
    {
        // Wait for the signal to begin processing a transform
        dwReturnValue = WaitForMultipleObjects(2, hObjects, false, INFINITE);

        // Received a signal to begin inverse transform processing?
        if (dwReturnValue == WAIT_OBJECT_0)
        {
            int type;               // Type of inverse transform to perform
            int frame_index;        // Index of output frame to produce
            int num_channels;       // Number of channels in the transform array
            uint8_t *output;        // Output frame buffer
            int pitch;              // Output frame pitch
            FRAME_INFO info;        // Format of the output frame
            int chroma_offset;      // Offset for the output chroma
            int precision;          // Source pixel bit depth

            // Lock access to the transform data
            if(decoder->interlaced_worker.lock_init)
            {
                EnterCriticalSection(&decoder->interlaced_worker.lock);
            }

            // Get the processing parameters
            type = data->type;
            frame_index = data->frame;
            num_channels = data->num_channels;
            output = data->output;
            pitch = data->pitch;
            memcpy(&info, &data->info, sizeof(FRAME_INFO));
            chroma_offset = data->chroma_offset;
            precision = data->precision;

            // Unlock access to the transform data
            if(decoder->interlaced_worker.lock_init)
                LeaveCriticalSection(&decoder->interlaced_worker.lock);

            // Select the type of inverse transform to perform
            switch (type)
            {
            case THREAD_TRANSFORM_FRAME_YUV:
                //TODO: more to new _THREADED model
                TransformInverseFrameSectionToYUV(decoder, thread_index, frame_index, num_channels,
                    output, pitch, &info, chroma_offset, precision);
                break;

            case THREAD_TRANSFORM_FRAME_ROW16U:
                //TODO: more to new _THREADED model
                TransformInverseFrameSectionToRow16u(decoder, thread_index, frame_index, num_channels,
                    (PIXEL16U *)output, pitch, &info, chroma_offset, precision);
                break;

            default:
                assert(0);
                break;
            }

            // Signal that this thread is done
            SetEvent(decoder->interlaced_worker.done_event[thread_index]);
        }
        else
        {
            // Should have a condition that causes the thread to terminate
            assert(dwReturnValue == WAIT_OBJECT_0+1 || dwReturnValue == WAIT_ABANDONED);
            break;
        }
    }

    return 0;
}
#endif

// Compute the dimensions of the decoded frame for the requested output
// resolution by scaling the dimensions of the wavelet that will be used.
void GetDecodedFrameDimensions(TRANSFORM **transform_array, int num_channels, int frame_index,
                               int resolution, int *decoded_width_out, int *decoded_height_out)
{
    IMAGE *wavelet = NULL;
    int decoded_scale = 0;
    int wavelet_width;
int wavelet_height; int decoded_width; int decoded_height; // Get the decoding scale switch(resolution) { case DECODED_RESOLUTION_FULL_DEBAYER: case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER: #if DEBUG assert(AllTransformBandsValid(transform_array, num_channels, frame_index)); #endif decoded_scale = 2; wavelet = transform_array[0]->wavelet[0]; break; case DECODED_RESOLUTION_FULL: #if DEBUG assert(AllTransformBandsValid(transform_array, num_channels, frame_index)); #endif decoded_scale = 2; wavelet = transform_array[0]->wavelet[0]; break; case DECODED_RESOLUTION_HALF_NODEBAYER: case DECODED_RESOLUTION_HALF: #if DEBUG assert(AllLowpassBandsValid(transform_array, num_channels, frame_index)); #endif decoded_scale = 1; wavelet = transform_array[0]->wavelet[0]; break; case DECODED_RESOLUTION_QUARTER: decoded_scale = 1; wavelet = transform_array[0]->wavelet[3]; break; case DECODED_RESOLUTION_LOWPASS_ONLY: decoded_scale = 1; wavelet = transform_array[0]->wavelet[5]; // Is this an intra frame? if (wavelet == NULL) { wavelet = transform_array[0]->wavelet[2]; } break; default: assert(0); break; } // Compute the decoded frame dimensions assert(wavelet != NULL); wavelet_width = wavelet->width; wavelet_height = wavelet->height; decoded_width = decoded_scale * wavelet_width; decoded_height = decoded_scale * wavelet_height; if (decoded_width_out) { *decoded_width_out = decoded_width; } if (decoded_height_out) { *decoded_height_out = decoded_height; } } // Reconstruct Bayer format to the requested output format CODEC_ERROR UncompressedSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch) { CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif //CODEC_STATE *codec = &decoder->codec; //int num_channels = codec->num_channels; //int precision = codec->precision; int format = info->format; int width = info->width; int height = info->height; //int resolution = info->resolution; // 
Compute the number of bytes between each row of Bayer data //int bayer_pitch = 2 * width * sizeof(PIXEL16U); // Compute the pitch between pairs of rows of bayer data (one pair per image row) //int raw_bayer_pitch = 2 * bayer_pitch; //int chroma_offset = decoder->codec.chroma_offset; error = CODEC_ERROR_UNSUPPORTED_FORMAT; switch (format) { case DECODED_FORMAT_RGB24: case DECODED_FORMAT_RGB32: case DECODED_FORMAT_RG48: //DAN20090120 added not sure why they weren't here. case DECODED_FORMAT_RG64: //DAN20101207 added not sure why they weren't here. case DECODED_FORMAT_WP13: //DAN20090120 "" case DECODED_FORMAT_W13A: //DAN20101207 "" case DECODED_FORMAT_B64A: case DECODED_FORMAT_R210: case DECODED_FORMAT_DPX0: case DECODED_FORMAT_RG30: case DECODED_FORMAT_AR10: case DECODED_FORMAT_AB10: case DECODED_FORMAT_YR16: case DECODED_FORMAT_V210: case DECODED_FORMAT_YU64: case DECODED_FORMAT_YUYV: //? case DECODED_FORMAT_UYVY: //? case DECODED_FORMAT_R408: case DECODED_FORMAT_V408: error = CODEC_ERROR_OKAY; break; case DECODED_FORMAT_BYR2: case DECODED_FORMAT_BYR4: { //bool linearRestore = false; unsigned short *curve = NULL; if(decoder->BYR4LinearRestore && decoder->frame.format == DECODED_FORMAT_BYR4 && decoder->cfhddata.encode_curve_preset == 0) { curve = decoder->BYR4LinearRestore; } ConvertPackedToBYR2(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, output_buffer, output_pitch, curve); } decoder->uncompressed_chunk = 0; decoder->uncompressed_size = 0; return CODEC_ERROR_OKAY; break; case DECODED_FORMAT_BYR3: ConvertPackedToBYR3(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, output_buffer, output_pitch); decoder->uncompressed_chunk = 0; decoder->uncompressed_size = 0; return CODEC_ERROR_OKAY; break; } if(error) return error; //int row; //int column; // Need to allocate a scratch buffer for decoding the Bayer frame? 
if (decoder->RawBayer16 == NULL) { // Four Bayer data samples at each 2x2 quad in the grid int pixel_size = 4 * sizeof(PIXEL16U); int frame_size; const size_t alignment = 16; #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; #endif frame_size = width * height * pixel_size; #if _ALLOCATOR decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment); #else decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment); #endif assert(decoder->RawBayer16 != NULL); if (! (decoder->RawBayer16 != NULL)) { return CODEC_ERROR_MEMORY_ALLOC; } decoder->RawBayerSize = frame_size; if(decoder->RGBFilterBuffer16 == NULL) { int size = frame_size*3; if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format)) size = frame_size*4; #if _ALLOCATOR decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16); #else decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16); #endif assert(decoder->RGBFilterBuffer16 != NULL); if (! 
(decoder->RGBFilterBuffer16 != NULL)) { return CODEC_ERROR_MEMORY_ALLOC; } decoder->RGBFilterBufferSize = frame_size*3; } } // Using the RGBFilterBuffer16 as scratch space ConvertPackedToRawBayer16(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, decoder->RawBayer16, decoder->RGBFilterBuffer16, info->resolution); decoder->uncompressed_chunk = 0; decoder->uncompressed_size = 0; #if _THREADED //DemosaicRAW { WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; int inverted = false; uint8_t *output = output_buffer; int pitch = output_pitch; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif if (format == DECODED_FORMAT_RGB24) { format = DECODED_FORMAT_RGB24_INVERTED; inverted = true; } else if (format == DECODED_FORMAT_RGB32) { format = DECODED_FORMAT_RGB32_INVERTED; inverted = true; } // Have the output location and pitch been inverted? 
if (inverted && pitch > 0) { int height = info->height; if(info->resolution == DECODED_RESOLUTION_FULL_DEBAYER || info->resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) height *= 2; output += (height - 1) * pitch; // Start at the bottom row pitch = NEG(pitch); // Negate the pitch to go up } // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); } #else error = CODEC_ERROR_UNSUPPORTED_FORMAT; #endif return error; } // Reconstruct uncompressed v210 YUV format to the requested output format CODEC_ERROR UncompressedSampleFrameYUVToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch) { CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif //CODEC_STATE *codec = &decoder->codec; //int num_channels = codec->num_channels; //int precision = codec->precision; int format = info->format; int width = info->width; int height = info->height; int resolution = info->resolution; // Compute the number of bytes between each row of Bayer data //int bayer_pitch = 2 * width * sizeof(PIXEL16U); // Compute the pitch between pairs of rows of bayer data (one pair per image row) //int raw_bayer_pitch = 2 * bayer_pitch; //int chroma_offset = decoder->codec.chroma_offset; error = CODEC_ERROR_UNSUPPORTED_FORMAT; if(format == DECODED_FORMAT_V210 && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false) { int smallest_Stride = output_pitch; int unc_Stride = decoder->uncompressed_size / height; if(unc_Stride < smallest_Stride) 
smallest_Stride = unc_Stride; if(unc_Stride == output_pitch) memcpy(output_buffer, decoder->uncompressed_chunk, decoder->uncompressed_size); else { int y; uint8_t *src = (uint8_t *)decoder->uncompressed_chunk; uint8_t *dst = (uint8_t *)output_buffer; for(y=0; y<height; y++) { memcpy(dst, src, smallest_Stride); src += unc_Stride; dst += output_pitch; } } decoder->uncompressed_chunk = 0; decoder->uncompressed_size = 0; return CODEC_ERROR_OKAY; } if((format == DECODED_FORMAT_YUYV || format == DECODED_FORMAT_UYVY) && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false) { int smallest_Stride = output_pitch; int unc_Stride = decoder->uncompressed_size / height; if(unc_Stride < smallest_Stride) smallest_Stride = unc_Stride; { int y; uint8_t *src = (uint8_t *)decoder->uncompressed_chunk; uint8_t *dst = (uint8_t *)output_buffer; for(y=0; y<height; y++) { uint32_t *input_ptr = (uint32_t *)src; int pos = 0; int column=0,length = width; length -= length % 6; //DAN03252004 -- fix a memory overflow. 
for (column=0; column < length; column += 6) { uint32_t yuv; int y; int u; int v; // Read the first word yuv = *(input_ptr++); u = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK; y = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK; v = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK; // Expand the pixels to sixteen bits u <<= 6; y <<= 6; v <<= 6; dst[pos++] = SATURATE_16U(y)>>8; dst[pos++] = SATURATE_16U(u)>>8; // Read the second word yuv = *(input_ptr++); y = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK; y <<= 6; dst[pos++] = SATURATE_16U(y)>>8; dst[pos++] = SATURATE_16U(v)>>8; u = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK; y = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK; u <<= 6; y <<= 6; dst[pos++] = SATURATE_16U(y)>>8; dst[pos++] = SATURATE_16U(u)>>8; // Read the third word yuv = *(input_ptr++); v = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK; y = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK; v <<= 6; y <<= 6; dst[pos++] = SATURATE_16U(y)>>8; dst[pos++] = SATURATE_16U(v)>>8; u = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK; u <<= 6; // Read the fourth word yuv = *(input_ptr++); y = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK; y <<= 6; dst[pos++] = SATURATE_16U(y)>>8; dst[pos++] = SATURATE_16U(u)>>8; v = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK; y = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK; v <<= 6; y <<= 6; dst[pos++] = SATURATE_16U(y)>>8; dst[pos++] = SATURATE_16U(v)>>8; } if(format == DECODED_FORMAT_UYVY) { for (column=0; column < pos; column += 2) { int t = dst[column]; dst[column] = dst[column+1]; dst[column+1] = t; } } src += unc_Stride; dst += output_pitch; } } decoder->uncompressed_chunk = 0; decoder->uncompressed_size = 0; return CODEC_ERROR_OKAY; } { // Expand YUV at the target resolution, and use the ActiveMetadata engine. // Need to allocate a scratch buffer for decoding the frame? 
if (decoder->RawBayer16 == NULL || decoder->RawBayerSize < width * 64) //RawBayer used as a scratch buffer { //int pixel_size = 2 * sizeof(PIXEL16U); const size_t alignment = 16; #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; #endif int orig_width = width; if(resolution == DECODED_RESOLUTION_HALF) orig_width *= 2; if(resolution == DECODED_RESOLUTION_QUARTER) orig_width *= 4; if(decoder->RawBayer16) { #if _ALLOCATOR FreeAligned(allocator, decoder->RawBayer16); decoder->RawBayer16 = NULL; decoder->RawBayerSize = 0; #else MEMORY_ALIGNED_FREE(decoder->RawBayer16); decoder->RawBayer16 = NULL; decoder->RawBayerSize = 0; #endif } #if _ALLOCATOR decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, orig_width * 64, alignment); #else decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(orig_width * 64, alignment); #endif assert(decoder->RawBayer16 != NULL); if (! (decoder->RawBayer16 != NULL)) { return CODEC_ERROR_MEMORY_ALLOC; } decoder->RawBayerSize = orig_width * 64; } } // unpack source original YUV into YU64? 
if(decoder->RawBayer16) { //uint8_t *src = (uint8_t *)decoder->uncompressed_chunk; //uint8_t *dst = (uint8_t *)output_buffer; #if _THREADED { WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output_buffer; mailbox->pitch = output_pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT_UNCOMPRESSED; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); } #else { int orig_width = width; int orig_height = height; int row,lines = 1; int start,end; if(resolution == DECODED_RESOLUTION_HALF) { orig_width *= 2; orig_height *= 2; lines = 2; } if(resolution == DECODED_RESOLUTION_QUARTER) { orig_width *= 4; orig_height *= 4; lines = 4; } start = 0; end = height; if(format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24) { start = height-1; end = -1; } for (row = start; row != end; end > start ? 
row++ : row--) {
    int whitebitdepth = 16;
    int flags = 0;
    uint8_t *planar_output[3];
    int planar_pitch[3];
    ROI roi;
    PIXEL16U *y_row_ptr;
    PIXEL16U *u_row_ptr;
    PIXEL16U *v_row_ptr;
    // RawBayer16 is reused here as per-row scratch space
    PIXEL16U *scanline = (PIXEL16U *)decoder->RawBayer16;
    PIXEL16U *scanline2 = scanline + orig_width * 8;
    unsigned short *sptr;
    int i,unc_Stride = decoder->uncompressed_size / orig_height;

    y_row_ptr = (PIXEL16U *)scanline;
    u_row_ptr = y_row_ptr + orig_width;
    v_row_ptr = u_row_ptr + orig_width/2;

    for(i=0; i<lines; i++)
    {
        // BUGFIX: src was used here without any declaration in scope
        // (the declaration in the enclosing block is commented out),
        // so this non-threaded path did not compile.  Declare it at
        // its first use.
        uint8_t *src = (uint8_t *)decoder->uncompressed_chunk;
        src += row * unc_Stride;
        // NOTE(review): the same source row is unpacked on every pass of
        // this loop when lines > 1; presumably the offset should advance
        // per line (e.g. (row * lines + i) * unc_Stride) -- confirm
        // against the threaded implementation before relying on this path.

        // Repack the row of 10-bit pixels into 16-bit pixels
        ConvertV210RowToYUV16((uint8_t *)src, y_row_ptr, u_row_ptr, v_row_ptr, orig_width, scanline2);

        // Advance to the next rows in the input and output images
        y_row_ptr += orig_width*2;
        u_row_ptr = y_row_ptr + orig_width;
        v_row_ptr = u_row_ptr + orig_width/2;
    }

    y_row_ptr = (PIXEL16U *)scanline;
    u_row_ptr = y_row_ptr + width;
    v_row_ptr = u_row_ptr + width/2;

    // Box-average the luma rows down to the output width for the
    // half and quarter resolution cases
    if(lines == 2)
    {
        for(i=0; i<width*2;i++)
            y_row_ptr[i] = (y_row_ptr[i*2] + y_row_ptr[i*2+1] + y_row_ptr[orig_width*2+i*2] + y_row_ptr[orig_width*2+i*2+1]) >> 2;
    }
    else if(lines == 4)
    {
        for(i=0; i<width*2;i++)
            y_row_ptr[i] = (y_row_ptr[i*4] + y_row_ptr[i*4+2] + y_row_ptr[orig_width*2*2+i*4] + y_row_ptr[orig_width*2*2+i*4+2]) >> 2;
    }

    roi.width = width;
    roi.height = 1;

    planar_output[0] = (uint8_t *)y_row_ptr;
    planar_output[1] = (uint8_t *)v_row_ptr;
    planar_output[2] = (uint8_t *)u_row_ptr;
    planar_pitch[0] = 0;
    planar_pitch[1] = 0;
    planar_pitch[2] = 0;

    if(decoder->apply_color_active_metadata)
    {
        // Convert the row to 8-pixel planar RGB, then run the active
        // metadata (color correction) engine over it
        ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi,
            (unsigned char *)scanline2, width, output_pitch,
            COLOR_FORMAT_RGB_8PIXEL_PLANAR, decoder->frame.colorspace,
            &whitebitdepth, &flags);
        sptr = scanline2;
        sptr = ApplyActiveMetaData(decoder, width, 1, row, scanline2, scanline,
            info->format, &whitebitdepth, &flags);
    }
    else
    {
        ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi,
            (unsigned char *)scanline2, width,
output_pitch, COLOR_FORMAT_WP13, decoder->frame.colorspace, &whitebitdepth, &flags); sptr = scanline2; } ConvertLinesToOutput(decoder, width, 1, row, sptr, dst, output_pitch, format, whitebitdepth, flags); dst += output_pitch; } } #endif } error = CODEC_ERROR_OKAY; return error; } // Reconstruct uncompressed DPX0 RGB format to the requested output format CODEC_ERROR UncompressedSampleFrameRGBToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch) { CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif //CODEC_STATE *codec = &decoder->codec; //int num_channels = codec->num_channels; //int precision = codec->precision; int format = info->format; //int output_format = info->output_format; // used by image_dev_only decodes int width = info->width; int height = info->height; int resolution = info->resolution; //int chroma_offset = decoder->codec.chroma_offset; error = CODEC_ERROR_UNSUPPORTED_FORMAT; if( (format == DECODED_FORMAT_DPX0 || format == DECODED_FORMAT_AR10 || format == DECODED_FORMAT_AB10 || format == DECODED_FORMAT_RG30 || format == DECODED_FORMAT_R210) && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false) { int smallest_Stride = output_pitch; int unc_Stride = decoder->uncompressed_size / height; if(unc_Stride < smallest_Stride) smallest_Stride = unc_Stride; if(format != DECODED_FORMAT_DPX0) { int unc_Stride = decoder->uncompressed_size / height; ConvertDPX0ToRGB10((uint8_t *)decoder->uncompressed_chunk, unc_Stride, width, height, format); } if(unc_Stride == output_pitch) memcpy(output_buffer, decoder->uncompressed_chunk, decoder->uncompressed_size); else { int y; uint8_t *src = (uint8_t *)decoder->uncompressed_chunk; uint8_t *dst = (uint8_t *)output_buffer; for(y=0; y<height; y++) { memcpy(dst, src, smallest_Stride); src += unc_Stride; dst += output_pitch; } } decoder->uncompressed_chunk = 0; decoder->uncompressed_size = 0; return 
CODEC_ERROR_OKAY; } { // Expand YUV at the target resolution, and use the ActiveMetadata engine. // Need to allocate a scratch buffer for decoding the frame? if (decoder->RawBayer16 == NULL || decoder->RawBayerSize < width * 64) //RawBayer used as a scratch buffer { //int pixel_size = 2 * sizeof(PIXEL16U); const size_t alignment = 16; #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; #endif int orig_width = width; if(resolution == DECODED_RESOLUTION_HALF) orig_width *= 2; if(resolution == DECODED_RESOLUTION_QUARTER) orig_width *= 4; if(decoder->RawBayer16) { #if _ALLOCATOR FreeAligned(allocator, decoder->RawBayer16); decoder->RawBayer16 = NULL; decoder->RawBayerSize = 0; #else MEMORY_ALIGNED_FREE(decoder->RawBayer16); decoder->RawBayer16 = NULL; decoder->RawBayerSize = 0; #endif } #if _ALLOCATOR decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, orig_width * 64, alignment); #else decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(orig_width * 64, alignment); #endif assert(decoder->RawBayer16 != NULL); if (! (decoder->RawBayer16 != NULL)) { return CODEC_ERROR_MEMORY_ALLOC; } decoder->RawBayerSize = orig_width * 64; } } // unpack source original YUV into YU64? 
if(decoder->RawBayer16) { //uint8_t *src = (uint8_t *)decoder->uncompressed_chunk; //uint8_t *dst = (uint8_t *)output_buffer; #if _THREADED { WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output_buffer; mailbox->pitch = output_pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT_UNCOMPRESSED; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); } #else { int orig_width = width; int orig_height = height; int row,lines = 1; int start,end; if(resolution == DECODED_RESOLUTION_HALF) { orig_width *= 2; orig_height *= 2; lines = 2; } if(resolution == DECODED_RESOLUTION_QUARTER) { orig_width *= 4; orig_height *= 4; lines = 4; } start = 0; end = height; if(format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24) // Can this work, all the code below expects 10-bit { start = height-1; end = -1; } for (row = start; row != end; end > start ? 
row++ : row--) { int whitebitdepth = 16; int flags = 0; uint8_t *planar_output[3]; int planar_pitch[3]; ROI roi; PIXEL16U *y_row_ptr; PIXEL16U *u_row_ptr; PIXEL16U *v_row_ptr; PIXEL16U *scanline = (PIXEL16U *)decoder->RawBayer16; PIXEL16U *scanline2 = scanline + orig_width * 8; unsigned short *sptr; int i,unc_Stride = decoder->uncompressed_size / orig_height; whitebitdepth = 13; if(decoder->apply_color_active_metadata) flags = ACTIVEMETADATA_SRC_8PIXEL_PLANAR; else flags = 0; roi.width = width; roi.height = 1; if(lines == 1) { uint16_t *sptr; uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk; PIXEL16U *ptr = (PIXEL16U *)scanline; lptr += row * (unc_Stride>>2); sptr = (uint16_t *)lptr; for(i=0; i<width;i+=8) { int val,r,g,b; if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR) { if(decoder->image_dev_only) // HACK, currently assuming RG48 input data. { for(j=0; j<8; j++) { ptr[j] = sptr[0] >> 3; ptr[j+8] = sptr[1] >> 3; ptr[j+16] = sptr[2] >> 3; sptr += 3; } } else { for(j=0; j<8; j++) { val = SwapInt32(*lptr++); val >>= 2; b = (val & 0x3ff) << 3; val >>= 10; g = (val & 0x3ff) << 3; val >>= 10; r = (val & 0x3ff) << 3; ptr[j] = r; ptr[j+8] = g; ptr[j+16] = b; } } } else { if(decoder->image_dev_only) // HACK, currently assuming RG48 input data. 
{
    // RG48 path (see HACK note above): copy 16-bit components
    // interleaved, scaled down to the 13-bit white point
    for(j=0; j<8*3; j+=3)
    {
        ptr[j] = sptr[0] >> 3;
        ptr[j+1] = sptr[1] >> 3;
        ptr[j+2] = sptr[2] >> 3;
        sptr += 3;
    }
}
else
{
    // Unpack eight big-endian 10-bit RGB words into interleaved 13-bit RGB
    for(j=0; j<8*3; j+=3)
    {
        val = SwapInt32(*lptr++);
        val >>= 2;
        b = (val & 0x3ff) << 3;
        val >>= 10;
        g = (val & 0x3ff) << 3;
        val >>= 10;
        r = (val & 0x3ff) << 3;
        ptr[j] = r;
        ptr[j+1] = g;
        ptr[j+2] = b;
    }
}
}
ptr += 24;
}
}
else if(lines == 2)
{
    // BUGFIX: the cast was (uint32_t), which truncates the pointer to a
    // 32-bit integer before converting it back to a pointer (broken on
    // 64-bit builds).  Cast to uint32_t * as the lines == 1 branch does.
    uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk;
    PIXEL16U *ptr = (PIXEL16U *)scanline;
    lptr += row * (unc_Stride>>2) * lines;
    for(i=0; i<width;i+=8)
    {
        int val,r,g,b;  // unused locals r2..b4 removed
        for(j=0; j<8; j++)
        {
            // Average a 2x2 block of 10-bit RGB pixels (two adjacent
            // words on each of two source rows)
            val = SwapInt32(lptr[0]);
            val >>= 2;
            b = (val & 0x3ff) << 3;
            val >>= 10;
            g = (val & 0x3ff) << 3;
            val >>= 10;
            r = (val & 0x3ff) << 3;

            val = SwapInt32(lptr[1]);
            val >>= 2;
            b += (val & 0x3ff) << 3;
            val >>= 10;
            g += (val & 0x3ff) << 3;
            val >>= 10;
            r += (val & 0x3ff) << 3;

            val = SwapInt32(lptr[unc_Stride>>2]);
            val >>= 2;
            b += (val & 0x3ff) << 3;
            val >>= 10;
            g += (val & 0x3ff) << 3;
            val >>= 10;
            r += (val & 0x3ff) << 3;

            val = SwapInt32(lptr[(unc_Stride>>2)+1]);
            val >>= 2;
            b += (val & 0x3ff) << 3;
            val >>= 10;
            g += (val & 0x3ff) << 3;
            val >>= 10;
            r += (val & 0x3ff) << 3;

            if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
            {
                ptr[j] = r>>2;
                ptr[j+8] = g>>2;
                ptr[j+16] = b>>2;
            }
            else
            {
                ptr[j*3] = r>>2;
                ptr[j*3+1] = g>>2;
                ptr[j*3+2] = b>>2;
            }
            lptr += lines;
        }
        ptr += 24;
    }
}
else if(lines == 4)
{
    // BUGFIX: same pointer-truncating (uint32_t) cast as the lines == 2
    // branch above; must be a pointer cast.
    uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk;
    PIXEL16U *ptr = (PIXEL16U *)scanline;
    lptr += row * (unc_Stride>>2) * lines;
    for(i=0; i<width;i+=8)
    {
        int val,r,g,b;  // unused locals r2..b4 removed
        for(j=0; j<8; j++)
        {
            // Average four 10-bit RGB pixels sampled with stride 2 from
            // two source rows (quarter-resolution decimation)
            val = SwapInt32(lptr[0]);
            val >>= 2;
            b = (val & 0x3ff) << 3;
            val >>= 10;
            g = (val & 0x3ff) << 3;
            val >>= 10;
            r = (val & 0x3ff) << 3;

            val = SwapInt32(lptr[2]);
            val >>= 2;
            b += (val & 0x3ff) << 3;
            val >>= 10;
            g += (val & 0x3ff) << 3;
            val >>= 10;
            r += (val & 0x3ff) << 3;

            val = SwapInt32(lptr[unc_Stride>>1]);
            val >>= 2;
            b += (val & 0x3ff) << 3;
            val >>= 10;
            g += (val & 0x3ff) << 3;
            val >>= 10;
            r += (val &
0x3ff) << 3; val = SwapInt32(lptr[(unc_Stride>>1)+2]); val >>= 2; b += (val & 0x3ff) << 3; val >>= 10; g += (val & 0x3ff) << 3; val >>= 10; r += (val & 0x3ff) << 3; if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR) { ptr[j] = r>>2; ptr[j+8] = g>>2; ptr[j+16] = b>>2; } else { ptr[j*3] = r>>2; ptr[j*3+1] = g>>2; ptr[j*3+2] = b>>2; } lptr += lines; } ptr += 24; } } sptr = scanline; if(decoder->apply_color_active_metadata) sptr = ApplyActiveMetaData(decoder, width, 1, row, scanline, scanline2, info->format, &whitebitdepth, &flags); ConvertLinesToOutput(decoder, width, 1, row, sptr, dst, output_pitch, format, whitebitdepth, flags); dst += output_pitch; } } #endif } error = CODEC_ERROR_OKAY; return error; } // Reconstruct Bayer format to the requested output format CODEC_ERROR ReconstructSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch) { CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif //CODEC_STATE *codec = &decoder->codec; //int num_channels = codec->num_channels; //int progressive = codec->progressive; //int precision = codec->precision; //TRANSFORM **transform_array = decoder->transform; int resolution = info->resolution; //int format = info->format; // Switch to the subroutine for the requested resolution switch (resolution) { case DECODED_RESOLUTION_FULL_DEBAYER: case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER: //error = CODEC_ERROR_UNSUPPORTED_FORMAT; return ReconstructSampleFrameDeBayerFullToBuffer(decoder, info, frame, output, pitch); break; case DECODED_RESOLUTION_FULL: //return ReconstructSampleFrameBayerFullToBuffer(decoder, info, frame, output, pitch); error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; //case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER: case DECODED_RESOLUTION_HALF_NODEBAYER: case DECODED_RESOLUTION_HALF: //return ReconstructSampleFrameBayerHalfToBuffer(decoder, info, frame, output, pitch); error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; case 
DECODED_RESOLUTION_QUARTER: //return ReconstructSampleFrameBayerQuarterToBuffer(decoder, frame, output, pitch); error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; case DECODED_RESOLUTION_LOWPASS_ONLY: error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; default: // The decoded resolution is not supported by this routine assert(0); error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; } return error; } // Reconstruct Bayer encoded data to full resolution CODEC_ERROR ReconstructSampleFrameBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch) { CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif CODEC_STATE *codec = &decoder->codec; int num_channels = codec->num_channels; //int progressive = codec->progressive; //int precision = codec->precision; //TRANSFORM **transform_array = decoder->transform; //int decoded_width = 0; //int decoded_height = 0; //int resolution = info->resolution; int format = info->format; //int width = info->width; //int height = info->height; // Compute the number of bytes between each row of Bayer data //int bayer_pitch = 2 * width * sizeof(PIXEL16U); // Compute the pitch between pairs of rows of bayer data (one pair per image row) //int raw_bayer_pitch = 2 * bayer_pitch; //int chroma_offset = decoder->codec.chroma_offset; //int row; //int column; // Need to allocate a scratch buffer for decoding the Bayer frame? 
if (decoder->RawBayer16 == NULL) { TRANSFORM **transform_array = decoder->transform; int decoded_width = 0; int decoded_height = 0; int resolution = info->resolution; //int format = info->format; // Four Bayer data samples at each 2x2 quad in the grid int pixel_size = 4 * sizeof(PIXEL16U); int frame_size; const size_t alignment = 16; #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; #endif // Compute the decoded width and height for the specified resolution GetDecodedFrameDimensions(transform_array, num_channels, frame, resolution, &decoded_width, &decoded_height); assert(decoded_width > 0 && decoded_height > 0); if (! (decoded_width > 0 && decoded_height > 0)) { return CODEC_ERROR_UNSUPPORTED_FORMAT; } frame_size = decoded_width * decoded_height * pixel_size; #if _ALLOCATOR decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment); #else decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment); #endif assert(decoder->RawBayer16 != NULL); if (! (decoder->RawBayer16 != NULL)) { return CODEC_ERROR_MEMORY_ALLOC; } decoder->RawBayerSize = frame_size; //#ifdef SHARPENING if(decoder->RGBFilterBuffer16 == NULL) { int size = frame_size*3; if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format)) size = frame_size*4; #if _ALLOCATOR decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16); #else decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16); #endif assert(decoder->RGBFilterBuffer16 != NULL); if (! 
	/* NOTE(review): the lines from here to the first function-closing brace are
	   the tail of a routine whose definition begins above this chunk; they are
	   reproduced unchanged. */
	(decoder->RGBFilterBuffer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}

			decoder->RGBFilterBufferSize = frame_size*3;
		}
//#endif
	}

	//TODO: Need to add more output formats to this routine
	switch (format)
	{
	case DECODED_FORMAT_RGB32:
		// RGB32 path is stubbed out: always reports unsupported format
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;

		// Decode the last transform to rows of Bayer data (one row per channel)
		// TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
		//							decoder->RawBayer16, raw_bayer_pitch, info,
		//							&decoder->scratch, chroma_offset, precision);

		// ConvertPackedBayerToRGB32(decoder->RawBayer16, info, bayer_pitch,
		//						  output_buffer, output_pitch,
		//						  width, height);
		break;

	case DECODED_FORMAT_RGB24:
		// RGB24 path is stubbed out: always reports unsupported format
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;

		// Decode the last transform to rows of Bayer data (one row per channel)
		//TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
		//							decoder->RawBayer16, raw_bayer_pitch, info,
		//							&decoder->scratch, chroma_offset, precision);

		//ConvertPackedBayerToRGB24(decoder->RawBayer16, info, bayer_pitch,
		//						  output_buffer, output_pitch,
		//						  width, height);
		break;

	default:
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;
	}

	return error;
}

// Reconstruct Bayer encoded data and demosaic to full resolution
//
// Decodes the last wavelet transform into the decoder's RawBayer16 scratch
// buffer (allocating RawBayer16 and the RGBFilterBuffer16 sharpening buffer
// on first use) and then dispatches the demosaic/output conversion to the
// worker thread pool.  Only available in threaded builds (_THREADED);
// otherwise returns CODEC_ERROR_UNSUPPORTED_FORMAT.
//
// Parameters:
//   decoder       - decoder state (owns scratch buffers and the thread pool)
//   info          - requested output frame description (format, dimensions)
//   frame         - index of the frame within the GOP to reconstruct
//   output_buffer - destination pixel buffer
//   output_pitch  - destination row pitch in bytes (may be negated internally
//                   for inverted RGB formats)
//
// Returns CODEC_ERROR_OKAY on success, CODEC_ERROR_UNSUPPORTED_FORMAT for
// output formats this routine cannot produce, CODEC_ERROR_MEMORY_ALLOC on
// scratch-buffer allocation failure.
CODEC_ERROR ReconstructSampleFrameDeBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame,
													  uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	//int progressive = codec->progressive;
	int precision = codec->precision;

	//TRANSFORM **transform_array = decoder->transform;
	//int decoded_width = 0;
	//int decoded_height = 0;
	//int resolution = info->resolution;
	int format = info->format;
	int width = info->width;
	//int height = info->height;

	// Compute the number of bytes between each row of Bayer data
	int bayer_pitch = 2 * width * sizeof(PIXEL16U);

	// Compute the pitch between pairs of rows of bayer data (one pair per image row)
	//int raw_bayer_pitch = 2 * bayer_pitch;

	int chroma_offset = decoder->codec.chroma_offset;

	// Assume unsupported until the format is found in the supported list below
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;
	switch (format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_RG48: //DAN20090120 added not sure why they weren't here.
	case DECODED_FORMAT_WP13: //DAN20090120 ""
	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
	case DECODED_FORMAT_YR16:
	case DECODED_FORMAT_V210:
	case DECODED_FORMAT_YU64:
		error = CODEC_ERROR_OKAY;
		break;
	}
	if(error)
		return error;

	//int row;
	//int column;

	// Need to allocate a scratch buffer for decoding the Bayer frame?
	if (decoder->RawBayer16 == NULL)
	{
		TRANSFORM **transform_array = decoder->transform;
		int decoded_width = 0;
		int decoded_height = 0;
		int resolution = info->resolution;
		//int format = info->format;

		// Four Bayer data samples at each 2x2 quad in the grid
		int pixel_size = 4 * sizeof(PIXEL16U);
		int frame_size;
		const size_t alignment = 16;

#if _ALLOCATOR
		ALLOCATOR *allocator = decoder->allocator;
#endif

		// Compute the decoded width and height for the specified resolution
		GetDecodedFrameDimensions(transform_array, num_channels, frame, resolution,
								  &decoded_width, &decoded_height);
		assert(decoded_width > 0 && decoded_height > 0);
		if (! (decoded_width > 0 && decoded_height > 0)) {
			return CODEC_ERROR_UNSUPPORTED_FORMAT;
		}

		frame_size = decoded_width * decoded_height * pixel_size;

#if _ALLOCATOR
		decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
		decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
		assert(decoder->RawBayer16 != NULL);
		if (! (decoder->RawBayer16 != NULL)) {
			return CODEC_ERROR_MEMORY_ALLOC;
		}

		decoder->RawBayerSize = frame_size;

//#ifdef SHARPENING
		// Allocate the companion RGB filter (sharpening) buffer on first use
		if(decoder->RGBFilterBuffer16 == NULL)
		{
			int size = frame_size*3;
			// RGBA 4:4:4:4 with alpha output needs a fourth channel
			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				size = frame_size*4;

#if _ALLOCATOR
			decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
			decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
			assert(decoder->RGBFilterBuffer16 != NULL);
			if (! (decoder->RGBFilterBuffer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}

			// NOTE(review): when the alpha branch above allocated frame_size*4,
			// the recorded size here is still frame_size*3 — possibly should be
			// 'size'; confirm against the resize checks done elsewhere.
			decoder->RGBFilterBufferSize = frame_size*3;
		}
//#endif
	}

#if _THREADED
	// Decode the last transform into rows of packed Bayer data
	// NOTE(review): bayer_pitch is already a byte count (2*width*sizeof(PIXEL16U));
	// multiplying again by sizeof(PIXEL) looks like double byte-scaling — verify
	// against the units TransformInverseSpatialUniversalThreadedToRow16u expects.
	TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
													 (uint8_t *)decoder->RawBayer16, bayer_pitch*sizeof(PIXEL), info,
													 chroma_offset, precision);

	//DemosaicRAW
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int inverted = false;
		uint8_t *output = output_buffer;
		int pitch = output_pitch;

#if _DELAY_THREAD_START
		// Lazily create the worker thread pool on first use
		if(decoder->worker_thread.pool.thread_count == 0)
		{
			CreateLock(&decoder->worker_thread.lock);
			// Initialize the pool of transform worker threads
			ThreadPoolCreate(&decoder->worker_thread.pool,
							 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
							 WorkerThreadProc,
							 decoder);
		}
#endif

		// RGB output is bottom-up; switch to the inverted variants and flip below
		if (format == DECODED_FORMAT_RGB24) {
			format = DECODED_FORMAT_RGB24_INVERTED;
			inverted = true;
		}
		else if (format == DECODED_FORMAT_RGB32) {
			format = DECODED_FORMAT_RGB32_INVERTED;
			inverted = true;
		}

		// Have the output location and pitch been inverted?
		if (inverted && pitch > 0)
		{
			int height = info->height;
			// Full/half-horizontal debayer resolutions output twice the rows
			if(info->resolution == DECODED_RESOLUTION_FULL_DEBAYER ||
			   info->resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
				height *= 2;

			output += (height - 1) * pitch;		// Start at the bottom row
			pitch = NEG(pitch);					// Negate the pitch to go up
		}

		// Post a message to the mailbox
		mailbox->output = output;
		mailbox->pitch = pitch;
		memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
		mailbox->jobType = JOB_TYPE_OUTPUT;

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);

		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
	}
#else
	// No non-threaded implementation of the demosaic output path
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;
#endif

	return error;
}

// Reconstruct Bayer encoded data to half resolution
//
// At half resolution the lowpass band of each channel's wavelet already holds
// the per-channel Bayer planes (G1, R-G, B-G, and optionally G2), so this
// routine simply converts those planes directly to the output format.
// Only DECODED_FORMAT_RGB32 is currently supported.
CODEC_ERROR ReconstructSampleFrameBayerHalfToBuffer(DECODER *decoder, FRAME_INFO *info, int frame,
													uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	//CODEC_STATE *codec = &decoder->codec;
	//int num_channels = codec->num_channels;
	//int progressive = codec->progressive;
	//int precision = codec->precision;

	TRANSFORM **transform_array = decoder->transform;
	int frame_width = info->width;
	int frame_height = info->height;
	//int resolution = info->resolution;
	int format = info->format;

	//IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];

	// One plane per Bayer component (G1, R-G, B-G, G2)
	PIXEL16U *g1_plane;
	PIXEL16U *rg_plane;
	PIXEL16U *bg_plane;
	PIXEL16U *g2_plane;

	int g1_pitch;
	int rg_pitch;
	int bg_pitch;
	int g2_pitch;

#if 0
	int channel;
	for (channel = 0; channel < num_channels; channel++)
	{
		lowpass_images[channel] = transform_array[channel]->wavelet[frame];
#if (0 && DEBUG)
		if (logfile) {
			char label[_MAX_PATH];
			char *format = decoded_format_string[info->format];
			sprintf(label, "Output, channel: %d, format: %s", channel, format);
			DumpImageStatistics(label, lowpass_images[channel], logfile);
		}
#endif
	}
#endif

	// Get the lowpass bands in the wavelet coresponding to the output frame
	g1_plane = (PIXEL16U *)transform_array[0]->wavelet[frame]->band[0];
	rg_plane = (PIXEL16U *)transform_array[1]->wavelet[frame]->band[0];
	bg_plane = (PIXEL16U *)transform_array[2]->wavelet[frame]->band[0];

	if(transform_array[3]->wavelet[frame]) //half res don't decode g1-g2 //HACK
	{
		g2_plane = (PIXEL16U *)transform_array[3]->wavelet[frame]->band[0];
		g2_pitch = transform_array[3]->wavelet[frame]->pitch;
	}
	else
	{
		// Fourth channel absent: converter must cope with a NULL G2 plane
		g2_plane = NULL;
		g2_pitch = 0;
	}

	// Get the pitch of each plane
	g1_pitch = transform_array[0]->wavelet[frame]->pitch;
	rg_pitch = transform_array[1]->wavelet[frame]->pitch;
	bg_pitch = transform_array[2]->wavelet[frame]->pitch;

	switch (format)
	{
	case DECODED_FORMAT_RGB32:
		ConvertPlanarBayerToRGB32(g1_plane, g1_pitch, rg_plane, rg_pitch,
								  bg_plane, bg_pitch, g2_plane, g2_pitch,
								  output_buffer, output_pitch,
								  frame_width, frame_height);
		break;

	default:
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;
	}

	return error;
}

// Reconstruct Bayer encoded data to quarter resolution
//
// Unimplemented stub: traps in debug builds (assert(0)) and otherwise
// returns CODEC_ERROR_OKAY without producing output.
CODEC_ERROR ReconstructSampleFrameBayerQuarterToBuffer(DECODER *decoder, int frame,
													   uint8_t *output, int pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	//FRAME_INFO *info = &decoder->frame;
	//CODEC_STATE *codec = &decoder->codec;
	//int num_channels = codec->num_channels;
	//int progressive = codec->progressive;
	//int precision = codec->precision;

	//TRANSFORM **transform_array = decoder->transform;
	//int decoded_width = 0;
	//int decoded_height = 0;
	//int resolution = info->resolution;
	//int format = info->format;

	//TODO: Need to finish this routine
	assert(0);

	return error;
}

// Reconstruct the original YUV 4:2:2 encoded format to the requested output format
// (body continues on the following lines of this chunk)
CODEC_ERROR ReconstructSampleFrameYUV422ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
	CODEC_ERROR error =
	CODEC_ERROR_OKAY;

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	FRAME_INFO *info = &decoder->frame;
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	int progressive = codec->progressive;
	int precision = codec->precision;

	TRANSFORM **transform_array = decoder->transform;
	//int decoded_width = 0;
	//int decoded_height = 0;
	int resolution = info->resolution;
	int format = info->format;
	//int color_space = decoder->frame.colorspace;

	//TODO: Eliminate use of the chroma offset
	int chroma_offset = decoder->codec.chroma_offset;

#if _THREADED
	// Type of threaded inverse transform
	//int type;
#endif

#if _ALLOCATOR
	ALLOCATOR *allocator = decoder->allocator;
#endif

	// NOTE(review): this NULL check comes after 'decoder' has already been
	// dereferenced many times above, so it cannot catch a NULL argument.
	if (decoder == NULL) {
		return CODEC_ERROR_INVALID_ARGUMENT;
	}

	//TODO: Split this routine into subroutines for progressive versus interlaced video
	//TODO: Split progressive and interlaced routines into subroutines for each resolution

	// Half-resolution decode: copy the lowpass bands directly to the output
	if(resolution == DECODED_RESOLUTION_HALF)
	{
		bool inverted = false;
		FRAME_INFO info2;
		memcpy(&info2, info, sizeof(FRAME_INFO));
		format = info2.format;

		// RGB output is bottom-up; switch to the inverted variants and flip below
		if (format == DECODED_FORMAT_RGB24) {
			format = DECODED_FORMAT_RGB24_INVERTED;
			info2.format = format;
			inverted = true;
		}
		else if (format == DECODED_FORMAT_RGB32) {
			format = DECODED_FORMAT_RGB32_INVERTED;
			info2.format = format;
			inverted = true;
		}

#if 1
		// Have the output location and pitch been inverted?
		if (inverted && pitch > 0)
		{
			int height = info->height;
			output += (height - 1) * pitch;		// Start at the bottom row
			pitch = NEG(pitch);					// Negate the pitch to go up
		}
#endif

		if(decoder->use_active_metadata_decoder)
		{
#if _THREADED
			WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
			// Lazily create the worker thread pool on first use
			if(decoder->worker_thread.pool.thread_count == 0)
			{
				CreateLock(&decoder->worker_thread.lock);
				// Initialize the pool of transform worker threads
				ThreadPoolCreate(&decoder->worker_thread.pool,
								 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
								 WorkerThreadProc,
								 decoder);
			}
#endif

			// Post a message to the mailbox
			mailbox->output = output;
			mailbox->pitch = pitch;
			mailbox->framenum = frame;
			memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
			mailbox->jobType = JOB_TYPE_OUTPUT;

			// Phase 1 tells the workers to run the active-metadata output pass
			decoder->RGBFilterBufferPhase = 1;

			// Set the work count to the number of rows to process
			ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);

			// Start the transform worker threads
			ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

			// Wait for all of the worker threads to finish
			ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

			decoder->RGBFilterBufferPhase = 0;

			return CODEC_ERROR_OKAY;
#endif
		}
		else
		{
			int precision = codec->precision;
			TRANSFORM **transform_array = decoder->transform;
			int channel;
			IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
			CODEC_STATE *codec = &decoder->codec;
			int num_channels = codec->num_channels;

			// Collect the lowpass wavelet for each channel
			for (channel = 0; channel < num_channels; channel++)
			{
				lowpass_images[channel] = transform_array[channel]->wavelet[frame];
			}

			CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch,
								   &info2, chroma_offset, precision,
								   decoder->codec.encoded_format, decoder->frame.white_point);
		}

		return CODEC_ERROR_OKAY;
	}

	// Was the video source interlaced or progressive?
	if (progressive)
	{
		// The video source was progressive (the first transform was a spatial transform)
		if (resolution == DECODED_RESOLUTION_FULL || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
		{
			FRAME_INFO info2;
			int format;
			bool inverted = false;
			int precision = codec->precision;

			memcpy(&info2, info, sizeof(FRAME_INFO));
			format = info2.format;

			// RGB output is bottom-up; switch to the inverted variants and flip below
			if (format == DECODED_FORMAT_RGB24) {
				format = DECODED_FORMAT_RGB24_INVERTED;
				info2.format = format;
				inverted = true;
			}
			else if (format == DECODED_FORMAT_RGB32) {
				format = DECODED_FORMAT_RGB32_INVERTED;
				info2.format = format;
				inverted = true;
			}

#if 1
			// Have the output location and pitch been inverted?
			if (inverted && pitch > 0)
			{
				int height = info->height;
				output += (height - 1) * pitch;		// Start at the bottom row
				pitch = NEG(pitch);					// Negate the pitch to go up
			}
#endif

			/*if(decoder->use_active_metadata_decoder)
			{
				switch (format & 0x7ffffff)
				{
				case DECODED_FORMAT_RGB24:	// Output buffer is too small to decode into for
				case DECODED_FORMAT_YUYV:	// computing the active metadata.
				case DECODED_FORMAT_UYVY:
					return CODEC_ERROR_OKAY;
					break;
				}
			}*/

			// Dispatch on the output format (high bit masked off)
			switch (format & 0x7ffffff)
			{
			case DECODED_FORMAT_RGB24:	// Output buffer is too small to decode into for
				if(decoder->use_active_metadata_decoder)
				{
#if _THREADED
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalStrip16sThruActiveMetadata);
					return CODEC_ERROR_OKAY;
#endif
				}
				else
				{
#if _THREADED
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalStrip16sYUVtoRGB);
					return CODEC_ERROR_OKAY;
#endif
				}
				break;

			case DECODED_FORMAT_YUYV:
			case DECODED_FORMAT_UYVY:
				if(decoder->use_active_metadata_decoder)
				{
#if _THREADED
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalStrip16sThruActiveMetadata);
					return CODEC_ERROR_OKAY;
#endif
				}
				else
				{
#if _THREADED
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalStrip16sToYUV);
					return CODEC_ERROR_OKAY;
#endif
				}
				break;

			//Handle sizes that are smaller than the interim decode buffer //DAN20081222
			case DECODED_FORMAT_CbYCrY_10bit_2_8:
				// Split the output into upper (2-bit) and lower (8-bit) planes
				decoder->upper_plane = output;
				decoder->lower_plane = output + decoder->frame.width * decoder->frame.height / 2;

				// Use the address and pitch of the lower plane
				output = decoder->lower_plane;
				pitch = decoder->frame.width * 2;

				// Fall through and compute the inverse spatial transform

			case DECODED_FORMAT_CbYCrY_16bit_2_14:
			case DECODED_FORMAT_CbYCrY_16bit_10_6:
			case DECODED_FORMAT_CbYCrY_8bit:
			case DECODED_FORMAT_CbYCrY_16bit:
				if(decoder->use_active_metadata_decoder)
				{
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalStrip16sThruActiveMetadata);
					return CODEC_ERROR_OKAY;
				}
				else
				{
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalStrip16sToOutput);
					return CODEC_ERROR_OKAY;
				}
				break;

			case DECODED_FORMAT_V210:
				if(decoder->use_active_metadata_decoder)
				{
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalStrip16sThruActiveMetadata);
					return CODEC_ERROR_OKAY;
				}
				else
				{
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
																	 InvertHorizontalYUVStrip16sToYUVOutput);
					return CODEC_ERROR_OKAY;
				}
				break;

			case DECODED_FORMAT_RGB32:
			case DECODED_FORMAT_RGB32_INVERTED:
			// As long as the outpitch is greater or equal to 4:2:2 16-bit YR16 this works.
			case DECODED_FORMAT_RG48:
			case DECODED_FORMAT_RG64:
			case DECODED_FORMAT_R210:
			case DECODED_FORMAT_DPX0:
			case DECODED_FORMAT_RG30:
			case DECODED_FORMAT_AR10:
			case DECODED_FORMAT_AB10:
			case DECODED_FORMAT_B64A:
			case DECODED_FORMAT_R408:
			case DECODED_FORMAT_V408:
			case DECODED_FORMAT_YU64:
			case DECODED_FORMAT_YR16:
			case DECODED_FORMAT_WP13:
			case DECODED_FORMAT_W13A:
				// Direct RGB32 path when the active metadata decoder is off
				if((format & 0x7FFFFFFF) == DECODED_FORMAT_RGB32 && decoder->use_active_metadata_decoder == false)
				{
#if _THREADED
					TransformInverseSpatialThreadedYUV422ToBuffer(decoder, frame, num_channels,
																  output, pitch, &info2,
																  chroma_offset, precision);
#elif 0
					TransformInverseSpatialToBuffer(decoder, transform_array, frame, num_channels,
													output, pitch, &info2, &decoder->scratch,
													chroma_offset, precision);
#else
					TransformInverseSpatialYUV422ToOutput(decoder, transform_array, frame, num_channels,
														  output, pitch, &info2, &decoder->scratch,
														  chroma_offset, precision,
														  InvertHorizontalStripYUV16sToPackedRGB32);
#endif
					return CODEC_ERROR_OKAY;
				}

#if _THREADED
				if(decoder->use_active_metadata_decoder)
				{
					TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																	 output, pitch, info, chroma_offset, precision,
			//TransformInverseFrameToBuffer(transform_array, frame, num_channels, output, pitch,
			//							   info, &decoder->scratch, chroma_offset, precision);
#endif
				return CODEC_ERROR_OKAY;

			default:
				// else Return the error code for unsupported output format
				break;
			}
		}
	}

	// The output format is not supported by this routine
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;

	return error;
}

// Routines for converting the new encoded formats to the requested output format

// Reconstruct an RGB 4:4:4 encoded sample to the requested output format.
//
// Dispatches on the decoded resolution: lowpass-only and half resolution copy
// the lowpass bands directly; quarter resolution reconstructs via
// ReconstructQuarterFrame; full (and half-horizontal) resolution runs the
// inverse spatial transform, either through the active-metadata worker-thread
// path (via the RGBFilterBuffer16 interim buffer) or directly to the output
// buffer with a per-format conversion routine.
//
// Returns CODEC_ERROR_OKAY on success, or an error code for invalid
// arguments, unsupported resolution/format, or allocation failure.
CODEC_ERROR ReconstructSampleFrameRGB444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	FRAME_INFO *info = &decoder->frame;
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	//int progressive = codec->progressive;

	TRANSFORM **transform_array = decoder->transform;
	//IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
	//IMAGE *wavelet;
	//int wavelet_width;
	//int wavelet_height;
	int decoded_width = 0;
	int decoded_height = 0;
	int resolution = info->resolution;
	//int chroma_offset = decoder->codec.chroma_offset;
	//int decoded_scale;

#if _ALLOCATOR
	ALLOCATOR *allocator = decoder->allocator;
#endif

	//TODO: Eliminate use of the chroma offset

	// NOTE(review): this NULL check comes after 'decoder' has already been
	// dereferenced above, so it cannot catch a NULL argument.
	if (decoder == NULL) {
		return CODEC_ERROR_INVALID_ARGUMENT;
	}

	// This routine should only be called for progressive frames
	assert(codec->progressive);

	// The decoder can decode a video sample without returning a frame
	if (output == NULL || pitch == 0) {
		return CODEC_ERROR_OKAY;
	}

	// Does this frame have to be reconstructed?
	if ((decoder->flags & DECODER_FLAGS_RENDER) == 0) {
		return CODEC_ERROR_OKAY;
	}

	// Check that the requested frame is within the limits of the group of frames
	assert(0 <= frame && frame < decoder->gop_length);

	// Check that the frame resolution is valid
	assert(IsValidFrameResolution(resolution));
	if (!IsValidFrameResolution(resolution)) {
		return CODEC_ERROR_RESOLUTION;
	}

	// Compute the decoded width and height
	ComputeOutputDimensions(decoder, frame, &decoded_width, &decoded_height);
	assert(decoded_width > 0 && decoded_height > 0);

	// RGB output is bottom-up: start at the last row and walk upward
	if (info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32)
	{
		output += (info->height-1)*pitch;
		pitch = -pitch;
	}

#if (0 && DEBUG)
	if (logfile) {
		IMAGE *wavelet = transform[0]->wavelet[frame];
		int band = 0;
		fprintf(logfile, "Luminance wavelet, frame: %d, band: %d\n", frame, band);
		DumpArray16s("Lowpass Band", wavelet->band[band], wavelet->width, wavelet->height, wavelet->pitch, logfile);
	}
#endif

	// Check that the requested frame is large enough to hold the decoded frame
#if (0 && DEBUG)
	//if (!
	(info->width >= decoded_width)) {
		if (logfile) {
			//fprintf(logfile, "Requested frame not large enough to hold decoded frame: %d < %d\n", info->width, decoded_width);
			fprintf(logfile, "Output frame width: %d, decoded frame width: %d\n", info->width, decoded_width);
		}
	}
#endif
	assert(info->width >= decoded_width);
	if (!(info->width >= decoded_width)) {
		return CODEC_ERROR_FRAMESIZE;
	}

//	assert((info->height+7)/8 >= (decoded_height+7)/8);
//	if (!(info->height+7)/8 >= (decoded_height+7)/8) {
//		return CODEC_ERROR_FRAMESIZE;
//	}

	START(tk_convert);

	if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
	{
		//int precision = codec->precision;
		int scale = 13;
		int channel;
		IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
		int chroma_offset = decoder->codec.chroma_offset;

		//DAN20081203 -- fix for 444 decodes in AE32-bit float
		decoder->frame.white_point = 16;
		//decoder->frame.signed_pixels = 0;

		for (channel = 0; channel < num_channels; channel++)
		{
			// Prefer the deepest wavelet; fall back to level 2 for intra frames
			lowpass_images[channel] = transform_array[channel]->wavelet[5];
			if(lowpass_images[channel] == NULL) // therefore IntreFrame compressed.
			{
				scale = 12;
				lowpass_images[channel] = transform_array[channel]->wavelet[2];
			}
		}

		CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch,
							   info, chroma_offset, scale,
							   decoder->codec.encoded_format, decoder->frame.white_point);
	}
	else
	// Quarter resolution
	if (resolution == DECODED_RESOLUTION_QUARTER)
	{
		// Output quarter resolution for the two frame GOP
		int precision = codec->precision;

		// Reconstruct the frame to quarter resolution
		ReconstructQuarterFrame(decoder, num_channels, frame, output, pitch,
								info, &decoder->scratch, precision);

		// Quarter resolution one frame GOP is handled in DecodeSampleIntraFrame
	}
	else
	// Half resolution
	if (resolution == DECODED_RESOLUTION_HALF)
	{
		IMAGE *wavelet_array[TRANSFORM_MAX_CHANNELS];
		int precision = codec->precision;
		int chroma_offset = 0;
		int channel;

		if(decoder->use_active_metadata_decoder)
		{
#if _THREADED
			{
				WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
				// Lazily create the worker thread pool on first use
				if(decoder->worker_thread.pool.thread_count == 0)
				{
					CreateLock(&decoder->worker_thread.lock);
					// Initialize the pool of transform worker threads
					ThreadPoolCreate(&decoder->worker_thread.pool,
									 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
									 WorkerThreadProc,
									 decoder);
				}
#endif

				// Post a message to the mailbox
				mailbox->output = output;
				mailbox->pitch = pitch;
				mailbox->framenum = frame;
				memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
				mailbox->jobType = JOB_TYPE_OUTPUT;

				decoder->RGBFilterBufferPhase = 1;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);

				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

				decoder->RGBFilterBufferPhase = 0;
			}
#endif
		}
		else
		{
			//DAN20081203 -- fix for 444 decodes in AE32-bit float
			decoder->frame.white_point = 16;
			//decoder->frame.signed_pixels = 0;

			// Get the first level wavelet in each channel
			for (channel = 0; channel < num_channels; channel++)
			{
				wavelet_array[channel] = transform_array[channel]->wavelet[frame];
			}

			// Pack the pixels from the lowpass band in each channel into the output buffer
			CopyLowpassRGB444ToBuffer(decoder, wavelet_array, num_channels, output, pitch,
									  info, chroma_offset, precision);
		}
	}
	// Full resolution or half horizontal
	else
	{
		int chroma_offset = 0;
		int precision = codec->precision;

		// Reconstruct the output frame from a full resolution decode
		//assert(resolution == DECODED_RESOLUTION_FULL);

		if(decoder->use_active_metadata_decoder)
		{
			int frame_size, channels = 3;

			// RGBA 4:4:4:4 with alpha output needs a fourth channel
			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				channels = 4;

			frame_size = info->width * info->height * channels * 2;

			// (Re)allocate the interim RGB filter buffer if it is missing or too small
			if(decoder->RGBFilterBuffer16==NULL || decoder->RGBFilterBufferSize < frame_size)
			{
#if _ALLOCATOR
				if(decoder->RGBFilterBuffer16)
				{
					FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16);
					decoder->RGBFilterBuffer16 = NULL;
				}
				decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16);
#else
				if(decoder->RGBFilterBuffer16)
				{
					MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16);
					decoder->RGBFilterBuffer16 = NULL;
				}
				decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16);
#endif
				assert(decoder->RGBFilterBuffer16 != NULL);
				if (! (decoder->RGBFilterBuffer16 != NULL)) {
					return CODEC_ERROR_MEMORY_ALLOC;
				}
				decoder->RGBFilterBufferSize = frame_size;
			}

#if _THREADED
			// Inverse transform into the interim buffer (threaded)
			TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
															 (uint8_t *)decoder->RGBFilterBuffer16,
															 info->width * channels * 2, info,
															 chroma_offset, precision);
#else
			// Decode that last transform to rows of Bayer data (one row per channel)
			TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
											(uint8_t *)decoder->RGBFilterBuffer16,
											info->width * channels * 2, info,
											&decoder->scratch, chroma_offset, precision);
#endif

#if _THREADED
			{
				WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

#if _DELAY_THREAD_START
				// Lazily create the worker thread pool on first use
				if(decoder->worker_thread.pool.thread_count == 0)
				{
					CreateLock(&decoder->worker_thread.lock);
					// Initialize the pool of transform worker threads
					ThreadPoolCreate(&decoder->worker_thread.pool,
									 decoder->thread_cntrl.capabilities >> 16/*cpus*/,
									 WorkerThreadProc,
									 decoder);
				}
#endif

				// Post a message to the mailbox
				mailbox->output = output;
				mailbox->pitch = pitch;
				memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
				mailbox->jobType = JOB_TYPE_OUTPUT;

				decoder->RGBFilterBufferPhase = 1;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height);

				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

				decoder->RGBFilterBufferPhase = 0;
			}
#endif
		}
		else
		{
			//DAN20081203 -- fix for 444 decodes in AE32-bit float
			decoder->frame.white_point = 16;
			//decoder->frame.signed_pixels = 0;

			// Per-format inverse transform directly to the output buffer
			switch (info->format)
			{
			case DECODED_FORMAT_B64A:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch, info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGB2B64A);
#else
				TransformInverseRGB444ToB64A(transform_array, frame, num_channels, output, pitch,
											 info, &decoder->scratch, chroma_offset, precision);
#endif
				break;

			case DECODED_FORMAT_YU64:
				//TODO : Threading
				TransformInverseRGB444ToYU64(transform_array, frame, num_channels, output, pitch,
											 info, &decoder->scratch, chroma_offset, precision);
				break;

			case DECODED_FORMAT_RGB24:
			case DECODED_FORMAT_RGB24_INVERTED:
			case DECODED_FORMAT_RGB32:
			case DECODED_FORMAT_RGB32_INVERTED://TODO, needs to be threaded. WIP
				TransformInverseRGB444ToRGB32(transform_array, frame, num_channels, output, pitch,
											  info, &decoder->scratch, chroma_offset, precision);
				break;

			case DECODED_FORMAT_RG48:
			case DECODED_FORMAT_RG64: //TODO, needs to be threaded. WIP
				TransformInverseRGB444ToRGB48(transform_array, frame, num_channels, output, pitch,
											  info, &decoder->scratch, chroma_offset, precision);
				break;

			case DECODED_FORMAT_R210:
			case DECODED_FORMAT_DPX0:
			case DECODED_FORMAT_RG30:
			case DECODED_FORMAT_AR10:
			case DECODED_FORMAT_AB10:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch, info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGB2RG30);
#else
				TransformInverseRGB444ToRGB48(transform_array, frame, num_channels, output, pitch,
											  info, &decoder->scratch, chroma_offset, precision);
#endif
				break;

			case DECODED_FORMAT_YUYV:
			case DECODED_FORMAT_UYVY:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch, info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGB2YUV);
#else
				TransformInverseSpatialYUV422ToOutput(decoder, transform_array, frame, num_channels,
													  output, pitch, info, &decoder->scratch,
													  chroma_offset, precision,
													  InvertHorizontalStripRGB16sToPackedYUV8u);
#endif
				break;

			case DECODED_FORMAT_R408:
			case DECODED_FORMAT_V408:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch, info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGBA2YUVA);
#else
				assert(0);
#endif
				break;

			case DECODED_FORMAT_YR16:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch, info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGB2YR16);
#else
				assert(0);// missing non-threaded version
#endif
				break;

			case DECODED_FORMAT_V210:
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch, info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGB2v210);
#else
				assert(0);// missing non-threaded version
#endif
				break;

			case DECODED_FORMAT_CbYCrY_8bit:		// DECODED_FORMAT_CT_UCHAR
#if _THREADED
				TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels,
																 output, pitch, info, chroma_offset, precision,
																 InvertHorizontalStrip16sRGB2YUV);
#else
				assert(0);// missing non-threaded version
#endif
				break;

			//TODO: Add code to handle other Avid pixel formats
			case DECODED_FORMAT_CbYCrY_16bit:		// DECODED_FORMAT_CT_SHORT
			case DECODED_FORMAT_CbYCrY_10bit_2_8:	// DECODED_FORMAT_CT_10Bit_2_8
			case DECODED_FORMAT_CbYCrY_16bit_2_14:	// DECODED_FORMAT_CT_SHORT_2_14
			case DECODED_FORMAT_CbYCrY_16bit_10_6:	// DECODED_FORMAT_CT_USHORT_10_6
				assert(0);
				break;

			default:
#if (1 && DEBUG)
				if (logfile) {
					fprintf(logfile, "Invalid decoded format: %d\n", info->format);
				}
#endif
				assert(0);
				error = CODEC_ERROR_INVALID_FORMAT;
				break;
			}
		}
	}

	STOP(tk_convert);

	return error;
}

// Convert 16-bit signed lowpass data into the requested output format
// NOTE(review): this function continues past the end of this chunk; the lines
// below are its visible head and are reproduced unchanged.
void CopyLowpassRGB444ToBuffer(DECODER *decoder, IMAGE *image_array[], int num_channels,
							   uint8_t *output_buffer, int32_t output_pitch,
							   FRAME_INFO *info, int chroma_offset, int precision)
{
	bool inverted = false;
	int output_width = info->width;
	int output_height = info->height;
	int format = info->format;

	// Left shift to scale the pixels to 16 bits minus the shift already in the lowpass values
	const int shift = 16 - precision - PRESCALE_LUMA;

	START(tk_convert);

#if 0
	// Fill the output buffer with blank values
	EraseOutputBuffer(output_buffer, info->width, info->height, output_pitch, info->format);
#endif

	// Determine the type of conversion
	switch (info->format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB32:
		// Deliberate fall-through: the non-inverted formats set the flag and
		// share the conversion call below
		inverted = true;
	case DECODED_FORMAT_RGB24_INVERTED:
	case DECODED_FORMAT_RGB32_INVERTED:
	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
	case DECODED_FORMAT_RG48:
	case DECODED_FORMAT_RG64: //WIP
		ConvertLowpassRGB444ToRGB(image_array, output_buffer,
								  output_width, output_height,
								  output_pitch, format, inverted, shift, num_channels);
		break;

	case DECODED_FORMAT_YUYV:
	case DECODED_FORMAT_UYVY:
		{
			IMAGE *g_image = image_array[0];
			IMAGE *r_image = image_array[1];
			IMAGE *b_image = image_array[2];

			if (info->format == COLOR_FORMAT_YUYV)
			{
				ConvertRGB2YUV(r_image->band[0], g_image->band[0], b_image->band[0],
							   r_image->pitch, g_image->pitch, b_image->pitch,
							   output_buffer, output_pitch,
							   output_width, output_height, 14,
							   info->colorspace, info->format);
			}
			else if (info->format == COLOR_FORMAT_UYVY)
			{
				ConvertRGB2UYVY(r_image->band[0], g_image->band[0], b_image->band[0],
								r_image->pitch, g_image->pitch, b_image->pitch,
								output_buffer, output_pitch,
								output_width, output_height, 14,
								info->colorspace, info->format);
			}
		}
		break;

	default:
		{
			int y;
			IMAGE *g_image = image_array[0];
			IMAGE *r_image = image_array[1];
			IMAGE *b_image = image_array[2];
			IMAGE *a_image = image_array[3];
			unsigned short *scanline = (unsigned short *)decoder->scratch.free_ptr;
			//unsigned short *scanline2 = scanline + output_width*3;
			uint8_t *newline = (uint8_t *)output_buffer;
			unsigned short *Rptr,*Gptr,*Bptr,*Aptr = NULL;

			Rptr = (unsigned short *)r_image->band[0];
			Gptr = (unsigned short *)g_image->band[0];
			Bptr = (unsigned short *)b_image->band[0];

			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
			{
				Aptr = (unsigned short *)a_image->band[0];

				for(y=0; y<output_height; y++)
				{
					int flags = (ACTIVEMETADATA_PLANAR);
					int whitebitdepth = 14;
memcpy(scanline, Rptr, info->width*2); memcpy(scanline+info->width, Gptr, info->width*2); memcpy(scanline+info->width*2, Bptr, info->width*2); memcpy(scanline+info->width*3, Aptr, info->width*2); Rptr += r_image->pitch/2; Gptr += g_image->pitch/2; Bptr += b_image->pitch/2; Aptr += a_image->pitch/2; Convert4444LinesToOutput(decoder, info->width, 1, y, scanline, newline, output_pitch, info->format, whitebitdepth, flags); newline += output_pitch; } } else { for(y=0; y<output_height; y++) { int flags = (ACTIVEMETADATA_PLANAR); int whitebitdepth = 14; memcpy(scanline, Rptr, info->width*2); memcpy(scanline+info->width, Gptr, info->width*2); memcpy(scanline+info->width*2, Bptr, info->width*2); Rptr += r_image->pitch/2; Gptr += g_image->pitch/2; Bptr += b_image->pitch/2; ConvertLinesToOutput(decoder, info->width, 1, y, scanline, newline, output_pitch, info->format, whitebitdepth, flags); newline += output_pitch; } } } //assert(0); break; } STOP(tk_convert); } #if _THREADED // Threaded inverse transform using the new threads API void TransformInverseSpatialThreadedYUV422ToBuffer(DECODER *decoder, int frame_index, int num_channels, uint8_t *output, int pitch, FRAME_INFO *info, int chroma_offset, int precision) { #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif //TODO: Add support for more output formats int format = DECODED_FORMAT_RGB32; // The upper and lower spatial transforms only share the middle rows int transform_height = (((info->height + 7) / 8) * 8) / 2; int middle_row_count = transform_height; // Data structure for passing information to the worker threads WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; // Inverse horizontal filter that outputs the desired format HorizontalInverseFilterOutputProc horizontal_filter_proc; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, 
decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Choose the correct inverse horizontal filter for the output format switch (format) { case DECODED_FORMAT_RGB32: horizontal_filter_proc = InvertHorizontalStripYUV16sToPackedRGB32; break; default: assert(0); return; } // Post a message to the mailbox mailbox->horizontal_filter_proc = horizontal_filter_proc; mailbox->frame = frame_index; mailbox->num_channels = num_channels; mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->chroma_offset = chroma_offset; mailbox->precision = precision; mailbox->jobType = JOB_TYPE_WAVELET; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, middle_row_count); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); #if (1 && DEBUG) if (logfile) { fprintf(logfile, "All worker threads signalled done\n"); } #endif } // Threaded inverse transform using the new threads API // Convert RGB RGBA or BAYER (4 channel) data to a 16-bit planar format void TransformInverseSpatialUniversalThreadedToRow16u(DECODER *decoder, int frame_index, int num_channels, uint8_t *output, int pitch, FRAME_INFO *info, int chroma_offset, int precision) { #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif // The upper and lower spatial transforms only share the middle rows int transform_height = (((info->height + 7) / 8) * 8) / 2; int middle_row_count = transform_height; // Data structure for passing information to the worker threads WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; // Inverse horizontal filter that outputs the desired format HorizontalInverseFilterOutputProc horizontal_filter_proc; horizontal_filter_proc = InvertHorizontalStrip16sToRow16uPlanar; #if _DELAY_THREAD_START 
if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->horizontal_filter_proc = horizontal_filter_proc; mailbox->frame = frame_index; mailbox->num_channels = num_channels; mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->chroma_offset = chroma_offset; mailbox->precision = precision; mailbox->jobType = JOB_TYPE_WAVELET; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, middle_row_count); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); } // Threaded inverse transform using the new threads API // Convert RGB RGBA or BAYER (4 channel) data to a 16-bit planar format void TransformInverseSpatialUniversalThreadedToOutput( DECODER *decoder, int frame_index, int num_channels, uint8_t *output, int pitch, FRAME_INFO *info, int chroma_offset, int precision, HorizontalInverseFilterOutputProc horizontal_filter_proc) { #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif // The upper and lower spatial transforms only share the middle rows int transform_height = (((info->height + 7) / 8) * 8) / 2; int middle_row_count = transform_height; // Data structure for passing information to the worker threads WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; // Inverse horizontal filter that outputs the desired format #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, 
decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->horizontal_filter_proc = horizontal_filter_proc; mailbox->frame = frame_index; mailbox->num_channels = num_channels; mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->chroma_offset = chroma_offset; mailbox->precision = precision; mailbox->jobType = JOB_TYPE_WAVELET; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, middle_row_count); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); } // Routines for the worker threads that use the new threads API void TransformInverseSpatialSectionToOutput(DECODER *decoder, int thread_index, int frame_index, int num_channels, uint8_t *output_buffer, int output_pitch, FRAME_INFO *info, int chroma_offset, int precision, HorizontalInverseFilterOutputProc horizontal_filter_proc) { #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif TRANSFORM **transform = decoder->transform; const SCRATCH *scratch = &decoder->scratch; PIXEL *lowlow_band[CODEC_MAX_CHANNELS]; PIXEL *lowhigh_band[CODEC_MAX_CHANNELS]; PIXEL *highlow_band[CODEC_MAX_CHANNELS]; PIXEL *highhigh_band[CODEC_MAX_CHANNELS]; int lowlow_pitch[CODEC_MAX_CHANNELS]; int lowhigh_pitch[CODEC_MAX_CHANNELS]; int highlow_pitch[CODEC_MAX_CHANNELS]; int highhigh_pitch[CODEC_MAX_CHANNELS]; int channel_width[CODEC_MAX_CHANNELS]; uint8_t *output_row_ptr; uint8_t *plane_array[TRANSFORM_MAX_CHANNELS]; int plane_pitch[TRANSFORM_MAX_CHANNELS]; int output_width = info->width; int output_height = info->height; int half_height = output_height/2; int luma_band_width; ROI strip; char *bufptr; int last_row; int last_display_row; int last_line; int channel; int row; int odd_display_lines = 0; 
THREAD_ERROR error; // Push the scratch space state to allocate a new section char *buffer = scratch->free_ptr; size_t buffer_size = scratch->free_size; //TODO: Replace uses of buffer variables with calls to the scratch space API // This version is for 16-bit pixels assert(sizeof(PIXEL) == 2); // Must have a valid inverse horizontal filter assert(horizontal_filter_proc != NULL); // Check for enough space in the local array allocations // assert(num_channels <= CODEC_NUM_CHANNELS); // assert(num_channels <= TRANSFORM_MAX_CHANNELS); if(num_channels < 3 || num_channels > TRANSFORM_MAX_CHANNELS) { decoder->error = CODEC_ERROR_BAD_FRAME; return; } // Divide the buffer space between the four threads buffer_size /= decoder->worker_thread.pool.thread_count; // used to assume max of 4 buffer += buffer_size * thread_index; // Round the buffer pointer up to the next cache line buffer_size -= (_CACHE_LINE_SIZE - ((uintptr_t)buffer & _CACHE_LINE_MASK)); bufptr = (char *)ALIGN(buffer, _CACHE_LINE_SIZE); // Allocate buffer space for the output rows from each channel for (channel = 0; channel < num_channels; channel++) { // Get the row width for this channel IMAGE *wavelet = transform[channel]->wavelet[frame_index]; int width = wavelet->width; int height = wavelet->height; //int pitch = wavelet->pitch; size_t channel_buffer_size; // Compute the width and pitch for the output rows stored in this buffer int buffer_width = 2 * width; int buffer_height = 2; int buffer_pitch = ALIGN16(buffer_width); // Compute the total allocation for this channel channel_buffer_size = buffer_height * buffer_pitch; // Check that there is enough space available assert(channel_buffer_size <= buffer_size); // Allocate the buffer for this channel plane_array[channel] = (uint8_t *)bufptr; // Remember the pitch for rows in this channel plane_pitch[channel] = buffer_pitch; // Advance the buffer pointer past the allocated space for this channel bufptr += channel_buffer_size; // Reduce the amount of space 
remaining in the buffer buffer_size -= channel_buffer_size; // The dimensions of the output image are the same as the luma channel if (channel == 0) { strip.width = buffer_width; strip.height = buffer_height; last_row = height; //DAN20050606 Added to fix issue with non-div by 8 heihts. last_display_row = (info->height+1)/2; // DAN20090215 -- fix for odd display lines. odd_display_lines = info->height & 1; // Remember the width of the wavelet bands for luma luma_band_width = width; } // Save the bands per channel for routines that process all channels at once lowlow_band[channel] = wavelet->band[0]; lowhigh_band[channel] = wavelet->band[1]; highlow_band[channel] = wavelet->band[2]; highhigh_band[channel] = wavelet->band[3]; lowlow_pitch[channel] = wavelet->pitch; lowhigh_pitch[channel] = wavelet->pitch; highlow_pitch[channel] = wavelet->pitch; highhigh_pitch[channel] = wavelet->pitch; // Remember the width of the wavelet for this channel channel_width[channel] = width; } // Use the remaining buffer space for intermediate results buffer_size -= (_CACHE_LINE_SIZE - ((uintptr_t)bufptr & _CACHE_LINE_MASK)); buffer = (char *)ALIGN(bufptr, _CACHE_LINE_SIZE); if (last_row == last_display_row) { last_line = half_height - 1; } else { last_line = half_height; } if(odd_display_lines) last_line++; if (thread_index == TRANSFORM_WORKER_TOP_THREAD) { // Process the first row row = 0; output_row_ptr = output_buffer; #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row); } #endif // Process the first row using special border filters for the top row InvertSpatialTopRow16sToOutput(decoder, thread_index, lowlow_band, lowlow_pitch, lowhigh_band, lowhigh_pitch, highlow_band, highlow_pitch, highhigh_band, highhigh_pitch, output_row_ptr, output_pitch, output_width, info->format, info->colorspace, row, channel_width, (PIXEL *)buffer, buffer_size, precision, horizontal_filter_proc); } if (thread_index == TRANSFORM_WORKER_BOTTOM_THREAD || 
decoder->worker_thread.pool.thread_count == 1) { if(last_row == last_display_row) //DAN20071218 -- Added as old 1080 RAW files would crash { int pitch = output_pitch; // Process the last row row = last_row - 1; if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV) // 3d work if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC) pitch >>= 1; // Begin filling the last output row with results output_row_ptr = output_buffer + row * 2 * pitch; #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row); } #endif // Process the last row using special border filters for the bottom row if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV) if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC || decoder->channel_blend_type == BLEND_LINE_INTERLEAVED) // 3d Work TODO Fix output_row_ptr -= output_pitch; InvertSpatialBottomRow16sToOutput(decoder, thread_index, lowlow_band, lowlow_pitch, lowhigh_band, lowhigh_pitch, highlow_band, highlow_pitch, highhigh_band, highhigh_pitch, output_row_ptr, output_pitch, output_width, info->format, info->colorspace, row, channel_width, (PIXEL *)buffer, buffer_size, precision, odd_display_lines, horizontal_filter_proc); } } // Loop until all of the middle rows have been processed for (;;) { int work_index; int row; // Wait for one row from each channel to process error = PoolThreadWaitForWork(&decoder->worker_thread.pool, &work_index, thread_index); // Is there another row to process? 
if (error == THREAD_ERROR_OKAY) { int pitch = output_pitch; // Compute the next row to process from the work index row = work_index + 1; if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV) // 3d work if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC) // stacked pitch >>= 1; // Compute the output row corresponding to this row index output_row_ptr = output_buffer + row * 2 * pitch; } else { // No more work to do return; } // Is the row inside the top and bottom border? if (0 < row && row < last_line) { int outputlines = 2; #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row); } #endif if(odd_display_lines && row==last_line-1) { outputlines = 1; } // Process the middle row using the normal wavelet filters InvertSpatialMiddleRow16sToOutput(decoder, thread_index, lowlow_band, lowlow_pitch, lowhigh_band, lowhigh_pitch, highlow_band, highlow_pitch, highhigh_band, highhigh_pitch, output_row_ptr, output_pitch, output_width, info->format, info->colorspace, row, channel_width, (PIXEL *)buffer, buffer_size, precision, horizontal_filter_proc, outputlines); } } } #endif //_THREADED bool GetTuplet(unsigned char *data, int datasize, unsigned short findtag, unsigned short *retvalue) { bool ret = false; BITSTREAM myinput, *pinput; TAGVALUE segment; TAGWORD tag,value; int error = 0; //char t[100]; InitBitstream(&myinput); myinput.lpCurrentWord = data; myinput.nWordsUsed = datasize; pinput = &myinput; do { bool optional = false; int chunksize = 0; // Read the next tag value pair from the bitstream segment = GetSegment(pinput); tag = segment.tuple.tag; value = segment.tuple.value; // Is this an optional tag? 
if (tag < 0) { tag = NEG(tag); optional = true; } if(tag & 0x2000) { chunksize = value; chunksize &= 0xffff; chunksize += ((tag&0xff)<<16); } else if(tag & 0x4000) { chunksize = value; chunksize &= 0xffff; } else if(tag == CODEC_TAG_INDEX) { chunksize = value; chunksize &= 0xffff; } else { chunksize = 0; } if((int)(tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || tag & 0x6000) { int skip = 1; error = 0; if(tag == (int)findtag) { *retvalue = value; ret = true; break; } if((tag & 0xff00) == 0x2200) //sample size { chunksize = 0; // don't test against pinput->nWordsUsed, as we might be only reader enough for metadata only. skip = 0; } if((tag & 0xff00) == 0x2300) //uncompressed sample size { skip = 1; } if((tag & 0xff00) == 0x2100) //level skip = 0; if(chunksize) { if(chunksize*4 > pinput->nWordsUsed || chunksize < 0) { break; } if(skip) { //unsigned int *iptr = (unsigned int *)pinput->lpCurrentWord; pinput->lpCurrentWord += chunksize*4; pinput->nWordsUsed -= chunksize*4; } } } else { error = 1; } } while(tag != CODEC_TAG_GROUP_TRAILER && tag != CODEC_TAG_FRAME_TRAILER && pinput->nWordsUsed>0 && !error); return ret; } /*! Copied from metadata.cpp in the cedoc common directory */ uint8_t *GetTupletAddr(uint8_t *data, int datasize, uint16_t findtag, int16_t *retvalue) { unsigned char *ret = NULL; BITSTREAM myinput, *pinput; TAGVALUE segment; TAGWORD tag,value; int error = 0; if (data == NULL || datasize == 0) { return NULL; } //InitBitstream(&myinput); memset(&myinput, 0, sizeof(BITSTREAM)); myinput.lpCurrentWord = data; myinput.nWordsUsed = datasize; myinput.nBitsFree = BITSTREAM_LONG_SIZE; pinput = &myinput; do { //BOOL optional = FALSE; bool optional = false; int chunksize = 0; // Read the next tag value pair from the bitstream segment = GetSegment(pinput); tag = segment.tuple.tag; value = segment.tuple.value; // Is this an optional tag? 
if (tag < 0) { tag = NEG(tag); //optional = TRUE; optional = true; } if(tag & 0x2000) { chunksize = value; chunksize &= 0xffff; chunksize += ((tag&0xff)<<16); } else if(tag & 0x4000) { chunksize = value; chunksize &= 0xffff; } else if(tag == CODEC_TAG_INDEX) { chunksize = value; chunksize &= 0xffff; } else { chunksize = 0; } if((int)(tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || tag & 0x6000) { int skip = 1; error = 0; if(tag == (int)findtag) { *retvalue = value; ret = pinput->lpCurrentWord; break; } if((tag & 0xff00) == 0x2200) //sample size { chunksize = 0; // don't test against pinput->nWordsUsed, as we might be only reader enough for metadata only. skip = 0; } if((tag & 0xff00) == 0x2300) //uncompressed sample size { skip = 1; } if((tag & 0xff00) == 0x2100) //level skip = 0; if(chunksize) { if(chunksize*4 > pinput->nWordsUsed || chunksize < 0) { break; } if(skip) { //unsigned int *iptr = (unsigned int *)pinput->lpCurrentWord; pinput->lpCurrentWord += chunksize*4; pinput->nWordsUsed -= chunksize*4; } } } else { error = 1; } } while(tag != CODEC_TAG_GROUP_TRAILER && tag != CODEC_TAG_FRAME_TRAILER && pinput->nWordsUsed>0 && !error); return ret; }
kmeans_clustering.c
/*****************************************************************************/ /*IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. */ /*By downloading, copying, installing or using the software you agree */ /*to this license. If you do not agree to this license, do not download, */ /*install, copy or use the software. */ /* */ /* */ /*Copyright (c) 2005 Northwestern University */ /*All rights reserved. */ /*Redistribution of the software in source and binary forms, */ /*with or without modification, is permitted provided that the */ /*following conditions are met: */ /* */ /*1 Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* */ /*2 Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in the */ /* documentation and/or other materials provided with the distribution.*/ /* */ /*3 Neither the name of Northwestern University nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* */ /*THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS */ /*IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */ /*TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT AND */ /*FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL */ /*NORTHWESTERN UNIVERSITY OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, */ /*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ /*(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR */ /*SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) */ /*HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, */ /*STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /*ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /*POSSIBILITY OF SUCH DAMAGE. */ /******************************************************************************/ /*************************************************************************/ /** File: kmeans_clustering.c **/ /** Description: Implementation of regular k-means clustering **/ /** algorithm **/ /** Author: Wei-keng Liao **/ /** ECE Department, Northwestern University **/ /** email: wkliao@ece.northwestern.edu **/ /** **/ /** Edited by: Jay Pisharath **/ /** Northwestern University. **/ /** **/ /** ================================================================ **/ /** **/ /** Edited by: Sang-Ha Lee **/ /** University of Virginia **/ /** **/ /** Description: No longer supports fuzzy c-means clustering; **/ /** only regular k-means clustering. **/ /** Simplified for main functionality: regular k-means **/ /** clustering. 
**/ /** **/ /*************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <float.h> #include <math.h> #include "kmeans.h" #include "openacc.h" #define RANDOM_MAX 2147483647 #ifndef FLT_MAX #define FLT_MAX 3.40282347e+38 #endif #ifndef _NPOINTS #define _NPOINTS 819200 #endif #ifndef _UNROLLFAC_ #define _UNROLLFAC_ 100 #endif #define _NTHREADS (_NPOINTS/_UNROLLFAC_) #ifdef _OPENARC_ #if _NPOINTS == 204800 #pragma openarc #define _NPOINTS 204800 #elif _NPOINTS == 494020 #pragma openarc #define _NPOINTS 494020 #elif _NPOINTS == 819200 #pragma openarc #define _NPOINTS 819200 #endif #if _UNROLLFAC_ == 1 #pragma openarc #define _UNROLLFAC_ 1 #elif _UNROLLFAC_ == 2 #pragma openarc #define _UNROLLFAC_ 2 #elif _UNROLLFAC_ == 4 #pragma openarc #define _UNROLLFAC_ 4 #elif _UNROLLFAC_ == 5 #pragma openarc #define _UNROLLFAC_ 5 #elif _UNROLLFAC_ == 800 #pragma openarc #define _UNROLLFAC_ 800 #elif _UNROLLFAC_ == 10 #pragma openarc #define _UNROLLFAC_ 10 #elif _UNROLLFAC_ == 100 #pragma openarc #define _UNROLLFAC_ 100 #endif #pragma openarc #define _NATTRIBUTES 34 #pragma openarc #define _NCLUSTERS 5 #pragma openarc #define _NTHREADS (_NPOINTS/_UNROLLFAC_) #endif extern double wtime(void); /*----< kmeans_clustering() >---------------------------------------------*/ PAType kmeans_clustering(float feature[_NPOINTS][_NATTRIBUTES], /* in: [npoints][nfeatures] */ int nfeatures, int npoints, int nclusters, float threshold, int membership[_NPOINTS]) /* out: [npoints] */ { int i, j, k, n=0, index, loop=0; int *new_centers_len; /* [nclusters]: no. 
of points in each cluster */ float (*new_centers)[_NATTRIBUTES]; /* [nclusters][nfeatures] */ float (*clusters)[_NATTRIBUTES]; /* out: [nclusters][nfeatures] */ float delta; double timing; int nthreads; //int (*partial_new_centers_len)[_NCLUSTERS]; //float (*partial_new_centers)[_NCLUSTERS][_NATTRIBUTES]; ///////////////////////////////////////////// // Added for inlining find_nearest_point() // ///////////////////////////////////////////// int index_fnp, i_fnp; float max_dist=FLT_MAX; int i_ed; /////////////////////////////////////////////// // Added for unrolling of the parallel loop. // /////////////////////////////////////////////// int tid, ii; nthreads = npoints/_UNROLLFAC_; /* allocate space for returning variable clusters[] */ //clusters = (float (*)[_NATTRIBUTES]) malloc(nclusters * nfeatures * sizeof(float)); clusters = (float (*)[_NATTRIBUTES]) acc_create_unified(NULL, nclusters * nfeatures * sizeof(float)); /* randomly pick cluster centers */ for (i=0; i<nclusters; i++) { //n = (int)rand() % npoints; for (j=0; j<nfeatures; j++) clusters[i][j] = feature[n][j]; n++; } for (i=0; i<npoints; i++) membership[i] = -1; /* need to initialize new_centers_len and new_centers[0] to all 0 */ new_centers_len = (int*) calloc(nclusters, sizeof(int)); new_centers = (float (*)[_NATTRIBUTES]) calloc(nclusters * nfeatures, sizeof(float)); //partial_new_centers_len = (int (*)[_NCLUSTERS]) calloc(nthreads*nclusters, sizeof(int)); //partial_new_centers =(float (*)[_NCLUSTERS][_NATTRIBUTES]) calloc(nthreads*nclusters*nfeatures, sizeof(float)); printf("num of threads = %d\n", nthreads); #pragma acc data copyin (feature[0:_NPOINTS][0:_NATTRIBUTES], membership[0:_NPOINTS]) create(clusters[0:_NCLUSTERS][0:_NATTRIBUTES]) do { delta = 0.0F; #pragma acc update device(clusters) #pragma acc kernels loop gang worker independent \ private(i, index, index_fnp, max_dist) \ reduction(+:new_centers[0:_NCLUSTERS][0:_NATTRIBUTES],new_centers_len[0:_NCLUSTERS]) #pragma openarc cuda 
sharedRW(new_centers_len) for(tid=0; tid<nthreads; tid++) { #pragma acc loop seq for (ii=0; ii<_UNROLLFAC_; ii++) { i = tid + ii*nthreads; /* find the index of nestest cluster centers */ //index = find_nearest_point(feature[i], // nfeatures, // clusters, // nclusters); max_dist = FLT_MAX; /* find the cluster center id with min distance to pt */ for (i_fnp=0; i_fnp<nclusters; i_fnp++) { float dist; //dist = euclid_dist_2(feature[i_fnp], clusters[i_fnp], nfeatures); /* no need square root */ dist = 0.0F; for (i_ed=0; i_ed<nfeatures; i_ed++) dist += (feature[i][i_ed]-clusters[i_fnp][i_ed]) * (feature[i][i_ed]-clusters[i_fnp][i_ed]); if (dist < max_dist) { max_dist = dist; index_fnp = i_fnp; } } index = index_fnp; /* if membership changes, increase delta by 1 */ if (membership[i] != index) delta += 1.0F; /* assign the membership to object i */ membership[i] = index; /* update new cluster centers : sum of all objects located within */ new_centers_len[index]++; for (j=0; j<nfeatures; j++) new_centers[index][j] += feature[i][j]; } } /* end of #pragma omp parallel for */ /* replace old cluster centers with new_centers */ for (i=0; i<nclusters; i++) { for (j=0; j<nfeatures; j++) { if (new_centers_len[i] > 0) clusters[i][j] = new_centers[i][j] / new_centers_len[i]; new_centers[i][j] = 0.0F; /* set back to 0 */ } new_centers_len[i] = 0; /* set back to 0 */ } } while (delta > threshold && loop++ < 500); printf("loop count: %d\n", loop); free(new_centers); free(new_centers_len); return clusters; }
par_for.c
#include <omp.h> #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <sys/time.h> //#include "data.h" #include "/esat/puck1/users/nshah/cpu_openmp/tretail_TWO_WAY_PARTITION_TWO_WAY_FULL_CPU_FINE_2.c" /* #include "/esat/puck1/users/nshah/cpu_openmp/tretail_1024threads_512batch.c" */ //#include "/esat/puck1/users/nshah/cpu_openmp/ad_1threads_1batch.c" //#include "./no_backup/wilt_data.h" //#include "./no_backup/tretail_data.h" //extern int batch_sz; //extern int N_for_threads; //extern int N_layers; //extern int *actual_layer_len; //extern int tot_layer_len; int par_for(int n_iter, int batch_sz, int N_for_threads, int N_layers, int *layer_len, int tot_layer_len, int * cum_layer_len, float *res, bool *op, int *ptr_0, int *ptr_1) { //int cum_layer_len = 0; for(int i=0; i< n_iter; i++) { for (int l=0; l< N_layers; l++) { //printf("%d\n", l); #pragma omp parallel { #pragma omp for for (int t = 0; t< N_for_threads; t++) { //printf("%d %d\n", l, t); for (int layer_l = 0; layer_l< layer_len[t*N_layers + l]; layer_l++) { //printf("%d %d %d\n", l, t, layer_l); int cum_layer_l = cum_layer_len[t*(N_layers + 1) + l] + layer_l; int idx=cum_layer_l + t* tot_layer_len; //printf("cum_layer_l: %d, idx: %d\n", cum_layer_l, idx); #pragma omp simd for (int b = 0; b< batch_sz; b++) { float in_0= res[ptr_0[idx] * batch_sz + b]; float in_1= res[ptr_1[idx] * batch_sz + b]; res[ptr_out[idx] * batch_sz + b]= op[idx]? 
in_0 * in_1 : in_0 + in_1; //printf("l, t, layer_l, b: %d, %d, %d, %d\n", l, t, layer_l, b); } //printf("l, t, layer_l: %d, %d, %d\n", l, t, layer_l); } } } //cum_layer_len += layer_len[l]; } } } int main(int argc, char *argv[]) { //printf("Total threads: %d\n", omp_get_num_threads()); //float * res; //bool* op; //int * ptr_0; //int * ptr_1; //omp_set_num_threads(N_for_threads); par_for(1, batch_sz, N_for_threads, N_layers, layer_len, tot_layer_len, cum_layer_len, res,op,ptr_0,ptr_1); int n_iter= 1e5; n_iter = atoi(argv[1]); struct timeval start, end; gettimeofday(&start, NULL); //printf("Total threads: %d\n", omp_get_num_threads()); par_for(n_iter, batch_sz, N_for_threads, N_layers, layer_len, tot_layer_len, cum_layer_len, res,op,ptr_0,ptr_1); gettimeofday(&end, NULL); float delta = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6; //for(int b=0; b< batch_sz; b++) { for(int b=0; b< 2; b++) { printf("results,%f,actual,%f,", res[(n_tot-1)*batch_sz + b], golden_val); } //printf("results %f, actual: %f\n", res[0], golden_val); printf("total_time,%fs,batch_sz,%d,n_iter,%d,", delta, batch_sz, n_iter); printf("per_batch,%fs,", delta/n_iter); printf("per_inference,%fs,", delta/(n_iter*batch_sz)); }
1e7417_ac_wave_so4.c
#define _POSIX_C_SOURCE 200809L

/* Wall-clock section timers: declare start_/end_ timevals and accumulate
   elapsed seconds into the matching field of struct profiler *T. */
#define START_TIMER(S) \
  struct timeval start_##S, end_##S; \
  gettimeofday(&start_##S, NULL);
#define STOP_TIMER(S, T) \
  gettimeofday(&end_##S, NULL); \
  T->S += (double)(end_##S.tv_sec - start_##S.tv_sec) + (double)(end_##S.tv_usec - start_##S.tv_usec) / 1000000;

#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include <stdio.h>
#include "omp.h"

#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

/* Devito-style data wrapper: `data` is the raw buffer, the int arrays carry
   per-dimension size / padded-size / halo bookkeeping. Only `data` and
   `size` are read in this file. */
struct dataobj
{
  void *restrict data;
  int *size;
  int *npsize;
  int *dsize;
  int *hsize;
  int *hofs;
  int *oofs;
};

/* Per-section accumulated wall-clock time, filled via START/STOP_TIMER. */
struct profiler
{
  double section0;
  double section1;
};

void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw);

/*
 * Time-tiled acoustic wave propagator driver (auto-generated, Devito style).
 *
 * Reads tile and block sizes from block_sizes_vec, then sweeps skewed
 * time/space tiles (skew factor sf = 2), invoking bf0 once per (tile,
 * timestep). Timestep buffers rotate through 3 slots via t0/t1/t2.
 * Timing of the sweep is accumulated into timers->section0; the trailing
 * section1 loop is empty (placeholder kept by the generator).
 *
 * Returns 0. NOTE(review): nthreads_nonaffine is accepted but unused here,
 * and the local cast `u` below is never read in this function.
 */
int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine, struct profiler *timers)
{
  /* Reinterpret the flat dataobj buffers as typed multi-dimensional arrays. */
  int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
  int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
  /* NOTE(review): this cast of u is unused in Kernel (bf0 re-derives it). */
  float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

  /* Tile/block geometry supplied at runtime: [0]/[1] outer time-tile extents,
     [2]/[3] inner cache-block extents. */
  int xb_size = block_sizes[0];
  int yb_size = block_sizes[1];
  int x0_blk0_size = block_sizes[2];
  int y0_blk0_size = block_sizes[3];
  printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);

  //for (int time = time_m, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3); time <= time_M; time += 1, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3))
  //{
  int sf = 2;                                  /* time-skew factor */
  int t_blk_size = 2 * sf * (time_M - time_m); /* temporal tile extent */

  START_TIMER(section0)
  for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
  {
    /* Spatial tile origins are extended by sf*(time_M - time_m) because the
       tiles are skewed along time (x/y indices below carry a `+ time` shift). */
    for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1)
    {
      for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1)
      {
        /* t0/t1/t2 rotate the three time slots of u; tw is the logical
           (unskewed) timestep index used to address save_src_u. */
        for (int time = t_blk, t1 = (time + 2) % (3), t0 = (time) % (3), t2 = (time + 1) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t1 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3))
        {
          int tw = ((time / sf) % (time_M - time_m + 1));
          bf0(damp_vec, dt, u_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, source_id_vec, source_mask_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, time, tw);
          /* Remainder-block variants kept by the generator for reference: */
          //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
          //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, nthreads);
          //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
        }
      }
    }
    /* End section0 */
    STOP_TIMER(section0, timers)
  }

  /* section1 is an empty placeholder emitted by the code generator. */
  for (int time = time_m, t0 = (time + 1) % (3); time <= time_M; time += 1, t0 = (time + 1) % (3))
  {
    /* Begin section1 */
    START_TIMER(section1)
    STOP_TIMER(section1, timers)
    /* End section1 */
  }
  return 0;
}

/*
 * One (tile, timestep) update of the 3D acoustic wave equation.
 *
 * Applies a 4th-order-in-space / 2nd-order-in-time finite-difference stencil
 * to wavefield u (3 rotating time slots t0=t-1, t1=t, t2=t+1), with damping
 * layer `damp` and velocity model `vp`, then injects sources through the
 * sparse mask arrays: for each (x, y) column, nnz_sp_source_mask gives the
 * count of active z positions, sp_source_mask lists them, and source_id /
 * source_mask / save_src_u[tw] provide the per-source amplitude.
 *
 * Index arithmetic: spatial loop variables are skewed by `time`, so every
 * array access subtracts `time`; the +4 offsets skip the halo padding
 * (+1 for damp's smaller halo). Parallelized over (x,y) blocks with OpenMP;
 * the innermost z loops are vectorized.
 */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw)
{
  /* Typed views over the flat dataobj buffers (see Kernel). */
  float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
  float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
  float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
  int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;

  /* Degenerate block sizes would make the loop strides zero; bail out. */
  if (x0_blk0_size == 0 || y0_blk0_size == 0)
  {
    return;
  }
#pragma omp parallel num_threads(nthreads)
  {
#pragma omp for collapse(2) schedule(dynamic, 1)
    for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
    {
      for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
      {
        for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++)
        {
          for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++)
          {
#pragma omp simd aligned(damp, u, vp : 32)
            for (int z = z_m; z <= z_M; z += 1)
            {
              float r8 = 1.0/dt;                 /* 1/dt   (damping term weight)   */
              float r7 = 1.0/(dt*dt);            /* 1/dt^2 (time-derivative weight) */
              float r6 = 1.0/(vp[x - time + 4][y - time + 4][z + 4]*vp[x - time + 4][y - time + 4][z + 4]); /* 1/vp^2 */
              /* 2nd-order time update with 4th-order 3D Laplacian
                 (coefficients -3.7037e-4, 5.9259e-3, -3.3333e-2). */
              u[t2][x - time + 4][y - time + 4][z + 4] = (r6*(-r7*(u[t0][x - time + 4][y - time + 4][z + 4] - 2.0F*u[t1][x - time + 4][y - time + 4][z + 4])) + r8*(damp[x - time + 1][y - time + 1][z + 1]*u[t1][x - time + 4][y - time + 4][z + 4]) - 3.70370379e-4F*(u[t1][x - time + 2][y - time + 4][z + 4] + u[t1][x - time + 4][y - time + 2][z + 4] + u[t1][x - time + 4][y - time + 4][z + 2] + u[t1][x - time + 4][y - time + 4][z + 6] + u[t1][x - time + 4][y - time + 6][z + 4] + u[t1][x - time + 6][y - time + 4][z + 4]) + 5.92592607e-3F*(u[t1][x - time + 3][y - time + 4][z + 4] + u[t1][x - time + 4][y - time + 3][z + 4] + u[t1][x - time + 4][y - time + 4][z + 3] + u[t1][x - time + 4][y - time + 4][z + 5] + u[t1][x - time + 4][y - time + 5][z + 4] + u[t1][x - time + 5][y - time + 4][z + 4]) - 3.33333341e-2F*u[t1][x - time + 4][y - time + 4][z + 4])/(r6*r7 + r8*damp[x - time + 1][y - time + 1][z + 1]);
            }
            /* Sparse source injection for this (x, y) column at timestep tw. */
#pragma omp simd aligned(damp, u, vp : 32)
            for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
            {
              int zind = sp_source_mask[x - time][y - time][sp_zi];
              float r0 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
              u[t2][x - time + 4][y - time + 4][zind + 4] += r0;
            }
          }
        }
      }
    }
  }
}
matrix.h
//================================================================================== // BSD 2-Clause License // // Copyright (c) 2014-2022, NJIT, Duality Technologies Inc. and other contributors // // All rights reserved. // // Author TPOC: contact@openfhe.org // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
//==================================================================================

/*
  This code provides a templated matrix implementation.
  Element is any type supporting assignment from integer literals plus the
  arithmetic operators used below (ring elements, integers, doubles, ...).
*/

#ifndef LBCRYPTO_MATH_MATRIX_H
#define LBCRYPTO_MATH_MATRIX_H

#include <cmath>
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <vector>
#include <utility>

#include "lattice/lat-hal.h"
#include "math/distrgen.h"
#include "math/nbtheory.h"
#include "utils/inttypes.h"
#include "utils/memory.h"
#include "utils/utilities.h"

namespace lbcrypto {

// Forward declaration
class Field2n;

/**
 * Dense rows x cols matrix stored as a vector of row vectors.
 * Elements are created through a user-supplied zero-allocator lambda so that
 * ring elements carrying runtime parameters can be constructed uniformly.
 */
template <class Element>
class Matrix : public Serializable {
public:
    typedef std::vector<std::vector<Element>> data_t;
    typedef std::function<Element(void)> alloc_func;
    typedef std::vector<Element> data_row_t;

    /**
     * Constructor that initializes matrix values using a zero allocator
     *
     * @param &allocZero lambda function for zero initialization.
     * @param &rows number of rows.
     * @param &cols number of columns.
     */
    Matrix(alloc_func allocZero, size_t rows, size_t cols) : data(), rows(rows), cols(cols), allocZero(allocZero) {
        data.resize(rows);
        for (auto row = data.begin(); row != data.end(); ++row) {
            for (size_t col = 0; col < cols; ++col) {
                row->push_back(allocZero());
            }
        }
    }

    // TODO: add Clear();

    /**
     * Constructor that initializes matrix values using a distribution generation
     * allocator
     *
     * @param &allocZero lambda function for zero initialization (used for
     * initializing derived matrix objects)
     * @param &rows number of rows.
     * @param &cols number of columns.
     * @param &allocGen lambda function for initialization using a distribution
     * generator.
     */
    Matrix(alloc_func allocZero, size_t rows, size_t cols, alloc_func allocGen);

    /**
     * Constructor of an empty matrix.
     * SetSize must be called on this matrix to use it
     * SetAlloc needs to be called if 0 passed to constructor
     * This mostly exists to support deserializing
     *
     * @param &allocZero lambda function for zero initialization.
     */
    explicit Matrix(alloc_func allocZero = 0) : data(), rows(0), cols(0), allocZero(allocZero) {}

    /**
     * Set the size of a matrix, elements are zeroed out
     * Only valid on an empty (0x0) matrix; throws otherwise.
     *
     * @param rows number of rows
     * @param cols number of columns
     */
    void SetSize(size_t rows, size_t cols) {
        if (this->rows != 0 || this->cols != 0) {
            OPENFHE_THROW(not_available_error, "You cannot SetSize on a non-empty matrix");
        }
        this->rows = rows;
        this->cols = cols;
        data.resize(rows);
        for (auto row = data.begin(); row != data.end(); ++row) {
            for (size_t col = 0; col < cols; ++col) {
                row->push_back(allocZero());
            }
        }
    }

    /**
     * SetAllocator - set the function to allocate a zero;
     * basically only required for deserializer
     *
     * @param allocZero
     */
    void SetAllocator(alloc_func allocZero) {
        this->allocZero = allocZero;
    }

    /**
     * Copy constructor (deep copy of the element data)
     *
     * @param &other the matrix object to be copied
     */
    Matrix(const Matrix<Element>& other) : data(), rows(other.rows), cols(other.cols), allocZero(other.allocZero) {
        deepCopyData(other.data);
    }

    /**
     * Assignment operator
     *
     * @param &other the matrix object whose values are to be copied
     * @return the resulting matrix
     */
    Matrix<Element>& operator=(const Matrix<Element>& other);

    /**
     * In-place change of the current matrix to a matrix of all ones
     *
     * @return the resulting matrix
     */
    Matrix<Element>& Ones() {
        for (size_t row = 0; row < rows; ++row) {
            for (size_t col = 0; col < cols; ++col) {
                data[row][col] = 1;
            }
        }
        return *this;
    }

    /**
     * In-place modulo reduction
     *
     * @return the resulting matrix
     */
    Matrix<Element>& ModEq(const Element& modulus);

    /**
     * modular subtraction
     *
     * @return the resulting matrix
     */
    Matrix<Element>& ModSubEq(Matrix<Element> const& b, const Element& modulus);

    /**
     * Fill matrix using the same element
     *
     * @param &val the element the matrix is filled by
     *
     * @return the resulting matrix
     */
    Matrix<Element>& Fill(const Element& val);

    /**
     * In-place change of the current matrix to Identity matrix
     *
     * @return the resulting matrix
     */
    Matrix<Element>& Identity() {
        for (size_t row = 0; row < rows; ++row) {
            for (size_t col = 0; col < cols; ++col) {
                if (row == col) {
                    data[row][col] = 1;
                }
                else {
                    data[row][col] = 0;
                }
            }
        }
        return *this;
    }

    /**
     * Sets the first row to be powers of the base; subsequent rows are
     * shifted copies (block-diagonal gadget matrix with k = cols / rows
     * digits per row). This overload handles non-DCRT element types.
     *
     * @param base is the base the digits of the matrix are represented in
     * @return the resulting matrix
     */
    template <typename T = Element, typename std::enable_if<!std::is_same<T, M2DCRTPoly>::value && !std::is_same<T, M4DCRTPoly>::value && !std::is_same<T, M6DCRTPoly>::value, bool>::type = true>
    Matrix<T> GadgetVector(int64_t base = 2) const {
        Matrix<T> g(allocZero, rows, cols);
        auto base_matrix = allocZero();
        size_t k = cols / rows;
        base_matrix = base;
        g(0, 0) = 1;
        for (size_t i = 1; i < k; i++) {
            g(0, i) = g(0, i - 1) * base_matrix;
        }
        for (size_t row = 1; row < rows; row++) {
            for (size_t i = 0; i < k; i++) {
                g(row, i + row * k) = g(0, i);
            }
        }
        return g;
    }

    /**
     * GadgetVector overload for DCRT element types: powers of the base are
     * written per CRT tower (digitCount digits per tower, derived from the
     * first tower's modulus).
     *
     * @param base is the base the digits of the matrix are represented in
     * @return the resulting matrix
     */
    template <typename T = Element, typename std::enable_if<std::is_same<T, M2DCRTPoly>::value || std::is_same<T, M4DCRTPoly>::value || std::is_same<T, M6DCRTPoly>::value, bool>::type = true>
    Matrix<T> GadgetVector(int64_t base = 2) const {
        Matrix<T> g(allocZero, rows, cols);
        auto base_matrix = allocZero();
        base_matrix = base;
        size_t bk = 1;
        auto params = g(0, 0).GetParams()->GetParams();
        uint64_t digitCount = (uint64_t)ceil(log2(params[0]->GetModulus().ConvertToDouble()) / log2(base));
        for (size_t k = 0; k < digitCount; k++) {
            for (size_t i = 0; i < params.size(); i++) {
                NativePoly temp(params[i]);
                temp = bk;
                g(0, k + i * digitCount).SetElementAtIndex(i, std::move(temp));
            }
            bk *= base;
        }
        size_t kCols = cols / rows;
        for (size_t row = 1; row < rows; row++) {
            for (size_t i = 0; i < kCols; i++) {
                g(row, i + row * kCols) = g(0, i);
            }
        }
        return g;
    }

    /**
     * Computes the infinity norm. Not defined for scalar element types
     * (double, int, int64_t, Field2n) — this overload always throws.
     *
     * @return the norm in double format
     */
    template <typename T = Element, typename std::enable_if<std::is_same<T, double>::value || std::is_same<T, int>::value || std::is_same<T, int64_t>::value || std::is_same<T, Field2n>::value, bool>::type = true>
    double Norm() const {
        OPENFHE_THROW(not_available_error, "Norm not defined for this type");
    }

    /**
     * Computes the infinity norm as the maximum of the elements' own Norm().
     *
     * @return the norm in double format
     */
    template <typename T = Element, typename std::enable_if<!std::is_same<T, double>::value && !std::is_same<T, int>::value && !std::is_same<T, int64_t>::value && !std::is_same<T, Field2n>::value, bool>::type = true>
    double Norm() const {
        double retVal = 0.0;
        double locVal = 0.0;
        for (size_t row = 0; row < rows; ++row) {
            for (size_t col = 0; col < cols; ++col) {
                locVal = data[row][col].Norm();
                if (locVal > retVal) {
                    retVal = locVal;
                }
            }
        }
        return retVal;
    }

    /**
     * Matrix multiplication
     *
     * @param &other the multiplier matrix
     * @return the result of multiplication
     */
    Matrix<Element> Mult(Matrix<Element> const& other) const;

    /**
     * Operator for matrix multiplication
     *
     * @param &other the multiplier matrix
     * @return the result of multiplication
     */
    Matrix<Element> operator*(Matrix<Element> const& other) const {
        return Mult(other);
    }

    /**
     * Multiplication of matrix by a scalar (parallelized over columns)
     *
     * @param &other the multiplier element
     * @return the result of multiplication
     */
    Matrix<Element> ScalarMult(Element const& other) const {
        Matrix<Element> result(*this);
#pragma omp parallel for
        for (size_t col = 0; col < result.cols; ++col) {
            for (size_t row = 0; row < result.rows; ++row) {
                result.data[row][col] = result.data[row][col] * other;
            }
        }
        return result;
    }

    /**
     * Operator for scalar multiplication
     *
     * @param &other the multiplier element
     * @return the result of multiplication
     */
    Matrix<Element> operator*(Element const& other) const {
        return ScalarMult(other);
    }

    /**
     * Equality check (same dimensions and element-wise equal)
     *
     * @param &other the matrix object to compare to
     * @return the boolean result
     */
    bool Equal(Matrix<Element> const& other) const {
        if (rows != other.rows || cols != other.cols) {
            return false;
        }
        for (size_t i = 0; i < rows; ++i) {
            for (size_t j = 0; j < cols; ++j) {
                if (data[i][j] != other.data[i][j]) {
                    return false;
                }
            }
        }
        return true;
    }

    /**
     * Operator for equality check
     *
     * @param &other the matrix object to compare to
     * @return the boolean result
     */
    bool operator==(Matrix<Element> const& other) const {
        return Equal(other);
    }

    /**
     * Operator for non-equality check
     *
     * @param &other the matrix object to compare to
     * @return the boolean result
     */
    bool operator!=(Matrix<Element> const& other) const {
        return !Equal(other);
    }

    /**
     * Get property to access the data as a vector of vectors
     *
     * @return the data as vector of vectors
     */
    const data_t& GetData() const {
        return data;
    }

    /**
     * Get property to access the number of rows in the matrix
     *
     * @return the number of rows
     */
    size_t GetRows() const {
        return rows;
    }

    /**
     * Get property to access the number of columns in the matrix
     *
     * @return the number of columns
     */
    size_t GetCols() const {
        return cols;
    }

    /**
     * Get property to access the zero allocator for the matrix
     *
     * @return the lambda function corresponding to the element zero allocator
     */
    alloc_func GetAllocator() const {
        return allocZero;
    }

    /**
     * Sets the evaluation or coefficient representation for all ring elements
     * that support the SetFormat method
     *
     * @param &format the enum value corresponding to coefficient or evaluation
     * representation
     */
    void SetFormat(Format format);

    /**
     * Matrix addition (parallelized over columns)
     *
     * @param &other the matrix to be added
     * @return the resulting matrix
     */
    Matrix<Element> Add(Matrix<Element> const& other) const {
        if (rows != other.rows || cols != other.cols) {
            OPENFHE_THROW(math_error, "Addition operands have incompatible dimensions");
        }
        Matrix<Element> result(*this);
#pragma omp parallel for
        for (size_t j = 0; j < cols; ++j) {
            for (size_t i = 0; i < rows; ++i) {
                result.data[i][j] += other.data[i][j];
            }
        }
        return result;
    }

    /**
     * Operator for matrix addition
     *
     * @param &other the matrix to be added
     * @return the resulting matrix
     */
    Matrix<Element> operator+(Matrix<Element> const& other) const {
        return this->Add(other);
    }

    /**
     * Operator for in-place addition
     *
     * @param &other the matrix to be added
     * @return the resulting matrix (same object)
     */
    Matrix<Element>& operator+=(Matrix<Element> const& other);

    /**
     * Matrix subtraction (parallelized over columns)
     *
     * @param &other the matrix to be subtracted
     * @return the resulting matrix
     */
    Matrix<Element> Sub(Matrix<Element> const& other) const {
        if (rows != other.rows || cols != other.cols) {
            OPENFHE_THROW(math_error, "Subtraction operands have incompatible dimensions");
        }
        Matrix<Element> result(allocZero, rows, other.cols);
#pragma omp parallel for
        for (size_t j = 0; j < cols; ++j) {
            for (size_t i = 0; i < rows; ++i) {
                result.data[i][j] = data[i][j] - other.data[i][j];
            }
        }
        return result;
    }

    /**
     * Operator for matrix subtraction
     *
     * @param &other the matrix to be subtracted
     * @return the resulting matrix
     */
    Matrix<Element> operator-(Matrix<Element> const& other) const {
        return this->Sub(other);
    }

    /**
     * Operator for in-place matrix subtraction
     *
     * @param &other the matrix to be subtracted
     * @return the resulting matrix (same object)
     */
    Matrix<Element>& operator-=(Matrix<Element> const& other);

    /**
     * Matrix transposition
     *
     * @return the resulting matrix
     */
    Matrix<Element> Transpose() const;

    // YSP The signature of this method needs to be changed in the future
    /**
     * Matrix determinant - found using Laplace formula with complexity O(d!),
     * where d is the dimension
     *
     * @param *result where the result is stored
     */
    void Determinant(Element* result) const;
    // Element Determinant() const;

    /**
     * Cofactor matrix - the matrix of determinants of the minors A_{ij}
     * multiplied by -1^{i+j}
     *
     * @return the cofactor matrix for the given matrix
     */
    Matrix<Element> CofactorMatrix() const;

    /**
     * Add rows to bottom of the matrix
     *
     * @param &other the matrix to be added to the bottom of current matrix
     * @return the resulting matrix
     */
    Matrix<Element>& VStack(Matrix<Element> const& other);

    /**
     * Add columns to the right of the matrix
     *
     * @param &other the matrix to be added to the right of current matrix
     * @return the resulting matrix
     */
    Matrix<Element>& HStack(Matrix<Element> const& other);

    /**
     * Matrix indexing operator - writeable instance of the element
     * (no bounds checking)
     *
     * @param &row row index
     * @param &col column index
     * @return the element at the index
     */
    Element& operator()(size_t row, size_t col) {
        return data[row][col];
    }

    /**
     * Matrix indexing operator - read-only instance of the element
     * (no bounds checking)
     *
     * @param &row row index
     * @param &col column index
     * @return the element at the index
     */
    Element const& operator()(size_t row, size_t col) const {
        return data[row][col];
    }

    /**
     * Matrix row extractor
     *
     * @param &row row index
     * @return the row at the index as a 1 x cols matrix
     */
    Matrix<Element> ExtractRow(size_t row) const {
        Matrix<Element> result(this->allocZero, 1, this->cols);
        int i = 0;
        for (auto& elem : this->GetData()[row]) {
            result(0, i) = elem;
            i++;
        }
        return result;
        // return *this;
    }

    /**
     * Matrix column extractor
     *
     * @param &col col index
     * @return the col at the index as a rows x 1 matrix
     */
    Matrix<Element> ExtractCol(size_t col) const {
        Matrix<Element> result(this->allocZero, this->rows, 1);
        for (size_t i = 0; i < this->rows; i++) {
            result(i, 0) = data[i][col];
        }
        return result;
        // return *this;
    }

    /**
     * Matrix rows extractor in a range from row_start to row_end; inclusive
     *
     * @param &row_start &row_end row indices
     * @return the rows in the range delimited by indices inclusive
     */
    inline Matrix<Element> ExtractRows(size_t row_start, size_t row_end) const {
        Matrix<Element> result(this->allocZero, row_end - row_start + 1, this->cols);
        for (usint row = row_start; row < row_end + 1; row++) {
            int i = 0;
            for (auto elem = this->GetData()[row].begin(); elem != this->GetData()[row].end(); ++elem) {
                result(row - row_start, i) = *elem;
                i++;
            }
        }
        return result;
    }

    // Stream output: rows printed in bracketed form, one per line.
    friend std::ostream& operator<<(std::ostream& os, const Matrix<Element>& m) {
        os << "[ ";
        for (size_t row = 0; row < m.GetRows(); ++row) {
            os << "[ ";
            for (size_t col = 0; col < m.GetCols(); ++col) {
                os << m(row, col) << " ";
            }
            os << "]\n";
        }
        os << " ]\n";
        return os;
    }

    /**
     * Call switch format for each (ring) element
     *
     */
    void SwitchFormat();

// Specialization helper: stamps out a SwitchFormat() that throws, for types
// that are not ring Elements.
#define NOT_AN_ELEMENT_MATRIX(T)                                \
    template <>                                                 \
    void Matrix<T>::SwitchFormat() {                            \
        OPENFHE_THROW(not_available_error, "Not a matrix of Elements"); \
    }

    /*
     * Multiply the matrix by a vector whose elements are all 1's. This causes
     * the elements of each row of the matrix to be added and placed into the
     * corresponding position in the output vector.
     */
    Matrix<Element> MultByUnityVector() const;

    /*
     * Multiply the matrix by a vector of random 1's and 0's, which is the same as
     * adding select elements in each row together. Return a vector that is a rows
     * x 1 matrix.
     */
    Matrix<Element> MultByRandomVector(std::vector<int> ranvec) const;

    // cereal serialization: store data, rows, cols (allocator is not saved)
    template <class Archive>
    void save(Archive& ar, std::uint32_t const version) const {
        ar(::cereal::make_nvp("d", data));
        ar(::cereal::make_nvp("r", rows));
        ar(::cereal::make_nvp("c", cols));
    }

    template <class Archive>
    void load(Archive& ar, std::uint32_t const version) {
        if (version > SerializedVersion()) {
            OPENFHE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library");
        }
        ar(::cereal::make_nvp("d", data));
        ar(::cereal::make_nvp("r", rows));
        ar(::cereal::make_nvp("c", cols));
        // users will need to SetAllocator for any newly deserialized matrix
    }

    std::string SerializedObjectName() const {
        return "Matrix";
    }

    static uint32_t SerializedVersion() {
        return 1;
    }

private:
    data_t data;        // row-major storage: data[row][col]
    uint32_t rows;      // current number of rows
    uint32_t cols;      // current number of columns
    alloc_func allocZero;  // creates a zero Element (carries ring params)
    // mutable int NUM_THREADS = 1;

    // deep copy of data - used for copy constructor
    void deepCopyData(data_t const& src) {
        data.clear();
        data.resize(src.size());
        for (size_t row = 0; row < src.size(); ++row) {
            for (auto elem = src[row].begin(); elem != src[row].end(); ++elem) {
                data[row].push_back(*elem);
            }
        }
    }
};

/**
 * Operator for scalar multiplication of matrix
 *
 * @param &e element
 * @param &M matrix
 * @return the resulting matrix
 */
template <class Element>
Matrix<Element> operator*(Element const& e, Matrix<Element> const& M) {
    return M.ScalarMult(e);
}
/**
 * Generates a matrix of rotations. See pages 7-8 of
 * https://eprint.iacr.org/2013/297
 *
 * @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
 * @return the resulting matrix of big binary integers
 */
template <typename Element>
Matrix<typename Element::Integer> Rotate(Matrix<Element> const& inMat);

/**
 * Each element becomes a square matrix with columns of that element's
 * rotations in coefficient form. See pages 7-8 of
 * https://eprint.iacr.org/2013/297
 *
 * @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
 * @return the resulting matrix of big binary integers
 */
template <typename Element>
Matrix<typename Element::Vector> RotateVecResult(Matrix<Element> const& inMat);

/**
 * Stream output operator
 *
 * @param &os stream
 * @param &m matrix to be outputted
 * @return the chained stream
 */
template <class Element>
std::ostream& operator<<(std::ostream& os, const Matrix<Element>& m);

/**
 * Gives the Cholesky decomposition of the input matrix.
 * The assumption is that covariance matrix does not have large coefficients
 * because it is formed by discrete gaussians e and s; this implies int32_t can
 * be used This algorithm can be further improved - see the Darmstadt paper
 * section 4.4 http://eprint.iacr.org/2013/297.pdf
 *
 * @param &input the matrix for which the Cholesky decomposition is to be
 * computed
 * @return the resulting matrix of floating-point numbers
 */
Matrix<double> Cholesky(const Matrix<int32_t>& input);

// In-place overload: writes the decomposition into `result`.
void Cholesky(const Matrix<int32_t>& input, Matrix<double>& result);

/**
 * Convert a matrix of integers from BigInteger to int32_t
 * Convert from Z_q to [-q/2, q/2]
 *
 * @param &input the input matrix
 * @param &modulus the ring modulus
 * @return the resulting matrix of int32_t
 */
Matrix<int32_t> ConvertToInt32(const Matrix<BigInteger>& input, const BigInteger& modulus);

/**
 * Convert a matrix of BigVector to int32_t
 * Convert from Z_q to [-q/2, q/2]
 *
 * @param &input the input matrix
 * @param &modulus the ring modulus
 * @return the resulting matrix of int32_t
 */
Matrix<int32_t> ConvertToInt32(const Matrix<BigVector>& input, const BigInteger& modulus);

/**
 * Split a vector of int64_t into a vector of ring elements with ring dimension
 * n
 *
 * @param &other the input matrix (a (rows*n) x 1 column of coefficients)
 * @param &n the ring dimension
 * @param &params Poly element params
 * @return the resulting matrix of Poly
 */
template <typename Element>
Matrix<Element> SplitInt64IntoElements(Matrix<int64_t> const& other, size_t n, const std::shared_ptr<typename Element::Params> params);

// Stamps out the specialization of SplitInt64IntoElements for element type T:
// consumes n consecutive column entries per output element.
#define SPLIT64_FOR_TYPE(T)                                                         \
    template <>                                                                     \
    Matrix<T> SplitInt64IntoElements(Matrix<int64_t> const& other, size_t n,        \
                                     const std::shared_ptr<typename T::Params> params) { \
        auto zero_alloc = T::Allocator(params, Format::COEFFICIENT);                \
        size_t rows = other.GetRows() / n;                                          \
        Matrix<T> result(zero_alloc, rows, 1);                                      \
        for (size_t row = 0; row < rows; ++row) {                                   \
            std::vector<int64_t> values(n);                                         \
            for (size_t i = 0; i < n; ++i)                                          \
                values[i] = other(row * n + i, 0);                                  \
            result(row, 0) = values;                                                \
        }                                                                           \
        return result;                                                              \
    }

/**
 * Another method for splitting a vector of int32_t into a vector of ring
 * elements with ring dimension n
 *
 * @param &other the input matrix (rows x n, one row of coefficients per element)
 * @param &n the ring dimension
 * @param &params Poly element params
 * @return the resulting matrix of Poly
 */
template <typename Element>
Matrix<Element> SplitInt32AltIntoElements(Matrix<int32_t> const& other, size_t n, const std::shared_ptr<typename Element::Params> params);

// Specialization generator: row-wise variant of the split above for int32_t.
#define SPLIT32ALT_FOR_TYPE(T)                                                      \
    template <>                                                                     \
    Matrix<T> SplitInt32AltIntoElements(Matrix<int32_t> const& other, size_t n,     \
                                        const std::shared_ptr<typename T::Params> params) { \
        auto zero_alloc = T::Allocator(params, Format::COEFFICIENT);                \
        size_t rows = other.GetRows();                                              \
        Matrix<T> result(zero_alloc, rows, 1);                                      \
        for (size_t row = 0; row < rows; ++row) {                                   \
            std::vector<int32_t> values(n);                                         \
            for (size_t i = 0; i < n; ++i)                                          \
                values[i] = other(row, i);                                          \
            result(row, 0) = values;                                                \
        }                                                                           \
        return result;                                                              \
    }

/**
 * Split a vector of int64_t into a vector of ring elements with ring dimension
 * n
 *
 * @param &other the input matrix (rows x n, one row of coefficients per element)
 * @param &n the ring dimension
 * @param &params Poly element params
 * @return the resulting matrix of Poly
 */
template <typename Element>
Matrix<Element> SplitInt64AltIntoElements(Matrix<int64_t> const& other, size_t n, const std::shared_ptr<typename Element::Params> params);

// Specialization generator: row-wise variant of the split above for int64_t.
#define SPLIT64ALT_FOR_TYPE(T)                                                      \
    template <>                                                                     \
    Matrix<T> SplitInt64AltIntoElements(Matrix<int64_t> const& other, size_t n,     \
                                        const std::shared_ptr<typename T::Params> params) { \
        auto zero_alloc = T::Allocator(params, Format::COEFFICIENT);                \
        size_t rows = other.GetRows();                                              \
        Matrix<T> result(zero_alloc, rows, 1);                                      \
        for (size_t row = 0; row < rows; ++row) {                                   \
            std::vector<int64_t> values(n);                                         \
            for (size_t i = 0; i < n; ++i)                                          \
                values[i] = other(row, i);                                          \
            result(row, 0) = values;                                                \
        }                                                                           \
        return result;                                                              \
    }

}  // namespace lbcrypto

#endif  // LBCRYPTO_MATH_MATRIX_H
rose_scan.c
#include<math.h> #include<string.h> #define N 16 #include "libxomp.h" int main(argc,argv) int argc; char **argv; { int status = 0; XOMP_init(argc,argv); int r; int b; int v; int a[16]; int simd_scan[16]; int scan_a; int scan_b; for (int i = 0; i < 16; i++) { a[i] = i; simd_scan[i] = 0; } scan_a = 0; scan_b = 10; #pragma omp simd reduction(inscan, + : scan_a) for (int i = 0; i < 16; i++) { simd_scan[i] = scan_a; #pragma omp scan exclusive(r,b,v) scan_a += a[i]; scan_b -= a[i]; } XOMP_terminate(status); return 0; }
GB_unaryop__minv_bool_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_bool_fp64 // op(A') function: GB_tran__minv_bool_fp64 // C type: bool // A type: double // cast: ; // unaryop: cij = true #define GB_ATYPE \ double #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = true ; // casting #define GB_CASTING(z, x) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_bool_fp64 ( bool *restrict Cx, const double *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_bool_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__isne_uint64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// Review note: comments only were added below; all code tokens are untouched.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isne_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_08__isne_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_02__isne_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_04__isne_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isne_uint64)
// A*D function (colscale):         GB (_AxD__isne_uint64)
// D*A function (rowscale):         GB (_DxB__isne_uint64)
// C+=B function (dense accum):     GB (_Cdense_accumB__isne_uint64)
// C+=b function (dense accum):     GB (_Cdense_accumb__isne_uint64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isne_uint64)
// C=scalar+B                       GB (_bind1st__isne_uint64)
// C=scalar+B'                      GB (_bind1st_tran__isne_uint64)
// C=A+scalar                       GB (_bind2nd__isne_uint64)
// C=A'+scalar                      GB (_bind2nd_tran__isne_uint64)

// C type:   uint64_t
// A type:   uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij != bij)

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint64_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint64_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: ISNE compares and stores the 0/1 result as uint64_t
#define GB_BINOP(z,x,y,i,j) \
    z = (x != y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNE || GxB_NO_UINT64 || GxB_NO_ISNE_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Compiled out for ISNE: the dense ewise3-accum kernel only exists for the
// operators listed in the comment below, hence the #if 0.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isne_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: kept as emitted by the code generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isne_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace declared here is consumed and released by the add template
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isne_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for ISNE (it is commutative), so only the #else
    // branch below is compiled for this operator.
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isne_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isne_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // Bb is B's bitmap; skip entries not present in B
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isne_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Ab is A's bitmap; skip entries not present in A
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x != aij) ; \
}

GrB_Info GB (_bind1st_tran__isne_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent kernels
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij != y) ; \
}

GrB_Info GB (_bind2nd_tran__isne_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif  // closes #ifndef GBCOMPACT
MsnhCVMatOp.h
#ifndef MSNHCVOP_H #define MSNHCVOP_H #include <Msnhnet/cv/MsnhCVMat.h> #include <Msnhnet/utils/MsnhTimeUtil.h> namespace Msnhnet { class MsnhNet_API MatOp { public: static void getROI(Mat &src, Mat &dst, const Vec2I32 &p1, const Vec2I32 &p2); static void setROI(Mat &srcDst, Mat &roi, const Vec2I32 &pos); static void cvtColor(Mat &src, Mat &dst, const CvtColorType& cvtType); static void resize(Mat& src, Mat &dst, const Vec2I32 &outSize, const ResizeType& resizeType=RESIZE_BILINEAR); template<typename T> static void copyMakeBorder(Mat& src, Mat &dst, const int &top ,const int &down, const int &left, const int &right, const T & val) { int array = DataType<T>::array; int fmt = DataType<T>::fmt; src.checkPixelType(array, fmt); int srcWidth = src.getWidth() ; int srcHeight = src.getHeight(); int finalWidth = srcWidth + left + right; int finalHeight = srcHeight + top + down; Mat tmpMat(finalWidth, finalHeight, src.getMatType()); tmpMat.fillPixel<T>(val); #ifdef USE_OMP #pragma omp parallel for num_threads(OMP_THREAD) #endif for (int i = 0; i < srcHeight; ++i) { if(fmt == 'b') { memcpy(tmpMat.getData().u8+(finalWidth*(i+top)+left)*array, src.getData().u8+(srcWidth*i)*array, array*srcWidth); } else if(fmt == 'f') { memcpy(tmpMat.getData().f32+(finalWidth*(i+top)+left)*array, src.getData().f32+(srcWidth*i)*array, array*srcWidth*4); } else if(fmt == 'd') { memcpy(tmpMat.getData().f64+(finalWidth*(i+top)+left)*array, src.getData().f64+(srcWidth*i)*array, array*srcWidth*8); } } dst = tmpMat; } static void flip(Mat &mat, const FlipMode &flipMode=FLIP_V); static double norm(Mat &mat1, Mat &mat2, const NormType& normType = NORM_L2); static double norm(Mat &mat, const NormType& normType = NORM_L2); template<typename T> static void _split(Mat &src, std::vector<Mat> &dst) { dst.clear(); if(src.getChannel()==1) { dst.push_back(src); } else if(src.getChannel()==3) { Mat R; Mat G; Mat B; Mat::createMat<T>(src.getWidth(),src.getHeight(),1,R); 
Mat::createMat<T>(src.getWidth(),src.getHeight(),1,G); Mat::createMat<T>(src.getWidth(),src.getHeight(),1,B); #ifdef USE_OMP uint64_t dataLen = src.getHeight()*src.getWidth(); uint16_t threadNum = dataLen>MIN_OMP_DATA?OMP_THREAD:1; #pragma omp parallel for num_threads(threadNum) #endif for (int i = 0; i < src.getHeight(); ++i) { for (int j = 0; j < src.getWidth(); ++j) { reinterpret_cast<T*>(R.getBytes())[i*src.getWidth() + j] = reinterpret_cast<T*>(src.getBytes())[(i*src.getWidth() + j)*3 + 0]; reinterpret_cast<T*>(G.getBytes())[i*src.getWidth() + j] = reinterpret_cast<T*>(src.getBytes())[(i*src.getWidth() + j)*3 + 1]; reinterpret_cast<T*>(B.getBytes())[i*src.getWidth() + j] = reinterpret_cast<T*>(src.getBytes())[(i*src.getWidth() + j)*3 + 2]; } } dst.push_back(R); dst.push_back(G); dst.push_back(B); } else if(src.getChannel()==4) { Mat R; Mat G; Mat B; Mat A; Mat::createMat<T>(src.getWidth(),src.getHeight(),1,R); Mat::createMat<T>(src.getWidth(),src.getHeight(),1,G); Mat::createMat<T>(src.getWidth(),src.getHeight(),1,B); Mat::createMat<T>(src.getWidth(),src.getHeight(),1,A); #ifdef USE_OMP uint64_t dataLen = src.getHeight()*src.getWidth(); uint16_t threadNum = dataLen>MIN_OMP_DATA?OMP_THREAD:1; #pragma omp parallel for num_threads(threadNum) #endif for (int i = 0; i < src.getHeight(); ++i) { for (int j = 0; j < src.getWidth(); ++j) { reinterpret_cast<T*>(R.getBytes())[i*src.getWidth() + j] = reinterpret_cast<T*>(src.getBytes())[(i*src.getWidth() + j)*4 + 0]; reinterpret_cast<T*>(G.getBytes())[i*src.getWidth() + j] = reinterpret_cast<T*>(src.getBytes())[(i*src.getWidth() + j)*4 + 1]; reinterpret_cast<T*>(B.getBytes())[i*src.getWidth() + j] = reinterpret_cast<T*>(src.getBytes())[(i*src.getWidth() + j)*4 + 2]; reinterpret_cast<T*>(A.getBytes())[i*src.getWidth() + j] = reinterpret_cast<T*>(src.getBytes())[(i*src.getWidth() + j)*4 + 3]; } } dst.push_back(R); dst.push_back(G); dst.push_back(B); dst.push_back(A); } } static void split(Mat &src, std::vector<Mat> &dst); 
template<typename T> static void _merge(std::vector<Mat> &src, Mat &dst) { int width = src[0].getWidth(); int height = src[1].getHeight(); if(src.size()==1) { dst = src[0]; return; } else if(src.size()==3) { Mat::createMat<T>(width,height,3,dst); #ifdef USE_OMP uint64_t dataLen = width*height; uint16_t threadNum = dataLen>MIN_OMP_DATA?OMP_THREAD:1; #pragma omp parallel for num_threads(threadNum) #endif for (int i = 0; i < height; ++i) { for (int j = 0; j < width; ++j) { reinterpret_cast<T*>(dst.getBytes())[(i*width + j)*3 + 0] = reinterpret_cast<T*>(src[0].getBytes())[i*width + j]; reinterpret_cast<T*>(dst.getBytes())[(i*width + j)*3 + 1] = reinterpret_cast<T*>(src[1].getBytes())[i*width + j]; reinterpret_cast<T*>(dst.getBytes())[(i*width + j)*3 + 2] = reinterpret_cast<T*>(src[2].getBytes())[i*width + j]; } } } else if(src.size()==4) { Mat::createMat<T>(width,height,4,dst); #ifdef USE_OMP uint64_t dataLen = width*height; uint16_t threadNum = dataLen>MIN_OMP_DATA?OMP_THREAD:1; #pragma omp parallel for num_threads(threadNum) #endif for (int i = 0; i < height; ++i) { for (int j = 0; j < width; ++j) { reinterpret_cast<T*>(dst.getBytes())[(i*width + j)*4 + 0] = reinterpret_cast<T*>(src[0].getBytes())[i*width + j]; reinterpret_cast<T*>(dst.getBytes())[(i*width + j)*4 + 1] = reinterpret_cast<T*>(src[1].getBytes())[i*width + j]; reinterpret_cast<T*>(dst.getBytes())[(i*width + j)*4 + 2] = reinterpret_cast<T*>(src[2].getBytes())[i*width + j]; reinterpret_cast<T*>(dst.getBytes())[(i*width + j)*4 + 3] = reinterpret_cast<T*>(src[3].getBytes())[i*width + j]; } } } } static void merge(std::vector<Mat> &src, Mat &dst); static bool checkMatsProps(Mat &mat1, Mat &mat2); static void threshold(Mat &src, Mat &dst, const double& threshold, const double& maxVal, const int &thresholdType); static Mat hContact(const Mat &A, const Mat &B); static Mat vContact(const Mat &A, const Mat &B); static std::vector<int> histogram(Mat &src); static uint8_t getOtsu(Mat &src); private: static void 
RGB2BGR(const Mat &src, Mat &dst); static void RGB2GRAY(Mat &src, Mat &dst); static void RGBA2GRAY(Mat &src, Mat &dst); static void GRAY2RGB(Mat &src,Mat &dst); static void GRAY2RGBA(Mat &src, Mat &dst); static void RGB2RGBA(Mat &src, Mat &dst); static void RGBA2RGB(Mat &src, Mat &dst); static void flipV(Mat &mat); static void flipH(Mat &mat); }; } #endif
region_layer.c
#include "region_layer.h" #include "activations.h" #include "blas.h" #include "box.h" #include "cuda.h" #include "utils.h" #include <stdio.h> #include <assert.h> #include <string.h> #include <stdlib.h> #define DOABS 1 region_layer make_region_layer(int batch, int w, int h, int n, int classes, int coords, int max_boxes) { region_layer l = {0}; l.type = REGION; l.n = n; l.batch = batch; l.h = h; l.w = w; l.classes = classes; l.coords = coords; l.cost = calloc(1, sizeof(float)); l.biases = calloc(n*2, sizeof(float)); l.bias_updates = calloc(n*2, sizeof(float)); l.outputs = h*w*n*(classes + coords + 1); l.inputs = l.outputs; l.max_boxes = max_boxes; l.truths = max_boxes*(5); l.delta = calloc(batch*l.outputs, sizeof(float)); l.output = calloc(batch*l.outputs, sizeof(float)); int i; for(i = 0; i < n*2; ++i){ l.biases[i] = .5; } l.forward = forward_region_layer; l.backward = backward_region_layer; #ifdef GPU l.forward_gpu = forward_region_layer_gpu; l.backward_gpu = backward_region_layer_gpu; l.output_gpu = cuda_make_array(l.output, batch*l.outputs); l.delta_gpu = cuda_make_array(l.delta, batch*l.outputs); #endif fprintf(stderr, "detection\n"); srand(0); return l; } void resize_region_layer(layer *l, int w, int h) { int old_w = l->w; int old_h = l->h; l->w = w; l->h = h; l->outputs = h*w*l->n*(l->classes + l->coords + 1); l->inputs = l->outputs; l->output = realloc(l->output, l->batch*l->outputs*sizeof(float)); l->delta = realloc(l->delta, l->batch*l->outputs*sizeof(float)); #ifdef GPU if (old_w < w || old_h < h) { cuda_free(l->delta_gpu); cuda_free(l->output_gpu); l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs); l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs); } #endif } box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h) { box b; b.x = (i + logistic_activate(x[index + 0])) / w; b.y = (j + logistic_activate(x[index + 1])) / h; b.w = exp(x[index + 2]) * biases[2*n]; b.h = exp(x[index + 3]) * 
biases[2*n+1]; if(DOABS){ b.w = exp(x[index + 2]) * biases[2*n] / w; b.h = exp(x[index + 3]) * biases[2*n+1] / h; } return b; } float delta_region_box(box truth, float *x, float *biases, int n, int index, int i, int j, int w, int h, float *delta, float scale) { box pred = get_region_box(x, biases, n, index, i, j, w, h); float iou = box_iou(pred, truth); float tx = (truth.x*w - i); float ty = (truth.y*h - j); float tw = log(truth.w / biases[2*n]); float th = log(truth.h / biases[2*n + 1]); if(DOABS){ tw = log(truth.w*w / biases[2*n]); th = log(truth.h*h / biases[2*n + 1]); } delta[index + 0] = scale * (tx - logistic_activate(x[index + 0])) * logistic_gradient(logistic_activate(x[index + 0])); delta[index + 1] = scale * (ty - logistic_activate(x[index + 1])) * logistic_gradient(logistic_activate(x[index + 1])); delta[index + 2] = scale * (tw - x[index + 2]); delta[index + 3] = scale * (th - x[index + 3]); return iou; } void delta_region_class(float *output, float *delta, int index, int class_id, int classes, tree *hier, float scale, float *avg_cat, int focal_loss) { int i, n; if(hier){ float pred = 1; while(class_id >= 0){ pred *= output[index + class_id]; int g = hier->group[class_id]; int offset = hier->group_offset[g]; for(i = 0; i < hier->group_size[g]; ++i){ delta[index + offset + i] = scale * (0 - output[index + offset + i]); } delta[index + class_id] = scale * (1 - output[index + class_id]); class_id = hier->parent[class_id]; } *avg_cat += pred; } else { // Focal loss if (focal_loss) { // Focal Loss float alpha = 0.5; // 0.25 or 0.5 //float gamma = 2; // hardcoded in many places of the grad-formula int ti = index + class_id; float pt = output[ti] + 0.000000000000001F; // http://fooplot.com/#W3sidHlwZSI6MCwiZXEiOiItKDEteCkqKDIqeCpsb2coeCkreC0xKSIsImNvbG9yIjoiIzAwMDAwMCJ9LHsidHlwZSI6MTAwMH1d float grad = -(1 - pt) * (2 * pt*logf(pt) + pt - 1); // http://blog.csdn.net/linmingan/article/details/77885832 //float grad = (1 - pt) * (2 * pt*logf(pt) + pt - 1); // 
https://github.com/unsky/focal-loss for (n = 0; n < classes; ++n) { delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]); delta[index + n] *= alpha*grad; if (n == class_id) *avg_cat += output[index + n]; } } else { // default for (n = 0; n < classes; ++n) { delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]); if (n == class_id) *avg_cat += output[index + n]; } } } } float logit(float x) { return log(x/(1.-x)); } float tisnan(float x) { return (x != x); } static int entry_index(layer l, int batch, int location, int entry) { int n = location / (l.w*l.h); int loc = location % (l.w*l.h); return batch*l.outputs + n*l.w*l.h*(l.coords + l.classes + 1) + entry*l.w*l.h + loc; } void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output); void forward_region_layer(const region_layer l, network_state state) { int i,j,b,t,n; int size = l.coords + l.classes + 1; memcpy(l.output, state.input, l.outputs*l.batch*sizeof(float)); #ifndef GPU flatten(l.output, l.w*l.h, size*l.n, l.batch, 1); #endif for (b = 0; b < l.batch; ++b){ for(i = 0; i < l.h*l.w*l.n; ++i){ int index = size*i + b*l.outputs; l.output[index + 4] = logistic_activate(l.output[index + 4]); } } #ifndef GPU if (l.softmax_tree){ for (b = 0; b < l.batch; ++b){ for(i = 0; i < l.h*l.w*l.n; ++i){ int index = size*i + b*l.outputs; softmax_tree(l.output + index + 5, 1, 0, 1, l.softmax_tree, l.output + index + 5); } } } else if (l.softmax){ for (b = 0; b < l.batch; ++b){ for(i = 0; i < l.h*l.w*l.n; ++i){ int index = size*i + b*l.outputs; softmax(l.output + index + 5, l.classes, 1, l.output + index + 5, 1); } } } #endif if(!state.train) return; memset(l.delta, 0, l.outputs * l.batch * sizeof(float)); float avg_iou = 0; float recall = 0; float avg_cat = 0; float avg_obj = 0; float avg_anyobj = 0; int count = 0; int class_count = 0; *(l.cost) = 0; for (b = 0; b < l.batch; ++b) { if(l.softmax_tree){ int onlyclass_id = 0; for(t = 0; t < l.max_boxes; 
++t){ box truth = float_to_box(state.truth + t*5 + b*l.truths); if(!truth.x) break; // continue; int class_id = state.truth[t*5 + b*l.truths + 4]; float maxp = 0; int maxi = 0; if(truth.x > 100000 && truth.y > 100000){ for(n = 0; n < l.n*l.w*l.h; ++n){ int index = size*n + b*l.outputs + 5; float scale = l.output[index-1]; float p = scale*get_hierarchy_probability(l.output + index, l.softmax_tree, class_id); if(p > maxp){ maxp = p; maxi = n; } } int index = size*maxi + b*l.outputs + 5; delta_region_class(l.output, l.delta, index, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss); ++class_count; onlyclass_id = 1; break; } } if(onlyclass_id) continue; } for (j = 0; j < l.h; ++j) { for (i = 0; i < l.w; ++i) { for (n = 0; n < l.n; ++n) { int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs; box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h); float best_iou = 0; int best_class_id = -1; for(t = 0; t < l.max_boxes; ++t){ box truth = float_to_box(state.truth + t*5 + b*l.truths); int class_id = state.truth[t * 5 + b*l.truths + 4]; if (class_id >= l.classes) continue; // if label contains class_id more than number of classes in the cfg-file if(!truth.x) break; // continue; float iou = box_iou(pred, truth); if (iou > best_iou) { best_class_id = state.truth[t*5 + b*l.truths + 4]; best_iou = iou; } } avg_anyobj += l.output[index + 4]; l.delta[index + 4] = l.noobject_scale * ((0 - l.output[index + 4]) * logistic_gradient(l.output[index + 4])); if(l.classfix == -1) l.delta[index + 4] = l.noobject_scale * ((best_iou - l.output[index + 4]) * logistic_gradient(l.output[index + 4])); else{ if (best_iou > l.thresh) { l.delta[index + 4] = 0; if(l.classfix > 0){ delta_region_class(l.output, l.delta, index + 5, best_class_id, l.classes, l.softmax_tree, l.class_scale*(l.classfix == 2 ? 
l.output[index + 4] : 1), &avg_cat, l.focal_loss); ++class_count; } } } if(*(state.net.seen) < 12800){ box truth = {0}; truth.x = (i + .5)/l.w; truth.y = (j + .5)/l.h; truth.w = l.biases[2*n]; truth.h = l.biases[2*n+1]; if(DOABS){ truth.w = l.biases[2*n]/l.w; truth.h = l.biases[2*n+1]/l.h; } delta_region_box(truth, l.output, l.biases, n, index, i, j, l.w, l.h, l.delta, .01); } } } } for(t = 0; t < l.max_boxes; ++t){ box truth = float_to_box(state.truth + t*5 + b*l.truths); int class_id = state.truth[t * 5 + b*l.truths + 4]; if (class_id >= l.classes) { printf(" Warning: in txt-labels class_id=%d >= classes=%d in cfg-file. In txt-labels class_id should be [from 0 to %d] \n", class_id, l.classes, l.classes-1); getchar(); continue; // if label contains class_id more than number of classes in the cfg-file } if(!truth.x) break; // continue; float best_iou = 0; int best_index = 0; int best_n = 0; i = (truth.x * l.w); j = (truth.y * l.h); //printf("%d %f %d %f\n", i, truth.x*l.w, j, truth.y*l.h); box truth_shift = truth; truth_shift.x = 0; truth_shift.y = 0; //printf("index %d %d\n",i, j); for(n = 0; n < l.n; ++n){ int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs; box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h); if(l.bias_match){ pred.w = l.biases[2*n]; pred.h = l.biases[2*n+1]; if(DOABS){ pred.w = l.biases[2*n]/l.w; pred.h = l.biases[2*n+1]/l.h; } } //printf("pred: (%f, %f) %f x %f\n", pred.x, pred.y, pred.w, pred.h); pred.x = 0; pred.y = 0; float iou = box_iou(pred, truth_shift); if (iou > best_iou){ best_index = index; best_iou = iou; best_n = n; } } //printf("%d %f (%f, %f) %f x %f\n", best_n, best_iou, truth.x, truth.y, truth.w, truth.h); float iou = delta_region_box(truth, l.output, l.biases, best_n, best_index, i, j, l.w, l.h, l.delta, l.coord_scale); if(iou > .5) recall += 1; avg_iou += iou; //l.delta[best_index + 4] = iou - l.output[best_index + 4]; avg_obj += l.output[best_index + 4]; l.delta[best_index + 4] = l.object_scale * (1 - 
/* NOTE(review): continuation of forward_region_layer() — objectness delta
   for the best-matching prediction, class deltas, and loss bookkeeping. */
l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]);
if (l.rescore) {
    /* Rescoring: regress objectness toward the achieved IOU instead of 1. */
    l.delta[best_index + 4] = l.object_scale * (iou - l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]);
}

if (l.map) class_id = l.map[class_id];
delta_region_class(l.output, l.delta, best_index + 5, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss);
++count;
++class_count;
}
}
//printf("\n");
#ifndef GPU
flatten(l.delta, l.w*l.h, size*l.n, l.batch, 0);
#endif
/* Total region loss = squared L2 norm of the delta buffer. */
*(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
printf("Region Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, Avg Recall: %f, count: %d\n", avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, count);
}

/* Backward pass: add this layer's deltas (scale 1) into the network's delta
   buffer for the previous layer. */
void backward_region_layer(const region_layer l, network_state state)
{
    axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, state.delta, 1);
}

/* Decode the raw layer output into boxes and per-class probabilities.
   w,h    : dimensions the decoded boxes are scaled to
   thresh : probabilities at or below this are zeroed
   probs  : [l.w*l.h*l.n][...] output probability table
   boxes  : l.w*l.h*l.n decoded boxes
   map    : optional 200-entry class remap, used with softmax trees */
void get_region_boxes(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map)
{
    int i;
    float *const predictions = l.output;
    #pragma omp parallel for
    for (i = 0; i < l.w*l.h; ++i){
        int j, n;
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            int index = i*l.n + n;
            int p_index = index * (l.classes + 5) + 4; // objectness slot
            float scale = predictions[p_index];
            if(l.classfix == -1 && scale < .5) scale = 0;
            int box_index = index * (l.classes + 5);
            boxes[index] = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h);
            /* Scale relative coordinates up to the requested w x h. */
            boxes[index].x *= w;
            boxes[index].y *= h;
            boxes[index].w *= w;
            boxes[index].h *= h;

            int class_index = index * (l.classes + 5) + 5;
            if(l.softmax_tree){
                hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);
                int found = 0;
                if(map){
                    /* Remap through the provided 200-class evaluation map. */
                    for(j = 0; j < 200; ++j){
                        float prob = scale*predictions[class_index+map[j]];
                        probs[index][j] = (prob > thresh) ?
/* NOTE(review): continuation of get_region_boxes() — probability fill for
   the softmax-tree (no map) and flat-softmax cases. */
prob : 0;
}
} else {
    /* No class map: walk classes from the back, keep the first confident
       prediction and zero the rest. */
    for(j = l.classes - 1; j >= 0; --j){
        if(!found && predictions[class_index + j] > .5){
            found = 1;
        } else {
            predictions[class_index + j] = 0;
        }
        float prob = predictions[class_index+j];
        probs[index][j] = (scale > thresh) ? prob : 0;
    }
}
} else {
    /* Flat softmax: class probability = objectness * class score. */
    for(j = 0; j < l.classes; ++j){
        float prob = scale*predictions[class_index+j];
        probs[index][j] = (prob > thresh) ? prob : 0;
    }
}
if(only_objectness){
    probs[index][0] = scale;
}
}
}
}

#ifdef GPU

/* GPU forward: flatten the raw output on-device, run (tree-)softmax on the
   class scores, then pull everything to the host and reuse the CPU forward
   pass to compute deltas and loss; push the deltas back when training. */
void forward_region_layer_gpu(const region_layer l, network_state state)
{
    /*
    if(!state.train){
        copy_ongpu(l.batch*l.inputs, state.input, 1, l.output_gpu, 1);
        return;
    }
    */
    flatten_ongpu(state.input, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 1, l.output_gpu);
    if(l.softmax_tree){
        int i;
        int count = 5; // class scores start after x,y,w,h,objectness
        for (i = 0; i < l.softmax_tree->groups; ++i) {
            int group_size = l.softmax_tree->group_size[i];
            softmax_gpu(l.output_gpu+count, group_size, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + count);
            count += group_size;
        }
    }else if (l.softmax){
        softmax_gpu(l.output_gpu+5, l.classes, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + 5);
    }

    float *in_cpu = calloc(l.batch*l.inputs, sizeof(float));
    float *truth_cpu = 0;
    if(state.truth){
        int num_truth = l.batch*l.truths;
        truth_cpu = calloc(num_truth, sizeof(float));
        cuda_pull_array(state.truth, truth_cpu, num_truth);
    }
    cuda_pull_array(l.output_gpu, in_cpu, l.batch*l.inputs);
    //cudaStreamSynchronize(get_cuda_stream());
    network_state cpu_state = state;
    cpu_state.train = state.train;
    cpu_state.truth = truth_cpu;
    cpu_state.input = in_cpu;
    /* Loss/delta computation happens on the host. */
    forward_region_layer(l, cpu_state);
    //cuda_push_array(l.output_gpu, l.output, l.batch*l.outputs);
    free(cpu_state.input);
    if(!state.train) return;
    cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs);
    //cudaStreamSynchronize(get_cuda_stream());
    if(cpu_state.truth) free(cpu_state.truth);
}

/* GPU backward: un-flatten the deltas straight into the upstream buffer. */
void backward_region_layer_gpu(region_layer l, network_state state)
{
    flatten_ongpu(l.delta_gpu, l.h*l.w, l.n*(l.coords + l.classes + 1),
/* NOTE(review): closes backward_region_layer_gpu(), whose opening call is
   on the previous source line. */
l.batch, 0, state.delta);
}
#endif

/* Map boxes from network-input (letterboxed) coordinates back onto the
   original image.  netw/neth: network input size; w/h: image size; when
   relative is zero the results are scaled to image pixels. */
void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
    int i;
    int new_w = 0;
    int new_h = 0;
    /* Size the image occupied when fitted inside the network input. */
    if (((float)netw / w) < ((float)neth / h)) {
        new_w = netw;
        new_h = (h * netw) / w;
    }
    else {
        new_h = neth;
        new_w = (w * neth) / h;
    }
    for (i = 0; i < n; ++i) {
        box b = dets[i].bbox;
        /* Remove the letterbox padding, then rescale each axis. */
        b.x = (b.x - (netw - new_w) / 2. / netw) / ((float)new_w / netw);
        b.y = (b.y - (neth - new_h) / 2. / neth) / ((float)new_h / neth);
        b.w *= (float)netw / new_w;
        b.h *= (float)neth / new_h;
        if (!relative) {
            b.x *= w;
            b.w *= w;
            b.y *= h;
            b.h *= h;
        }
        dets[i].bbox = b;
    }
}

/* Fill the detection array from the layer output.  With batch == 2 the
   second image in the batch is treated as a horizontal flip of the first:
   its channels are un-flipped in place and the two outputs averaged. */
void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
{
    int i, j, n, z;
    float *predictions = l.output;
    if (l.batch == 2) {
        float *flip = l.output + l.outputs;
        for (j = 0; j < l.h; ++j) {
            for (i = 0; i < l.w / 2; ++i) {
                for (n = 0; n < l.n; ++n) {
                    for (z = 0; z < l.classes + l.coords + 1; ++z) {
                        int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
                        int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
                        float swap = flip[i1];
                        flip[i1] = flip[i2];
                        flip[i2] = swap;
                        if (z == 0) {
                            /* Channel 0 is mirrored (negated) when unflipping. */
                            flip[i1] = -flip[i1];
                            flip[i2] = -flip[i2];
                        }
                    }
                }
            }
        }
        for (i = 0; i < l.outputs; ++i) {
            l.output[i] = (l.output[i] + flip[i]) / 2.;
        }
    }
    for (i = 0; i < l.w*l.h; ++i) {
        int row = i / l.w;
        int col = i % l.w;
        for (n = 0; n < l.n; ++n) {
            int index = n*l.w*l.h + i;
            for (j = 0; j < l.classes; ++j) {
                dets[index].prob[j] = 0;
            }
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
            int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
            int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            float scale = l.background ? 1 : predictions[obj_index];
            dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h);// , l.w*l.h);
            dets[index].objectness = scale > thresh ?
/* NOTE(review): continuation of get_region_detections() — per-box mask and
   class-probability extraction, then coordinate correction. */
scale : 0;
if (dets[index].mask) {
    for (j = 0; j < l.coords - 4; ++j) {
        dets[index].mask[j] = l.output[mask_index + j*l.w*l.h];
    }
}

int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background);
if (l.softmax_tree) {
    hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);// , l.w*l.h);
    if (map) {
        /* Remap the 200 evaluation classes through the tree. */
        for (j = 0; j < 200; ++j) {
            int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + map[j]);
            float prob = scale*predictions[class_index];
            dets[index].prob[j] = (prob > thresh) ? prob : 0;
        }
    }
    else {
        /* Keep only the top prediction from the hierarchy. */
        int j = hierarchy_top_prediction(predictions + class_index, l.softmax_tree, tree_thresh, l.w*l.h);
        dets[index].prob[j] = (scale > thresh) ? scale : 0;
    }
}
else {
    if (dets[index].objectness) {
        for (j = 0; j < l.classes; ++j) {
            int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j);
            float prob = scale*predictions[class_index];
            dets[index].prob[j] = (prob > thresh) ? prob : 0;
        }
    }
}
}
}
correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative);
}
GB_binop__max_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): generated kernel file; the macros below configure the shared
// templates that each function body #includes.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):        GB (_AaddB__max_uint8)
// A.*B function (eWiseMult):      GB (_AemultB_08__max_uint8)
// A.*B function (eWiseMult):      GB (_AemultB_02__max_uint8)
// A.*B function (eWiseMult):      GB (_AemultB_04__max_uint8)
// A.*B function (eWiseMult):      GB (_AemultB_bitmap__max_uint8)
// A*D function (colscale):        GB (_AxD__max_uint8)
// D*A function (rowscale):        GB (_DxB__max_uint8)
// C+=B function (dense accum):    GB (_Cdense_accumB__max_uint8)
// C+=b function (dense accum):    GB (_Cdense_accumb__max_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_uint8)
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__max_uint8)
// C=scalar+B                      GB (_bind1st__max_uint8)
// C=scalar+B'                     GB (_bind1st_tran__max_uint8)
// C=A+scalar                      GB (_bind2nd__max_uint8)
// C=A'+scalar                     GB (_bind2nd_tran__max_uint8)

// C type:   uint8_t
// A type:   uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_IMAX (aij, bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IMAX (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MAX || GxB_NO_UINT8 || GxB_NO_MAX_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B, all three matrices dense (body in the included template).
void GB (_Cdense_ewise3_accum__max_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__max_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__max_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__max_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return, kept as generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__max_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
// (continuation of GB (_AxD__max_uint8): slicing of A and task counts)
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__max_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__max_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__max_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix
// (continuation of GB (_AemultB_08__max_uint8) argument list)
    A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__max_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__max_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__max_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__max_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
    // (loop body of GB (_bind1st__max_uint8)): Cx [p] = max (x, Bx [p]),
    // skipping entries not present per the bitmap Bb.
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IMAX (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__max_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IMAX (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_IMAX (x, aij) ; \
}

GrB_Info GB (_bind1st_tran__max_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMAX (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__max_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
i3lock-fancy-rapid.c
/* * BSD 3-Clause License * * Copyright (c) 2018-2019, The i3lock-fancy-rapid authors * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <unistd.h>
#include <sys/wait.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <omp.h>
#include <string.h>

/* Horizontal box blur of a packed RGB (3 bytes/pixel) image using a
   per-row sliding-window running sum: each output channel is the mean over
   a (2*radius+1)-wide window clamped to the row.  Rows run in parallel. */
void box_blur_h(unsigned char *dest, unsigned char *src, int height, int width,
                int radius)
{
    double coeff = 1.0 / (radius * 2 + 1);
#pragma omp parallel for
    for (int i = 0; i < height; ++i) {
        int iwidth = i * width;
        double r_acc = 0.0;
        double g_acc = 0.0;
        double b_acc = 0.0;
        /* j starts at -radius so the window is pre-filled before output. */
        for (int j = -radius; j < width; ++j) {
            /* Drop the pixel leaving the window on the left. */
            if (j - radius - 1 >= 0) {
                int index = (iwidth + j - radius - 1) * 3;
                r_acc -= coeff * src[index];
                g_acc -= coeff * src[index + 1];
                b_acc -= coeff * src[index + 2];
            }
            /* Add the pixel entering the window on the right. */
            if (j + radius < width) {
                int index = (iwidth + j + radius) * 3;
                r_acc += coeff * src[index];
                g_acc += coeff * src[index + 1];
                b_acc += coeff * src[index + 2];
            }
            if (j < 0) continue;
            int index = (iwidth + j) * 3;
            /* +0.5 rounds to nearest on the double -> unsigned char cast. */
            dest[index] = r_acc + 0.5;
            dest[index + 1] = g_acc + 0.5;
            dest[index + 2] = b_acc + 0.5;
        }
    }
}

/* Transpose an RGB image (height x width -> width x height). */
static inline void transpose(unsigned char *dest, unsigned char *src,
                             int height, int width)
{
    for (int i = 0; i < height; ++i) {
        int iwidth = i * width;
        for (int j = 0; j < width; ++j) {
            int nIndex = 3 * (iwidth + j);
            int tIndex = 3 * (j * height + i);
            dest[tIndex] = src[nIndex];
            dest[tIndex+1] = src[nIndex+1];
            dest[tIndex+2] = src[nIndex+2];
        }
    }
}

/* Full 2-D box blur: `times` horizontal passes, transpose, `times` more
   passes (now effectively vertical), transpose back.  src is clobbered. */
void box_blur(unsigned char *dest, unsigned char *src, int height, int width,
              int radius, int times)
{
    for (int i = 0; i < times; ++i) {
        box_blur_h(dest, src, height, width, radius);
        memcpy(src, dest, height * width * 3);
    }
    transpose(src, dest, height, width);
    for (int i = 0; i < times; ++i) {
        box_blur_h(dest, src, width, height, radius);
        memcpy(src, dest, height * width * 3);
    }
    transpose(dest, src, width, height);
}

/* Replace each (2*radius+1)-sized square tile with its average colour. */
void pixelate(unsigned char *dest, unsigned char *src, int height, int width,
              int radius)
{
    radius = radius * 2 + 1; /* reinterpret as tile edge length */
#pragma omp parallel for
    for (int i = 0; i < height; i += radius) {
        for (int j = 0; j < width; j += radius) {
            int amount = 0;
            int r
/* NOTE(review): continuation of pixelate() — tile averaging and fill. */
= 0;
int g = 0;
int b = 0;
/* Sum the tile's pixels, clipping at the image edges. */
for (int k = 0; k < radius; ++k) {
    if (i + k >= height) break;
    for (int l = 0; l < radius; ++l) {
        if (j + l >= width) break;
        ++amount;
        int index = ((i + k) * width + (j + l)) * 3;
        r += src[index];
        g += src[index + 1];
        b += src[index + 2];
    }
}
r /= amount;
g /= amount;
b /= amount;
/* Paint the average colour back over the whole tile. */
for (int k = 0; k < radius; ++k) {
    if (i + k >= height) break;
    for (int l = 0; l < radius; ++l) {
        if (j + l >= width) break;
        int index = ((i + k) * width + (j + l)) * 3;
        dest[index] = r;
        dest[index + 1] = g;
        dest[index + 2] = b;
    }
}
}
}
}

/* Grab a screenshot of the root window, blur or pixelate it, and feed the
   raw RGB bytes to i3lock over a pipe presented as /dev/stdin. */
int main(int argc, char *argv[])
{
    if (argc < 3) {
        fprintf(stderr,
                "usage: %s radius times [OPTIONS]\n"
                "pass \"pixel\" for times to get pixelation\n",
                argv[0]);
        exit(EXIT_FAILURE);
    }
    Display *display = XOpenDisplay(NULL);
    Window root = XDefaultRootWindow(display);
    XWindowAttributes gwa;
    XGetWindowAttributes(display, root, &gwa);
    int height = gwa.height;
    int width = gwa.width;
    unsigned char *preblur = malloc(height * width * 3);
    XImage *image = XGetImage(display, root, 0, 0, width, height, AllPlanes, ZPixmap);
    for (int i = 0; i < height; ++i) {
        int iwidth = i * width;
        for (int j = 0; j < width; ++j) {
            int index = (iwidth + j) * 3;
            unsigned long pixel = XGetPixel(image, j, i);
            /* NOTE(review): the fixed >>16 / >>8 shifts assume the common
               0xRRGGBB layout; for other visuals the shifts would have to
               be derived from the XImage masks — confirm before changing. */
            preblur[index] = (pixel & image->red_mask) >> 16;
            preblur[index + 1] = (pixel & image->green_mask) >> 8;
            preblur[index + 2] = pixel & image->blue_mask;
        }
    }
    XDestroyImage(image);
    XDestroyWindow(display, root);
    XCloseDisplay(display);
    unsigned char *postblur = malloc(height * width * 3);
    int radius = atoi(argv[1]);
    if (radius < 0) {
        fprintf(stderr, "Radius has to be non-negative!\n");
        exit(EXIT_FAILURE);
    }
    if (strcmp(argv[2], "pixel") == 0) {
        pixelate(postblur, preblur, height, width, radius);
    } else {
        int times = atoi(argv[2]);
        if (times < 0) {
            fprintf(stderr, "Times has to be non-negative!\n");
            exit(EXIT_FAILURE);
        }
        box_blur(postblur, preblur, height, width, radius, times);
    }
    free(preblur);
    int fds[2];
    pipe(fds);
    /* Parent streams the image into the pipe; child execs i3lock to read it. */
    if (fork()) {
        write(fds[1], postblur,
/* NOTE(review): continuation of main() — parent writes the pixels and
   forwards i3lock's exit status; child wires the pipe to stdin and execs
   i3lock with a --raw WxH:rgb format string plus any user options. */
height * width * 3);
int status;
wait(&status);
exit(WEXITSTATUS(status));
} else {
    dup2(fds[0], STDIN_FILENO);
    char fmt[32];
    snprintf(fmt, sizeof(fmt), "%ix%i:rgb", width, height);
    /* argv: i3lock -i /dev/stdin --raw WxH:rgb [user options...] NULL */
    char *new_argv[argc + 3];
    new_argv[0] = "i3lock";
    new_argv[1] = "-i";
    new_argv[2] = "/dev/stdin";
    new_argv[3] = "--raw";
    new_argv[4] = fmt;
    for (int i = 3; i < argc; ++i)
        new_argv[i + 2] = argv[i];
    new_argv[argc + 2] = NULL;
    execvp(new_argv[0], new_argv);
    /* Only reached if execvp failed. */
    exit(EXIT_FAILURE);
}
}
mpncwa.c
/* $Header$ */ /* mpncwa -- netCDF weighted averager */ /* Purpose: Compute averages of specified hyperslabs of specfied variables in a single input netCDF file and output them to a single file. */ /* Copyright (C) 1995--present Charlie Zender This file is part of NCO, the netCDF Operators. NCO is free software. You may redistribute and/or modify NCO under the terms of the 3-Clause BSD License. You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits libraries and to distribute the resulting executables under the terms of the BSD, but in addition obeying the extra stipulations of the HDF, netCDF, OPeNDAP, and UDUnits licenses. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 3-Clause BSD License for more details. The original author of this software, Charlie Zender, seeks to improve it with your suggestions, contributions, bug-reports, and patches. Please contact the NCO project at http://nco.sf.net or write to Charlie Zender Department of Earth System Science University of California, Irvine Irvine, CA 92697-3100 */ /* fxm: 19981202 Deactivated -n and -W switches while I rethink the normalization switches */ /* Usage: ncwa -O -a lon ~/nco/data/in.nc ~/foo.nc ncwa -O -R -p /ZENDER/tmp -l ~/nco/data in.nc ~/foo.nc ncwa -O -C -a lat,lon,time -w gw -v PS -p /fs/cgd/csm/input/atm SEP1.T42.0596.nc ~/foo.nc;ncks -H foo.nc scp ~/nco/src/nco/ncwa.c esmf.ess.uci.edu:nco/src/nco */ #ifdef HAVE_CONFIG_H # include <config.h> /* Autotools tokens */ #endif /* !HAVE_CONFIG_H */ /* Standard C headers */ #include <math.h> /* sin cos cos sin 3.14159 */ #include <stdio.h> /* stderr, FILE, NULL, etc. 
*/ #include <stdlib.h> /* abs, getopt, malloc, strtol */ #include <string.h> /* strcmp() */ #include <sys/stat.h> /* stat() */ #include <time.h> /* machine time */ #include <unistd.h> /* POSIX stuff */ #ifndef HAVE_GETOPT_LONG # include "nco_getopt.h" #else /* HAVE_GETOPT_LONG */ # ifdef HAVE_GETOPT_H # include <getopt.h> # endif /* !HAVE_GETOPT_H */ #endif /* HAVE_GETOPT_LONG */ /* 3rd party vendors */ #include <netcdf.h> /* netCDF definitions and C library */ #ifdef ENABLE_MPI #include <mpi.h> /* MPI definitions */ #include "nco_mpi.h" /* MPI utilities */ #endif /* !ENABLE_MPI */ /* Personal headers */ /* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */ #define MAIN_PROGRAM_FILE #ifdef HAVE_BISON_FLEX # include "ncap_utl.h" /* netCDF arithmetic processor-specific definitions (symbol table, ...) */ #endif /* !HAVE_BISON_FLEX */ #include "libnco.h" /* netCDF Operator (NCO) library */ /* Global variables (keep consistent with global variables declared in ncap.c) */ size_t ncap_ncl_dpt_crr=0UL; /* [nbr] Depth of current #include file (incremented in ncap.l) */ size_t *ncap_ln_nbr_crr; /* [cnt] Line number (incremented in ncap.l) */ char **ncap_fl_spt_glb; /* [fl] Script file */ int main(int argc,char **argv) { char **dmn_avg_lst_in=NULL_CEWI; /* Option a */ char **fl_lst_abb=NULL; /* Option n */ char **fl_lst_in=NULL_CEWI; char **var_lst_in=NULL_CEWI; char *cmd_ln; char *cnk_arg[NC_MAX_DIMS]; char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */ char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */ char *fl_in=NULL; char *fl_out=NULL; /* Option o */ char *fl_out_tmp=NULL_CEWI; char *fl_pth=NULL; /* Option p */ char *fl_pth_lcl=NULL; /* Option l */ char *lmt_arg[NC_MAX_DIMS]; char *msk_nm=NULL; char *msk_cnd_sng=NULL; /* Mask string to be "parsed" and values given to msk_nm, msk_val, op_typ_rlt */ char *nco_op_typ_sng; /* Operation type */ char *opt_crr=NULL; /* [sng] String representation of current long-option name */ char *optarg_lcl=NULL; /* [sng] 
Local copy of system optarg */ char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */ char *wgt_nm=NULL; const char * const CVS_Id="$Id$"; const char * const CVS_Revision="$Revision$"; const char * const opt_sht_lst="34567Aa:B:bCcD:d:FhIL:l:M:m:nNOo:p:rRST:t:v:Ww:xy:-:"; cnk_dmn_sct **cnk_dmn=NULL_CEWI; #if defined(__cplusplus) || defined(PGI_CC) ddra_info_sct ddra_info; ddra_info.flg_ddra=False; #else /* !__cplusplus */ ddra_info_sct ddra_info={.MRV_flg=False,.flg_ddra=False,.lmn_nbr=0LL,.lmn_nbr_avg=0LL,.lmn_nbr_wgt=0LL,.nco_op_typ=nco_op_nil,.rnk_avg=0,.rnk_var=0,.rnk_wgt=0,.tmr_flg=nco_tmr_srt,.var_idx=0,.wgt_brd_flg=False,.wrd_sz=0}; #endif /* !__cplusplus */ dmn_sct **dim=NULL_CEWI; dmn_sct **dmn_out=NULL_CEWI; dmn_sct **dmn_avg=NULL_CEWI; double msk_val=1.0; /* Option M */ extern char *optarg; extern int optind; /* Using naked stdin/stdout/stderr in parallel region generates warning Copy appropriate filehandle to variable scoped shared in parallel clause */ FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ int *in_id_arr; int abb_arg_nbr=0; int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */ int cnk_nbr=0; /* [nbr] Number of chunk sizes */ int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int dmn_avg_nbr=0; int fl_idx=int_CEWI; int fl_nbr=0; int fl_in_fmt; /* [enm] Input file format */ int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int fll_md_old; /* [enm] Old fill mode */ int gaa_nbr=0; /* [nbr] Number of global attributes to add */ int idx=int_CEWI; int idx_avg; int in_id; int lmt_nbr=0; /* Option d. 
NB: lmt_nbr gets incremented */ int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */ int md_open; /* [enm] Mode flag for nc_open() call */ int nbr_dmn_fl; int nbr_dmn_out=0; int nbr_dmn_xtr; int nbr_var_fix; /* nbr_var_fix gets incremented */ int nbr_var_fl; int nbr_var_prc; /* nbr_var_prc gets incremented */ int xtr_nbr=0; /* xtr_nbr won't otherwise be set for -c with no -v */ int nco_op_typ=nco_op_avg; /* Operation type */ int op_typ_rlt=0; /* Option o */ int opt; int out_id; int rcd=NC_NOERR; /* [rcd] Return code */ int rec_dmn_id=NCO_REC_DMN_UNDEFINED; /* [id] Record dimension ID in input file */ int thr_idx; /* [idx] Index of current thread */ int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */ int var_lst_in_nbr=0; lmt_sct **lmt; cnv_sct *cnv; /* [sct] Convention structure */ nco_bool DO_CONFORM_MSK=False; /* Did nco_var_cnf_dmn() find truly conforming mask? */ nco_bool DO_CONFORM_WGT=False; /* Did nco_var_cnf_dmn() find truly conforming weight? */ nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */ nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */ nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */ nco_bool FL_RTR_RMT_LCN; nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */ nco_bool FORCE_APPEND=False; /* Option A */ nco_bool FORCE_OVERWRITE=False; /* Option O */ nco_bool FORTRAN_IDX_CNV=False; /* Option F */ nco_bool HISTORY_APPEND=True; /* Option h */ nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool MULTIPLY_BY_TALLY=False; /* Not currently implemented */ nco_bool MUST_CONFORM=False; /* [flg] Must nco_var_cnf_dmn() find truly conforming variables? 
*/ nco_bool NORMALIZE_BY_TALLY=True; /* Not currently implemented */ nco_bool NORMALIZE_BY_WEIGHT=True; /* Not currently implemented */ nco_bool NRM_BY_DNM=True; /* Option N Normalize by denominator */ nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_CREATE=False; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */ nco_bool SHARE_OPEN=False; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ nco_bool WGT_MSK_CRD_VAR=True; /* [flg] Weight and/or mask coordinate variables */ nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */ nco_bool flg_mmr_cln=False; /* [flg] Clean memory prior to exit */ nco_bool flg_ddra=False; /* [flg] DDRA diagnostics */ nco_bool flg_opt_a=False; /* Option a */ nco_bool flg_rdd=False; /* [flg] Retain degenerate dimensions */ nm_id_sct *dmn_lst; nm_id_sct *xtr_lst=NULL; /* xtr_lst may be alloc()'d from NULL with -c option */ nm_id_sct *dmn_avg_lst; prs_sct prs_arg; /* I/O [sct] Global information required in ncwa parser */ size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */ size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */ size_t cnk_sz_byt=0UL; /* [B] Chunk size in bytes */ size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */ size_t hdr_pad=0UL; /* [B] Pad at end of header section */ var_sct **var; var_sct **var_fix; var_sct **var_fix_out; var_sct **var_out; var_sct **var_prc; var_sct **var_prc_out; var_sct *msk=NULL; var_sct *msk_out=NULL; var_sct *wgt=NULL; var_sct *wgt_avg=NULL; var_sct *wgt_out=NULL; #ifdef ENABLE_MPI /* Declare all MPI-specific variables here */ MPI_Status mpi_stt; /* [enm] Status check to decode msg_tag_typ */ nco_bool TKN_WRT_FREE=True; /* [flg] Write-access to output file is available */ int fl_nm_lng; /* [nbr] Output file 
name length */ int msg_bfr[msg_bfr_lng]; /* [bfr] Buffer containing var, idx, tkn_wrt_rsp */ int msg_tag_typ; /* [enm] MPI message tag type */ int prc_rnk; /* [idx] Process rank */ int prc_nbr=0; /* [nbr] Number of MPI processes */ int tkn_wrt_rsp; /* [enm] Response to request for write token */ int var_wrt_nbr=0; /* [nbr] Variables written to output file until now */ int rnk_wrk; /* [idx] Worker rank */ int wrk_id_bfr[wrk_id_bfr_lng]; /* [bfr] Buffer for rnk_wrk */ #endif /* !ENABLE_MPI */ static struct option opt_lng[]={ /* Structure ordered by short option key if possible */ /* Long options with no argument, no short option counterpart */ {"clean",no_argument,0,0}, /* [flg] Clean memory prior to exit */ {"mmr_cln",no_argument,0,0}, /* [flg] Clean memory prior to exit */ {"drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"dirty",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"mmr_drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"ddra",no_argument,0,0}, /* [flg] DDRA diagnostics */ {"mdl_cmp",no_argument,0,0}, /* [flg] DDRA diagnostics */ {"ram_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */ {"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */ {"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */ {"diskless_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */ {"share_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"create_share",no_argument,0,0}, /* [flg] Create (netCDF3) file(s) with unbuffered I/O */ {"open_share",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) with unbuffered I/O */ {"unbuffered_io",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"uio",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */ {"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to 
temporary file */ {"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */ {"version",no_argument,0,0}, {"vrs",no_argument,0,0}, /* Long options with argument, no short option counterpart */ {"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */ {"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */ {"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes */ {"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */ {"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */ {"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */ {"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */ {"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */ {"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size scalar */ {"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */ {"fl_fmt",required_argument,0,0}, {"file_format",required_argument,0,0}, {"gaa",required_argument,0,0}, /* [sng] Global attribute add */ {"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */ {"hdr_pad",required_argument,0,0}, {"header_pad",required_argument,0,0}, {"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ {"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ /* Long options with short counterparts */ {"3",no_argument,0,'3'}, {"4",no_argument,0,'4'}, {"netcdf4",no_argument,0,'4'}, {"5",no_argument,0,'5'}, {"64bit_data",no_argument,0,'5'}, {"cdf5",no_argument,0,'5'}, {"pnetcdf",no_argument,0,'5'}, {"64bit_offset",no_argument,0,'6'}, {"7",no_argument,0,'7'}, {"average",required_argument,0,'a'}, {"avg",required_argument,0,'a'}, 
{"append",no_argument,0,'A'}, {"mask_condition",required_argument,0,'B'}, {"msk_cnd_sng",required_argument,0,'B'}, {"retain-degenerate-dimensions",no_argument,0,'b'}, /* [flg] Retain degenerate dimensions */ {"rdd",no_argument,0,'b'}, /* [flg] Retain degenerate dimensions */ {"xtr_ass_var",no_argument,0,'c'}, {"xcl_ass_var",no_argument,0,'C'}, {"no_coords",no_argument,0,'C'}, {"no_crd",no_argument,0,'C'}, {"coords",no_argument,0,'c'}, {"crd",no_argument,0,'c'}, {"dbg_lvl",required_argument,0,'D'}, {"debug",required_argument,0,'D'}, {"nco_dbg_lvl",required_argument,0,'D'}, {"dimension",required_argument,0,'d'}, {"dmn",required_argument,0,'d'}, {"fortran",no_argument,0,'F'}, {"ftn",no_argument,0,'F'}, {"history",no_argument,0,'h'}, {"hst",no_argument,0,'h'}, {"wgt_msk_crd_var",no_argument,0,'I'}, {"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */ {"deflate",required_argument,0,'L'}, /* [enm] Deflate level */ {"local",required_argument,0,'l'}, {"lcl",required_argument,0,'l'}, {"mask-variable",required_argument,0,'m'}, {"mask_variable",required_argument,0,'m'}, {"mask",required_argument,0,'m'}, {"msk_var",required_argument,0,'m'}, {"msk_nm",required_argument,0,'m'}, {"mask-value",required_argument,0,'M'}, {"mask_value",required_argument,0,'M'}, {"msk_val",required_argument,0,'M'}, {"nintap",required_argument,0,'n'}, {"nmr",no_argument,0,'N'}, {"numerator",no_argument,0,'N'}, {"overwrite",no_argument,0,'O'}, {"ovr",no_argument,0,'O'}, {"output",required_argument,0,'o'}, {"fl_out",required_argument,0,'o'}, {"path",required_argument,0,'p'}, {"retain",no_argument,0,'R'}, {"rtn",no_argument,0,'R'}, {"revision",no_argument,0,'r'}, {"suspend", no_argument,0,'S'}, {"mask_comparator",required_argument,0,'T'}, {"msk_cmp_typ",required_argument,0,'T'}, {"op_rlt",required_argument,0,'T'}, {"thr_nbr",required_argument,0,'t'}, {"threads",required_argument,0,'t'}, {"omp_num_threads",required_argument,0,'t'}, {"variable",required_argument,0,'v'}, 
{"normalize-by-tally",no_argument,0,'W',}, {"exclude",no_argument,0,'x'}, {"xcl",no_argument,0,'x'}, {"weight",no_argument,0,'w'}, {"wgt",no_argument,0,'w'}, {"wgt_var",no_argument,0,'w'}, {"operation",required_argument,0,'y'}, {"op_typ",required_argument,0,'y'}, {"help",no_argument,0,'?'}, {"hlp",no_argument,0,'?'}, {0,0,0,0} }; /* end opt_lng */ int opt_idx=0; /* Index of current long option into opt_lng array */ #ifdef ENABLE_MPI /* MPI Initialization */ MPI_Init(&argc,&argv); MPI_Comm_size(MPI_COMM_WORLD,&prc_nbr); MPI_Comm_rank(MPI_COMM_WORLD,&prc_rnk); #endif /* !ENABLE_MPI */ /* Start timer and save command line */ ddra_info.tmr_flg=nco_tmr_srt; rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); ddra_info.tmr_flg=nco_tmr_mtd; cmd_ln=nco_cmd_ln_sng(argc,argv); NORMALIZE_BY_TALLY=NORMALIZE_BY_TALLY; /* CEWI: Avert compiler warning that variable is set but never used */ NORMALIZE_BY_WEIGHT=NORMALIZE_BY_WEIGHT; /* CEWI: Avert compiler warning that variable is set but never used */ /* Get program name and set program enum (e.g., nco_prg_id=ncra) */ nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id); /* Parse command line arguments */ while(1){ /* getopt_long_only() allows one dash to prefix long options */ opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx); /* NB: access to opt_crr is only valid when long_opt is detected */ if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */ opt_crr=(char *)strdup(opt_lng[opt_idx].name); /* Process long options without short option counterparts */ if(opt == 0){ if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){ bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){ cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_byt */ 
if(!strcmp(opt_crr,"cnk_min") || !strcmp(opt_crr,"chunk_min")){ cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_min */ if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){ /* Copy limit argument for later processing */ cnk_arg[cnk_nbr]=(char *)strdup(optarg); cnk_nbr++; } /* endif cnk */ if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){ cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){ /* Chunking map */ cnk_map_sng=(char *)strdup(optarg); cnk_map=nco_cnk_map_get(cnk_map_sng); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){ /* Chunking policy */ cnk_plc_sng=(char *)strdup(optarg); cnk_plc=nco_cnk_plc_get(cnk_plc_sng); } /* endif cnk */ if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"ddra") || !strcmp(opt_crr,"mdl_cmp")) ddra_info.flg_ddra=flg_ddra=True; /* [flg] DDRA diagnostics */ if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt); if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){ gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *)); gaa_arg[gaa_nbr++]=(char *)strdup(optarg); } /* endif gaa */ if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){ hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif "hdr_pad" */ if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){ log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) 
nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); nc_set_log_level(log_lvl); } /* !log_lvl */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Create (netCDF3) file(s) in RAM */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Open (netCDF3) file(s) in RAM */ if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"create_share")) SHARE_CREATE=True; /* [flg] Create (netCDF3) file(s) with unbuffered I/O */ if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"open_share")) SHARE_OPEN=True; /* [flg] Open (netCDF3) file(s) with unbuffered I/O */ if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){ (void)nco_vrs_prn(CVS_Id,CVS_Revision); nco_exit(EXIT_SUCCESS); } /* endif "vrs" */ if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True; if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False; } /* opt != 0 */ /* Process short options */ switch(opt){ case 0: /* Long options have already been processed, return */ break; case '3': /* Request netCDF3 output storage format */ fl_out_fmt=NC_FORMAT_CLASSIC; break; case '4': /* Request netCDF4 output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4; break; case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */ fl_out_fmt=NC_FORMAT_CDF5; break; case '6': /* Request netCDF3 64-bit offset output storage format */ fl_out_fmt=NC_FORMAT_64BIT; break; case '7': /* Request netCDF4-classic output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC; break; case 'A': /* Toggle FORCE_APPEND */ FORCE_APPEND=!FORCE_APPEND; break; case 'a': /* Dimensions over which to average hyperslab */ if(flg_opt_a){ (void)fprintf(fp_stdout,"%s: ERROR Option -a appears more than once\n",nco_prg_nm); (void)fprintf(fp_stdout,"%s: HINT Use -a 
dim1,dim2,... not -a dim1 -a dim2 ...\n",nco_prg_nm); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); } /* endif */ dmn_avg_lst_in=nco_lst_prs_2D(optarg,",",&dmn_avg_nbr); flg_opt_a=True; break; case 'B': /* Mask string to be parsed */ msk_cnd_sng=(char *)strdup(optarg); break; case 'b': /* [flg] Retain degenerate dimensions */ flg_rdd=True; break; case 'C': /* Extract all coordinates associated with extracted variables? */ EXTRACT_ASSOCIATED_COORDINATES=False; break; case 'c': EXTRACT_ALL_COORDINATES=True; break; case 'D': /* Debugging level. Default is 0. */ nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); break; case 'd': /* Copy limit argument for later processing */ lmt_arg[lmt_nbr]=(char *)strdup(optarg); lmt_nbr++; break; case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). */ FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV; break; case 'h': /* Toggle appending to history global attribute */ HISTORY_APPEND=!HISTORY_APPEND; break; case 'I': /* [flg] Weight and/or mask coordinate variables */ WGT_MSK_CRD_VAR=!WGT_MSK_CRD_VAR; break; case 'L': /* [enm] Deflate level. Default is 0. */ dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); break; case 'l': /* Local path prefix for files retrieved from remote file system */ fl_pth_lcl=(char *)strdup(optarg); break; case 'm': /* Name of variable to use as mask in reducing. Default is none */ msk_nm=(char *)strdup(optarg); break; case 'M': /* Good data defined by relation to mask value. Default is 1. 
*/ msk_val=strtod(optarg,&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtod",sng_cnv_rcd); break; case 'N': NRM_BY_DNM=False; NORMALIZE_BY_TALLY=False; NORMALIZE_BY_WEIGHT=False; break; case 'n': NORMALIZE_BY_WEIGHT=False; (void)fprintf(fp_stdout,"%s: ERROR This option has been disabled while I rethink its implementation\n",nco_prg_nm); nco_exit(EXIT_FAILURE); break; case 'O': /* Toggle FORCE_OVERWRITE */ FORCE_OVERWRITE=!FORCE_OVERWRITE; break; case 'o': /* Name of output file */ fl_out=(char *)strdup(optarg); break; case 'p': /* Common file path */ fl_pth=(char *)strdup(optarg); break; case 'R': /* Toggle removal of remotely-retrieved-files. Default is True. */ RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC; break; case 'r': /* Print CVS program information and copyright notice */ (void)nco_vrs_prn(CVS_Id,CVS_Revision); (void)nco_lbr_vrs_prn(); (void)nco_cpy_prn(); (void)nco_cnf_prn(); nco_exit(EXIT_SUCCESS); break; #ifdef ENABLE_MPI case 'S': /* Suspend with signal handler to facilitate debugging */ if(signal(SIGUSR1,nco_cnt_run) == SIG_ERR) (void)fprintf(fp_stdout,"%s: ERROR Could not install suspend handler.\n",nco_prg_nm); while(!nco_spn_lck_brk) usleep(nco_spn_lck_us); /* Spinlock. fxm: should probably insert a sched_yield */ break; #endif /* !ENABLE_MPI */ case 'T': /* Relational operator type. 
Default is 0, eq, equality */ op_typ_rlt=nco_op_prs_rlt(optarg); break; case 't': /* Thread number */ thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); break; case 'v': /* Variables to extract/exclude */ /* Replace commas with hashes when within braces (convert back later) */ optarg_lcl=(char *)strdup(optarg); (void)nco_rx_comma2hash(optarg_lcl); var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr); optarg_lcl=(char *)nco_free(optarg_lcl); xtr_nbr=var_lst_in_nbr; break; case 'W': NORMALIZE_BY_TALLY=False; (void)fprintf(fp_stdout,"%s: ERROR This option has been disabled while I rethink its implementation\n",nco_prg_nm); nco_exit(EXIT_FAILURE); break; case 'w': /* Variable to use as weight in reducing. Default is none */ wgt_nm=(char *)strdup(optarg); break; case 'x': /* Exclude rather than extract variables specified with -v */ EXCLUDE_INPUT_LIST=True; break; case 'y': /* Operation type */ nco_op_typ_sng=(char *)strdup(optarg); nco_op_typ=nco_op_typ_get(nco_op_typ_sng); break; case '?': /* Print proper usage */ (void)nco_usg_prn(); nco_exit(EXIT_SUCCESS); break; case '-': /* Long options are not allowed */ (void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); break; default: /* Print proper usage */ (void)fprintf(stdout,"%s ERROR in command-line syntax/options. 
Please reformulate command accordingly.\n",nco_prg_nm_get()); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); break; } /* end switch */ if(opt_crr) opt_crr=(char *)nco_free(opt_crr); } /* end while loop */ /* Parse mask string */ if(msk_cnd_sng){ int cst_zero=0; /* Set arguments for scan */ prs_arg.fl_in=NULL; /* [sng] Input data file */ prs_arg.in_id=0; /* [id] Input data file ID */ prs_arg.fl_out=NULL; /* [sng] Output data file */ prs_arg.out_id=0; /* [id] Output data file ID */ prs_arg.att_lst=NULL; /* [sct] Attributes in script */ prs_arg.nbr_att=&cst_zero; /* [nbr] Number of attributes in script */ prs_arg.dmn_in=NULL; /* [dmn_in] List of all dimensions in input */ prs_arg.nbr_dmn_in=0; /* [nbr] Number of dimensions in input */ prs_arg.dmn_out=NULL; /* [sct] Pointer to output dimension list */ prs_arg.nbr_dmn_out=&cst_zero; /* [nbr] Number of dimensions in output list */ prs_arg.sym_tbl=NULL; /* [fnc] Symbol table for functions */ prs_arg.sym_tbl_nbr=0; /* [nbr] Number of functions in table */ prs_arg.ntl_scn=False; /* [flg] Initial scan of script */ prs_arg.var_LHS=NULL; /* [var] LHS cast variable */ prs_arg.nco_op_typ=nco_op_nil; /* [enm] Operation type */ /* Initialize line counter */ ncap_ln_nbr_crr=(size_t *)nco_realloc(ncap_ln_nbr_crr,ncap_ncl_dpt_crr+1UL); ncap_ln_nbr_crr[ncap_ncl_dpt_crr]=1UL; /* [cnt] Line number incremented in ncap.l */ if(ncap_ncwa_scn(&prs_arg,msk_cnd_sng,&msk_nm,&msk_val,&op_typ_rlt) == 0) nco_exit(EXIT_FAILURE); } /* endif msk_cnd_sng */ /* Ensure we do not attempt to normalize by non-existent weight */ if(wgt_nm == NULL) NORMALIZE_BY_WEIGHT=False; /* Process positional arguments and fill-in filenames */ fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE); /* Make uniform list of user-specified chunksizes */ if(cnk_nbr > 0) cnk_dmn=nco_cnk_prs(cnk_nbr,cnk_arg); /* Make uniform list of user-specified dimension limits */ lmt=nco_lmt_prs(lmt_nbr,lmt_arg); /* Initialize thread information */ 
thr_nbr=nco_openmp_ini(thr_nbr); in_id_arr=(int *)nco_malloc(thr_nbr*sizeof(int)); /* Parse filename */ fl_in=nco_fl_nm_prs(fl_in,0,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); /* Make sure file is on local system and is readable or die trying */ fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id); /* Get number of variables, dimensions, and record dimension ID of input file */ (void)nco_inq(in_id,&nbr_dmn_fl,&nbr_var_fl,(int *)NULL,&rec_dmn_id); (void)nco_inq_format(in_id,&fl_in_fmt); /* Form initial extraction list which may include extended regular expressions */ xtr_lst=nco_var_lst_mk(in_id,nbr_var_fl,var_lst_in,EXCLUDE_INPUT_LIST,EXTRACT_ALL_COORDINATES,&xtr_nbr); /* Change included variables to excluded variables */ if(EXCLUDE_INPUT_LIST) xtr_lst=nco_var_lst_xcl(in_id,nbr_var_fl,xtr_lst,&xtr_nbr); /* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */ cnv=nco_cnv_ini(in_id); /* Add all coordinate variables to extraction list */ if(EXTRACT_ALL_COORDINATES) xtr_lst=nco_var_lst_crd_add(in_id,nbr_dmn_fl,nbr_var_fl,xtr_lst,&xtr_nbr,cnv); /* Extract coordinates associated with extracted variables */ if(EXTRACT_ASSOCIATED_COORDINATES) xtr_lst=nco_var_lst_crd_ass_add(in_id,xtr_lst,&xtr_nbr,cnv); /* Sort extraction list by variable ID for fastest I/O */ if(xtr_nbr > 1) xtr_lst=nco_lst_srt_nm_id(xtr_lst,xtr_nbr,False); /* Find coordinate/dimension values associated with user-specified limits NB: nco_lmt_evl() with same nc_id contains OpenMP critical region */ for(idx=0;idx<lmt_nbr;idx++) (void)nco_lmt_evl(in_id,lmt[idx],0L,FORTRAN_IDX_CNV); /* Find dimensions associated with variables to be extracted */ dmn_lst=nco_dmn_lst_ass_var(in_id,xtr_lst,xtr_nbr,&nbr_dmn_xtr); /* Fill-in dimension structure for all extracted 
dimensions */ dim=(dmn_sct **)nco_malloc(nbr_dmn_xtr*sizeof(dmn_sct *)); for(idx=0;idx<nbr_dmn_xtr;idx++) dim[idx]=nco_dmn_fll(in_id,dmn_lst[idx].id,dmn_lst[idx].nm); /* Merge hyperslab limit information into dimension structures */ if(lmt_nbr > 0) (void)nco_dmn_lmt_mrg(dim,nbr_dmn_xtr,lmt,lmt_nbr); /* Not specifying any dimensions is interpreted as specifying all dimensions */ if(dmn_avg_nbr == 0){ dmn_avg_nbr=nbr_dmn_xtr; dmn_avg_lst_in=(char **)nco_malloc(dmn_avg_nbr*sizeof(char *)); for(idx=0;idx<dmn_avg_nbr;idx++){ dmn_avg_lst_in[idx]=(char *)strdup(dmn_lst[idx].nm); } /* end loop over idx */ (void)fprintf(stderr,"%s: INFO No dimensions specified with -a, therefore reducing (averaging, taking minimum, etc.) over all dimensions\n",nco_prg_nm); } /* end if dmn_avg_nbr == 0 */ /* Dimension list no longer needed */ dmn_lst=nco_nm_id_lst_free(dmn_lst,nbr_dmn_xtr); if(dmn_avg_nbr > 0){ if(dmn_avg_nbr > nbr_dmn_xtr){ (void)fprintf(fp_stdout,"%s: ERROR More reducing dimensions than extracted dimensions\n",nco_prg_nm); nco_exit(EXIT_FAILURE); } /* end if */ /* Create structured list of reducing dimension names and IDs */ dmn_avg_lst=nco_dmn_lst_mk(in_id,dmn_avg_lst_in,dmn_avg_nbr); /* Dimension average list no longer needed */ if(dmn_avg_nbr > 0) dmn_avg_lst_in=nco_sng_lst_free(dmn_avg_lst_in,dmn_avg_nbr); /* Form list of reducing dimensions from extracted input dimensions */ dmn_avg=(dmn_sct **)nco_malloc(dmn_avg_nbr*sizeof(dmn_sct *)); for(idx_avg=0;idx_avg<dmn_avg_nbr;idx_avg++){ for(idx=0;idx<nbr_dmn_xtr;idx++){ if(!strcmp(dmn_avg_lst[idx_avg].nm,dim[idx]->nm)) break; } /* end loop over idx_avg */ if(idx != nbr_dmn_xtr){ dmn_avg[idx_avg]=dim[idx]; }else{ (void)fprintf(stderr,"%s: WARNING reducing dimension \"%s\" is not contained in any variable in extraction list\n",nco_prg_nm,dmn_avg_lst[idx_avg].nm); /* Collapse dimension average list by omitting irrelevent dimension */ 
(void)memmove(dmn_avg_lst+idx_avg*sizeof(nm_id_sct),dmn_avg_lst+(idx_avg+1)*sizeof(nm_id_sct),(dmn_avg_nbr-idx_avg-1)*sizeof(nm_id_sct)); --dmn_avg_nbr; dmn_avg_lst=(nm_id_sct *)nco_realloc(dmn_avg_lst,dmn_avg_nbr*sizeof(nm_id_sct)); dmn_avg=(dmn_sct **)nco_realloc(dmn_avg,dmn_avg_nbr*sizeof(dmn_sct *)); } /* end else */ } /* end loop over idx_avg */ /* Make sure no reducing dimension is specified more than once */ for(idx=0;idx<dmn_avg_nbr;idx++){ for(idx_avg=0;idx_avg<dmn_avg_nbr;idx_avg++){ if(idx_avg != idx){ if(dmn_avg[idx]->id == dmn_avg[idx_avg]->id){ (void)fprintf(fp_stdout,"%s: ERROR %s specified more than once in reducing list\n",nco_prg_nm,dmn_avg[idx]->nm); nco_exit(EXIT_FAILURE); } /* end if */ } /* end if */ } /* end loop over idx_avg */ } /* end loop over idx */ /* Averaged dimensions appear in output file iff flg_rdd is set */ dmn_out=(dmn_sct **)nco_malloc((flg_rdd ? nbr_dmn_xtr : nbr_dmn_xtr-dmn_avg_nbr)*sizeof(dmn_sct *)); nbr_dmn_out=0; for(idx=0;idx<nbr_dmn_xtr;idx++){ for(idx_avg=0;idx_avg<dmn_avg_nbr;idx_avg++){ if(!strcmp(dmn_avg_lst[idx_avg].nm,dim[idx]->nm)) break; } /* end loop over idx_avg */ if(idx_avg == dmn_avg_nbr || flg_rdd){ /* Output list comprises non-averaged and, if specified, degenerate dimensions */ dmn_out[nbr_dmn_out]=nco_dmn_dpl(dim[idx]); (void)nco_dmn_xrf(dim[idx],dmn_out[nbr_dmn_out]); if(idx_avg != dmn_avg_nbr && flg_rdd){ /* Cut degenerate dimensions down to size */ dmn_out[nbr_dmn_out]->cnt=1L; dmn_out[nbr_dmn_out]->srt=dmn_out[nbr_dmn_out]->end=0L; } /* !flg_rdd */ nbr_dmn_out++; } /* end if idx_avg */ } /* end loop over idx_xtr */ /* Dimension average list no longer needed */ dmn_avg_lst=nco_nm_id_lst_free(dmn_avg_lst,dmn_avg_nbr); if(nbr_dmn_out != (flg_rdd ? nbr_dmn_xtr : nbr_dmn_xtr-dmn_avg_nbr)){ (void)fprintf(fp_stdout,"%s: ERROR nbr_dmn_out != %s\n",nco_prg_nm,(flg_rdd) ? 
"nbr_dmn_xtr" : "nbr_dmn_xtr-dmn_avg_nbr"); nco_exit(EXIT_FAILURE); } /* end if */ } /* dmn_avg_nbr <= 0 */ /* Fill-in variable structure list for all extracted variables */ var=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *)); var_out=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *)); for(idx=0;idx<xtr_nbr;idx++){ var[idx]=nco_var_fll(in_id,xtr_lst[idx].id,xtr_lst[idx].nm,dim,nbr_dmn_xtr); var_out[idx]=nco_var_dpl(var[idx]); (void)nco_xrf_var(var[idx],var_out[idx]); (void)nco_xrf_dmn(var_out[idx]); } /* end loop over idx */ /* Extraction list no longer needed */ xtr_lst=nco_nm_id_lst_free(xtr_lst,xtr_nbr); /* Divide variable lists into lists of fixed variables and variables to be processed */ (void)nco_var_lst_dvd(var,var_out,xtr_nbr,cnv,True,nco_pck_plc_nil,nco_pck_map_nil,dmn_avg,dmn_avg_nbr,&var_fix,&var_fix_out,&nbr_var_fix,&var_prc,&var_prc_out,&nbr_var_prc); /* We now have final list of variables to extract. Phew. */ if(nco_dbg_lvl >= nco_dbg_var){ for(idx=0;idx<xtr_nbr;idx++) (void)fprintf(stderr,"var[%d]->nm = %s, ->id=[%d]\n",idx,var[idx]->nm,var[idx]->id); for(idx=0;idx<nbr_var_fix;idx++) (void)fprintf(stderr,"var_fix[%d]->nm = %s, ->id=[%d]\n",idx,var_fix[idx]->nm,var_fix[idx]->id); for(idx=0;idx<nbr_var_prc;idx++) (void)fprintf(stderr,"var_prc[%d]->nm = %s, ->id=[%d]\n",idx,var_prc[idx]->nm,var_prc[idx]->id); } /* end if */ #ifdef ENABLE_MPI if(prc_rnk == rnk_mgr){ /* MPI manager code */ #endif /* !ENABLE_MPI */ /* Make output and input files consanguinous */ if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt; /* Verify output file format supports requested actions */ (void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl); /* Open output file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); if(nco_dbg_lvl >= nco_dbg_sbr) (void)fprintf(stderr,"Input, output file IDs = %d, %d\n",in_id,out_id); /* Copy all global attributes */ 
(void)nco_att_cpy(in_id,out_id,NC_GLOBAL,NC_GLOBAL,(nco_bool)True); /* Catenate time-stamped command line to "history" global attribute */ if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln); if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in,in_id,out_id); if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr); if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id); if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr); #ifdef ENABLE_MPI /* Initialize MPI task information */ if(prc_nbr > 0 && HISTORY_APPEND) (void)nco_mpi_att_cat(out_id,prc_nbr); #endif /* !ENABLE_MPI */ /* Define dimensions in output file */ (void)nco_dmn_dfn(fl_out,out_id,dmn_out,nbr_dmn_out); /* Define variables in output file, copy their attributes */ (void)nco_var_dfn(in_id,fl_out,out_id,var_out,xtr_nbr,dmn_out,nbr_dmn_out,nco_pck_plc_nil,nco_pck_map_nil,dfl_lvl); #ifdef ENABLE_MPI } /* prc_rnk != rnk_mgr */ #endif /* !ENABLE_MPI */ /* Add new missing values to output file while in define mode */ if(msk_nm){ for(idx=0;idx<nbr_var_prc;idx++){ /* Define for var_prc_out because mss_val for var_prc will be overwritten in nco_var_mtd_refresh */ if(!var_prc_out[idx]->has_mss_val){ var_prc_out[idx]->has_mss_val=True; var_prc_out[idx]->mss_val=nco_mss_val_mk(var_prc[idx]->type); #ifdef ENABLE_MPI if(prc_rnk == rnk_mgr) #endif /* !ENABLE_MPI */ (void)nco_put_att(out_id,var_prc_out[idx]->id,nco_mss_val_sng_get(),var_prc_out[idx]->type,1,var_prc_out[idx]->mss_val.vp); } /* end if */ } /* end for */ } /* end if */ #ifdef ENABLE_MPI if(prc_rnk == rnk_mgr){ /* MPI manager code */ #endif /* !ENABLE_MPI */ /* Set chunksize parameters */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr); /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Take output file out of define mode */ if(hdr_pad == 
0UL){ (void)nco_enddef(out_id); }else{ (void)nco__enddef(out_id,hdr_pad); if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad); } /* hdr_pad */ #ifdef ENABLE_MPI } /* prc_rnk != rnk_mgr */ /* Manager obtains output filename and broadcasts to workers */ if(prc_rnk == rnk_mgr) fl_nm_lng=(int)strlen(fl_out_tmp); MPI_Bcast(&fl_nm_lng,1,MPI_INT,0,MPI_COMM_WORLD); if(prc_rnk != rnk_mgr) fl_out_tmp=(char *)nco_malloc((fl_nm_lng+1)*sizeof(char)); MPI_Bcast(fl_out_tmp,fl_nm_lng+1,MPI_CHAR,0,MPI_COMM_WORLD); if(prc_rnk == rnk_mgr){ /* MPI manager code */ TKN_WRT_FREE=False; #endif /* !ENABLE_MPI */ /* Copy variable data for non-processed variables */ (void)nco_var_val_cpy(in_id,out_id,var_fix,nbr_var_fix); #ifdef ENABLE_MPI /* Close output file so workers can open it */ nco_close(out_id); TKN_WRT_FREE=True; } /* prc_rnk != rnk_mgr */ #endif /* !ENABLE_MPI */ /* Assign zero to start and unity to stride vectors in output variables */ (void)nco_var_srd_srt_set(var_out,xtr_nbr); /* Close first input netCDF file */ nco_close(in_id); /* Loop over input files (not currently used, fl_nbr == 1) */ for(fl_idx=0;fl_idx<fl_nbr;fl_idx++){ /* Parse filename */ if(fl_idx != 0) fl_in=nco_fl_nm_prs(fl_in,fl_idx,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\nInput file %d is %s; ",fl_idx,fl_in); /* Make sure file is on local system and is readable or die trying */ if(fl_idx != 0) fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"local file %s:\n",fl_in); /* Open file once per thread to improve caching */ for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,in_id_arr+thr_idx); in_id=in_id_arr[0]; /* Perform various error-checks on input file */ if(False) (void)nco_fl_cmp_err_chk(); /* Find weighting variable in input file */ if(wgt_nm){ int wgt_id; 
rcd=nco_inq_varid(in_id,wgt_nm,&wgt_id); /* fxm: TODO #111 core dump if wgt has dimension not in extraction list */ wgt=nco_var_fll(in_id,wgt_id,wgt_nm,dim,nbr_dmn_xtr); /* Retrieve weighting variable */ /* NB: nco_var_get() with same nc_id contains OpenMP critical region */ (void)nco_var_get(in_id,wgt); /* fxm: Perhaps should allocate default tally array for wgt here That way, when wgt conforms to the first var_prc_out and it therefore does not get a tally array copied by nco_var_dpl() in nco_var_cnf_dmn(), it will at least have space for a tally array. TODO #114. */ } /* end if */ /* Find mask variable in input file */ if(msk_nm){ int msk_id; rcd=nco_inq_varid(in_id,msk_nm,&msk_id); /* fxm: TODO #111 core dump if msk has dimension not in extraction list */ msk=nco_var_fll(in_id,msk_id,msk_nm,dim,nbr_dmn_xtr); /* Retrieve mask variable */ /* NB: nco_var_get() with same nc_id contains OpenMP critical region */ (void)nco_var_get(in_id,msk); } /* end if */ /* Timestamp end of metadata setup and disk layout */ rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); ddra_info.tmr_flg=nco_tmr_rgl; #ifdef ENABLE_MPI if(prc_rnk == rnk_mgr){ /* MPI manager code */ /* Compensate for incrementing on each worker's first message */ var_wrt_nbr=-prc_nbr+1; idx=0; /* While variables remain to be processed or written... */ while(var_wrt_nbr < nbr_var_prc){ /* Receive message from any worker */ MPI_Recv(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&mpi_stt); /* Obtain MPI message tag type */ msg_tag_typ=mpi_stt.MPI_TAG; /* Get sender's prc_rnk */ rnk_wrk=wrk_id_bfr[0]; /* Allocate next variable, if any, to worker */ if(msg_tag_typ == msg_tag_wrk_rqs){ var_wrt_nbr++; /* [nbr] Number of variables written */ /* Workers close output file before sending next msg_tag_wrk_rqs */ /* fxm csz 20050924: Safe? Can't other process have write token here? 
*/ TKN_WRT_FREE=True; if(idx > nbr_var_prc-1){ msg_bfr[0]=idx_all_wrk_ass; /* [enm] All variables already assigned */ msg_bfr[1]=out_id; /* Output file ID */ }else{ /* Tell requesting worker to allocate space for next variable */ msg_bfr[0]=idx; /* [idx] Variable to be processed */ msg_bfr[1]=out_id; /* Output file ID */ msg_bfr[2]=var_prc_out[idx]->id; /* [id] Variable ID in output file */ /* Point to next variable on list */ idx++; } /* endif idx */ MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_wrk_rsp,MPI_COMM_WORLD); /* msg_tag_typ != msg_tag_wrk_rqs */ }else if(msg_tag_typ == msg_tag_tkn_wrt_rqs){ /* Allocate token if free, else ask worker to try later */ if(TKN_WRT_FREE){ TKN_WRT_FREE=False; msg_bfr[0]=tkn_wrt_rqs_xcp; /* Accept request for write token */ }else{ msg_bfr[0]=tkn_wrt_rqs_dny; /* Deny request for write token */ } /* !TKN_WRT_FREE */ MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD); } /* msg_tag_typ != msg_tag_tkn_wrt_rqs */ } /* end while var_wrt_nbr < nbr_var_prc */ }else{ /* prc_rnk != rnk_mgr, end Manager code begin Worker code */ wrk_id_bfr[0]=prc_rnk; while(1){ /* While work remains... 
*/ /* Send msg_tag_wrk_rqs */ wrk_id_bfr[0]=prc_rnk; MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rqs,MPI_COMM_WORLD); /* Receive msg_tag_wrk_rsp */ MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,0,msg_tag_wrk_rsp,MPI_COMM_WORLD,&mpi_stt); idx=msg_bfr[0]; out_id=msg_bfr[1]; if(idx == idx_all_wrk_ass) break; else{ var_prc_out[idx]->id=msg_bfr[2]; /* Process this variable same as UP code */ #else /* !ENABLE_MPI */ #ifdef _OPENMP /* OpenMP notes: firstprivate(): msk_out and wgt_out must be NULL on first call to nco_var_cnf_dmn() shared(): msk and wgt are not altered within loop private(): wgt_avg does not need initialization */ #pragma omp parallel for default(none) firstprivate(DO_CONFORM_MSK,DO_CONFORM_WGT,ddra_info,msk_out,wgt_out) private(idx,in_id,wgt_avg) shared(MULTIPLY_BY_TALLY,MUST_CONFORM,NRM_BY_DNM,WGT_MSK_CRD_VAR,nco_dbg_lvl,dmn_avg,dmn_avg_nbr,flg_ddra,flg_rdd,in_id_arr,msk,msk_nm,msk_val,nbr_var_prc,nco_op_typ,op_typ_rlt,out_id,nco_prg_nm,rcd,var_prc,var_prc_out,wgt,wgt_nm) #endif /* !_OPENMP */ /* UP and SMP codes main loop over variables */ for(idx=0;idx<nbr_var_prc;idx++){ /* Process all variables in current file */ #endif /* !ENABLE_MPI */ in_id=in_id_arr[omp_get_thread_num()]; if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm); if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr); /* Allocate and, if necessary, initialize accumulation space for all processed variables */ var_prc_out[idx]->sz=var_prc[idx]->sz; /* if((var_prc_out[idx]->tally=var_prc[idx]->tally=(long *)nco_malloc_flg(var_prc_out[idx]->sz*sizeof(long int))) == NULL){*/ /* fxm: verify that var_prc->tally is not needed */ if((var_prc_out[idx]->tally=(long *)nco_malloc_flg(var_prc_out[idx]->sz*sizeof(long int))) == NULL){ (void)fprintf(fp_stdout,"%s: ERROR Unable to malloc() %ld*%ld bytes for tally buffer for variable %s in main()\n",nco_prg_nm_get(),var_prc_out[idx]->sz,(long int)sizeof(long int),var_prc_out[idx]->nm); nco_exit(EXIT_FAILURE); } /* end 
if err */ (void)nco_zero_long(var_prc_out[idx]->sz,var_prc_out[idx]->tally); if((var_prc_out[idx]->val.vp=(void *)nco_malloc_flg(var_prc_out[idx]->sz*nco_typ_lng(var_prc_out[idx]->type))) == NULL){ (void)fprintf(fp_stdout,"%s: ERROR Unable to malloc() %ld*%lu bytes for value buffer for variable %s in main()\n",nco_prg_nm_get(),var_prc_out[idx]->sz,(unsigned long)nco_typ_lng(var_prc_out[idx]->type),var_prc_out[idx]->nm); nco_exit(EXIT_FAILURE); } /* end if err */ (void)nco_var_zero(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->val); (void)nco_var_mtd_refresh(in_id,var_prc[idx]); /* Retrieve variable from disk into memory */ if(False) (void)fprintf(fp_stdout,"%s: DEBUG: fxm TODO nco354 About to nco_var_get() %s\n",nco_prg_nm,var_prc[idx]->nm); /* NB: nco_var_get() with same nc_id contains OpenMP critical region */ (void)nco_var_get(in_id,var_prc[idx]); if(False) (void)fprintf(fp_stdout,"%s: DEBUG: fxm TODO nco354 Finished nco_var_get() %s\n",nco_prg_nm,var_prc[idx]->nm); /* Convert char, short, long, int, and float types to doubles before arithmetic */ var_prc[idx]=nco_typ_cnv_rth(var_prc[idx],nco_op_typ); var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ); if(msk_nm && (!var_prc[idx]->is_crd_var || WGT_MSK_CRD_VAR)){ msk_out=nco_var_cnf_dmn(var_prc[idx],msk,msk_out,MUST_CONFORM,&DO_CONFORM_MSK); /* If msk and var did not conform then do not mask var! 
*/ if(DO_CONFORM_MSK){ msk_out=nco_var_cnf_typ(var_prc[idx]->type,msk_out); /* mss_val for var_prc has been overwritten in nco_var_mtd_refresh() */ if(!var_prc[idx]->has_mss_val){ var_prc[idx]->has_mss_val=True; var_prc[idx]->mss_val=nco_mss_val_mk(var_prc[idx]->type); } /* end if */ /* Mask by changing variable to missing value where condition is false */ (void)nco_var_msk(var_prc[idx]->type,var_prc[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,msk_val,op_typ_rlt,msk_out->val,var_prc[idx]->val); } /* end if */ } /* end if */ /* Perform non-linear transformations before weighting */ if(!var_prc[idx]->is_crd_var){ switch(nco_op_typ){ case nco_op_avgsqr: /* Square variable before weighting */ case nco_op_rms: /* Square variable before weighting */ case nco_op_rmssdn: /* Square variable before weighting */ (void)nco_var_mlt(var_prc[idx]->type,var_prc[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->val,var_prc[idx]->val); break; default: /* All other operations are linear, do nothing to them yet */ break; } /* end case */ } /* var_prc[idx]->is_crd_var */ if(wgt_nm && (!var_prc[idx]->is_crd_var || WGT_MSK_CRD_VAR)){ /* fxm: nco_var_cnf_dmn() has bug where it does not allocate tally array for weights that do already conform to var_prc. TODO #114. 
*/ wgt_out=nco_var_cnf_dmn(var_prc[idx],wgt,wgt_out,MUST_CONFORM,&DO_CONFORM_WGT); if(DO_CONFORM_WGT){ wgt_out=nco_var_cnf_typ(var_prc[idx]->type,wgt_out); /* Weight after any initial non-linear operation so, e.g., variable is squared but not weights */ /* Weight variable by taking product of weight and variable */ (void)nco_var_mlt(var_prc[idx]->type,var_prc[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,wgt_out->val,var_prc[idx]->val); } /* end if weights conformed */ } /* end if weight was specified and then tested for conformance */ /* Copy (masked) (weighted) values from var_prc to var_prc_out */ (void)memcpy((void *)(var_prc_out[idx]->val.vp),(void *)(var_prc[idx]->val.vp),var_prc_out[idx]->sz*nco_typ_lng(var_prc_out[idx]->type)); /* 20050516: fxm: destruction of var_prc_out in nco_var_avg() leaves dangling pointers in var_out? */ /* Reduce variable over specified dimensions (tally array is set here) NB: var_prc_out[idx] is new, so corresponding var_out[idx] is dangling */ var_prc_out[idx]=nco_var_avg(var_prc_out[idx],dmn_avg,dmn_avg_nbr,nco_op_typ,flg_rdd,&ddra_info); /* var_prc_out[idx]->val now holds numerator of averaging expression documented in NCO Users Guide Denominator is also tricky due to sundry normalization options These logical switches are VERY tricky---be careful modifying them */ if(NRM_BY_DNM && DO_CONFORM_WGT && (!var_prc[idx]->is_crd_var || WGT_MSK_CRD_VAR)){ /* Duplicate wgt_out as wgt_avg so that wgt_out is not contaminated by any averaging operation and may be reused on next variable. 
Free wgt_avg after each use but continue to reuse wgt_out */ wgt_avg=nco_var_dpl(wgt_out); if(var_prc[idx]->has_mss_val){ double mss_val_dbl=double_CEWI; /* Set denominator to missing value at all locations where variable is missing value If this is accomplished by setting weight to missing value wherever variable is missing value then weight must not be re-used by next variable (which might conform but have missing values in different locations) This is one good reason to copy wgt_out into disposable wgt_avg for each new variable */ /* First, make sure wgt_avg has same missing value as variable */ (void)nco_mss_val_cp(var_prc[idx],wgt_avg); /* Copy missing value into double precision variable */ switch(wgt_avg->type){ case NC_FLOAT: mss_val_dbl=wgt_avg->mss_val.fp[0]; break; case NC_DOUBLE: mss_val_dbl=wgt_avg->mss_val.dp[0]; break; case NC_INT: mss_val_dbl=wgt_avg->mss_val.ip[0]; break; case NC_SHORT: mss_val_dbl=wgt_avg->mss_val.sp[0]; break; case NC_USHORT: mss_val_dbl=wgt_avg->mss_val.usp[0]; break; case NC_UINT: mss_val_dbl=wgt_avg->mss_val.uip[0]; break; case NC_INT64: mss_val_dbl=wgt_avg->mss_val.i64p[0]; break; case NC_UINT64: mss_val_dbl=wgt_avg->mss_val.ui64p[0]; break; case NC_BYTE: mss_val_dbl=wgt_avg->mss_val.bp[0]; break; case NC_UBYTE: mss_val_dbl=wgt_avg->mss_val.cp[0]; break; case NC_CHAR: mss_val_dbl=wgt_avg->mss_val.cp[0]; break; case NC_STRING: break; /* Do nothing */ default: nco_dfl_case_nc_type_err(); break; } /* end switch */ /* Second, mask wgt_avg where variable is missing value */ (void)nco_var_msk(wgt_avg->type,wgt_avg->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,mss_val_dbl,nco_op_ne,var_prc[idx]->val,wgt_avg->val); } /* endif weight must be checked for missing values */ /* Free current input buffer */ var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp); if(msk_nm && DO_CONFORM_MSK){ /* Must mask weight in same fashion as variable was masked If msk and var did not conform then do not mask wgt Ensure wgt_avg has a missing value 
*/ if(!wgt_avg->has_mss_val){ wgt_avg->has_mss_val=True; wgt_avg->mss_val=nco_mss_val_mk(wgt_avg->type); } /* end if */ /* Mask by changing weight to missing value where condition is false */ (void)nco_var_msk(wgt_avg->type,wgt_avg->sz,wgt_avg->has_mss_val,wgt_avg->mss_val,msk_val,op_typ_rlt,msk_out->val,wgt_avg->val); } /* endif weight must be masked */ /* fxm: temporary kludge to make sure weight has tally space wgt_avg may lack valid tally array in ncwa because wgt_avg is created, sometimes, before the tally array for var_prc_out[idx] is created. When this occurs the nco_var_dpl() call in nco_var_cnf_dmn() does not copy tally array into wgt_avg. See related note about this above. TODO #114.*/ if(wgt_avg->sz > 0) if((wgt_avg->tally=(long *)nco_realloc(wgt_avg->tally,wgt_avg->sz*sizeof(long int))) == NULL){ (void)fprintf(fp_stdout,"%s: ERROR Unable to realloc() %ld*%ld bytes for tally buffer for weight %s in main()\n",nco_prg_nm_get(),wgt_avg->sz,(long int)sizeof(long int),wgt_avg->nm); nco_exit(EXIT_FAILURE); } /* end if */ /* Average weight over specified dimensions (tally array is set here) */ wgt_avg=nco_var_avg(wgt_avg,dmn_avg,dmn_avg_nbr,nco_op_avg,flg_rdd,&ddra_info); if(MULTIPLY_BY_TALLY){ /* NB: Currently this is not implemented */ /* Multiply numerator (weighted sum of variable) by tally We deviously accomplish this by dividing denominator by tally */ (void)nco_var_nrm(wgt_avg->type,wgt_avg->sz,wgt_avg->has_mss_val,wgt_avg->mss_val,wgt_avg->tally,wgt_avg->val); } /* endif */ /* Rather complex conditional statement is shorter than switch() */ if( /* Normalize by weighted tally if .... */ (nco_op_typ != nco_op_min) && /* ...operation is not min() and... */ (nco_op_typ != nco_op_max) && /* ...operation is not max() and... */ (nco_op_typ != nco_op_ttl || /* ...operation is not ttl() or... 
*/ var_prc[idx]->is_crd_var) /* ...variable is a coordinate */ ){ /* Divide numerator by masked, averaged, weights */ (void)nco_var_dvd(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,wgt_avg->val,var_prc_out[idx]->val); } /* endif */ /* Free wgt_avg, but keep wgt_out, after each use */ if(wgt_avg) wgt_avg=nco_var_free(wgt_avg); /* End of branch for normalization when weights were specified */ }else if(NRM_BY_DNM){ /* Branch for normalization when no weights were specified Normalization is just due to tally */ if(var_prc[idx]->is_crd_var){ /* Return linear averages of coordinates unless computing extrema Prevent coordinate variables from encountering nco_var_nrm_sdn() */ if((nco_op_typ != nco_op_min) && (nco_op_typ != nco_op_max)) (void)nco_var_nrm(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->val); }else{ /* !var_prc[idx]->is_crd_var */ switch(nco_op_typ){ case nco_op_avg: /* Normalize sum by tally to create mean */ case nco_op_sqravg: /* Normalize sum by tally to create mean */ case nco_op_avgsqr: /* Normalize sum of squares by tally to create mean square */ case nco_op_rms: /* Normalize sum of squares by tally to create mean square */ case nco_op_sqrt: /* Normalize sum by tally to create mean */ (void)nco_var_nrm(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->val); break; case nco_op_rmssdn: /* Normalize sum of squares by tally-1 to create mean square for sdn */ (void)nco_var_nrm_sdn(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->val); break; case nco_op_min: /* Minimum is already in buffer, do nothing */ case nco_op_max: /* Maximum is already in buffer, do nothing */ case nco_op_ttl: /* Total is already in buffer, do nothing */ 
break; default: (void)fprintf(fp_stdout,"%s: ERROR Illegal nco_op_typ in non-weighted normalization\n",nco_prg_nm); nco_exit(EXIT_FAILURE); break; } /* end switch */ } /* !var_prc[idx]->is_crd_var */ }else if(!NRM_BY_DNM){ /* Normalization has been turned off by user, we are done */ ; }else{ (void)fprintf(fp_stdout,"%s: ERROR Unforeseen logical branch in main()\n",nco_prg_nm); nco_exit(EXIT_FAILURE); } /* end if */ /* Some non-linear operations require additional processing */ if(!var_prc[idx]->is_crd_var){ switch(nco_op_typ){ case nco_op_sqravg: /* Square mean to create square of the mean (for sdn) */ (void)nco_var_mlt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val,var_prc_out[idx]->val); break; case nco_op_sqrt: /* Take root of mean to create root mean */ case nco_op_rms: /* Take root of mean of sum of squares to create root mean square */ case nco_op_rmssdn: /* Take root of sdn mean of sum of squares to create root mean square for sdn */ (void)nco_var_sqrt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->val,var_prc_out[idx]->val); break; default: break; } /* end switch */ } /* var_prc[idx]->is_crd_var */ /* Free tally buffer */ var_prc_out[idx]->tally=(long *)nco_free(var_prc_out[idx]->tally); /* Revert any arithmetic promotion but leave unpacked (for now) */ var_prc_out[idx]=nco_var_cnf_typ(var_prc_out[idx]->typ_upk,var_prc_out[idx]); #ifdef ENABLE_MPI /* Obtain token and prepare to write */ while(1){ /* Send msg_tag_tkn_wrt_rqs repeatedly until token obtained */ wrk_id_bfr[0]=prc_rnk; MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rqs,MPI_COMM_WORLD); MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt); tkn_wrt_rsp=msg_bfr[0]; /* Wait then re-send request */ if(tkn_wrt_rsp == tkn_wrt_rqs_dny) sleep(tkn_wrt_rqs_ntv); else break; } /* end 
while loop waiting for write token */ /* Worker has token---prepare to write */ if(tkn_wrt_rsp == tkn_wrt_rqs_xcp){ if(RAM_OPEN) md_open=NC_WRITE|NC_SHARE|NC_DISKLESS; else md_open=NC_WRITE|NC_SHARE; rcd=nco_fl_open(fl_out_tmp,md_open,&bfr_sz_hnt,&out_id); /* Set chunksize parameters */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr); /* Turn-off default filling behavior to enhance efficiency */ nco_set_fill(out_id,NC_NOFILL,&fll_md_old); #endif /* !ENABLE_MPI */ #ifdef _OPENMP #pragma omp critical #endif /* _OPENMP */ /* Common code for UP, SMP, and MPI */ { /* begin OpenMP critical */ /* Copy average to output file then free averaging buffer */ if(var_prc_out[idx]->nbr_dim == 0){ (void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type); }else{ /* end if variable is scalar */ (void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type); } /* end if variable is array */ } /* end OpenMP critical */ /* fxm: TODO nco722 DDRA diagnostics should work in MPI mode */ if(flg_ddra){ /* DDRA diagnostics Usage: mpncwa -O -C --mdl -a lat,lon,time -w lat ~/nco/data/in.nc ~/foo.nc mpncwa -O -C --mdl -a lat,lon -w lat ${DATA}/nco_bm/stl_5km.nc ~/foo.nc mpncwa -O -C --mdl -a lat,lon,time -w lat ${DATA}/nco_bm/gcm_T85.nc ~/foo.nc */ /* Assign remaining input for DDRA diagnostics */ ddra_info.lmn_nbr=var_prc[idx]->sz; /* [nbr] Variable size */ if(wgt) ddra_info.lmn_nbr_wgt=wgt->sz; /* [nbr] Weight size */ ddra_info.nco_op_typ=nco_op_typ; /* [enm] Operation type */ ddra_info.rnk_var=var_prc[idx]->nbr_dim; /* I [nbr] Variable rank (in input file) */ if(wgt) ddra_info.rnk_wgt=wgt->nbr_dim; /* [nbr] Rank of weight */ ddra_info.var_idx=idx; /* [enm] Index */ ddra_info.wrd_sz=nco_typ_lng(var_prc[idx]->type); /* [B] Bytes per 
element */ /* DDRA diagnostics */ rcd+=nco_ddra /* [fnc] Count operations */ (var_prc[idx]->nm, /* I [sng] Variable name */ wgt_nm, /* I [sng] Weight name */ &ddra_info); /* I [sct] DDRA information */ } /* !flg_ddra */ /* Free current output buffer */ var_prc_out[idx]->val.vp=nco_free(var_prc_out[idx]->val.vp); #ifdef ENABLE_MPI /* Close output file and increment written counter */ nco_close(out_id); var_wrt_nbr++; } /* endif tkn_wrt_rqs_xcp */ } /* end else !idx_all_wrk_ass */ } /* end while loop requesting work/token */ } /* endif Worker */ #else /* !ENABLE_MPI */ } /* end (OpenMP parallel for) loop over idx */ #endif /* !ENABLE_MPI */ if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\n"); /* Close input netCDF file */ for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_arr[thr_idx]); #ifdef ENABLE_MPI /* Manager moves output file (closed by workers) from temporary to permanent location */ if(prc_rnk == rnk_mgr) (void)nco_fl_mv(fl_out_tmp,fl_out); if(prc_rnk == rnk_mgr) /* This if statement conditions nco_fl_rm() below */ #else /* !ENABLE_MPI */ /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); #endif /* end !ENABLE_MPI */ /* Remove local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in); } /* end loop over fl_idx */ /* Clean memory unless dirty memory allowed */ if(flg_mmr_cln){ /* ncwa-specific memory */ if(dmn_avg_nbr > 0) dmn_avg=(dmn_sct **)nco_free(dmn_avg); if(msk) msk=nco_var_free(msk); if(msk_nm) msk_nm=(char *)nco_free(msk_nm); if(msk_out) msk_out=nco_var_free(msk_out); if(msk_cnd_sng) msk_cnd_sng=(char *)nco_free(msk_cnd_sng); if(wgt) wgt=nco_var_free(wgt); if(wgt_avg) wgt_avg=nco_var_free(wgt_avg); if(wgt_nm) wgt_nm=(char *)nco_free(wgt_nm); if(wgt_out) wgt_out=nco_var_free(wgt_out); /* NCO-generic clean-up */ /* Free individual strings/arrays */ if(cmd_ln) cmd_ln=(char *)nco_free(cmd_ln); if(cnk_map_sng) cnk_map_sng=(char 
*)nco_free(cnk_map_sng); if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng); if(fl_in) fl_in=(char *)nco_free(fl_in); if(fl_out) fl_out=(char *)nco_free(fl_out); if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp); if(fl_pth) fl_pth=(char *)nco_free(fl_pth); if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl); if(in_id_arr) in_id_arr=(int *)nco_free(in_id_arr); /* Free lists of strings */ if(fl_lst_in && fl_lst_abb == NULL) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr); if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1); if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr); if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr); if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr); /* Free limits */ for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]); if(lmt_nbr > 0) lmt=nco_lmt_lst_free(lmt,lmt_nbr); /* Free chunking information */ for(idx=0;idx<cnk_nbr;idx++) cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]); if(cnk_nbr > 0) cnk_dmn=nco_cnk_lst_free(cnk_dmn,cnk_nbr); /* Free dimension lists */ if(nbr_dmn_xtr > 0) dim=nco_dmn_lst_free(dim,nbr_dmn_xtr); if(nbr_dmn_out > 0) dmn_out=nco_dmn_lst_free(dmn_out,nbr_dmn_out); /* Free variable lists */ if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr); /* ncwa uses nco_var_lst_free() on var_prc_out because var_out has dangling pointers */ if(nbr_var_fix > 0) var_fix_out=nco_var_lst_free(var_fix_out,nbr_var_fix); if(nbr_var_prc > 0) var_prc_out=nco_var_lst_free(var_prc_out,nbr_var_prc); var_prc=(var_sct **)nco_free(var_prc); var_fix=(var_sct **)nco_free(var_fix); var_out=(var_sct **)nco_free(var_out); } /* !flg_mmr_cln */ #ifdef ENABLE_MPI MPI_Finalize(); #endif /* !ENABLE_MPI */ /* End timer */ ddra_info.tmr_flg=nco_tmr_end; /* [enm] Timer flag */ rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); if(rcd != NC_NOERR) nco_err_exit(rcd,"main"); nco_exit_gracefully(); return EXIT_SUCCESS; } /* end main() */
dpado.202001231600.no_bp_and_limit_batches.h
// // Created by Zhen Peng on 1/6/20. // #ifndef PADO_DPADO_H #define PADO_DPADO_H #include <vector> //#include <unordered_map> #include <map> #include <algorithm> #include <iostream> #include <limits.h> //#include <xmmintrin.h> #include <immintrin.h> #include <bitset> #include <math.h> #include <fstream> #include <omp.h> #include "globals.h" #include "dglobals.h" #include "dgraph.h" namespace PADO { template <VertexID BATCH_SIZE = 1024> class DistBVCPLL { private: static const VertexID BITPARALLEL_SIZE = 50; const inti THRESHOLD_PARALLEL = 80; // Structure for the type of label struct IndexType { // struct Batch { // VertexID batch_id; // Batch ID // VertexID start_index; // Index to the array distances where the batch starts // VertexID size; // Number of distances element in this batch // // Batch() = default; // Batch(VertexID batch_id_, VertexID start_index_, VertexID size_): // batch_id(batch_id_), start_index(start_index_), size(size_) // { } // }; struct DistanceIndexType { VertexID start_index; // Index to the array vertices where the same-distance vertices start VertexID size; // Number of the same-distance vertices UnweightedDist dist; // The real distance DistanceIndexType() = default; DistanceIndexType(VertexID start_index_, VertexID size_, UnweightedDist dist_): start_index(start_index_), size(size_), dist(dist_) { } }; // Bit-parallel Labels UnweightedDist bp_dist[BITPARALLEL_SIZE]; uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0} // std::vector<Batch> batches; // Batch info std::vector<DistanceIndexType> distances; // Distance info std::vector<VertexID> vertices; // Vertices in the label, presented as temporary ID size_t get_size_in_bytes() const { return sizeof(bp_dist) + sizeof(bp_sets) + // batches.size() * sizeof(Batch) + distances.size() * sizeof(DistanceIndexType) + vertices.size() * sizeof(VertexID); } void clean_all_indices() { std::vector<DistanceIndexType>().swap(distances); std::vector<VertexID>().swap(vertices); } }; 
//__attribute__((aligned(64))); struct ShortIndex { // I use BATCH_SIZE + 1 bit for indicator bit array. // The v.indicator[BATCH_SIZE] is set if in current batch v has got any new labels already. // In this way, it helps update_label_indices() and can be reset along with other indicator elements. // std::bitset<BATCH_SIZE + 1> indicator; // Global indicator, indicator[r] (0 <= r < BATCH_SIZE) is set means root r once selected as candidate already // If the Batch structure is not used, the indicator could just be BATCH_SIZE long. std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE, 0); // std::vector<uint8_t> indicator = std::vector<uint8_t>(BATCH_SIZE + 1, 0); // Use a queue to store candidates std::vector<VertexID> candidates_que = std::vector<VertexID>(BATCH_SIZE); VertexID end_candidates_que = 0; std::vector<uint8_t> is_candidate = std::vector<uint8_t>(BATCH_SIZE, 0); void indicator_reset() { std::fill(indicator.begin(), indicator.end(), 0); } }; //__attribute__((aligned(64))); // Type of Bit-Parallel Label struct BPLabelType { UnweightedDist bp_dist[BITPARALLEL_SIZE] = { 0 }; uint64_t bp_sets[BITPARALLEL_SIZE][2] = { {0} }; // [0]: S^{-1}, [1]: S^{0} }; // Type of Label Message Unit, for initializing distance table struct LabelTableUnit { VertexID root_id; VertexID label_global_id; UnweightedDist dist; LabelTableUnit() = default; LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) : root_id(r), label_global_id(l), dist(d) {} }; // Type of BitParallel Label Message Unit for initializing bit-parallel labels struct MsgBPLabel { VertexID r_root_id; UnweightedDist bp_dist[BITPARALLEL_SIZE]; uint64_t bp_sets[BITPARALLEL_SIZE][2]; MsgBPLabel() = default; MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2]) : r_root_id(r) { memcpy(bp_dist, dist, sizeof(bp_dist)); memcpy(bp_sets, sets, sizeof(bp_sets)); } }; VertexID num_v = 0; VertexID num_masters = 0; // VertexID BATCH_SIZE = 0; int host_id = 0; int num_hosts = 0; 
MPI_Datatype V_ID_Type; std::vector<IndexType> L; inline void bit_parallel_push_labels( const DistGraph &G, VertexID v_global, // std::vector<VertexID> &tmp_que, // VertexID &end_tmp_que, // std::vector< std::pair<VertexID, VertexID> > &sibling_es, // VertexID &num_sibling_es, // std::vector< std::pair<VertexID, VertexID> > &child_es, // VertexID &num_child_es, std::vector<VertexID> &tmp_q, VertexID &size_tmp_q, std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es, VertexID &size_tmp_sibling_es, std::vector< std::pair<VertexID, VertexID> > &tmp_child_es, VertexID &size_tmp_child_es, const VertexID &offset_tmp_q, std::vector<UnweightedDist> &dists, UnweightedDist iter); inline void bit_parallel_labeling( const DistGraph &G, std::vector<uint8_t> &used_bp_roots); // inline void bit_parallel_push_labels( // const DistGraph &G, // VertexID v_global, // std::vector<VertexID> &tmp_que, // VertexID &end_tmp_que, // std::vector< std::pair<VertexID, VertexID> > &sibling_es, // VertexID &num_sibling_es, // std::vector< std::pair<VertexID, VertexID> > &child_es, // VertexID &num_child_es, // std::vector<UnweightedDist> &dists, // UnweightedDist iter); // inline void bit_parallel_labeling( // const DistGraph &G, //// std::vector<IndexType> &L, // std::vector<uint8_t> &used_bp_roots); inline void batch_process( const DistGraph &G, // const VertexID b_id, const VertexID roots_start, const VertexID roots_size, const std::vector<uint8_t> &used_bp_roots, std::vector<VertexID> &active_queue, VertexID &end_active_queue, std::vector<VertexID> &got_candidates_queue, VertexID &end_got_candidates_queue, std::vector<ShortIndex> &short_index, std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::vector<VertexID> > &recved_dist_table, std::vector<BPLabelType> &bp_labels_table, std::vector<uint8_t> &got_candidates, // std::vector<bool> &got_candidates, std::vector<uint8_t> &is_active, // std::vector<bool> &is_active, std::vector<VertexID> &once_candidated_queue, 
            VertexID &end_once_candidated_queue,
            std::vector<uint8_t> &once_candidated);
//            std::vector<bool> &once_candidated);

    // Initialize all per-batch structures (short_index, dist_table,
    // recved_dist_table, bp_labels_table) for the roots in
    // [roots_start, roots_start + roots_size) and seed active_queue.
    // NOTE(review): the meaning of the VertexID return value is defined in the
    // implementation (not visible in this chunk) -- confirm before relying on it.
    inline VertexID initialization(
            const DistGraph &G,
            std::vector<ShortIndex> &short_index,
            std::vector< std::vector<UnweightedDist> > &dist_table,
            std::vector< std::vector<VertexID> > &recved_dist_table,
            std::vector<BPLabelType> &bp_labels_table,
            std::vector<VertexID> &active_queue,
            VertexID &end_active_queue,
            std::vector<VertexID> &once_candidated_queue,
            VertexID &end_once_candidated_queue,
            std::vector<uint8_t> &once_candidated,
//            std::vector<bool> &once_candidated,
//            VertexID b_id,
            VertexID roots_start,
            VertexID roots_size,
//            std::vector<VertexID> &roots_master_local,
            const std::vector<uint8_t> &used_bp_roots);
//    inline void push_single_label(
//            VertexID v_head_global,
//            VertexID label_root_id,
//            VertexID roots_start,
//            const DistGraph &G,
//            std::vector<ShortIndex> &short_index,
//            std::vector<VertexID> &got_candidates_queue,
//            VertexID &end_got_candidates_queue,
//            std::vector<bool> &got_candidates,
//            std::vector<VertexID> &once_candidated_queue,
//            VertexID &end_once_candidated_queue,
//            std::vector<bool> &once_candidated,
//            const std::vector<BPLabelType> &bp_labels_table,
//            const std::vector<uint8_t> &used_bp_roots,
//            UnweightedDist iter);

    // Drive the (parallel) label-pushing phase of one iteration: distribute
    // the active vertices' labels to neighbors, filling got_candidates_queue.
    inline void schedule_label_pushing_para(
            const DistGraph &G,
            const VertexID roots_start,
            const std::vector<uint8_t> &used_bp_roots,
            const std::vector<VertexID> &active_queue,
            const VertexID global_start,
            const VertexID global_size,
            const VertexID local_size,
//            const VertexID start_active_queue,
//            const VertexID size_active_queue,
            std::vector<VertexID> &got_candidates_queue,
            VertexID &end_got_candidates_queue,
            std::vector<ShortIndex> &short_index,
            const std::vector<BPLabelType> &bp_labels_table,
            std::vector<uint8_t> &got_candidates,
            std::vector<uint8_t> &is_active,
            std::vector<VertexID> &once_candidated_queue,
            VertexID &end_once_candidated_queue,
            std::vector<uint8_t> &once_candidated,
            const UnweightedDist iter);

    // Sequential variant: push the labels in labels_buffer[start_index,
    // bound_index) from v_head_global to its local neighbors.
    inline void local_push_labels_seq(
            VertexID v_head_global,
            EdgeID start_index,
            EdgeID bound_index,
            VertexID roots_start,
            const std::vector<VertexID> &labels_buffer,
            const DistGraph &G,
            std::vector<ShortIndex> &short_index,
            std::vector<VertexID> &got_candidates_queue,
            VertexID &end_got_candidates_queue,
            std::vector<uint8_t> &got_candidates,
//            std::vector<bool> &got_candidates,
            std::vector<VertexID> &once_candidated_queue,
            VertexID &end_once_candidated_queue,
            std::vector<uint8_t> &once_candidated,
//            std::vector<bool> &once_candidated,
            const std::vector<BPLabelType> &bp_labels_table,
            const std::vector<uint8_t> &used_bp_roots,
            const UnweightedDist iter);

    // Parallel variant of the above: results go into per-thread scratch
    // queues (tmp_*) at offset_tmp_queue instead of the shared queues.
    inline void local_push_labels_para(
            const VertexID v_head_global,
            const EdgeID start_index,
            const EdgeID bound_index,
            const VertexID roots_start,
            const std::vector<VertexID> &labels_buffer,
            const DistGraph &G,
            std::vector<ShortIndex> &short_index,
//            std::vector<VertexID> &got_candidates_queue,
//            VertexID &end_got_candidates_queue,
            std::vector<VertexID> &tmp_got_candidates_queue,
            VertexID &size_tmp_got_candidates_queue,
            const VertexID offset_tmp_queue,
            std::vector<uint8_t> &got_candidates,
//            std::vector<VertexID> &once_candidated_queue,
//            VertexID &end_once_candidated_queue,
            std::vector<VertexID> &tmp_once_candidated_queue,
            VertexID &size_tmp_once_candidated_queue,
            std::vector<uint8_t> &once_candidated,
            const std::vector<BPLabelType> &bp_labels_table,
            const std::vector<uint8_t> &used_bp_roots,
            const UnweightedDist iter);
//    inline void local_push_labels(
//            VertexID v_head_local,
//            VertexID roots_start,
//            const DistGraph &G,
//            std::vector<ShortIndex> &short_index,
//            std::vector<VertexID> &got_candidates_queue,
//            VertexID &end_got_candidates_queue,
//            std::vector<bool> &got_candidates,
//            std::vector<VertexID> &once_candidated_queue,
//            VertexID &end_once_candidated_queue,
//            std::vector<bool> &once_candidated,
//            const std::vector<BPLabelType> &bp_labels_table,
//            const std::vector<uint8_t> &used_bp_roots,
//            UnweightedDist iter);

    // Drive the (parallel) label-inserting phase: for every candidate in
    // got_candidates_queue[start, start + size), run the distance query and
    // collect accepted (vertex, root) label pairs into buffer_send.
    inline void schedule_label_inserting_para(
            const DistGraph &G,
            const VertexID roots_start,
            const VertexID roots_size,
            std::vector<ShortIndex> &short_index,
            const std::vector< std::vector<UnweightedDist> > &dist_table,
            const std::vector<VertexID> &got_candidates_queue,
            const VertexID start_got_candidates_queue,
            const VertexID size_got_candidates_queue,
            std::vector<uint8_t> &got_candidates,
            std::vector<VertexID> &active_queue,
            VertexID &end_active_queue,
            std::vector<uint8_t> &is_active,
            std::vector< std::pair<VertexID, VertexID> > &buffer_send,
            const VertexID iter);

    // Pruning check for candidate label (cand_root_id) at vertex v_id.
    // NOTE(review): whether `true` means "keep" or "prune" is decided in the
    // implementation (not visible here) -- confirm at the call sites.
    inline bool distance_query(
            VertexID cand_root_id,
            VertexID v_id,
            VertexID roots_start,
//            const std::vector<IndexType> &L,
            const std::vector< std::vector<UnweightedDist> > &dist_table,
            UnweightedDist iter);

    // Sequential label insertion: record the accepted (vertex, root) pair
    // for v_id_local into buffer_send for later exchange.
    inline void insert_label_only_seq(
            VertexID cand_root_id,
//            VertexID cand_root_id,
            VertexID v_id_local,
            VertexID roots_start,
            VertexID roots_size,
            const DistGraph &G,
//            std::vector< std::vector<UnweightedDist> > &dist_table,
            std::vector< std::pair<VertexID, VertexID> > &buffer_send);
//            UnweightedDist iter);

    // Parallel label insertion: append into the caller's per-thread scratch
    // buffer (tmp_buffer_send) at offset_tmp_buffer_send.
    inline void insert_label_only_para(
            VertexID cand_root_id,
            VertexID v_id_local,
            VertexID roots_start,
            VertexID roots_size,
            const DistGraph &G,
//            std::vector< std::pair<VertexID, VertexID> > &buffer_send)
            std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send,
            EdgeID &size_tmp_buffer_send,
            const EdgeID offset_tmp_buffer_send);

    // Record that v_id gained inserted_count labels in iteration iter
    // (updates L[v_id]'s batch/distance bookkeeping).
    inline void update_label_indices(
            const VertexID v_id,
            const VertexID inserted_count,
//            std::vector<IndexType> &L,
//            std::vector<ShortIndex> &short_index,
//            VertexID b_id,
            const UnweightedDist iter);

    // Reset the per-batch tables after a batch finishes, using
    // once_candidated_queue to touch only the dirtied entries.
    inline void reset_at_end(
            const DistGraph &G,
//            VertexID roots_start,
//            const std::vector<VertexID> &roots_master_local,
            std::vector< std::vector<UnweightedDist> > &dist_table,
            std::vector< std::vector<VertexID> > &recved_dist_table,
            std::vector<BPLabelType> &bp_labels_table,
            const std::vector<VertexID> &once_candidated_queue,
            const VertexID end_once_candidated_queue);

//    template <typename E_T, typename F>
//    inline void every_host_bcasts_buffer_and_proc(
//            std::vector<E_T> &buffer_send,
//            F &fun);

    // Broadcast `root`'s buffer_send to all hosts; every host receives it
    // into buffer_recv.
    template <typename E_T>
    inline void one_host_bcasts_buffer_to_buffer(
            int root,
            std::vector<E_T> &buffer_send,
            std::vector<E_T> &buffer_recv);

//    // Function: get the destination host id which is i hop from this host.
//    // For example, 1 hop from host 2 is host 0 (assume total 3 hosts);
//    // -1 hop from host 0 is host 2.
//    int hop_2_me_host_id(int hop) const
//    {
//        assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0);
//        return (host_id + hop + num_hosts) % num_hosts;
//    }
//    // Function: get the destination host id which is i hop from the root.
//    // For example, 1 hop from host 2 is host 0 (assume total 3 hosts);
//    // -1 hop from host 0 is host 2.
//    int hop_2_root_host_id(int hop, int root) const
//    {
//        assert(hop >= -(num_hosts - 1) && hop < num_hosts && hop != 0);
//        assert(root >= 0 && root < num_hosts);
//        return (root + hop + num_hosts) % num_hosts;
//    }

    // Total in-memory size (bytes) of all labels stored for local masters.
    size_t get_index_size()
    {
        size_t bytes = 0;
        for (VertexID v_i = 0; v_i < num_masters; ++v_i) {
            bytes += L[v_i].get_size_in_bytes();
        }
        return bytes;
    }

    // Test only
//    uint64_t normal_hit_count = 0;
//    uint64_t bp_hit_count = 0;
//    uint64_t total_check_count = 0;
//    uint64_t normal_check_count = 0;
//    uint64_t total_candidates_num = 0;
//    uint64_t set_candidates_num = 0;
//    double initializing_time = 0;
//    double candidating_time = 0;
//    double adding_time = 0;
//    double distance_query_time = 0;
//    double init_index_time = 0;
//    double init_dist_matrix_time = 0;
//    double init_start_reset_time = 0;
//    double init_indicators_time = 0;
    //L2CacheMissRate cache_miss;
//    double message_time = 0;
//    double bp_labeling_time = 0;
//    double initializing_time = 0;
//    double scatter_time = 0;
//    double gather_time = 0;
//    double clearup_time = 0;
//    TotalInstructsExe candidating_ins_count;
//
// TotalInstructsExe adding_ins_count;
//    TotalInstructsExe bp_labeling_ins_count;
//    TotalInstructsExe bp_checking_ins_count;
//    TotalInstructsExe dist_query_ins_count;
    // End test
public:
//    std::pair<uint64_t, uint64_t> length_larger_than_16 = std::make_pair(0, 0);
    DistBVCPLL() = default;
    // Build the whole distributed index for G: bit-parallel labeling first,
    // then batch-by-batch vertex-centric labeling (BATCH_SIZE roots a batch).
    explicit DistBVCPLL(
            const DistGraph &G);
//    UnweightedDist dist_distance_query_pair(
//            VertexID a_global,
//            VertexID b_global,
//            const DistGraph &G);
}; // class DistBVCPLL

// Constructor: runs the full labeling pipeline and prints summary statistics.
// All hosts execute this collectively (contains MPI collectives); every host
// keeps only the labels of its master vertices in L.
template <VertexID BATCH_SIZE>
DistBVCPLL<BATCH_SIZE>::
DistBVCPLL(
        const DistGraph &G)
{
    num_v = G.num_v;
    assert(num_v >= BATCH_SIZE);
    num_masters = G.num_masters;
    host_id = G.host_id;
//    {
//        if (1 == host_id) {
//            volatile int i = 0;
//            while (i == 0) {
//                sleep(5);
//            }
//        }
//    }
    num_hosts = G.num_hosts;
    V_ID_Type = G.V_ID_Type;
//    L.resize(num_v);
    L.resize(num_masters); // Labels are stored for local masters only.
    VertexID remainer = num_v % BATCH_SIZE; // size of the final partial batch
    VertexID b_i_bound = num_v / BATCH_SIZE;
    std::vector<uint8_t> used_bp_roots(num_v, 0);
    //cache_miss.measure_start();
    double time_labeling = -WallTimer::get_time_mark();

//    bp_labeling_time -= WallTimer::get_time_mark();
    bit_parallel_labeling(G, used_bp_roots);
//    bp_labeling_time += WallTimer::get_time_mark();
    {//test
//#ifdef DEBUG_MESSAGES_ON
        if (0 == host_id) {
            printf("host_id: %u bp_labeling_finished.\n", host_id);
        }
//#endif
    }

    std::vector<VertexID> active_queue(num_masters); // Any vertex v who is active should be put into this queue.
    VertexID end_active_queue = 0;
    std::vector<uint8_t> is_active(num_masters, false);// is_active[v] is true means vertex v is in the active queue.
//    std::vector<bool> is_active(num_masters, false);// is_active[v] is true means vertex v is in the active queue.
    std::vector<VertexID> got_candidates_queue(num_masters); // Any vertex v who got candidates should be put into this queue.
    VertexID end_got_candidates_queue = 0;
    std::vector<uint8_t> got_candidates(num_masters, false); // got_candidates[v] is true means vertex v is in the queue got_candidates_queue
//    std::vector<bool> got_candidates(num_masters, false); // got_candidates[v] is true means vertex v is in the queue got_candidates_queue
    std::vector<ShortIndex> short_index(num_masters);
    std::vector< std::vector<UnweightedDist> > dist_table(BATCH_SIZE, std::vector<UnweightedDist>(num_v, MAX_UNWEIGHTED_DIST));

    std::vector<VertexID> once_candidated_queue(num_masters); // if short_index[v].indicator.any() is true, v is in the queue.
    // Used mainly for resetting short_index[v].indicator.
    VertexID end_once_candidated_queue = 0;
    std::vector<uint8_t> once_candidated(num_masters, false);
//    std::vector<bool> once_candidated(num_masters, false);
    std::vector< std::vector<VertexID> > recved_dist_table(BATCH_SIZE); // Some distances are from other hosts. This is used to reset the dist_table.
    std::vector<BPLabelType> bp_labels_table(BATCH_SIZE); // All roots' bit-parallel labels
    //printf("b_i_bound: %u\n", b_i_bound);//test
    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
        {// Batch number limit
            // NOTE(review): hard cap at 64 batches -- remaining vertices are
            // never processed as roots (remainer is zeroed). Looks like a
            // leftover experiment switch; confirm whether it should remain.
            if (64 == b_i) {
                remainer = 0;
                break;
            }
        }
        {
//#ifdef DEBUG_MESSAGES_ON
            if (0 == host_id) {
                printf("b_i: %u\n", b_i);//test
            }
//#endif
        }
        batch_process(
                G,
//                b_i,
                b_i * BATCH_SIZE,
                BATCH_SIZE,
//                L,
                used_bp_roots,
                active_queue,
                end_active_queue,
                got_candidates_queue,
                end_got_candidates_queue,
                short_index,
                dist_table,
                recved_dist_table,
                bp_labels_table,
                got_candidates,
                is_active,
                once_candidated_queue,
                end_once_candidated_queue,
                once_candidated);
//        exit(EXIT_SUCCESS); //test
    }
    if (remainer != 0) {
        {
//#ifdef DEBUG_MESSAGES_ON
            if (0 == host_id) {
                printf("b_i: %u\n", b_i_bound);//test
            }
//#endif
        }
        batch_process(
                G,
//                b_i_bound,
                b_i_bound * BATCH_SIZE,
                remainer,
//                L,
                used_bp_roots,
                active_queue,
                end_active_queue,
                got_candidates_queue,
                end_got_candidates_queue,
                short_index,
                dist_table,
                recved_dist_table,
                bp_labels_table,
                got_candidates,
                is_active,
                once_candidated_queue,
                end_once_candidated_queue,
                once_candidated);
    }
    time_labeling += WallTimer::get_time_mark();
    //cache_miss.measure_stop();

    // Test
    setlocale(LC_NUMERIC, "");
    if (0 == host_id) {
        printf("BATCH_SIZE: %u\n", BATCH_SIZE);
        printf("BP_Size: %u\n", BITPARALLEL_SIZE);
    }
    {// Total Number of Labels
        EdgeID local_num_labels = 0;
        for (VertexID v_global = 0; v_global < num_v; ++v_global) {
            if (G.get_master_host_id(v_global) != host_id) {
                continue;
            }
            local_num_labels += L[G.get_local_vertex_id(v_global)].vertices.size();
        }
        EdgeID global_num_labels;
        MPI_Allreduce(&local_num_labels, &global_num_labels, 1, MPI_Instance::get_mpi_datatype<EdgeID>(), MPI_SUM, MPI_COMM_WORLD);
//        printf("host_id: %u local_num_labels: %lu %.2f%%\n", host_id, local_num_labels, 100.0 * local_num_labels / global_num_labels);
        MPI_Barrier(MPI_COMM_WORLD);
        if (0 == host_id) {
            printf("Global_num_labels: %lu average: %f\n", global_num_labels, 1.0 * global_num_labels / num_v);
        }
//        VertexID local_num_batches = 0;
//        VertexID local_num_distances = 0;
////        double local_avg_distances_per_batches = 0;
//        for (VertexID v_global = 0; v_global < num_v; ++v_global) {
//            if (G.get_master_host_id(v_global) != host_id) {
//                continue;
//            }
//            VertexID v_local = G.get_local_vertex_id(v_global);
//            local_num_batches += L[v_local].batches.size();
//            local_num_distances += L[v_local].distances.size();
////            double avg_d_p_b = 0;
////            for (VertexID i_b = 0; i_b < L[v_local].batches.size(); ++i_b) {
////                avg_d_p_b += L[v_local].batches[i_b].size;
////            }
////            avg_d_p_b /= L[v_local].batches.size();
////            local_avg_distances_per_batches += avg_d_p_b;
//        }
////        local_avg_distances_per_batches /= num_masters;
////        double local_avg_batches = local_num_batches * 1.0 / num_masters;
////        double local_avg_distances = local_num_distances * 1.0 / num_masters;
//        uint64_t global_num_batches = 0;
//        uint64_t global_num_distances = 0;
//        MPI_Allreduce(&local_num_batches, &global_num_batches, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
////        global_avg_batches /= num_hosts;
//        MPI_Allreduce(&local_num_distances, &global_num_distances, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD);
////        global_avg_distances /= num_hosts;
//        double global_avg_d_p_b = global_num_distances * 1.0 / global_num_batches;
//        double global_avg_l_p_d = global_num_labels * 1.0 / global_num_distances;
//        double global_avg_batches = global_num_batches / num_v;
//        double global_avg_distances = global_num_distances / num_v;
////        MPI_Allreduce(&local_avg_distances_per_batches, &global_avg_d_p_b, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
////        global_avg_d_p_b /= num_hosts;
//        MPI_Barrier(MPI_COMM_WORLD);
//        if (0 == host_id) {
//            printf("global_avg_batches: %f "
//                   "global_avg_distances: %f "
//                   "global_avg_distances_per_batch: %f "
//                   "global_avg_labels_per_distance: %f\n",
//                   global_avg_batches,
//                   global_avg_distances,
//                   global_avg_d_p_b,
//                   global_avg_l_p_d);
//        }
    }
//    printf("BP_labeling: %f %.2f%%\n", bp_labeling_time, bp_labeling_time / time_labeling * 100);
//    printf("Initializing: %f %.2f%%\n", initializing_time, initializing_time / time_labeling * 100);
//    printf("\tinit_start_reset_time: %f (%f%%)\n", init_start_reset_time, init_start_reset_time / initializing_time * 100);
//    printf("\tinit_index_time: %f (%f%%)\n", init_index_time, init_index_time / initializing_time * 100);
//    printf("\t\tinit_indicators_time: %f (%f%%)\n", init_indicators_time, init_indicators_time / init_index_time * 100);
//    printf("\tinit_dist_matrix_time: %f (%f%%)\n", init_dist_matrix_time, init_dist_matrix_time / initializing_time * 100);
//    printf("Candidating: %f %.2f%%\n", candidating_time, candidating_time / time_labeling * 100);
//    printf("Adding: %f %.2f%%\n", adding_time, adding_time / time_labeling * 100);
//    printf("distance_query_time: %f %.2f%%\n", distance_query_time, distance_query_time / time_labeling * 100);
//    uint64_t total_check_count = bp_hit_count + normal_check_count;
//    printf("total_check_count: %'llu\n", total_check_count);
//    printf("bp_hit_count: %'llu %.2f%%\n", bp_hit_count, bp_hit_count * 100.0 / total_check_count);
//    printf("normal_check_count: %'llu %.2f%%\n", normal_check_count, normal_check_count * 100.0 / total_check_count);
//    printf("total_candidates_num: %'llu set_candidates_num: %'llu %.2f%%\n",
//           total_candidates_num,
//           set_candidates_num,
//           set_candidates_num * 100.0 / total_candidates_num);
//    printf("\tnormal_hit_count (to total_check, to normal_check): %llu (%f%%, %f%%)\n",
//           normal_hit_count,
//           normal_hit_count * 100.0 / total_check_count,
//           normal_hit_count * 100.0 / (total_check_count - bp_hit_count));
    //cache_miss.print();
//    printf("Candidating: "); candidating_ins_count.print();
//    printf("Adding: "); adding_ins_count.print();
//    printf("BP_Labeling: "); bp_labeling_ins_count.print();
//    printf("BP_Checking: "); bp_checking_ins_count.print();
//    printf("distance_query: "); dist_query_ins_count.print();
//    printf("num_hosts: %u host_id: %u\n"
//           "Local_labeling_time: %.2f seconds\n"
//           "bp_labeling_time: %.2f %.2f%%\n"
//           "initializing_time: %.2f %.2f%%\n"
//           "scatter_time: %.2f %.2f%%\n"
//           "gather_time: %.2f %.2f%%\n"
//           "clearup_time: %.2f %.2f%%\n"
//           "message_time: %.2f %.2f%%\n",
//           num_hosts, host_id,
//           time_labeling,
//           bp_labeling_time, 100.0 * bp_labeling_time / time_labeling,
//           initializing_time, 100.0 * initializing_time / time_labeling,
//           scatter_time, 100.0 * scatter_time / time_labeling,
//           gather_time, 100.0 * gather_time / time_labeling,
//           clearup_time, 100.0 * clearup_time / time_labeling,
//           message_time, 100.0 * message_time / time_labeling);
    // The labeling time reported is the slowest host's (MPI_MAX).
    double global_time_labeling;
    MPI_Allreduce(&time_labeling, &global_time_labeling, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);
    if (0 == host_id) {
        printf("num_hosts: %d "
               "Global_labeling_time: %.2f seconds\n",
               num_hosts,
               global_time_labeling);
    }
    // End test
}
////        VertexID i_bound = G.vertices[r] - 1;
////        VertexID i_start = i_bound + G.out_degrees[r];
////        for (VertexID i = i_start; i > i_bound; --i) {
//        //int i_bound = G.vertices[r];
//        //int i_start = i_bound + G.out_degrees[r] - 1;
//        //for (int i = i_start; i >= i_bound; --i) {
//        VertexID d_i_bound = G.local_out_degrees[r];
//        EdgeID i_start = G.vertices_idx[r] + d_i_bound - 1;
//        for (VertexID d_i = 0; d_i < d_i_bound; ++d_i) {
//            EdgeID i = i_start - d_i;
//            VertexID v = G.out_edges[i];
//            if (!used_bp_roots[v]) {
//                used_bp_roots[v] = true;
//                // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set)
//                que[que_h++] = v;
//                tmp_d[v] = 1;
//                tmp_s[v].first = 1ULL << ns;
//                if (++ns == 64) break;
//            }
//        }
//        //}
////        }
//
//        for (UnweightedDist d = 0; que_t0 < que_h; ++d) {
//            VertexID num_sibling_es = 0, num_child_es = 0;
//
//            for (VertexID que_i = que_t0; que_i < que_t1; ++que_i) {
//                VertexID v = que[que_i];
////                bit_parallel_push_labels(G, v, que, que_h, sibling_es, num_sibling_es, child_es, num_child_es, tmp_d, d);
//                EdgeID i_start = G.vertices_idx[v];
//                EdgeID i_bound = i_start + G.local_out_degrees[v];
//                for (EdgeID i = i_start; i < i_bound; ++i) {
//                    VertexID tv = G.out_edges[i];
//                    UnweightedDist td = d + 1;
//
//                    if (d > tmp_d[tv]) {
//                        ;
//                    }
//                    else if (d == tmp_d[tv]) {
//                        if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph.
//                            sibling_es[num_sibling_es].first = v;
//                            sibling_es[num_sibling_es].second = tv;
//                            ++num_sibling_es;
//                        }
//                    } else { // d < tmp_d[tv]
//                        if (tmp_d[tv] == MAX_UNWEIGHTED_DIST) {
//                            que[que_h++] = tv;
//                            tmp_d[tv] = td;
//                        }
//                        child_es[num_child_es].first = v;
//                        child_es[num_child_es].second = tv;
//                        ++num_child_es;
//                    }
//                }
//            }
//
//            for (VertexID i = 0; i < num_sibling_es; ++i) {
//                VertexID v = sibling_es[i].first, w = sibling_es[i].second;
//                tmp_s[v].second |= tmp_s[w].first;
//                tmp_s[w].second |= tmp_s[v].first;
//            }
//            for (VertexID i = 0; i < num_child_es; ++i) {
//                VertexID v = child_es[i].first, c = child_es[i].second;
//                tmp_s[c].first |= tmp_s[v].first;
//                tmp_s[c].second |= tmp_s[v].second;
//            }
//
//            {// test
//                printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es);
////                if (4 == d) {
////                    exit(EXIT_SUCCESS);
////                }
//            }
//
//            que_t0 = que_t1;
//            que_t1 = que_h;
//        }
//
//        for (VertexID v = 0; v < num_v; ++v) {
//            L[v].bp_dist[i_bpspt] = tmp_d[v];
//            L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1}
//            L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1}
//        }
//    }
//
//}

// Scan the local out-edges of v_global during level `iter` of the
// bit-parallel BFS and classify each neighbor:
//  - same level (iter == dist): record a "sibling" edge,
//  - next level (iter < dist): record a "child" edge and, via CAS, try to
//    claim an unvisited neighbor for the next-level queue.
// Results are appended to the caller's per-message scratch regions
// (tmp_q / tmp_sibling_es / tmp_child_es) starting at offset_tmp_q; counts
// are returned through the size_* out-parameters. Called from an OpenMP
// parallel loop: the only cross-thread synchronization is the CAS on dists.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
bit_parallel_push_labels(
        const DistGraph &G,
        const VertexID v_global,
//        std::vector<VertexID> &tmp_que,
//        VertexID &end_tmp_que,
//        std::vector< std::pair<VertexID, VertexID> > &sibling_es,
//        VertexID &num_sibling_es,
//        std::vector< std::pair<VertexID, VertexID> > &child_es,
//        VertexID &num_child_es,
        std::vector<VertexID> &tmp_q,
        VertexID &size_tmp_q,
        std::vector< std::pair<VertexID, VertexID> > &tmp_sibling_es,
        VertexID &size_tmp_sibling_es,
        std::vector< std::pair<VertexID, VertexID> > &tmp_child_es,
        VertexID &size_tmp_child_es,
        const VertexID &offset_tmp_q,
        std::vector<UnweightedDist> &dists,
        const UnweightedDist iter)
{
    EdgeID i_start = G.vertices_idx[v_global];
    EdgeID i_bound = i_start + G.local_out_degrees[v_global];
//    {//test
//        printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]);
//    }
    for (EdgeID i = i_start; i < i_bound; ++i) {
        VertexID tv_global = G.out_edges[i];
        VertexID tv_local = G.get_local_vertex_id(tv_global);
        UnweightedDist td = iter + 1;

        if (iter > dists[tv_local]) {
            ; // Neighbor is on an earlier level: nothing to record.
        } else if (iter == dists[tv_local]) {
            // v_global < tv_global keeps each undirected sibling edge from
            // being recorded twice (once from each endpoint).
            if (v_global < tv_global) {
                tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].first = v_global;
                tmp_sibling_es[offset_tmp_q + size_tmp_sibling_es].second = tv_global;
                ++size_tmp_sibling_es;
//                sibling_es[num_sibling_es].first = v_global;
//                sibling_es[num_sibling_es].second = tv_global;
//                ++num_sibling_es;
            }
        } else { // iter < dists[tv]
            if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
                // CAS so that exactly one thread claims tv for the queue.
                if (CAS(dists.data() + tv_local, MAX_UNWEIGHTED_DIST, td)) {
                    tmp_q[offset_tmp_q + size_tmp_q++] = tv_global;
                }
            }
//            if (dists[tv_local] == MAX_UNWEIGHTED_DIST) {
//                tmp_que[end_tmp_que++] = tv_global;
//                dists[tv_local] = td;
//            }
            tmp_child_es[offset_tmp_q + size_tmp_child_es].first = v_global;
            tmp_child_es[offset_tmp_q + size_tmp_child_es].second = tv_global;
            ++size_tmp_child_es;
//            child_es[num_child_es].first = v_global;
//            child_es[num_child_es].second = tv_global;
//            ++num_child_es;
        }
    }
}

// Build BITPARALLEL_SIZE bit-parallel labels cooperatively across all hosts.
// Per label: host 0 picks an unused root r, broadcasts it, up to 64 of r's
// neighbors are selected globally, then a distributed level-synchronous BFS
// runs; tmp_s[v] holds (S_r^{-1}, S_r^{0}) bit sets that are merged over
// sibling/child edges and exchanged between hosts each level. Finally each
// host writes bp_dist / bp_sets for its master vertices.
// Contains MPI collectives: all hosts must call this together.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
bit_parallel_labeling(
        const DistGraph &G,
//        std::vector<IndexType> &L,
        std::vector<uint8_t> &used_bp_roots)
{
    // Class type of Bit-Parallel label message unit.
    struct MsgUnitBP {
        VertexID v_global;
        uint64_t S_n1; // S_r^{-1}
        uint64_t S_0;  // S_r^{0}

        MsgUnitBP() = default;
//        MsgUnitBP(MsgUnitBP&& other) = default;
//        MsgUnitBP(MsgUnitBP& other) = default;
//        MsgUnitBP& operator=(const MsgUnitBP& other) = default;
//        MsgUnitBP& operator=(MsgUnitBP&& other) = default;
        MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0)
            : v_global(v), S_n1(sn1), S_0(s0) { }
    };
//    VertexID num_v = G.num_v;
//    EdgeID num_e = G.num_e;
    EdgeID local_num_edges = G.num_edges_local;

    std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v
    std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0}
    std::vector<VertexID> que(num_masters); // active queue
    VertexID end_que = 0;
    std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que
    VertexID end_tmp_que = 0;
    std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0)
    std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1.

    VertexID r_global = 0; // root r
    for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) {
//        {// test
//            if (0 == host_id) {
//                printf("i_bpsp: %u\n", i_bpspt);
//            }
//        }
        // Select the root r_global
        if (0 == host_id) {
            while (r_global < num_v && used_bp_roots[r_global]) {
                ++r_global;
            }
            if (r_global == num_v) {
                // NOTE(review): L holds num_masters entries (the constructor
                // does L.resize(num_masters)), yet this loop writes L[v] for
                // v in [0, num_v) -- possible out-of-bounds. Also only host 0
                // takes this `continue` while the other hosts fall through to
                // the MPI_Bcast below, which would desynchronize the
                // collective. Verify this exhausted-roots path.
                for (VertexID v = 0; v < num_v; ++v) {
                    L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST;
                }
                continue;
            }
        }
        // Broadcast the r here.
//        message_time -= WallTimer::get_time_mark();
        MPI_Bcast(&r_global, 1, V_ID_Type, 0, MPI_COMM_WORLD);
//        message_time += WallTimer::get_time_mark();
        used_bp_roots[r_global] = 1;
//#ifdef DEBUG_MESSAGES_ON
//        {//test
//            if (0 == host_id) {
//                printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt);
//            }
//        }
//#endif

//        VertexID que_t0 = 0, que_t1 = 0, que_h = 0;
        fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST);
        fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0));

        // Mark the r_global
        if (G.get_master_host_id(r_global) == host_id) {
            tmp_d[G.get_local_vertex_id(r_global)] = 0;
            que[end_que++] = r_global;
        }
        // Select the r_global's 64 neighbors
        {
            // Get r_global's neighbors into buffer_send, rank from high to low.
            VertexID local_degree = G.local_out_degrees[r_global];
            std::vector<VertexID> buffer_send(local_degree);
            if (local_degree) {
                EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1;
                for (VertexID d_i = 0; d_i < local_degree; ++d_i) {
                    EdgeID e_i = e_i_start - d_i;
                    buffer_send[d_i] = G.out_edges[e_i];
                }
            }
            // Get selected neighbors (up to 64)
            std::vector<VertexID> selected_nbrs;
            if (0 != host_id) {
                // Every host other than 0 sends neighbors to host 0
//                message_time -= WallTimer::get_time_mark();
                MPI_Instance::send_buffer_2_dst(buffer_send, 0, SENDING_ROOT_NEIGHBORS, SENDING_SIZE_ROOT_NEIGHBORS);
                // Receive selected neighbors from host 0
                MPI_Instance::recv_buffer_from_src(selected_nbrs, 0, SENDING_SELECTED_NEIGHBORS, SENDING_SIZE_SELETED_NEIGHBORS);
//                message_time += WallTimer::get_time_mark();
            } else {
                // Host 0 receives neighbors from others
                std::vector<VertexID> all_nbrs(buffer_send);
                std::vector<VertexID > buffer_recv;
                for (int loc = 0; loc < num_hosts - 1; ++loc) {
//                    message_time -= WallTimer::get_time_mark();
                    MPI_Instance::recv_buffer_from_any(buffer_recv, SENDING_ROOT_NEIGHBORS, SENDING_SIZE_ROOT_NEIGHBORS);
//                    message_time += WallTimer::get_time_mark();
                    if (buffer_recv.empty()) {
                        continue;
                    }
                    // Merge the received chunk into all_nbrs (buffer_send is scratch).
                    buffer_send.resize(buffer_send.size() + buffer_recv.size());
                    std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin());
                    all_nbrs.resize(buffer_send.size());
                    all_nbrs.assign(buffer_send.begin(), buffer_send.end());
                }
                assert(all_nbrs.size() == G.get_global_out_degree(r_global));
                // Select 64 (or less) neighbors
                VertexID ns = 0; // number of selected neighbor, default 64
                for (VertexID v_global : all_nbrs) {
                    if (used_bp_roots[v_global]) {
                        continue;
                    }
                    used_bp_roots[v_global] = 1;
                    selected_nbrs.push_back(v_global);
                    if (++ns == 64) {
                        break;
                    }
                }
                // Send selected neighbors to other hosts
//                message_time -= WallTimer::get_time_mark();
                for (int dest = 1; dest < num_hosts; ++dest) {
                    MPI_Instance::send_buffer_2_dst(selected_nbrs, dest, SENDING_SELECTED_NEIGHBORS, SENDING_SIZE_SELETED_NEIGHBORS);
                }
//                message_time += WallTimer::get_time_mark();
            }
//            {//test
//                printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size());
//            }
            // Synchronize the used_bp_roots.
            for (VertexID v_global : selected_nbrs) {
                used_bp_roots[v_global] = 1;
            }
            // Mark selected neighbors: distance 1, bit v_i in S_r^{-1}.
            for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) {
                VertexID v_global = selected_nbrs[v_i];
                if (host_id != G.get_master_host_id(v_global)) {
                    continue;
                }
                tmp_que[end_tmp_que++] = v_global;
                tmp_d[G.get_local_vertex_id(v_global)] = 1;
                tmp_s[v_global].first = 1ULL << v_i;
            }
        }

        // Reduce the global number of active vertices
        VertexID global_num_actives = 1;
        UnweightedDist d = 0;
        while (global_num_actives) {
//#ifdef DEBUG_MESSAGES_ON
//            {//test
//                if (0 == host_id) {
//                    printf("d: %u que_size: %u\n", d, global_num_actives);
//                }
//            }
//#endif
//        for (UnweightedDist d = 0; que_t0 < que_h; ++d) {
            VertexID num_sibling_es = 0, num_child_es = 0;

            // Send active masters to mirrors
            {
                std::vector<MsgUnitBP> buffer_send(end_que);
                for (VertexID que_i = 0; que_i < end_que; ++que_i) {
                    VertexID v_global = que[que_i];
                    buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second);
                }
//                {// test
//                    printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size());
//                }
                for (int root = 0; root < num_hosts; ++root) {
                    std::vector<MsgUnitBP> buffer_recv;
                    one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv);
                    if (buffer_recv.empty()) {
                        continue;
                    }
                    // For parallel adding to queue: one scratch region per
                    // received message, sized by the local out-degree.
                    VertexID size_buffer_recv = buffer_recv.size();
                    std::vector<VertexID> offsets_tmp_q(size_buffer_recv);
#pragma omp parallel for
                    for (VertexID i_q = 0; i_q < size_buffer_recv; ++i_q) {
                        offsets_tmp_q[i_q] = G.local_out_degrees[buffer_recv[i_q].v_global];
                    }
                    VertexID num_neighbors = PADO::prefix_sum_for_offsets(offsets_tmp_q);
                    std::vector<VertexID> tmp_q(num_neighbors);
                    std::vector<VertexID> sizes_tmp_q(size_buffer_recv, 0);
                    // For parallel adding to sibling_es
                    std::vector< std::pair<VertexID, VertexID> > tmp_sibling_es(num_neighbors);
                    std::vector<VertexID> sizes_tmp_sibling_es(size_buffer_recv, 0);
                    // For parallel adding to child_es
                    std::vector< std::pair<VertexID, VertexID> > tmp_child_es(num_neighbors);
                    std::vector<VertexID> sizes_tmp_child_es(size_buffer_recv, 0);
#pragma omp parallel for
//                    for (const MsgUnitBP &m : buffer_recv) {
                    for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) {
                        const MsgUnitBP &m = buffer_recv[i_m];
                        VertexID v_global = m.v_global;
                        if (!G.local_out_degrees[v_global]) {
                            continue;
                        }
                        tmp_s[v_global].first = m.S_n1;
                        tmp_s[v_global].second = m.S_0;
                        // Push labels
                        bit_parallel_push_labels(
                                G,
                                v_global,
                                tmp_q,
                                sizes_tmp_q[i_m],
                                tmp_sibling_es,
                                sizes_tmp_sibling_es[i_m],
                                tmp_child_es,
                                sizes_tmp_child_es[i_m],
                                offsets_tmp_q[i_m],
//                                tmp_que,
//                                end_tmp_que,
//                                sibling_es,
//                                num_sibling_es,
//                                child_es,
//                                num_child_es,
                                tmp_d,
                                d);
                    }
                    {// From tmp_sibling_es to sibling_es
                        idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_sibling_es);
                        PADO::collect_into_queue(
                                tmp_sibling_es,
                                offsets_tmp_q,
                                sizes_tmp_sibling_es,
                                total_size_tmp,
                                sibling_es,
                                num_sibling_es);
                    }
                    {// From tmp_child_es to child_es
                        idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_child_es);
                        PADO::collect_into_queue(
                                tmp_child_es,
                                offsets_tmp_q,
                                sizes_tmp_child_es,
                                total_size_tmp,
                                child_es,
                                num_child_es);
                    }
                    {// From tmp_q to tmp_que
                        idi total_size_tmp = PADO::prefix_sum_for_offsets(sizes_tmp_q);
                        PADO::collect_into_queue(
                                tmp_q,
                                offsets_tmp_q,
                                sizes_tmp_q,
                                total_size_tmp,
                                tmp_que,
                                end_tmp_que);
                    }
//                    {// test
//                        printf("host_id: %u root: %u done push.\n", host_id, root);
//                    }
                }
            }
            // Update the sets in tmp_s
            {
#pragma omp parallel for
                for (VertexID i = 0; i < num_sibling_es; ++i) {
                    VertexID v = sibling_es[i].first, w = sibling_es[i].second;
                    __atomic_or_fetch(&tmp_s[v].second, tmp_s[w].first, __ATOMIC_SEQ_CST);
                    __atomic_or_fetch(&tmp_s[w].second, tmp_s[v].first, __ATOMIC_SEQ_CST);
//                    tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!!
//                    tmp_s[w].second |= tmp_s[v].first;
                }
                // Put into the buffer sending to others
                std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es);
#pragma omp parallel for
                for (VertexID i = 0; i < num_sibling_es; ++i) {
                    VertexID v = sibling_es[i].first;
                    VertexID w = sibling_es[i].second;
                    buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second);
                    buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second);
                }
                // Send the messages
                for (int root = 0; root < num_hosts; ++root) {
                    std::vector< std::pair<VertexID, uint64_t> > buffer_recv;
                    one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv);
                    if (buffer_recv.empty()) {
                        continue;
                    }
                    size_t i_m_bound = buffer_recv.size();
#pragma omp parallel for
                    for (size_t i_m = 0; i_m < i_m_bound; ++i_m) {
                        const auto &m = buffer_recv[i_m];
                        __atomic_or_fetch(&tmp_s[m.first].second, m.second, __ATOMIC_SEQ_CST);
                    }
//                    for (const std::pair<VertexID, uint64_t> &m : buffer_recv) {
//                        tmp_s[m.first].second |= m.second;
//                    }
                }
#pragma omp parallel for
                for (VertexID i = 0; i < num_child_es; ++i) {
                    VertexID v = child_es[i].first, c = child_es[i].second;
                    __atomic_or_fetch(&tmp_s[c].first, tmp_s[v].first, __ATOMIC_SEQ_CST);
                    __atomic_or_fetch(&tmp_s[c].second, tmp_s[v].second, __ATOMIC_SEQ_CST);
//                    tmp_s[c].first |= tmp_s[v].first;
//                    tmp_s[c].second |= tmp_s[v].second;
                }
            }
//#ifdef DEBUG_MESSAGES_ON
//            {// test
//                VertexID global_num_sibling_es;
//                VertexID global_num_child_es;
//                MPI_Allreduce(&num_sibling_es, &global_num_sibling_es, 1, V_ID_Type, MPI_SUM, MPI_COMM_WORLD);
//                MPI_Allreduce(&num_child_es, &global_num_child_es, 1, V_ID_Type, MPI_SUM, MPI_COMM_WORLD);
//                if (0 == host_id) {
//                    printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es);
//                }
////                printf("iter %u @%u host_id: %u num_sibling_es: %u num_child_es: %u\n", d, __LINE__, host_id, num_sibling_es, num_child_es);
////                if (0 == d) {
////                    exit(EXIT_SUCCESS);
////                }
//            }
//#endif
            // Swap que and tmp_que
            tmp_que.swap(que);
            end_que = end_tmp_que;
            end_tmp_que = 0;
            // NOTE(review): termination uses MPI_MAX over local queue sizes --
            // fine as a "any host still active?" flag, but global_num_actives
            // is then not a global count despite its name; verify intent.
            MPI_Allreduce(&end_que, &global_num_actives, 1, V_ID_Type, MPI_MAX, MPI_COMM_WORLD);
//        }
            ++d;
        }

#pragma omp parallel for
        for (VertexID v_local = 0; v_local < num_masters; ++v_local) {
            VertexID v_global = G.get_global_vertex_id(v_local);
            L[v_local].bp_dist[i_bpspt] = tmp_d[v_local];
            L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1}
            L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1}
        }
    }
}

//template <VertexID BATCH_SIZE>
//inline void DistBVCPLL<BATCH_SIZE>::
//bit_parallel_push_labels(
//        const DistGraph &G,
//        const VertexID v_global,
//        std::vector<VertexID> &tmp_que,
//        VertexID &end_tmp_que,
//        std::vector< std::pair<VertexID, VertexID> > &sibling_es,
//        VertexID &num_sibling_es,
//        std::vector< std::pair<VertexID, VertexID> > &child_es,
//        VertexID &num_child_es,
//        std::vector<UnweightedDist> &dists,
//        const UnweightedDist iter)
//{
//    EdgeID i_start = G.vertices_idx[v_global];
//    EdgeID i_bound = i_start + G.local_out_degrees[v_global];
////    {//test
////
printf("host_id: %u local_out_degrees[%u]: %u\n", host_id, v_global, G.local_out_degrees[v_global]); //// } // for (EdgeID i = i_start; i < i_bound; ++i) { // VertexID tv_global = G.out_edges[i]; // VertexID tv_local = G.get_local_vertex_id(tv_global); // UnweightedDist td = iter + 1; // // if (iter > dists[tv_local]) { // ; // } else if (iter == dists[tv_local]) { // if (v_global < tv_global) { // ??? Why need v < tv !!! Because it's a undirected graph. // sibling_es[num_sibling_es].first = v_global; // sibling_es[num_sibling_es].second = tv_global; // ++num_sibling_es; // } // } else { // iter < dists[tv] // if (dists[tv_local] == MAX_UNWEIGHTED_DIST) { // tmp_que[end_tmp_que++] = tv_global; // dists[tv_local] = td; // } // child_es[num_child_es].first = v_global; // child_es[num_child_es].second = tv_global; // ++num_child_es; //// { //// printf("host_id: %u num_child_es: %u v_global: %u tv_global: %u\n", host_id, num_child_es, v_global, tv_global);//test //// } // } // } // //} // //template <VertexID BATCH_SIZE> //inline void DistBVCPLL<BATCH_SIZE>:: //bit_parallel_labeling( // const DistGraph &G, //// std::vector<IndexType> &L, // std::vector<uint8_t> &used_bp_roots) //{ // // Class type of Bit-Parallel label message unit. 
// struct MsgUnitBP { // VertexID v_global; // uint64_t S_n1; // uint64_t S_0; // // MsgUnitBP() = default; //// MsgUnitBP(MsgUnitBP&& other) = default; //// MsgUnitBP(MsgUnitBP& other) = default; //// MsgUnitBP& operator=(const MsgUnitBP& other) = default; //// MsgUnitBP& operator=(MsgUnitBP&& other) = default; // MsgUnitBP(VertexID v, uint64_t sn1, uint64_t s0) // : v_global(v), S_n1(sn1), S_0(s0) { } // }; //// VertexID num_v = G.num_v; //// EdgeID num_e = G.num_e; // EdgeID local_num_edges = G.num_edges_local; // // std::vector<UnweightedDist> tmp_d(num_masters); // distances from the root to every v // std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} // std::vector<VertexID> que(num_masters); // active queue // VertexID end_que = 0; // std::vector<VertexID> tmp_que(num_masters); // temporary queue, to be swapped with que // VertexID end_tmp_que = 0; // std::vector<std::pair<VertexID, VertexID> > sibling_es(local_num_edges); // siblings, their distances to the root are equal (have difference of 0) // std::vector<std::pair<VertexID, VertexID> > child_es(local_num_edges); // child and father, their distances to the root have difference of 1. // //// std::vector<UnweightedDist> tmp_d(num_v); // distances from the root to every v //// std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} //// std::vector<VertexID> que(num_v); // active queue //// std::vector<std::pair<VertexID, VertexID> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0) //// std::vector<std::pair<VertexID, VertexID> > child_es(num_e); // child and father, their distances to the root have difference of 1. 
// // VertexID r_global = 0; // root r // for (VertexID i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { // // Select the root r_global // if (0 == host_id) { // while (r_global < num_v && used_bp_roots[r_global]) { // ++r_global; // } // if (r_global == num_v) { // for (VertexID v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = MAX_UNWEIGHTED_DIST; // } // continue; // } // } // // Broadcast the r here. // message_time -= WallTimer::get_time_mark(); // MPI_Bcast(&r_global, // 1, // V_ID_Type, // 0, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // used_bp_roots[r_global] = 1; //#ifdef DEBUG_MESSAGES_ON // {//test // if (0 == host_id) { // printf("r_global: %u i_bpspt: %u\n", r_global, i_bpspt); // } // } //#endif // //// VertexID que_t0 = 0, que_t1 = 0, que_h = 0; // fill(tmp_d.begin(), tmp_d.end(), MAX_UNWEIGHTED_DIST); // fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); // // // Mark the r_global // if (G.get_master_host_id(r_global) == host_id) { // tmp_d[G.get_local_vertex_id(r_global)] = 0; // que[end_que++] = r_global; // } // // Select the r_global's 64 neighbors // { // // Get r_global's neighbors into buffer_send, rank from low to high. 
// VertexID local_degree = G.local_out_degrees[r_global]; // std::vector<VertexID> buffer_send(local_degree); // if (local_degree) { // EdgeID e_i_start = G.vertices_idx[r_global] + local_degree - 1; // for (VertexID d_i = 0; d_i < local_degree; ++d_i) { // EdgeID e_i = e_i_start - d_i; // buffer_send[d_i] = G.out_edges[e_i]; // } // } // // // Get selected neighbors (up to 64) // std::vector<VertexID> selected_nbrs; // if (0 != host_id) { // // Every host other than 0 sends neighbors to host 0 // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // 0, // SENDING_ROOT_NEIGHBORS, // SENDING_SIZE_ROOT_NEIGHBORS); // // Receive selected neighbors from host 0 // MPI_Instance::recv_buffer_from_src(selected_nbrs, // 0, // SENDING_SELECTED_NEIGHBORS, // SENDING_SIZE_SELETED_NEIGHBORS); // message_time += WallTimer::get_time_mark(); // } else { // // Host 0 // // Host 0 receives neighbors from others // std::vector<VertexID> all_nbrs(buffer_send); // std::vector<VertexID > buffer_recv; // for (int loc = 0; loc < num_hosts - 1; ++loc) { // message_time -= WallTimer::get_time_mark(); // MPI_Instance::recv_buffer_from_any(buffer_recv, // SENDING_ROOT_NEIGHBORS, // SENDING_SIZE_ROOT_NEIGHBORS); //// MPI_Instance::receive_dynamic_buffer_from_any(buffer_recv, //// num_hosts, //// SENDING_ROOT_NEIGHBORS); // message_time += WallTimer::get_time_mark(); // if (buffer_recv.empty()) { // continue; // } // // buffer_send.resize(buffer_send.size() + buffer_recv.size()); // std::merge(buffer_recv.begin(), buffer_recv.end(), all_nbrs.begin(), all_nbrs.end(), buffer_send.begin()); // all_nbrs.resize(buffer_send.size()); // all_nbrs.assign(buffer_send.begin(), buffer_send.end()); // } // assert(all_nbrs.size() == G.get_global_out_degree(r_global)); // // Select 64 (or less) neighbors // VertexID ns = 0; // number of selected neighbor, default 64 // for (VertexID v_global : all_nbrs) { // if (used_bp_roots[v_global]) { // continue; // } // 
used_bp_roots[v_global] = 1; // selected_nbrs.push_back(v_global); // if (++ns == 64) { // break; // } // } // // Send selected neighbors to other hosts // message_time -= WallTimer::get_time_mark(); // for (int dest = 1; dest < num_hosts; ++dest) { // MPI_Instance::send_buffer_2_dst(selected_nbrs, // dest, // SENDING_SELECTED_NEIGHBORS, // SENDING_SIZE_SELETED_NEIGHBORS); // } // message_time += WallTimer::get_time_mark(); // } //// {//test //// printf("host_id: %u selected_nbrs.size(): %lu\n", host_id, selected_nbrs.size()); //// } // // // Synchronize the used_bp_roots. // for (VertexID v_global : selected_nbrs) { // used_bp_roots[v_global] = 1; // } // // // Mark selected neighbors // for (VertexID v_i = 0; v_i < selected_nbrs.size(); ++v_i) { // VertexID v_global = selected_nbrs[v_i]; // if (host_id != G.get_master_host_id(v_global)) { // continue; // } // tmp_que[end_tmp_que++] = v_global; // tmp_d[G.get_local_vertex_id(v_global)] = 1; // tmp_s[v_global].first = 1ULL << v_i; // } // } // // // Reduce the global number of active vertices // VertexID global_num_actives = 1; // UnweightedDist d = 0; // while (global_num_actives) { //// for (UnweightedDist d = 0; que_t0 < que_h; ++d) { // VertexID num_sibling_es = 0, num_child_es = 0; // // // // Send active masters to mirrors // { // std::vector<MsgUnitBP> buffer_send(end_que); // for (VertexID que_i = 0; que_i < end_que; ++que_i) { // VertexID v_global = que[que_i]; // buffer_send[que_i] = MsgUnitBP(v_global, tmp_s[v_global].first, tmp_s[v_global].second); // } //// {// test //// printf("host_id: %u buffer_send.size(): %lu\n", host_id, buffer_send.size()); //// } // // for (int root = 0; root < num_hosts; ++root) { // std::vector<MsgUnitBP> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const MsgUnitBP &m : buffer_recv) { // VertexID v_global = m.v_global; // if (!G.local_out_degrees[v_global]) { // continue; // } 
// tmp_s[v_global].first = m.S_n1; // tmp_s[v_global].second = m.S_0; // // Push labels // bit_parallel_push_labels(G, // v_global, // tmp_que, // end_tmp_que, // sibling_es, // num_sibling_es, // child_es, // num_child_es, // tmp_d, // d); // } //// {// test //// printf("host_id: %u root: %u done push.\n", host_id, root); //// } // } // } // // // Update the sets in tmp_s // { // // for (VertexID i = 0; i < num_sibling_es; ++i) { // VertexID v = sibling_es[i].first, w = sibling_es[i].second; // tmp_s[v].second |= tmp_s[w].first; // !!! Need to send back!!! // tmp_s[w].second |= tmp_s[v].first; // // } // // Put into the buffer sending to others // std::vector< std::pair<VertexID, uint64_t> > buffer_send(2 * num_sibling_es); //// std::vector< std::vector<MPI_Request> > requests_list(num_hosts - 1); // for (VertexID i = 0; i < num_sibling_es; ++i) { // VertexID v = sibling_es[i].first; // VertexID w = sibling_es[i].second; //// buffer_send.emplace_back(v, tmp_s[v].second); //// buffer_send.emplace_back(w, tmp_s[w].second); // buffer_send[2 * i] = std::make_pair(v, tmp_s[v].second); // buffer_send[2 * i + 1] = std::make_pair(w, tmp_s[w].second); // } // // Send the messages // for (int root = 0; root < num_hosts; ++root) { // std::vector< std::pair<VertexID, uint64_t> > buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const std::pair<VertexID, uint64_t> &m : buffer_recv) { // tmp_s[m.first].second |= m.second; // } // } // for (VertexID i = 0; i < num_child_es; ++i) { // VertexID v = child_es[i].first, c = child_es[i].second; // tmp_s[c].first |= tmp_s[v].first; // tmp_s[c].second |= tmp_s[v].second; // } // } ////#ifdef DEBUG_MESSAGES_ON // {// test // VertexID global_num_sibling_es; // VertexID global_num_child_es; // MPI_Allreduce(&num_sibling_es, // &global_num_sibling_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // MPI_Allreduce(&num_child_es, // 
&global_num_child_es, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // if (0 == host_id) { // printf("iter: %u num_sibling_es: %u num_child_es: %u\n", d, global_num_sibling_es, global_num_child_es); // } // } ////#endif // // // Swap que and tmp_que // tmp_que.swap(que); // end_que = end_tmp_que; // end_tmp_que = 0; // MPI_Allreduce(&end_que, // &global_num_actives, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // //// } // ++d; // } // // for (VertexID v_local = 0; v_local < num_masters; ++v_local) { // VertexID v_global = G.get_global_vertex_id(v_local); // L[v_local].bp_dist[i_bpspt] = tmp_d[v_local]; // L[v_local].bp_sets[i_bpspt][0] = tmp_s[v_global].first; // S_r^{-1} // L[v_local].bp_sets[i_bpspt][1] = tmp_s[v_global].second & ~tmp_s[v_global].first; // Only need those r's neighbors who are not already in S_r^{-1} // } // } //} //// Function bit parallel checking: //// return false if shortest distance exits in bp labels, return true if bp labels cannot cover the distance //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline bool DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>::bit_parallel_checking( // VertexID v_id, // VertexID w_id, // const std::vector<IndexType> &L, // UnweightedDist iter) //{ // // Bit Parallel Checking: if label_real_id to v_tail has shorter distance already // const IndexType &Lv = L[v_id]; // const IndexType &Lw = L[w_id]; // // _mm_prefetch(&Lv.bp_dist[0], _MM_HINT_T0); // _mm_prefetch(&Lv.bp_sets[0][0], _MM_HINT_T0); // _mm_prefetch(&Lw.bp_dist[0], _MM_HINT_T0); // _mm_prefetch(&Lw.bp_sets[0][0], _MM_HINT_T0); // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = Lv.bp_dist[i] + Lw.bp_dist[i]; // Use type VertexID in case of addition of two INF. // if (td - 2 <= iter) { // td += // (Lv.bp_sets[i][0] & Lw.bp_sets[i][0]) ? -2 : // ((Lv.bp_sets[i][0] & Lw.bp_sets[i][1]) | // (Lv.bp_sets[i][1] & Lw.bp_sets[i][0])) // ? 
-1 : 0;
//            if (td <= iter) {
////                ++bp_hit_count;
//                return false;
//            }
//        }
//    }
//
//    return true;
//}

// Function for initializing at the begin of a batch.
// For the batch of roots [roots_start, roots_start + roots_size):
//  1. Reset the short_index indicators of every vertex touched by the previous
//     batch, then mark each local root's own bit in its indicator.
//  2. Append the trivial label (root itself, dist 0) to every local root's real
//     index L[r].
//  3. Broadcast all local roots' existing labels to every host so each host can
//     fill dist_table; what was received is recorded in recved_dist_table for a
//     later reset.
//  4. Broadcast the roots' bit-parallel labels to fill bp_labels_table on every
//     host.
//  5. Enqueue all local roots into active_queue.
// Each phase picks an OpenMP-parallel or a sequential loop depending on whether
// the trip count reaches THRESHOLD_PARALLEL.
// Returns the MPI reduction over every host's end_active_queue (see the
// NOTE(review) at the bottom about MPI_MAX vs. MPI_SUM).
// Every allocation-heavy phase is wrapped in a try/catch(bad_alloc) that prints
// the index size plus system memory and aborts, to diagnose OOM per phase.
template <VertexID BATCH_SIZE>
inline VertexID DistBVCPLL<BATCH_SIZE>::
initialization(
        const DistGraph &G,                                       // distributed graph (master/mirror mapping, degrees)
        std::vector<ShortIndex> &short_index,                     // per-local-vertex temporary label indicators
        std::vector< std::vector<UnweightedDist> > &dist_table,   // dist_table[root_id][label_global_id] = dist
        std::vector< std::vector<VertexID> > &recved_dist_table,  // records which dist_table entries were set (for reset)
        std::vector<BPLabelType> &bp_labels_table,                // out: bit-parallel labels of the batch's roots
        std::vector<VertexID> &active_queue,                      // out: local roots become active
        VertexID &end_active_queue,                               // in/out: number of active local vertices
        std::vector<VertexID> &once_candidated_queue,             // vertices once-candidated in the previous batch
        VertexID &end_once_candidated_queue,                      // in/out: reset to 0 here
        std::vector<uint8_t> &once_candidated,                    // flags matching once_candidated_queue
//        VertexID b_id,
        VertexID roots_start,                                     // first global vertex ID of the batch
        VertexID roots_size,                                      // number of roots in the batch
//        std::vector<VertexID> &roots_master_local,
        const std::vector<uint8_t> &used_bp_roots)                // roots consumed as bit-parallel roots are skipped
{
    // Get the roots_master_local, containing all local roots.
    std::vector<VertexID> roots_master_local;
    VertexID size_roots_master_local;
    VertexID roots_bound = roots_start + roots_size;
    try {
        // A root is "local" when this host is its master; roots already used as
        // bit-parallel roots get no normal labels.
        for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
            if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) {
                roots_master_local.push_back(G.get_local_vertex_id(r_global));
            }
        }
        size_roots_master_local = roots_master_local.size();
    } catch (const std::bad_alloc &) {
        // Out of memory while collecting local roots: report and abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("initialization_roots_master_local: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
    // Short_index:
    // Clear the indicators of every vertex that was once-candidated in the
    // previous batch, then mark each local root as covering itself.
    {
        if (end_once_candidated_queue >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
                VertexID v_local = once_candidated_queue[v_i];
                short_index[v_local].indicator_reset();
                once_candidated[v_local] = 0;
            }
        } else {
            for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
                VertexID v_local = once_candidated_queue[v_i];
                short_index[v_local].indicator_reset();
                once_candidated[v_local] = 0;
            }
        }
        end_once_candidated_queue = 0;
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
//                short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
            }
        } else {
            for (VertexID r_local : roots_master_local) {
                short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself
//                short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels
            }
        }
    }
//
    // Real Index:
    // Append the trivial label (r itself, distance 0) to every local root's
    // label index L[r].
    try {
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                IndexType &Lr = L[r_local];
//                Lr.batches.emplace_back(
//                        b_id, // Batch ID
//                        Lr.distances.size(), // start_index
//                        1); // size
                Lr.distances.emplace_back(
                        Lr.vertices.size(), // start_index
                        1, // size
                        0); // dist
                Lr.vertices.push_back(G.get_global_vertex_id(r_local));
//                Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
            }
        } else {
            for (VertexID r_local : roots_master_local) {
                IndexType &Lr = L[r_local];
//                Lr.batches.emplace_back(
//                        b_id, // Batch ID
//                        Lr.distances.size(), // start_index
//                        1); // size
                Lr.distances.emplace_back(
                        Lr.vertices.size(), // start_index
                        1, // size
                        0); // dist
                Lr.vertices.push_back(G.get_global_vertex_id(r_local));
//                Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start);
            }
        }
    } catch (const std::bad_alloc &) {
        // Out of memory while extending the real index: report and abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("initialization_real_index: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }

    // Dist Table:
    // Unpack every local root's labels into buffer_send, broadcast host by
    // host, and let every host fill its dist_table from what it receives.
    try {
//        struct LabelTableUnit {
//            VertexID root_id;
//            VertexID label_global_id;
//            UnweightedDist dist;
//
//            LabelTableUnit() = default;
//
//            LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) :
//                    root_id(r), label_global_id(l), dist(d) {}
//        };
        std::vector<LabelTableUnit> buffer_send; // buffer for sending
        // Dist_matrix
        {
            // Deprecated Old method: unpack the IndexType structure before sending.
            // Okay, it's back.
            if (size_roots_master_local >= THRESHOLD_PARALLEL) {
                // Offsets for adding labels to buffer_send in parallel:
                // each root writes into its own disjoint slice of buffer_send.
                std::vector<VertexID> offsets_beffer_send(size_roots_master_local);
#pragma omp parallel for
                for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                    VertexID r_local = roots_master_local[i_r];
                    offsets_beffer_send[i_r] = L[r_local].vertices.size();
                }
                // Exclusive prefix sum turns per-root sizes into start offsets
                // and yields the total label count.
                EdgeID size_labels = PADO::prefix_sum_for_offsets(offsets_beffer_send);
                buffer_send.resize(size_labels);
#pragma omp parallel for
                for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                    VertexID r_local = roots_master_local[i_r];
                    VertexID top_location = 0;
                    IndexType &Lr = L[r_local];
                    VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
//                    VertexID b_i_bound = Lr.batches.size();
//                    _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
                    // Prefetch hints before the label traversal.
                    _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
                    // Traverse batches array
//                    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
//                        VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
//                        VertexID dist_start_index = Lr.batches[b_i].start_index;
//                        VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
                        // Traverse distances array
//                        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
                        VertexID dist_bound_index = Lr.distances.size();
                        for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
                            VertexID v_start_index = Lr.distances[dist_i].start_index;
                            VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
                            UnweightedDist dist = Lr.distances[dist_i].dist;
                            // Traverse vertices array
                            for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                                // Write into the dist_table
//                                buffer_send[offsets_beffer_send[i_r] + top_location++] =
//                                        LabelTableUnit(r_root_id, Lr.vertices[v_i] + id_offset, dist);
                                buffer_send[offsets_beffer_send[i_r] + top_location++] =
                                        LabelTableUnit(r_root_id, Lr.vertices[v_i], dist);
                            }
                        }
//                    }
                }
            } else {
                for (VertexID r_local : roots_master_local) {
                    // The distance table.
                    IndexType &Lr = L[r_local];
                    VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start;
//                    VertexID b_i_bound = Lr.batches.size();
//                    _mm_prefetch(&Lr.batches[0], _MM_HINT_T0);
                    // Prefetch hints before the label traversal.
                    _mm_prefetch(&Lr.distances[0], _MM_HINT_T0);
                    _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0);
                    // Traverse batches array
//                    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
//                        VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE;
//                        VertexID dist_start_index = Lr.batches[b_i].start_index;
//                        VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size;
                        // Traverse distances array
//                        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
                        VertexID dist_bound_index = Lr.distances.size();
                        for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
                            VertexID v_start_index = Lr.distances[dist_i].start_index;
                            VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size;
                            UnweightedDist dist = Lr.distances[dist_i].dist;
                            // Traverse vertices array
                            for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                                // Write into the dist_table
                                buffer_send.emplace_back(r_root_id, Lr.vertices[v_i], dist); // buffer for sending
//                                buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset,
//                                                         dist); // buffer for sending
                            }
                        }
//                    }
                }
            }
        }
        // Broadcast local roots labels: every host takes a turn as broadcast
        // root, so every host ends up with the full distance table.
        for (int root = 0; root < num_hosts; ++root) {
            std::vector<LabelTableUnit> buffer_recv;
            one_host_bcasts_buffer_to_buffer(root,
                                             buffer_send,
                                             buffer_recv);
            if (buffer_recv.empty()) {
                continue;
            }
            EdgeID size_buffer_recv = buffer_recv.size();
            if (size_buffer_recv >= THRESHOLD_PARALLEL) {
                std::vector<VertexID> sizes_recved_root_labels(roots_size, 0);
#pragma omp parallel for
                for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                    const LabelTableUnit &l = buffer_recv[i_l];
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    UnweightedDist dist = l.dist;
                    dist_table[root_id][label_global_id] = dist;
                    // Record root_id's number of its received label, for later adding to recved_dist_table.
                    // Atomic: several OpenMP threads may count labels for the same root.
                    __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST);
//                    recved_dist_table[root_id].push_back(label_global_id);
                }
                // Record the received label in recved_dist_table, for later reset.
                // First size each root's list, reusing the counter as a cursor (reset to 0).
#pragma omp parallel for
                for (VertexID root_id = 0; root_id < roots_size; ++root_id) {
                    VertexID &size = sizes_recved_root_labels[root_id];
                    if (size) {
                        recved_dist_table[root_id].resize(size);
                        size = 0;
                    }
                }
#pragma omp parallel for
                for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) {
                    const LabelTableUnit &l = buffer_recv[i_l];
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    // NOTE(review): presumably TS_enqueue appends thread-safely by
                    // atomically bumping the cursor — confirm in PADO utils.
                    PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], label_global_id);
                }
            } else {
                for (const LabelTableUnit &l : buffer_recv) {
                    VertexID root_id = l.root_id;
                    VertexID label_global_id = l.label_global_id;
                    UnweightedDist dist = l.dist;
                    dist_table[root_id][label_global_id] = dist;
                    // Record the received label in recved_dist_table, for later reset
                    recved_dist_table[root_id].push_back(label_global_id);
                }
            }
        }
    } catch (const std::bad_alloc &) {
        // Out of memory while building the distance table: report and abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("initialization_dist_table: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
    // Build the Bit-Parallel Labels Table:
    // Pack every local root's bit-parallel label, broadcast host by host, and
    // copy the received labels into bp_labels_table on every host.
    try {
//        struct MsgBPLabel {
//            VertexID r_root_id;
//            UnweightedDist bp_dist[BITPARALLEL_SIZE];
//            uint64_t bp_sets[BITPARALLEL_SIZE][2];
//
//            MsgBPLabel() = default;
//            MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2])
//                    : r_root_id(r)
//            {
//                memcpy(bp_dist, dist, sizeof(bp_dist));
//                memcpy(bp_sets, sets, sizeof(bp_sets));
//            }
//        };
//        std::vector<MPI_Request> requests_send(num_hosts - 1);
        std::vector<MsgBPLabel> buffer_send;
        std::vector<VertexID> roots_queue;
        // Collect this host's roots of the batch (mastership only; bit-parallel
        // usage is not filtered here, unlike roots_master_local above).
        for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
            if (G.get_master_host_id(r_global) != host_id) {
                continue;
            }
            roots_queue.push_back(r_global);
        }
        VertexID size_roots_queue = roots_queue.size();
        if (size_roots_queue >= THRESHOLD_PARALLEL) {
            buffer_send.resize(size_roots_queue);
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_queue; ++i_r) {
                VertexID r_global = roots_queue[i_r];
                VertexID r_local = G.get_local_vertex_id(r_global);
                VertexID r_root = r_global - roots_start;
                // Prepare for sending
//                buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
                buffer_send[i_r] = MsgBPLabel(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
            }
        } else {
//            for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) {
//                if (G.get_master_host_id(r_global) != host_id) {
//                    continue;
//                }
            for (VertexID r_global : roots_queue) {
                VertexID r_local = G.get_local_vertex_id(r_global);
                VertexID r_root = r_global - roots_start;
                // Local roots
//                memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
//                memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
                // Prepare for sending
                buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets);
            }
        }

        // Round-robin broadcast; each host copies every received bit-parallel
        // label into its own bp_labels_table (including its own broadcast).
        for (int root = 0; root < num_hosts; ++root) {
            std::vector<MsgBPLabel> buffer_recv;
            one_host_bcasts_buffer_to_buffer(root,
                                             buffer_send,
                                             buffer_recv);
            if (buffer_recv.empty()) {
                continue;
            }
            VertexID size_buffer_recv = buffer_recv.size();
            if (size_buffer_recv >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
                for (VertexID i_m = 0; i_m < size_buffer_recv; ++i_m) {
                    const MsgBPLabel &m = buffer_recv[i_m];
                    VertexID r_root = m.r_root_id;
                    memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
                    memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
                }
            } else {
                for (const MsgBPLabel &m : buffer_recv) {
                    VertexID r_root = m.r_root_id;
                    memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist));
                    memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets));
                }
            }
        }
    } catch (const std::bad_alloc &) {
        // Out of memory while building the bit-parallel labels table: report and abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("initialization_bp_labels_table: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
               host_id,
               get_index_size() * 1.0 / (1 << 30),
               memtotal / 1024,
               memfree / 1024);
        exit(1);
    }
    // Active_queue: all local roots become the initial frontier.
    VertexID global_num_actives = 0; // global number of active vertices.
    {
        if (size_roots_master_local >= THRESHOLD_PARALLEL) {
#pragma omp parallel for
            for (VertexID i_r = 0; i_r < size_roots_master_local; ++i_r) {
                VertexID r_local = roots_master_local[i_r];
                active_queue[i_r] = r_local;
            }
            end_active_queue = size_roots_master_local;
        } else {
            for (VertexID r_local : roots_master_local) {
                active_queue[end_active_queue++] = r_local;
            }
        }
        // Get the global number of active vertices;
        // NOTE(review): the reduction uses MPI_MAX (MPI_SUM is commented out), so
        // the returned value is the maximum per-host active count, not the global
        // total — presumably callers only test it for non-zero; confirm.
//        message_time -= WallTimer::get_time_mark();
        MPI_Allreduce(&end_active_queue,
                      &global_num_actives,
                      1,
                      V_ID_Type,
//                      MPI_SUM,
                      MPI_MAX,
                      MPI_COMM_WORLD);
//        message_time += WallTimer::get_time_mark();
    }

    return global_num_actives;
}
// Sequential Version
//// Function for initializing at the begin of a batch
//// For a batch, initialize the temporary labels and real labels of roots;
//// traverse roots' labels to initialize distance buffer;
//// unset flag arrays is_active and got_labels
//template <VertexID BATCH_SIZE>
//inline VertexID DistBVCPLL<BATCH_SIZE>::
//initialization(
//        const DistGraph &G,
//        std::vector<ShortIndex> &short_index,
//        std::vector< std::vector<UnweightedDist> > &dist_table,
//        std::vector< std::vector<VertexID> > &recved_dist_table,
//        std::vector<BPLabelType> &bp_labels_table,
//        std::vector<VertexID> &active_queue,
//        VertexID &end_active_queue,
//        std::vector<VertexID> &once_candidated_queue,
//        VertexID 
&end_once_candidated_queue, // std::vector<uint8_t> &once_candidated, // VertexID b_id, // VertexID roots_start, // VertexID roots_size, //// std::vector<VertexID> &roots_master_local, // const std::vector<uint8_t> &used_bp_roots) //{ // // Get the roots_master_local, containing all local roots. // std::vector<VertexID> roots_master_local; // VertexID roots_bound = roots_start + roots_size; // for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { // if (G.get_master_host_id(r_global) == host_id && !used_bp_roots[r_global]) { // roots_master_local.push_back(G.get_local_vertex_id(r_global)); // } // } // // Short_index // { // for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) { // VertexID v_local = once_candidated_queue[v_i]; // short_index[v_local].indicator_reset(); // once_candidated[v_local] = 0; // } // end_once_candidated_queue = 0; // for (VertexID r_local : roots_master_local) { // short_index[r_local].indicator[G.get_global_vertex_id(r_local) - roots_start] = 1; // v itself // short_index[r_local].indicator[BATCH_SIZE] = 1; // v got labels //// short_index[r_local].indicator.set(G.get_global_vertex_id(r_local) - roots_start); // v itself //// short_index[r_local].indicator.set(BATCH_SIZE); // v got labels // } // } //// // // Real Index // { // for (VertexID r_local : roots_master_local) { // IndexType &Lr = L[r_local]; // Lr.batches.emplace_back( // b_id, // Batch ID // Lr.distances.size(), // start_index // 1); // size // Lr.distances.emplace_back( // Lr.vertices.size(), // start_index // 1, // size // 0); // dist // Lr.vertices.push_back(G.get_global_vertex_id(r_local) - roots_start); // } // } // // // Dist Table // { //// struct LabelTableUnit { //// VertexID root_id; //// VertexID label_global_id; //// UnweightedDist dist; //// //// LabelTableUnit() = default; //// //// LabelTableUnit(VertexID r, VertexID l, UnweightedDist d) : //// root_id(r), label_global_id(l), dist(d) {} //// }; // std::vector<LabelTableUnit> 
buffer_send; // buffer for sending // // Dist_matrix // { // // Deprecated Old method: unpack the IndexType structure before sending. // for (VertexID r_local : roots_master_local) { // // The distance table. // IndexType &Lr = L[r_local]; // VertexID r_root_id = G.get_global_vertex_id(r_local) - roots_start; // VertexID b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // // Traverse batches array // for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) { // VertexID id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // VertexID dist_start_index = Lr.batches[b_i].start_index; // VertexID dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse distances array // for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // VertexID v_start_index = Lr.distances[dist_i].start_index; // VertexID v_bound_index = v_start_index + Lr.distances[dist_i].size; // UnweightedDist dist = Lr.distances[dist_i].dist; // // Traverse vertices array // for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // // Write into the dist_table //// dist_table[r_root_id][Lr.vertices[v_i] + id_offset] = dist; // distance table // buffer_send.emplace_back(r_root_id, Lr.vertices[v_i] + id_offset, // dist); // buffer for sending // } // } // } // } // } // // Broadcast local roots labels // for (int root = 0; root < num_hosts; ++root) { // std::vector<LabelTableUnit> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const LabelTableUnit &l : buffer_recv) { // VertexID root_id = l.root_id; // VertexID label_global_id = l.label_global_id; // UnweightedDist dist = l.dist; // dist_table[root_id][label_global_id] = dist; // // Record the received label in recved_dist_table, for later reset // 
recved_dist_table[root_id].push_back(label_global_id); // } // } // } // // // Build the Bit-Parallel Labels Table // { //// struct MsgBPLabel { //// VertexID r_root_id; //// UnweightedDist bp_dist[BITPARALLEL_SIZE]; //// uint64_t bp_sets[BITPARALLEL_SIZE][2]; //// //// MsgBPLabel() = default; //// MsgBPLabel(VertexID r, const UnweightedDist dist[], const uint64_t sets[][2]) //// : r_root_id(r) //// { //// memcpy(bp_dist, dist, sizeof(bp_dist)); //// memcpy(bp_sets, sets, sizeof(bp_sets)); //// } //// }; //// std::vector<MPI_Request> requests_send(num_hosts - 1); // std::vector<MsgBPLabel> buffer_send; // for (VertexID r_global = roots_start; r_global < roots_bound; ++r_global) { // if (G.get_master_host_id(r_global) != host_id) { // continue; // } // VertexID r_local = G.get_local_vertex_id(r_global); // VertexID r_root = r_global - roots_start; // // Local roots //// memcpy(bp_labels_table[r_root].bp_dist, L[r_local].bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); //// memcpy(bp_labels_table[r_root].bp_sets, L[r_local].bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); // // Prepare for sending // buffer_send.emplace_back(r_root, L[r_local].bp_dist, L[r_local].bp_sets); // } // // for (int root = 0; root < num_hosts; ++root) { // std::vector<MsgBPLabel> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const MsgBPLabel &m : buffer_recv) { // VertexID r_root = m.r_root_id; // memcpy(bp_labels_table[r_root].bp_dist, m.bp_dist, sizeof(bp_labels_table[r_root].bp_dist)); // memcpy(bp_labels_table[r_root].bp_sets, m.bp_sets, sizeof(bp_labels_table[r_root].bp_sets)); // } // } // } // // // TODO: parallel enqueue // // Active_queue // VertexID global_num_actives = 0; // global number of active vertices. 
// { // for (VertexID r_local : roots_master_local) { // active_queue[end_active_queue++] = r_local; // } // // Get the global number of active vertices; // message_time -= WallTimer::get_time_mark(); // MPI_Allreduce(&end_active_queue, // &global_num_actives, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // } // // return global_num_actives; //} //// Function: push v_head_global's newly added labels to its all neighbors. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //push_single_label( // VertexID v_head_global, // VertexID label_root_id, // VertexID roots_start, // const DistGraph &G, // std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<bool> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<bool> &once_candidated, // const std::vector<BPLabelType> &bp_labels_table, // const std::vector<uint8_t> &used_bp_roots, // UnweightedDist iter) //{ // const BPLabelType &L_label = bp_labels_table[label_root_id]; // VertexID label_global_id = label_root_id + roots_start; // EdgeID e_i_start = G.vertices_idx[v_head_global]; // EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global]; // for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) { // VertexID v_tail_global = G.out_edges[e_i]; // if (used_bp_roots[v_tail_global]) { // continue; // } // if (v_tail_global < roots_start) { // all remaining v_tail_global has higher rank than any roots, then no roots can push new labels to it. 
// return; // } // // VertexID v_tail_local = G.get_local_vertex_id(v_tail_global); // const IndexType &L_tail = L[v_tail_local]; // if (v_tail_global <= label_global_id) { // // remaining v_tail_global has higher rank than the label // return; // } // ShortIndex &SI_v_tail = short_index[v_tail_local]; // if (SI_v_tail.indicator[label_root_id]) { // // The label is already selected before // continue; // } // // Record label_root_id as once selected by v_tail_global // SI_v_tail.indicator.set(label_root_id); // // Add into once_candidated_queue // // if (!once_candidated[v_tail_local]) { // // If v_tail_global is not in the once_candidated_queue yet, add it in // once_candidated[v_tail_local] = true; // once_candidated_queue[end_once_candidated_queue++] = v_tail_local; // } // // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already // // ++total_check_count; //// const IndexType &L_label = L[label_global_id]; //// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); //// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); //// bp_checking_ins_count.measure_start(); // bool no_need_add = false; // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i]; // if (td - 2 <= iter) { // td += // (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : // ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | // (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) // ? 
//                            -1 : 0;
//                    if (td <= iter) {
//                        no_need_add = true;
////                        ++bp_hit_count;
//                        break;
//                    }
//                }
//            }
//            if (no_need_add) {
////                bp_checking_ins_count.measure_stop();
//                continue;
//            }
////            bp_checking_ins_count.measure_stop();
//            if (SI_v_tail.is_candidate[label_root_id]) {
//                continue;
//            }
//            SI_v_tail.is_candidate[label_root_id] = true;
//            SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
//
//            if (!got_candidates[v_tail_local]) {
//                // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
//                got_candidates[v_tail_local] = true;
//                got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
//            }
//    }
////    {// Just for the complain from the compiler
////        assert(iter >= iter);
////    }
//}

// Function: one round of parallel label pushing across all hosts.
// 1) Pack this host's share of active masters' labels added in the previous round
//    (read from the last element of L[v].distances) into buffer_send_indices
//    (.first: vertex global ID, .second: label count) and buffer_send_labels,
//    using a prefix sum over per-vertex label counts to compute write offsets.
// 2) Every host broadcasts its two buffers in turn (one_host_bcasts_buffer_to_buffer).
// 3) Received labels are pushed to local out-neighbors in parallel via
//    local_push_labels_para; the per-head temporary queues are then merged into
//    got_candidates_queue and once_candidated_queue by collect_into_queue.
// Only active_queue[global_start, global_start + min(global_size, local_size)) is
// packed by this host; when local_size == 0 nothing is packed, but the host still
// participates in the broadcast loop below.
template<VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
schedule_label_pushing_para(
        const DistGraph &G,
        const VertexID roots_start,
        const std::vector<uint8_t> &used_bp_roots,
        const std::vector<VertexID> &active_queue,
        const VertexID global_start,
        const VertexID global_size,
        const VertexID local_size,
//        const VertexID start_active_queue,
//        const VertexID size_active_queue,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<ShortIndex> &short_index,
        const std::vector<BPLabelType> &bp_labels_table,
        std::vector<uint8_t> &got_candidates,
        std::vector<uint8_t> &is_active,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const UnweightedDist iter)
{
    std::vector<std::pair<VertexID, VertexID> > buffer_send_indices;
        //.first: Vertex ID
        //.second: size of labels
    std::vector<VertexID> buffer_send_labels;
    if (local_size) {
        const VertexID start_active_queue = global_start;
        const VertexID size_active_queue = global_size <= local_size ?
                global_size : local_size;
        const VertexID bound_active_queue = start_active_queue + size_active_queue;
        buffer_send_indices.resize(size_active_queue);
        // Prepare offset for inserting: one slot per active vertex holding its
        // count of last-round labels; prefix_sum_for_offsets turns counts into
        // exclusive write offsets and returns the total.
        std::vector<VertexID> offsets_buffer_locs(size_active_queue);
#pragma omp parallel for
        for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
            VertexID v_head_local = active_queue[i_q];
            is_active[v_head_local] = 0; // reset is_active
            const IndexType &Lv = L[v_head_local];
            offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size;
        }
        EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
        try {
            buffer_send_labels.resize(size_buffer_send_labels);
        } catch (const std::bad_alloc &) {
            // Out of memory: report memory state for diagnosis, then abort.
            double memtotal = 0;
            double memfree = 0;
            PADO::Utils::system_memory(memtotal, memfree);
            printf("schedule_label_pushing_para.buffer_send_labels: bad_alloc "
                    "host_id: %d "
                    "size_buffer_send_labels: %lu "
                    "L.size(): %.2fGB "
                    "memtotal: %.2fGB "
                    "memfree: %.2fGB\n",
                    host_id,
                    size_buffer_send_labels,
                    get_index_size() * 1.0 / (1 << 30),
                    memtotal / 1024,
                    memfree / 1024);
            exit(1);
        }

        // Build buffer_send_labels by parallel inserting: each thread writes its
        // vertex's labels into the disjoint slice computed above, so no locking
        // is needed.
#pragma omp parallel for
        for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
            VertexID v_head_local = active_queue[i_q];
            is_active[v_head_local] = 0; // reset is_active
            VertexID v_head_global = G.get_global_vertex_id(v_head_local);
            const IndexType &Lv = L[v_head_local];
            // Prepare the buffer_send_indices
            VertexID tmp_i_q = i_q - start_active_queue;
            buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
            // These 2 indices are used for traversing v_head's last inserted labels
            VertexID l_i_start = Lv.distances.rbegin()->start_index;
            VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
            VertexID top_labels = offsets_buffer_locs[tmp_i_q];
            for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
                // Labels are sent as root-relative IDs (global ID minus roots_start).
                VertexID label_root_id = Lv.vertices[l_i] - roots_start;
                buffer_send_labels[top_labels++] = label_root_id;
//                buffer_send_labels.push_back(label_root_id);
            }
        }
    }
    ////////////////////////////////////////////////
    ////
//    const VertexID bound_active_queue = start_active_queue + size_active_queue;
//    std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(size_active_queue);
//        //.first: Vertex ID
//        //.second: size of labels
//    std::vector<VertexID> buffer_send_labels;
//    // Prepare masters' newly added labels for sending
//    // Parallel Version
//    // Prepare offset for inserting
//    std::vector<VertexID> offsets_buffer_locs(size_active_queue);
//#pragma omp parallel for
//    for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
//        VertexID v_head_local = active_queue[i_q];
//        is_active[v_head_local] = 0; // reset is_active
//        const IndexType &Lv = L[v_head_local];
//        offsets_buffer_locs[i_q - start_active_queue] = Lv.distances.rbegin()->size;
//    }
//    EdgeID size_buffer_send_labels = PADO::prefix_sum_for_offsets(offsets_buffer_locs);
////    {// test
////        if (0 == host_id) {
////            double memtotal = 0;
////            double memfree = 0;
////            double bytes_buffer_send_labels = size_buffer_send_labels * sizeof(VertexID);
////            PADO::Utils::system_memory(memtotal, memfree);
////            printf("bytes_buffer_send_labels: %fGB memtotal: %fGB memfree: %fGB\n",
////                    bytes_buffer_send_labels / (1 << 30), memtotal / 1024, memfree / 1024);
////        }
////    }
//    buffer_send_labels.resize(size_buffer_send_labels);
////    {// test
////        if (0 == host_id) {
////            printf("buffer_send_labels created.\n");
////        }
////    }
//
//    // Build buffer_send_labels by parallel inserting
//#pragma omp parallel for
//    for (VertexID i_q = start_active_queue; i_q < bound_active_queue; ++i_q) {
//        VertexID tmp_i_q = i_q - start_active_queue;
//        VertexID v_head_local = active_queue[i_q];
//        is_active[v_head_local] = 0; // reset is_active
//        VertexID v_head_global = G.get_global_vertex_id(v_head_local);
//        const IndexType &Lv = L[v_head_local];
//        // Prepare the buffer_send_indices
//        buffer_send_indices[tmp_i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size);
//        // These 2 index are used for traversing v_head's last inserted labels
//        VertexID l_i_start = Lv.distances.rbegin()->start_index;
//        VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size;
//        VertexID top_labels = offsets_buffer_locs[tmp_i_q];
//        for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) {
//            VertexID label_root_id = Lv.vertices[l_i];
//            buffer_send_labels[top_labels++] = label_root_id;
////            buffer_send_labels.push_back(label_root_id);
//        }
//    }
////    end_active_queue = 0;
////
//////////////////////////////////////////////////

    // Every host takes a turn as broadcast root; all hosts (including the root)
    // then process the received labels against their local out-neighbors.
    for (int root = 0; root < num_hosts; ++root) {
        // Get the indices
        std::vector<std::pair<VertexID, VertexID> > indices_buffer;
        one_host_bcasts_buffer_to_buffer(root,
                buffer_send_indices,
                indices_buffer);
        if (indices_buffer.empty()) {
            continue;
        }
        // Get the labels
        std::vector<VertexID> labels_buffer;
        one_host_bcasts_buffer_to_buffer(root,
                buffer_send_labels,
                labels_buffer);
        VertexID size_indices_buffer = indices_buffer.size();
        // Prepare the offsets for reading indices_buffer: per-head label counts
        // become exclusive start offsets into labels_buffer.
        std::vector<EdgeID> starts_locs_index(size_indices_buffer);
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
            starts_locs_index[i_i] = e.second;
        }
        EdgeID total_recved_labels = PADO::prefix_sum_for_offsets(starts_locs_index);
        // Prepare the offsets for inserting v_tails into queue: each head gets a
        // slice of the temporary queues sized by its local out-degree.
        std::vector<VertexID> offsets_tmp_queue(size_indices_buffer);
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            const std::pair<VertexID, VertexID> &e = indices_buffer[i_i];
            offsets_tmp_queue[i_i] = G.local_out_degrees[e.first];
        }
        EdgeID num_ngbrs = PADO::prefix_sum_for_offsets(offsets_tmp_queue);
        std::vector<VertexID> tmp_got_candidates_queue;
        std::vector<VertexID> sizes_tmp_got_candidates_queue;
        std::vector<VertexID> tmp_once_candidated_queue;
        std::vector<VertexID> sizes_tmp_once_candidated_queue;
        try {
            tmp_got_candidates_queue.resize(num_ngbrs);
            sizes_tmp_got_candidates_queue.resize(size_indices_buffer, 0);
            tmp_once_candidated_queue.resize(num_ngbrs);
            sizes_tmp_once_candidated_queue.resize(size_indices_buffer, 0);
        } catch (const std::bad_alloc &) {
            // Out of memory: report memory state for diagnosis, then abort.
            double memtotal = 0;
            double memfree = 0;
            PADO::Utils::system_memory(memtotal, memfree);
            printf("schedule_label_pushing_para.tmp_queues: bad_alloc "
                    "host_id: %d "
                    "num_ngbrs: %lu "
                    "size_indices_buffer: %u "
                    "L.size(): %.2fGB "
                    "memtotal: %.2fGB "
                    "memfree: %.2fGB\n",
                    host_id,
                    num_ngbrs,
                    size_indices_buffer,
                    get_index_size() * 1.0 / (1 << 30),
                    memtotal / 1024,
                    memfree / 1024);
            exit(1);
        }
#pragma omp parallel for
        for (VertexID i_i = 0; i_i < size_indices_buffer; ++i_i) {
            VertexID v_head_global = indices_buffer[i_i].first;
            EdgeID start_index = starts_locs_index[i_i];
            // The last head's label range ends at the grand total.
            EdgeID bound_index = i_i != size_indices_buffer - 1 ?
                    starts_locs_index[i_i + 1] : total_recved_labels;
            if (G.local_out_degrees[v_head_global]) {
                local_push_labels_para(
                        v_head_global,
                        start_index,
                        bound_index,
                        roots_start,
                        labels_buffer,
                        G,
                        short_index,
//                        std::vector<VertexID> &got_candidates_queue,
//                        VertexID &end_got_candidates_queue,
                        tmp_got_candidates_queue,
                        sizes_tmp_got_candidates_queue[i_i],
                        offsets_tmp_queue[i_i],
                        got_candidates,
//                        std::vector<VertexID> &once_candidated_queue,
//                        VertexID &end_once_candidated_queue,
                        tmp_once_candidated_queue,
                        sizes_tmp_once_candidated_queue[i_i],
                        once_candidated,
                        bp_labels_table,
                        used_bp_roots,
                        iter);
            }
        }
        {// Collect elements from tmp_got_candidates_queue to got_candidates_queue
            VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_got_candidates_queue);
            PADO::collect_into_queue(
                    tmp_got_candidates_queue,
                    offsets_tmp_queue, // the locations for reading tmp_got_candidates_queue
                    sizes_tmp_got_candidates_queue, // the locations for writing got_candidates_queue
                    total_new,
                    got_candidates_queue,
                    end_got_candidates_queue);
        }
        {// Collect elements from tmp_once_candidated_queue to once_candidated_queue
            VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_once_candidated_queue);
            PADO::collect_into_queue(
                    tmp_once_candidated_queue,
                    offsets_tmp_queue, // the locations for reading tmp_once_candidated_queue
                    sizes_tmp_once_candidated_queue, // the locations for writing once_candidated_queue
                    total_new,
                    once_candidated_queue,
                    end_once_candidated_queue);
        }
    }
}

// Function: pushes v_head's labels to v_head's every (master) neighbor.
// Thread-safe variant, called from an OpenMP parallel loop in
// schedule_label_pushing_para: shared per-vertex flags (indicator,
// once_candidated, is_candidate, got_candidates) are claimed with
// compare-and-swap, and discovered tails are written into this head's private
// slice of the temporary queues (starting at offset_tmp_queue) instead of the
// shared global queues.
// Labels pushed are labels_buffer[start_index, bound_index) (root-relative IDs).
// A label is skipped when the tail outranks it, and the whole scan stops at the
// first tail ranked above all roots (out-edges are assumed sorted so that
// v_tail_global < roots_start implies the rest are too — TODO confirm).
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_para(
        const VertexID v_head_global,
        const EdgeID start_index,
        const EdgeID bound_index,
        const VertexID roots_start,
        const std::vector<VertexID> &labels_buffer,
        const DistGraph &G,
        std::vector<ShortIndex> &short_index,
//        std::vector<VertexID> &got_candidates_queue,
//        VertexID &end_got_candidates_queue,
        std::vector<VertexID> &tmp_got_candidates_queue,
        VertexID &size_tmp_got_candidates_queue,
        const VertexID offset_tmp_queue,
        std::vector<uint8_t> &got_candidates,
//        std::vector<VertexID> &once_candidated_queue,
//        VertexID &end_once_candidated_queue,
        std::vector<VertexID> &tmp_once_candidated_queue,
        VertexID &size_tmp_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const std::vector<BPLabelType> &bp_labels_table,
        const std::vector<uint8_t> &used_bp_roots,
        const UnweightedDist iter)
{
    // Traverse v_head's every neighbor v_tail
    EdgeID e_i_start = G.vertices_idx[v_head_global];
    EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
    for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
        VertexID v_tail_global = G.out_edges[e_i];
        if (used_bp_roots[v_tail_global]) {
            continue;
        }
        if (v_tail_global < roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it.
            return;
        }
        VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
        const IndexType &L_tail = L[v_tail_local];
        ShortIndex &SI_v_tail = short_index[v_tail_local];
        // Traverse v_head's last inserted labels
        for (VertexID l_i = start_index; l_i < bound_index; ++l_i) {
            VertexID label_root_id = labels_buffer[l_i];
            VertexID label_global_id = label_root_id + roots_start;
            if (v_tail_global <= label_global_id) {
                // v_tail_global has higher rank than the label
                continue;
            }
//            if (SI_v_tail.indicator[label_root_id]) {
//                // The label is already selected before
//                continue;
//            }
//            // Record label_root_id as once selected by v_tail_global
//            SI_v_tail.indicator[label_root_id] = 1;
            {// Deal with race condition: only the thread whose CAS wins proceeds
             // to process this (tail, label) pair.
                if (!PADO::CAS(SI_v_tail.indicator.data() + label_root_id,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    // The label is already selected before
                    continue;
                }
            }
            // Add into once_candidated_queue
            if (!once_candidated[v_tail_local]) {
                // If v_tail_global is not in the once_candidated_queue yet, add it in
                if (PADO::CAS(once_candidated.data() + v_tail_local,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    tmp_once_candidated_queue[offset_tmp_queue + size_tmp_once_candidated_queue++] = v_tail_local;
                }
//                once_candidated[v_tail_local] = 1;
//                once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
            }
            // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
//            const IndexType &L_label = L[label_global_id];
//            _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
//            _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
            const BPLabelType &L_label = bp_labels_table[label_root_id];
            bool no_need_add = false;
            for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
                VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
                // NOTE(review): if VertexID is unsigned and td < 2, td - 2 wraps
                // around — presumably bp_dist values make that case benign; confirm.
                if (td - 2 <= iter) {
                    // Shared set bits shorten the via-root path by 2; adjacent
                    // set bits by 1.
                    td +=
                        (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
                        ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
                         (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) ?
                            -1 : 0;
                    if (td <= iter) {
                        no_need_add = true;
                        break;
                    }
                }
            }
            if (no_need_add) {
                continue;
            }
//            if (SI_v_tail.is_candidate[label_root_id]) {
//                continue;
//            }
//            SI_v_tail.is_candidate[label_root_id] = 1;
//            SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id;
            // NOTE(review): unqualified CAS here vs PADO::CAS above — presumably
            // resolves to the same helper; confirm.
            if (!SI_v_tail.is_candidate[label_root_id]) {
                if (CAS(SI_v_tail.is_candidate.data() + label_root_id,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    // candidates_que is shared among threads, so use the
                    // thread-safe enqueue.
                    PADO::TS_enqueue(SI_v_tail.candidates_que, SI_v_tail.end_candidates_que, label_root_id);
                }
            }
            // Add into got_candidates queue
//            if (!got_candidates[v_tail_local]) {
//                // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate)
//                got_candidates[v_tail_local] = 1;
//                got_candidates_queue[end_got_candidates_queue++] = v_tail_local;
//            }
            if (!got_candidates[v_tail_local]) {
                if (CAS(got_candidates.data() + v_tail_local,
                        static_cast<uint8_t>(0),
                        static_cast<uint8_t>(1))) {
                    tmp_got_candidates_queue[offset_tmp_queue + size_tmp_got_candidates_queue++] = v_tail_local;
                }
            }
        }
    }
//    {
//        assert(iter >= iter);
//    }
}

// Function: pushes v_head's labels to v_head's every (master) neighbor.
// Sequential counterpart of local_push_labels_para: identical pruning logic
// (rank checks + bit-parallel check) but flags and queues are updated directly,
// without CAS, so it assumes single-threaded use.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
local_push_labels_seq(
        VertexID v_head_global,
        EdgeID start_index,
        EdgeID bound_index,
        VertexID roots_start,
        const std::vector<VertexID> &labels_buffer,
        const DistGraph &G,
        std::vector<ShortIndex> &short_index,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<uint8_t> &got_candidates,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated,
        const std::vector<BPLabelType> &bp_labels_table,
        const std::vector<uint8_t> &used_bp_roots,
        const UnweightedDist iter)
{
    // Traverse v_head's every neighbor v_tail
    EdgeID e_i_start = G.vertices_idx[v_head_global];
    EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global];
    for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) {
        VertexID v_tail_global = G.out_edges[e_i];
        if (used_bp_roots[v_tail_global]) {
            continue;
        }
        if (v_tail_global < roots_start) { // v_tail_global has higher rank than any roots, then no roots can push new labels to it.
            return;
        }
        // Traverse v_head's last inserted labels
        for (VertexID l_i = start_index; l_i < bound_index; ++l_i) {
            VertexID label_root_id = labels_buffer[l_i];
            VertexID label_global_id = label_root_id + roots_start;
            if (v_tail_global <= label_global_id) {
                // v_tail_global has higher rank than the label
                continue;
            }
            VertexID v_tail_local = G.get_local_vertex_id(v_tail_global);
            const IndexType &L_tail = L[v_tail_local];
            ShortIndex &SI_v_tail = short_index[v_tail_local];
            if (SI_v_tail.indicator[label_root_id]) {
                // The label is already selected before
                continue;
            }
            // Record label_root_id as once selected by v_tail_global
            SI_v_tail.indicator[label_root_id] = 1;
//            SI_v_tail.indicator.set(label_root_id);
            // Add into once_candidated_queue
            if (!once_candidated[v_tail_local]) {
                // If v_tail_global is not in the once_candidated_queue yet, add it in
                once_candidated[v_tail_local] = 1;
                once_candidated_queue[end_once_candidated_queue++] = v_tail_local;
            }
            // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already
//            const IndexType &L_label = L[label_global_id];
//            _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0);
//            _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0);
            const BPLabelType &L_label = bp_labels_table[label_root_id];
            bool no_need_add = false;
            for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) {
                VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i];
                if (td - 2 <= iter) {
                    // Shared set bits shorten the via-root path by 2; adjacent
                    // set bits by 1.
                    td +=
                        (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 :
                        ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) |
                         (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) ?
-1 : 0; if (td <= iter) { no_need_add = true; break; } } } if (no_need_add) { continue; } if (SI_v_tail.is_candidate[label_root_id]) { continue; } SI_v_tail.is_candidate[label_root_id] = 1; SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; if (!got_candidates[v_tail_local]) { // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate) got_candidates[v_tail_local] = 1; got_candidates_queue[end_got_candidates_queue++] = v_tail_local; } } } // { // assert(iter >= iter); // } } //// Function: pushes v_head's labels to v_head's every (master) neighbor //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //local_push_labels( // VertexID v_head_local, // VertexID roots_start, // const DistGraph &G, // std::vector<ShortIndex> &short_index, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<bool> &got_candidates, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<bool> &once_candidated, // const std::vector<BPLabelType> &bp_labels_table, // const std::vector<uint8_t> &used_bp_roots, // UnweightedDist iter) //{ // // The data structure of a message //// std::vector< LabelUnitType > buffer_recv; // const IndexType &Lv = L[v_head_local]; // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin() -> start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin() -> size; // // Traverse v_head's every neighbor v_tail // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // EdgeID e_i_start = G.vertices_idx[v_head_global]; // EdgeID e_i_bound = e_i_start + G.local_out_degrees[v_head_global]; // for (EdgeID e_i = e_i_start; e_i < e_i_bound; ++e_i) { // VertexID v_tail_global = G.out_edges[e_i]; // if (used_bp_roots[v_tail_global]) { // continue; // } // if (v_tail_global < roots_start) { // 
v_tail_global has higher rank than any roots, then no roots can push new labels to it. // return; // } // // // Traverse v_head's last inserted labels // for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) { // VertexID label_root_id = Lv.vertices[l_i]; // VertexID label_global_id = label_root_id + roots_start; // if (v_tail_global <= label_global_id) { // // v_tail_global has higher rank than the label // continue; // } // VertexID v_tail_local = G.get_local_vertex_id(v_tail_global); // const IndexType &L_tail = L[v_tail_local]; // ShortIndex &SI_v_tail = short_index[v_tail_local]; // if (SI_v_tail.indicator[label_root_id]) { // // The label is already selected before // continue; // } // // Record label_root_id as once selected by v_tail_global // SI_v_tail.indicator.set(label_root_id); // // Add into once_candidated_queue // // if (!once_candidated[v_tail_local]) { // // If v_tail_global is not in the once_candidated_queue yet, add it in // once_candidated[v_tail_local] = true; // once_candidated_queue[end_once_candidated_queue++] = v_tail_local; // } // // // Bit Parallel Checking: if label_global_id to v_tail_global has shorter distance already // // ++total_check_count; //// const IndexType &L_label = L[label_global_id]; //// _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); //// _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); //// bp_checking_ins_count.measure_start(); // const BPLabelType &L_label = bp_labels_table[label_root_id]; // bool no_need_add = false; // for (VertexID i = 0; i < BITPARALLEL_SIZE; ++i) { // VertexID td = L_label.bp_dist[i] + L_tail.bp_dist[i]; // if (td - 2 <= iter) { // td += // (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : // ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | // (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) // ? 
-1 : 0; // if (td <= iter) { // no_need_add = true; //// ++bp_hit_count; // break; // } // } // } // if (no_need_add) { //// bp_checking_ins_count.measure_stop(); // continue; // } //// bp_checking_ins_count.measure_stop(); // if (SI_v_tail.is_candidate[label_root_id]) { // continue; // } // SI_v_tail.is_candidate[label_root_id] = true; // SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; // // if (!got_candidates[v_tail_local]) { // // If v_tail_global is not in got_candidates_queue, add it in (prevent duplicate) // got_candidates[v_tail_local] = true; // got_candidates_queue[end_got_candidates_queue++] = v_tail_local; // } // } // } // // { // assert(iter >= iter); // } //} //// DEPRECATED Function: in the scatter phase, synchronize local masters to mirrors on other hosts //// Has some mysterious problem: when I call this function, some hosts will receive wrong messages; when I copy all //// code of this function into the caller, all messages become right. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //sync_masters_2_mirrors( // const DistGraph &G, // const std::vector<VertexID> &active_queue, // VertexID end_active_queue, // std::vector< std::pair<VertexID, VertexID> > &buffer_send, // std::vector<MPI_Request> &requests_send //) //{ //// std::vector< std::pair<VertexID, VertexID> > buffer_send; // // pair.first: Owener vertex ID of the label // // pair.first: label vertex ID of the label // // Prepare masters' newly added labels for sending // for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) { // VertexID v_head_local = active_queue[i_q]; // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // const IndexType &Lv = L[v_head_local]; // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin()->start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size; // for (VertexID l_i = 
//                l_i_start; l_i < l_i_bound; ++l_i) {
//            VertexID label_root_id = Lv.vertices[l_i];
//            buffer_send.emplace_back(v_head_global, label_root_id);
////            {//test
////                if (1 == host_id) {
////                    printf("@%u host_id: %u v_head_global: %u\n", __LINE__, host_id, v_head_global);//
////                }
////            }
//        }
//    }
//    {
//        if (!buffer_send.empty()) {
//            printf("@%u host_id: %u sync_masters_2_mirrors: buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second);
//        }
//        assert(!requests_send.empty());
//    }
//
//    // Send messages
//    for (int loc = 0; loc < num_hosts - 1; ++loc) {
//        int dest_host_id = G.buffer_send_list_loc_2_master_host_id(loc);
//        MPI_Isend(buffer_send.data(),
//                MPI_Instance::get_sending_size(buffer_send),
//                MPI_CHAR,
//                dest_host_id,
//                SENDING_MASTERS_TO_MIRRORS,
//                MPI_COMM_WORLD,
//                &requests_send[loc]);
//        {
//            if (!buffer_send.empty()) {
//                printf("@%u host_id: %u dest_host_id: %u buffer_send.size: %lu buffer_send[0]:(%u %u)\n", __LINE__, host_id, dest_host_id, buffer_send.size(), buffer_send[0].first, buffer_send[0].second);
//            }
//        }
//    }
//}

// Function: in parallel, processes got_candidates_queue[start, start + size):
// for every queued vertex each pending candidate in its ShortIndex is checked by
// distance_query; survivors are inserted via insert_label_only_para, the vertex
// is activated into a per-vertex slot of tmp_active_queue, and its label index
// arrays are refreshed by update_label_indices. Afterward the temporary queues
// are merged: newly activated vertices into active_queue, and (for root
// vertices) newly inserted labels are appended to buffer_send for later
// distribution. All parallel writes use disjoint slices computed by prefix sums
// over per-vertex counts.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
schedule_label_inserting_para(
        const DistGraph &G,
        const VertexID roots_start,
        const VertexID roots_size,
        std::vector<ShortIndex> &short_index,
        const std::vector< std::vector<UnweightedDist> > &dist_table,
        const std::vector<VertexID> &got_candidates_queue,
        const VertexID start_got_candidates_queue,
        const VertexID size_got_candidates_queue,
        std::vector<uint8_t> &got_candidates,
        std::vector<VertexID> &active_queue,
        VertexID &end_active_queue,
        std::vector<uint8_t> &is_active,
        std::vector< std::pair<VertexID, VertexID> > &buffer_send,
        const VertexID iter)
{
    const VertexID bound_got_candidates_queue = start_got_candidates_queue + size_got_candidates_queue;
    std::vector<VertexID> offsets_tmp_active_queue;
    std::vector<VertexID> tmp_active_queue;
    std::vector<VertexID> sizes_tmp_active_queue;
    std::vector<EdgeID> offsets_tmp_buffer_send;
    std::vector< std::pair<VertexID, VertexID> > tmp_buffer_send;
    std::vector<EdgeID> sizes_tmp_buffer_send;
    EdgeID total_send_labels;
    try {
        // Each queued vertex owns slot i_q of tmp_active_queue (capacity 1).
        offsets_tmp_active_queue.resize(size_got_candidates_queue);
#pragma omp parallel for
        for (VertexID i_q = 0; i_q < size_got_candidates_queue; ++i_q) {
            offsets_tmp_active_queue[i_q] = i_q;
        }
        tmp_active_queue.resize(size_got_candidates_queue);
        sizes_tmp_active_queue.resize(size_got_candidates_queue, 0); // Size will only be 0 or 1, but it will become offsets eventually.
        // Prepare for parallel buffer_send
//        std::vector<EdgeID> offsets_tmp_buffer_send(size_got_candidates_queue);
        offsets_tmp_buffer_send.resize(size_got_candidates_queue);
#pragma omp parallel for
        for (VertexID i_q = start_got_candidates_queue; i_q < bound_got_candidates_queue; ++i_q) {
            VertexID v_id_local = got_candidates_queue[i_q];
            VertexID v_global_id = G.get_global_vertex_id(v_id_local);
            VertexID tmp_i_q = i_q - start_got_candidates_queue;
            if (v_global_id >= roots_start && v_global_id < roots_start + roots_size) {
                // If v_global_id is root, its new labels should be put into buffer_send;
                // reserve room for its worst case (every pending candidate inserted).
                offsets_tmp_buffer_send[tmp_i_q] = short_index[v_id_local].end_candidates_que;
            } else {
                offsets_tmp_buffer_send[tmp_i_q] = 0;
            }
        }
        total_send_labels = PADO::prefix_sum_for_offsets(offsets_tmp_buffer_send);
        tmp_buffer_send.resize(total_send_labels);
        sizes_tmp_buffer_send.resize(size_got_candidates_queue, 0);
    } catch (const std::bad_alloc &) {
        // Out of memory: report memory state for diagnosis, then abort.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("L%u_tmp_buffer_send: bad_alloc "
                "host_id: %d "
                "iter: %u "
                "size_got_candidates_queue: %u "
                "total_send_labels: %u "
                "L.size(): %.2fGB "
                "memtotal: %.2fGB "
                "memfree: %.2fGB\n",
                __LINE__,
                host_id,
                iter,
                size_got_candidates_queue,
                total_send_labels,
                get_index_size() * 1.0 / (1 << 30),
                memtotal / 1024,
                memfree / 1024);
        exit(1);
    }

#pragma omp parallel for
    for (VertexID i_queue = start_got_candidates_queue; i_queue < bound_got_candidates_queue; ++i_queue) {
        VertexID v_id_local = got_candidates_queue[i_queue];
        VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates
        got_candidates[v_id_local] = 0; // reset got_candidates
        // Traverse v_id's all candidates
        VertexID tmp_i_queue = i_queue - start_got_candidates_queue;
        VertexID bound_cand_i = short_index[v_id_local].end_candidates_que;
        for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) {
            VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i];
            short_index[v_id_local].is_candidate[cand_root_id] = 0;
            // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance
            if (distance_query(
                    cand_root_id,
                    v_id_local,
                    roots_start,
//                    L,
                    dist_table,
                    iter)) {
                if (!is_active[v_id_local]) {
                    is_active[v_id_local] = 1;
//                    active_queue[end_active_queue++] = v_id_local;
                    tmp_active_queue[tmp_i_queue + sizes_tmp_active_queue[tmp_i_queue]++] = v_id_local;
                }
                ++inserted_count;
                // The candidate cand_root_id needs to be added into v_id's label
                insert_label_only_para(
                        cand_root_id,
                        v_id_local,
                        roots_start,
                        roots_size,
                        G,
                        tmp_buffer_send,
                        sizes_tmp_buffer_send[tmp_i_queue],
                        offsets_tmp_buffer_send[tmp_i_queue]);
//                        buffer_send);
            }
        }
        short_index[v_id_local].end_candidates_que = 0;
        if (0 != inserted_count) {
            // Update other arrays in L[v_id] if new labels were inserted in this iteration
            update_label_indices(
                    v_id_local,
                    inserted_count,
//                    L,
//                    short_index,
//                    b_id,
                    iter);
        }
    }
    {// Collect elements from tmp_active_queue to active_queue
        VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_active_queue);
        PADO::collect_into_queue(
                tmp_active_queue,
                offsets_tmp_active_queue,
                sizes_tmp_active_queue,
                total_new,
                active_queue,
                end_active_queue);
    }
    {// Collect elements from tmp_buffer_send to buffer_send
        // buffer_send may already hold labels from earlier calls; append after them.
        EdgeID old_size_buffer_send = buffer_send.size();
        EdgeID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_buffer_send);
        try {
            buffer_send.resize(total_new + old_size_buffer_send);
        } catch (const std::bad_alloc &) {
            // Out of memory: report memory state for diagnosis, then abort.
            double memtotal = 0;
            double memfree = 0;
            PADO::Utils::system_memory(memtotal, memfree);
            printf("L%u_buffer_send: bad_alloc "
                    "iter: %u "
                    "host_id: %d "
                    "L.size(): %.2fGB "
                    "memtotal: %.2fGB "
                    "memfree: %.2fGB\n",
                    __LINE__,
                    iter,
                    host_id,
                    get_index_size() * 1.0 / (1 << 30),
                    memtotal / 1024,
                    memfree / 1024);
            exit(1);
        }
//        EdgeID zero_size = 0;
        PADO::collect_into_queue(
                tmp_buffer_send,
                offsets_tmp_buffer_send,
                sizes_tmp_buffer_send,
                total_new,
                buffer_send,
                old_size_buffer_send);
//                zero_size);
    }
}

// Function for distance query;
// traverse vertex v_id's labels;
// return false if shorter distance exists already, return true if the cand_root_id can be added into v_id's label.
// The check combines each existing label hub v (at distance dist from v_id) with
// dist_table[cand_root_id][v]; any combined path of length <= iter prunes the candidate.
template <VertexID BATCH_SIZE>
inline bool DistBVCPLL<BATCH_SIZE>::
distance_query(
        VertexID cand_root_id,
        VertexID v_id_local,
        VertexID roots_start,
//        const std::vector<IndexType> &L,
        const std::vector< std::vector<UnweightedDist> > &dist_table,
        UnweightedDist iter)
{
    VertexID cand_real_id = cand_root_id + roots_start;
    const IndexType &Lv = L[v_id_local];
    // Traverse v_id's all existing labels
//    VertexID b_i_bound = Lv.batches.size();
//    _mm_prefetch(&Lv.batches[0], _MM_HINT_T0);
    _mm_prefetch(&Lv.distances[0], _MM_HINT_T0);
    _mm_prefetch(&Lv.vertices[0], _MM_HINT_T0);
    //_mm_prefetch(&dist_table[cand_root_id][0], _MM_HINT_T0);
//    for (VertexID b_i = 0; b_i < b_i_bound; ++b_i) {
//        VertexID id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE;
//        VertexID dist_start_index = Lv.batches[b_i].start_index;
//        VertexID dist_bound_index = dist_start_index + Lv.batches[b_i].size;
//        // Traverse dist_table
//        for (VertexID dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
    VertexID dist_bound_index = Lv.distances.size();
    for (VertexID dist_i = 0; dist_i < dist_bound_index; ++dist_i) {
        UnweightedDist dist = Lv.distances[dist_i].dist;
        // Cannot use this, because no batch_id any more, so distances are not all in order among batches.
// if (dist >= iter) { // In a batch, the labels' distances are increasingly ordered. // // If the half path distance is already greater than their targeted distance, jump to next batch // break; // } VertexID v_start_index = Lv.distances[dist_i].start_index; VertexID v_bound_index = v_start_index + Lv.distances[dist_i].size; // _mm_prefetch(&dist_table[cand_root_id][0], _MM_HINT_T0); _mm_prefetch(reinterpret_cast<const char *>(dist_table[cand_root_id].data()), _MM_HINT_T0); for (VertexID v_i = v_start_index; v_i < v_bound_index; ++v_i) { // VertexID v = Lv.vertices[v_i] + id_offset; // v is a label hub of v_id VertexID v = Lv.vertices[v_i]; // v is a label hub of v_id if (v >= cand_real_id) { // Vertex cand_real_id cannot have labels whose ranks are lower than it, // in which case dist_table[cand_root_id][v] does not exist. continue; } VertexID d_tmp = dist + dist_table[cand_root_id][v]; if (d_tmp <= iter) { return false; } } } // } return true; } //// Sequential version // Function inserts candidate cand_root_id into vertex v_id's labels; // update the distance buffer dist_table; // but it only update the v_id's labels' vertices array; template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: insert_label_only_seq( VertexID cand_root_id, VertexID v_id_local, VertexID roots_start, VertexID roots_size, const DistGraph &G, // std::vector< std::vector<UnweightedDist> > &dist_table, std::vector< std::pair<VertexID, VertexID> > &buffer_send) // UnweightedDist iter) { try { VertexID cand_real_id = cand_root_id + roots_start; L[v_id_local].vertices.push_back(cand_real_id); // L[v_id_local].vertices.push_back(cand_root_id); // Update the distance buffer if v_id is a root VertexID v_id_global = G.get_global_vertex_id(v_id_local); VertexID v_root_id = v_id_global - roots_start; if (v_id_global >= roots_start && v_root_id < roots_size) { // VertexID cand_real_id = cand_root_id + roots_start; // dist_table[v_root_id][cand_real_id] = iter; // Put the update into the 
buffer_send for later sending buffer_send.emplace_back(v_root_id, cand_real_id); } } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("insert_label_only_seq: bad_alloc " "host_id: %d " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } } //// Parallel Version // Function inserts candidate cand_root_id into vertex v_id's labels; // update the distance buffer dist_table; // but it only update the v_id's labels' vertices array; template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: insert_label_only_para( VertexID cand_root_id, VertexID v_id_local, VertexID roots_start, VertexID roots_size, const DistGraph &G, // std::vector< std::pair<VertexID, VertexID> > &buffer_send) std::vector< std::pair<VertexID, VertexID> > &tmp_buffer_send, EdgeID &size_tmp_buffer_send, const EdgeID offset_tmp_buffer_send) { try { VertexID cand_real_id = cand_root_id + roots_start; L[v_id_local].vertices.push_back(cand_real_id); // L[v_id_local].vertices.push_back(cand_root_id); // Update the distance buffer if v_id is a root VertexID v_id_global = G.get_global_vertex_id(v_id_local); VertexID v_root_id = v_id_global - roots_start; if (v_id_global >= roots_start && v_root_id < roots_size) { // VertexID cand_real_id = cand_root_id + roots_start; // Put the update into the buffer_send for later sending tmp_buffer_send[offset_tmp_buffer_send + size_tmp_buffer_send++] = std::make_pair(v_root_id, cand_real_id); } } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("insert_label_only_para: bad_alloc " "host_id: %d " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } } // Function updates those index arrays in v_id's label only if v_id 
has been inserted new labels template <VertexID BATCH_SIZE> inline void DistBVCPLL<BATCH_SIZE>:: update_label_indices( const VertexID v_id_local, const VertexID inserted_count, // std::vector<IndexType> &L, // std::vector<ShortIndex> &short_index, // VertexID b_id, const UnweightedDist iter) { try { IndexType &Lv = L[v_id_local]; // // indicator[BATCH_SIZE + 1] is true, means v got some labels already in this batch // if (short_index[v_id_local].indicator[BATCH_SIZE]) { // // Increase the batches' last element's size because a new distance element need to be added // ++(Lv.batches.rbegin() -> size); // } else { // short_index[v_id_local].indicator[BATCH_SIZE] = 1; //// short_index[v_id_local].indicator.set(BATCH_SIZE); // // Insert a new Batch with batch_id, start_index, and size because a new distance element need to be added // Lv.batches.emplace_back( // b_id, // batch id // Lv.distances.size(), // start index // 1); // size // } // Insert a new distance element with start_index, size, and dist Lv.distances.emplace_back( Lv.vertices.size() - inserted_count, // start index inserted_count, // size iter); // distance } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("update_label_indices: bad_alloc " "host_id: %d " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } } // Function to reset dist_table the distance buffer to INF // Traverse every root's labels to reset its distance buffer elements to INF. // In this way to reduce the cost of initialization of the next batch. 

template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
reset_at_end(
        const DistGraph &G,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        const std::vector<VertexID> &once_candidated_queue,
        const VertexID end_once_candidated_queue)
{
    // Reset dist_table entries that were received from (any) hosts' master
    // labels; recved_dist_table records exactly which entries were written.
    for (VertexID r_root_id = 0; r_root_id < BATCH_SIZE; ++r_root_id) {
        for (VertexID cand_real_id : recved_dist_table[r_root_id]) {
            dist_table[r_root_id][cand_real_id] = MAX_UNWEIGHTED_DIST;
        }
        recved_dist_table[r_root_id].clear();
    }
    // Zero out the bit-parallel labels table for the next batch.
    for (VertexID r_root_id = 0; r_root_id < BATCH_SIZE; ++r_root_id) {
        memset(bp_labels_table[r_root_id].bp_dist, 0, sizeof(bp_labels_table[r_root_id].bp_dist));
        memset(bp_labels_table[r_root_id].bp_sets, 0, sizeof(bp_labels_table[r_root_id].bp_sets));
    }
    // Discard all label indices of vertices in the local minimum set.
    // NOTE(review): presumably these labels are not needed by the query
    // path — confirm against the query implementation.
    for (VertexID v_i = 0; v_i < end_once_candidated_queue; ++v_i) {
        VertexID v_local_id = once_candidated_queue[v_i];
        if (!G.is_local_minimum[v_local_id]) {
            continue;
        }
        L[v_local_id].clean_all_indices();
    }
}

// Process one batch of roots [roots_start, roots_start + roots_size).
// Alternates two phases until no host has active vertices left:
//   (1) scatter: active vertices push their newest labels to neighbors
//       (locally and to other hosts) as candidates;
//   (2) gather: candidates are verified via distance queries, survivors are
//       inserted as labels, and dist_table is synchronized across hosts.
template <VertexID BATCH_SIZE>
inline void DistBVCPLL<BATCH_SIZE>::
batch_process(
        const DistGraph &G,
        const VertexID roots_start,  // start id of roots
        const VertexID roots_size,   // how many roots in the batch
        const std::vector<uint8_t> &used_bp_roots,
        std::vector<VertexID> &active_queue,
        VertexID &end_active_queue,
        std::vector<VertexID> &got_candidates_queue,
        VertexID &end_got_candidates_queue,
        std::vector<ShortIndex> &short_index,
        std::vector< std::vector<UnweightedDist> > &dist_table,
        std::vector< std::vector<VertexID> > &recved_dist_table,
        std::vector<BPLabelType> &bp_labels_table,
        std::vector<uint8_t> &got_candidates,
        std::vector<uint8_t> &is_active,
        std::vector<VertexID> &once_candidated_queue,
        VertexID &end_once_candidated_queue,
        std::vector<uint8_t> &once_candidated)
{
    // Initialize the labels L and the distance buffer dist_table for this
    // batch. The returned value is the maximum number of active vertices
    // among all hosts.
VertexID global_num_actives = initialization(G, short_index, dist_table, recved_dist_table, bp_labels_table, active_queue, end_active_queue, once_candidated_queue, end_once_candidated_queue, once_candidated, // b_id, roots_start, roots_size, // roots_master_local, used_bp_roots); // initializing_time += WallTimer::get_time_mark(); UnweightedDist iter = 0; // The iterator, also the distance for current iteration // {//test // if (0 == host_id) { // printf("host_id: %u initialization finished.\n", host_id); // } // } while (global_num_actives) { ++iter; //#ifdef DEBUG_MESSAGES_ON // {//test //// if (0 == host_id) { // double memtotal = 0; // double memfree = 0; // PADO::Utils::system_memory(memtotal, memfree); // printf("iter: %u " // "host_id: %d " // "global_num_actives: %u " // "L.size(): %.2fGB " // "memtotal: %.2fGB " // "memfree: %.2fGB\n", // iter, // host_id, // global_num_actives, // get_index_size() * 1.0 / (1 << 30), // memtotal / 1024, // memfree / 1024); //// } // } //#endif // Traverse active vertices to push their labels as candidates // Send masters' newly added labels to other hosts try { // scatter_time -= WallTimer::get_time_mark(); // Divide the pushing into many-time runs. 
const VertexID chunk_size = 1 << 12; VertexID remainder = global_num_actives % chunk_size; VertexID bound_global_i = global_num_actives - remainder; // VertexID remainder = end_active_queue % chunk_size; // VertexID bound_active_queue = end_active_queue - remainder; VertexID local_size; for (VertexID global_i = 0; global_i < bound_global_i; global_i += chunk_size) { if (global_i < end_active_queue) { local_size = end_active_queue - global_i; } else { local_size = 0; } schedule_label_pushing_para( G, roots_start, used_bp_roots, active_queue, global_i, chunk_size, local_size, got_candidates_queue, end_got_candidates_queue, short_index, bp_labels_table, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated, iter); } if (remainder) { if (bound_global_i < end_active_queue) { local_size = end_active_queue - bound_global_i; } else { local_size = 0; } schedule_label_pushing_para( G, roots_start, used_bp_roots, active_queue, bound_global_i, remainder, local_size, got_candidates_queue, end_got_candidates_queue, short_index, bp_labels_table, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated, iter); } // // schedule_label_pushing_para( // G, // roots_start, // used_bp_roots, // active_queue, // 0, // end_active_queue, // got_candidates_queue, // end_got_candidates_queue, // short_index, // bp_labels_table, // got_candidates, // is_active, // once_candidated_queue, // end_once_candidated_queue, // once_candidated, // iter); end_active_queue = 0; // scatter_time += WallTimer::get_time_mark(); } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("pushing: bad_alloc " "iter: %u " "host_id: %d " "global_num_actives: %u " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", iter, host_id, global_num_actives, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } // Traverse vertices in the 
got_candidates_queue to insert labels { // gather_time -= WallTimer::get_time_mark(); std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table // pair.first: root id // pair.second: label (global) id of the root // if (true) { if (end_got_candidates_queue >= THRESHOLD_PARALLEL) { const VertexID chunk_size = 1 << 12; VertexID remainder = end_got_candidates_queue % chunk_size; VertexID bound_i_q = end_got_candidates_queue - remainder; for (VertexID i_q = 0; i_q < bound_i_q; i_q += chunk_size) { schedule_label_inserting_para( G, roots_start, roots_size, short_index, dist_table, got_candidates_queue, i_q, chunk_size, got_candidates, active_queue, end_active_queue, is_active, buffer_send, iter); } if (remainder) { schedule_label_inserting_para( G, roots_start, roots_size, short_index, dist_table, got_candidates_queue, bound_i_q, remainder, got_candidates, active_queue, end_active_queue, is_active, buffer_send, iter); } ////// Backup // // Prepare for parallel active_queue // // Don't need offsets_tmp_active_queue here, because the index i_queue is the offset already. // // Actually we still need offsets_tmp_active_queue, because collect_into_queue() needs it. // std::vector<VertexID> offsets_tmp_active_queue; // std::vector<VertexID> tmp_active_queue; // std::vector<VertexID> sizes_tmp_active_queue; // std::vector<EdgeID> offsets_tmp_buffer_send; // std::vector< std::pair<VertexID, VertexID> > tmp_buffer_send; // std::vector<EdgeID> sizes_tmp_buffer_send; // EdgeID total_send_labels; // // try { // offsets_tmp_active_queue.resize(end_got_candidates_queue); //#pragma omp parallel for // for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) { // offsets_tmp_active_queue[i_q] = i_q; // } // tmp_active_queue.resize(end_got_candidates_queue); // sizes_tmp_active_queue.resize(end_got_candidates_queue, // 0); // Size will only be 0 or 1, but it will become offsets eventually. 
// // // Prepare for parallel buffer_send //// std::vector<EdgeID> offsets_tmp_buffer_send(end_got_candidates_queue); // offsets_tmp_buffer_send.resize(end_got_candidates_queue); //#pragma omp parallel for // for (VertexID i_q = 0; i_q < end_got_candidates_queue; ++i_q) { // VertexID v_id_local = got_candidates_queue[i_q]; // VertexID v_global_id = G.get_global_vertex_id(v_id_local); // if (v_global_id >= roots_start && v_global_id < roots_start + roots_size) { // // If v_global_id is root, its new labels should be put into buffer_send // offsets_tmp_buffer_send[i_q] = short_index[v_id_local].end_candidates_que; // } else { // offsets_tmp_buffer_send[i_q] = 0; // } // } // total_send_labels = PADO::prefix_sum_for_offsets(offsets_tmp_buffer_send); // tmp_buffer_send.resize(total_send_labels); // sizes_tmp_buffer_send.resize(end_got_candidates_queue, 0); // } // catch (const std::bad_alloc &) { // double memtotal = 0; // double memfree = 0; // PADO::Utils::system_memory(memtotal, memfree); // printf("L%u_tmp_buffer_send: bad_alloc " // "host_id: %d " // "iter: %u " // "end_got_candidates_queue: %u " // "total_send_labels: %u " // "L.size(): %.2fGB " // "memtotal: %.2fGB " // "memfree: %.2fGB\n", // __LINE__, // host_id, // iter, // end_got_candidates_queue, // total_send_labels, // get_index_size() * 1.0 / (1 << 30), // memtotal / 1024, // memfree / 1024); // exit(1); // } // //#pragma omp parallel for // for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) { // VertexID v_id_local = got_candidates_queue[i_queue]; // VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates // got_candidates[v_id_local] = 0; // reset got_candidates // // Traverse v_id's all candidates // VertexID bound_cand_i = short_index[v_id_local].end_candidates_que; // for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) { // VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i]; // 
short_index[v_id_local].is_candidate[cand_root_id] = 0; // // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance // if (distance_query( // cand_root_id, // v_id_local, // roots_start, // // L, // dist_table, // iter)) { // if (!is_active[v_id_local]) { // is_active[v_id_local] = 1; //// active_queue[end_active_queue++] = v_id_local; // tmp_active_queue[i_queue + sizes_tmp_active_queue[i_queue]++] = v_id_local; // } // ++inserted_count; // // The candidate cand_root_id needs to be added into v_id's label // insert_label_only_para( // cand_root_id, // v_id_local, // roots_start, // roots_size, // G, // tmp_buffer_send, // sizes_tmp_buffer_send[i_queue], // offsets_tmp_buffer_send[i_queue]); //// buffer_send); // } // } // short_index[v_id_local].end_candidates_que = 0; // if (0 != inserted_count) { // // Update other arrays in L[v_id] if new labels were inserted in this iteration // update_label_indices( // v_id_local, // inserted_count, // // L, //// short_index, //// b_id, // iter); // } // } // // {// Collect elements from tmp_active_queue to active_queue // VertexID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_active_queue); // PADO::collect_into_queue( // tmp_active_queue, // offsets_tmp_active_queue, // sizes_tmp_active_queue, // total_new, // active_queue, // end_active_queue); // } // {// Collect elements from tmp_buffer_send to buffer_send // EdgeID total_new = PADO::prefix_sum_for_offsets(sizes_tmp_buffer_send); // try { // buffer_send.resize(total_new); // } // catch (const std::bad_alloc &) { // double memtotal = 0; // double memfree = 0; // PADO::Utils::system_memory(memtotal, memfree); // printf("L%u_buffer_send: bad_alloc " // "iter: %u " // "host_id: %d " // "L.size(): %.2fGB " // "memtotal: %.2fGB " // "memfree: %.2fGB\n", // __LINE__, // iter, // host_id, // get_index_size() * 1.0 / (1 << 30), // memtotal / 1024, // memfree / 1024); // exit(1); // } // EdgeID zero_size = 0; // 
PADO::collect_into_queue( // tmp_buffer_send, // offsets_tmp_buffer_send, // sizes_tmp_buffer_send, // total_new, // buffer_send, // zero_size); // } } else { for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) { VertexID v_id_local = got_candidates_queue[i_queue]; VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates got_candidates[v_id_local] = 0; // reset got_candidates // Traverse v_id's all candidates VertexID bound_cand_i = short_index[v_id_local].end_candidates_que; for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) { VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i]; short_index[v_id_local].is_candidate[cand_root_id] = 0; // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance if (distance_query( cand_root_id, v_id_local, roots_start, // L, dist_table, iter)) { if (!is_active[v_id_local]) { is_active[v_id_local] = 1; active_queue[end_active_queue++] = v_id_local; } ++inserted_count; // The candidate cand_root_id needs to be added into v_id's label insert_label_only_seq( cand_root_id, v_id_local, roots_start, roots_size, G, // dist_table, buffer_send); // iter); } } short_index[v_id_local].end_candidates_que = 0; if (0 != inserted_count) { // Update other arrays in L[v_id] if new labels were inserted in this iteration update_label_indices( v_id_local, inserted_count, // L, // short_index, // b_id, iter); } } } // {//test // printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send)); // } end_got_candidates_queue = 0; // Set the got_candidates_queue empty // Sync the dist_table for (int root = 0; root < num_hosts; ++root) { std::vector<std::pair<VertexID, VertexID>> buffer_recv; one_host_bcasts_buffer_to_buffer(root, buffer_send, buffer_recv); if (buffer_recv.empty()) { continue; } EdgeID size_buffer_recv = buffer_recv.size(); try { if 
(size_buffer_recv >= THRESHOLD_PARALLEL) { // Get label number for every root std::vector<VertexID> sizes_recved_root_labels(roots_size, 0); #pragma omp parallel for for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) { const std::pair<VertexID, VertexID> &e = buffer_recv[i_l]; VertexID root_id = e.first; __atomic_add_fetch(sizes_recved_root_labels.data() + root_id, 1, __ATOMIC_SEQ_CST); } // Resize the recved_dist_table for every root #pragma omp parallel for for (VertexID root_id = 0; root_id < roots_size; ++root_id) { VertexID old_size = recved_dist_table[root_id].size(); VertexID tmp_size = sizes_recved_root_labels[root_id]; if (tmp_size) { recved_dist_table[root_id].resize(old_size + tmp_size); sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size } // If tmp_size == 0, root_id has no received labels. // sizes_recved_root_labels[root_id] = old_size; // sizes_recved_root_labels now records old_size } // Recorde received labels in recved_dist_table #pragma omp parallel for for (EdgeID i_l = 0; i_l < size_buffer_recv; ++i_l) { const std::pair<VertexID, VertexID> &e = buffer_recv[i_l]; VertexID root_id = e.first; VertexID cand_real_id = e.second; dist_table[root_id][cand_real_id] = iter; PADO::TS_enqueue(recved_dist_table[root_id], sizes_recved_root_labels[root_id], cand_real_id); } } else { for (const std::pair<VertexID, VertexID> &e : buffer_recv) { VertexID root_id = e.first; VertexID cand_real_id = e.second; dist_table[root_id][cand_real_id] = iter; // Record the received element, for future reset recved_dist_table[root_id].push_back(cand_real_id); } } } catch (const std::bad_alloc &) { double memtotal = 0; double memfree = 0; PADO::Utils::system_memory(memtotal, memfree); printf("recved_dist_table: bad_alloc " "host_id: %d " "iter: %u " "L.size(): %.2fGB " "memtotal: %.2fGB " "memfree: %.2fGB\n", host_id, iter, get_index_size() * 1.0 / (1 << 30), memtotal / 1024, memfree / 1024); exit(1); } } // Sync the 
global_num_actives MPI_Allreduce(&end_active_queue, &global_num_actives, 1, V_ID_Type, MPI_MAX, // MPI_SUM, MPI_COMM_WORLD); // gather_time += WallTimer::get_time_mark(); } // {//test // if (0 == host_id) { // printf("iter: %u inserting labels finished.\n", iter); // } // } } // Reset the dist_table // clearup_time -= WallTimer::get_time_mark(); reset_at_end( G, // roots_start, // roots_master_local, dist_table, recved_dist_table, bp_labels_table, once_candidated_queue, end_once_candidated_queue); // clearup_time += WallTimer::get_time_mark(); // {//test // if (0 == host_id) { // printf("host_id: %u resetting finished.\n", host_id); // } // } } //// Sequential Version //template <VertexID BATCH_SIZE> //inline void DistBVCPLL<BATCH_SIZE>:: //batch_process( // const DistGraph &G, // VertexID b_id, // VertexID roots_start, // start id of roots // VertexID roots_size, // how many roots in the batch // const std::vector<uint8_t> &used_bp_roots, // std::vector<VertexID> &active_queue, // VertexID &end_active_queue, // std::vector<VertexID> &got_candidates_queue, // VertexID &end_got_candidates_queue, // std::vector<ShortIndex> &short_index, // std::vector< std::vector<UnweightedDist> > &dist_table, // std::vector< std::vector<VertexID> > &recved_dist_table, // std::vector<BPLabelType> &bp_labels_table, // std::vector<uint8_t> &got_candidates, //// std::vector<bool> &got_candidates, // std::vector<uint8_t> &is_active, //// std::vector<bool> &is_active, // std::vector<VertexID> &once_candidated_queue, // VertexID &end_once_candidated_queue, // std::vector<uint8_t> &once_candidated) //// std::vector<bool> &once_candidated) //{ // // At the beginning of a batch, initialize the labels L and distance buffer dist_table; // initializing_time -= WallTimer::get_time_mark(); // VertexID global_num_actives = initialization(G, // short_index, // dist_table, // recved_dist_table, // bp_labels_table, // active_queue, // end_active_queue, // once_candidated_queue, // 
end_once_candidated_queue, // once_candidated, // b_id, // roots_start, // roots_size, //// roots_master_local, // used_bp_roots); // initializing_time += WallTimer::get_time_mark(); // UnweightedDist iter = 0; // The iterator, also the distance for current iteration //// {//test //// printf("host_id: %u initialization finished.\n", host_id); //// } // // // while (global_num_actives) { ////#ifdef DEBUG_MESSAGES_ON //// {// //// if (0 == host_id) { //// printf("iter: %u global_num_actives: %u\n", iter, global_num_actives); //// } //// } ////#endif // ++iter; // // Traverse active vertices to push their labels as candidates // // Send masters' newly added labels to other hosts // { // scatter_time -= WallTimer::get_time_mark(); // std::vector<std::pair<VertexID, VertexID> > buffer_send_indices(end_active_queue); // //.first: Vertex ID // //.second: size of labels // std::vector<VertexID> buffer_send_labels; // // Prepare masters' newly added labels for sending // for (VertexID i_q = 0; i_q < end_active_queue; ++i_q) { // VertexID v_head_local = active_queue[i_q]; // is_active[v_head_local] = 0; // reset is_active // VertexID v_head_global = G.get_global_vertex_id(v_head_local); // const IndexType &Lv = L[v_head_local]; // // Prepare the buffer_send_indices // buffer_send_indices[i_q] = std::make_pair(v_head_global, Lv.distances.rbegin()->size); // // These 2 index are used for traversing v_head's last inserted labels // VertexID l_i_start = Lv.distances.rbegin()->start_index; // VertexID l_i_bound = l_i_start + Lv.distances.rbegin()->size; // for (VertexID l_i = l_i_start; l_i < l_i_bound; ++l_i) { // VertexID label_root_id = Lv.vertices[l_i]; // buffer_send_labels.push_back(label_root_id); // } // } // end_active_queue = 0; // // for (int root = 0; root < num_hosts; ++root) { // // Get the indices // std::vector< std::pair<VertexID, VertexID> > indices_buffer; // one_host_bcasts_buffer_to_buffer(root, // buffer_send_indices, // indices_buffer); // if 
(indices_buffer.empty()) { // continue; // } // // Get the labels // std::vector<VertexID> labels_buffer; // one_host_bcasts_buffer_to_buffer(root, // buffer_send_labels, // labels_buffer); // // Push those labels // EdgeID start_index = 0; // for (const std::pair<VertexID, VertexID> e : indices_buffer) { // VertexID v_head_global = e.first; // EdgeID bound_index = start_index + e.second; // if (G.local_out_degrees[v_head_global]) { // local_push_labels( // v_head_global, // start_index, // bound_index, // roots_start, // labels_buffer, // G, // short_index, // got_candidates_queue, // end_got_candidates_queue, // got_candidates, // once_candidated_queue, // end_once_candidated_queue, // once_candidated, // bp_labels_table, // used_bp_roots, // iter); // } // start_index = bound_index; // } // } // scatter_time += WallTimer::get_time_mark(); // } // // // Traverse vertices in the got_candidates_queue to insert labels // { // gather_time -= WallTimer::get_time_mark(); // std::vector< std::pair<VertexID, VertexID> > buffer_send; // For sync elements in the dist_table // // pair.first: root id // // pair.second: label (global) id of the root // for (VertexID i_queue = 0; i_queue < end_got_candidates_queue; ++i_queue) { // VertexID v_id_local = got_candidates_queue[i_queue]; // VertexID inserted_count = 0; //recording number of v_id's truly inserted candidates // got_candidates[v_id_local] = 0; // reset got_candidates // // Traverse v_id's all candidates // VertexID bound_cand_i = short_index[v_id_local].end_candidates_que; // for (VertexID cand_i = 0; cand_i < bound_cand_i; ++cand_i) { // VertexID cand_root_id = short_index[v_id_local].candidates_que[cand_i]; // short_index[v_id_local].is_candidate[cand_root_id] = 0; // // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance // if ( distance_query( // cand_root_id, // v_id_local, // roots_start, // // L, // dist_table, // iter) ) { // if (!is_active[v_id_local]) { // 
is_active[v_id_local] = 1; // active_queue[end_active_queue++] = v_id_local; // } // ++inserted_count; // // The candidate cand_root_id needs to be added into v_id's label // insert_label_only( // cand_root_id, // v_id_local, // roots_start, // roots_size, // G, //// dist_table, // buffer_send); //// iter); // } // } // short_index[v_id_local].end_candidates_que = 0; // if (0 != inserted_count) { // // Update other arrays in L[v_id] if new labels were inserted in this iteration // update_label_indices( // v_id_local, // inserted_count, // // L, // short_index, // b_id, // iter); // } // } //// {//test //// printf("host_id: %u gather: buffer_send.size(); %lu bytes: %lu\n", host_id, buffer_send.size(), MPI_Instance::get_sending_size(buffer_send)); //// } // end_got_candidates_queue = 0; // Set the got_candidates_queue empty // // Sync the dist_table // for (int root = 0; root < num_hosts; ++root) { // std::vector<std::pair<VertexID, VertexID>> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } // for (const std::pair<VertexID, VertexID> &e : buffer_recv) { // VertexID root_id = e.first; // VertexID cand_real_id = e.second; // dist_table[root_id][cand_real_id] = iter; // // Record the received element, for future reset // recved_dist_table[root_id].push_back(cand_real_id); // } // } // // // Sync the global_num_actives // MPI_Allreduce(&end_active_queue, // &global_num_actives, // 1, // V_ID_Type, // MPI_SUM, // MPI_COMM_WORLD); // gather_time += WallTimer::get_time_mark(); // } // } // // // Reset the dist_table // clearup_time -= WallTimer::get_time_mark(); // reset_at_end( //// G, //// roots_start, //// roots_master_local, // dist_table, // recved_dist_table, // bp_labels_table); // clearup_time += WallTimer::get_time_mark(); //} //// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. 
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // // Every host h_i broadcast to others // for (int root = 0; root < num_hosts; ++root) { // std::vector<E_T> buffer_recv; // one_host_bcasts_buffer_to_buffer(root, // buffer_send, // buffer_recv); // if (buffer_recv.empty()) { // continue; // } //// uint64_t size_buffer_send = buffer_send.size(); //// // Sync the size_buffer_send. //// message_time -= WallTimer::get_time_mark(); //// MPI_Bcast(&size_buffer_send, //// 1, //// MPI_UINT64_T, //// root, //// MPI_COMM_WORLD); //// message_time += WallTimer::get_time_mark(); ////// {// test ////// printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send); ////// } //// if (!size_buffer_send) { //// continue; //// } //// message_time -= WallTimer::get_time_mark(); //// std::vector<E_T> buffer_recv(size_buffer_send); //// if (host_id == root) { //// buffer_recv.assign(buffer_send.begin(), buffer_send.end()); //// } //// uint64_t bytes_buffer_send = size_buffer_send * ETypeSize; //// if (bytes_buffer_send < static_cast<size_t>(INT_MAX)) { //// // Only need 1 broadcast //// //// MPI_Bcast(buffer_recv.data(), //// bytes_buffer_send, //// MPI_CHAR, //// root, //// MPI_COMM_WORLD); //// } else { //// const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1; //// const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1; //// size_t offset = 0; //// for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) { ////// size_t offset = b_i * unit_buffer_size; //// size_t size_unit_buffer = b_i == num_unit_buffers - 1 //// ? 
size_buffer_send - offset //// : unit_buffer_size; //// MPI_Bcast(buffer_recv.data() + offset, //// size_unit_buffer * ETypeSize, //// MPI_CHAR, //// root, //// MPI_COMM_WORLD); //// offset += unit_buffer_size; //// } //// } //// message_time += WallTimer::get_time_mark(); // for (const E_T &e : buffer_recv) { // fun(e); // } // } //} //// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. //template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // // Host processes locally. // for (const E_T &e : buffer_send) { // fun(e); // } // // // Every host sends to others // for (int src = 0; src < num_hosts; ++src) { // if (host_id == src) { // // Send from src // message_time -= WallTimer::get_time_mark(); // for (int hop = 1; hop < num_hosts; ++hop) { // int dst = hop_2_root_host_id(hop, host_id); // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // } // message_time += WallTimer::get_time_mark(); // } else { // // Receive from src // for (int hop = 1; hop < num_hosts; ++hop) { // int dst = hop_2_root_host_id(hop, src); // if (host_id == dst) { // message_time -= WallTimer::get_time_mark(); // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // for (const E_T &e : buffer_recv) { // fun(e); // } // } // } // } // } //} //// Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. 
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // // Host processes locally. // for (const E_T &e : buffer_send) { // fun(e); // } // // Every host sends (num_hosts - 1) times // for (int hop = 1; hop < num_hosts; ++hop) { // int src = hop_2_me_host_id(-hop); // int dst = hop_2_me_host_id(hop); // if (src != dst) { // Normal case // // When host_id is odd, first receive, then send. // if (static_cast<uint32_t>(host_id) & 1U) { // message_time -= WallTimer::get_time_mark(); // // Receive first. // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u recved_from: %u\n", host_id, src); // } // // Send then. // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u send_to: %u\n", host_id, dst); // } // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } else { // When host_id is even, first send, then receive. // // Send first. // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u send_to: %u\n", host_id, dst); // } // // Receive then. 
// std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // {//test // printf("host_id: %u recved_from: %u\n", host_id, src); // } // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } // } else { // If host_id is higher than dst, first send, then receive // // This is a special case. It only happens when the num_hosts is even and hop equals to num_hosts/2. // if (host_id < dst) { // // Send // message_time -= WallTimer::get_time_mark(); // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // // Receive // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } else { // Otherwise, if host_id is lower than dst, first receive, then send // // Receive // message_time -= WallTimer::get_time_mark(); // std::vector<E_T> buffer_recv; // MPI_Instance::recv_buffer_from_src(buffer_recv, // src, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // // Send // MPI_Instance::send_buffer_2_dst(buffer_send, // dst, // SENDING_BUFFER_SEND, // SENDING_SIZE_BUFFER_SEND); // message_time += WallTimer::get_time_mark(); // // Process // if (buffer_recv.empty()) { // continue; // } // for (const E_T &e : buffer_recv) { // fun(e); // } // } // } // } //} //// DEPRECATED version Function: every host broadcasts its sending buffer, and does fun for every element it received in the unit buffer. 
//template <VertexID BATCH_SIZE, VertexID BITPARALLEL_SIZE> //template <typename E_T, typename F> //inline void DistBVCPLL<BATCH_SIZE, BITPARALLEL_SIZE>:: //every_host_bcasts_buffer_and_proc( // std::vector<E_T> &buffer_send, // F &fun) //{ // const uint32_t UNIT_BUFFER_SIZE = 16U << 20U; // // Every host h_i broadcast to others // for (int h_i = 0; h_i < num_hosts; ++h_i) { // uint64_t size_buffer_send = buffer_send.size(); // // Sync the size_buffer_send. // message_time -= WallTimer::get_time_mark(); // MPI_Bcast(&size_buffer_send, // 1, // MPI_UINT64_T, // h_i, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); //// {// test //// printf("host_id: %u h_i: %u bcast_buffer_send.size(): %lu\n", host_id, h_i, size_buffer_send); //// } // if (!size_buffer_send) { // continue; // } // uint32_t num_unit_buffers = (size_buffer_send + UNIT_BUFFER_SIZE - 1) / UNIT_BUFFER_SIZE; // // // Broadcast the buffer_send // for (uint32_t b_i = 0; b_i < num_unit_buffers; ++b_i) { // // Prepare the unit buffer // message_time -= WallTimer::get_time_mark(); // size_t offset = b_i * UNIT_BUFFER_SIZE; // size_t size_unit_buffer = b_i == num_unit_buffers - 1 // ? size_buffer_send - offset // : UNIT_BUFFER_SIZE; // std::vector<E_T> unit_buffer(size_unit_buffer); // // Copy the messages from buffer_send to unit buffer. // if (host_id == h_i) { // unit_buffer.assign(buffer_send.begin() + offset, buffer_send.begin() + offset + size_unit_buffer); // } // // Broadcast the unit buffer // MPI_Bcast(unit_buffer.data(), // MPI_Instance::get_sending_size(unit_buffer), // MPI_CHAR, // h_i, // MPI_COMM_WORLD); // message_time += WallTimer::get_time_mark(); // // Process every element of unit_buffer // for (const E_T &e : unit_buffer) { // fun(e); // } // } // } //} // Function: Host root broadcasts its sending buffer to a receiving buffer. 
// Function: host `root` broadcasts its `buffer_send` into every host's `buffer_recv`.
//
// Protocol (two phases, all hosts must call this collectively):
//   1. Broadcast the element count so every host can size its receive buffer.
//   2. Broadcast the payload itself, split into <= INT_MAX-byte chunks because
//      MPI_Bcast takes its count as a C `int`.
//
// Parameters:
//   root        - rank of the broadcasting host (same value on every host).
//   buffer_send - payload on the root; ignored as input on non-root hosts.
//                 NOTE: on the root this buffer is *swapped* into buffer_recv,
//                 so after the call the root's buffer_send holds the old
//                 (resized) buffer_recv storage — callers must not rely on
//                 buffer_send surviving intact on the root.
//   buffer_recv - output; on return holds the root's payload on every host
//                 (resized to the broadcast element count, possibly 0).
//
// On allocation failure the process prints a memory report and exits(1) —
// there is no recovery path, since a host that cannot allocate would
// deadlock the collective otherwise.
template <VertexID BATCH_SIZE>
template <typename E_T>
inline void DistBVCPLL<BATCH_SIZE>::
one_host_bcasts_buffer_to_buffer(
        int root,
        std::vector<E_T> &buffer_send,
        std::vector<E_T> &buffer_recv)
{
    const size_t ETypeSize = sizeof(E_T);
    uint64_t size_buffer_send = buffer_send.size();
    // Phase 1: sync the element count from root to all hosts.
    // (Non-root hosts' local size is overwritten by the broadcast.)
//    message_time -= WallTimer::get_time_mark();
    MPI_Bcast(&size_buffer_send,
            1,
            MPI_UINT64_T,
            root,
            MPI_COMM_WORLD);
//    message_time += WallTimer::get_time_mark();
    try {
        // Size the receive buffer before the payload broadcast; this is the
        // allocation that can fail for very large label sets.
        buffer_recv.resize(size_buffer_send);
    } catch (const std::bad_alloc &) {
        // Report index size and system memory to aid post-mortem, then die:
        // a host that skipped the collective would hang the other hosts.
        double memtotal = 0;
        double memfree = 0;
        PADO::Utils::system_memory(memtotal, memfree);
        printf("one_host_bcasts_buffer_to_buffer: bad_alloc "
               "host_id: %d "
               "L.size(): %.2fGB "
               "memtotal: %.2fGB "
               "memfree: %.2fGB\n",
                host_id,
                get_index_size() * 1.0 / (1 << 30),
                memtotal / 1024,
                memfree / 1024);
        exit(1);
    }
    if (!size_buffer_send) {
        // Nothing to broadcast; buffer_recv is already empty from resize(0).
        return;
    }
    // Phase 2: broadcast the payload.
//    message_time -= WallTimer::get_time_mark();
    if (host_id == root) {
//        buffer_recv.assign(buffer_send.begin(), buffer_send.end());
        // Move (not copy) the payload into the broadcast source buffer.
        // Side effect: the root's buffer_send now holds the old buffer_recv
        // storage (resized above) — see the function header note.
        buffer_recv.swap(buffer_send);
    }
    uint64_t bytes_buffer_send = size_buffer_send * ETypeSize;
    if (bytes_buffer_send <= static_cast<size_t>(INT_MAX)) {
        // Whole payload fits in one MPI_Bcast (count parameter is an int).
        MPI_Bcast(buffer_recv.data(),
                bytes_buffer_send,
                MPI_CHAR,
                root,
                MPI_COMM_WORLD);
    } else {
        // Payload exceeds INT_MAX bytes: split into near-equal chunks of
        // whole elements. num_unit_buffers = ceil(bytes / INT_MAX) and
        // unit_buffer_size = ceil(elements / num_unit_buffers), so every
        // chunk's byte count stays within the int-valued MPI count.
        const uint32_t num_unit_buffers = ((bytes_buffer_send - 1) / static_cast<size_t>(INT_MAX)) + 1;
        const uint64_t unit_buffer_size = ((size_buffer_send - 1) / num_unit_buffers) + 1;
        size_t offset = 0;
        for (uint64_t b_i = 0; b_i < num_unit_buffers; ++b_i) {
            // Last chunk takes whatever remains (may be smaller than the
            // regular unit size).
            size_t size_unit_buffer = b_i == num_unit_buffers - 1
                                      ? size_buffer_send - offset
                                      : unit_buffer_size;
            MPI_Bcast(buffer_recv.data() + offset,
                    size_unit_buffer * ETypeSize,
                    MPI_CHAR,
                    root,
                    MPI_COMM_WORLD);
            offset += unit_buffer_size;
        }
    }
//    message_time += WallTimer::get_time_mark();
}

} // NOTE(review): closes the scope (presumably namespace PADO) opened before this chunk — confirm against the full file.
#endif //PADO_DPADO_H
convolutiondepthwise_3x3_pack4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw3x3s1_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { #if __aarch64__ const int w = bottom_blob.w; #endif const int outw = top_blob.w; const int outh = top_blob.h; const int group = bottom_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); float32x4_t _bias0 = bias ? 
vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f); const unsigned short* k0 = kernel.row<const unsigned short>(g); unsigned short* outptr0 = out.row<unsigned short>(0); const Mat img0 = bottom_blob.channel(g); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); float32x4_t _k00 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10 = vcvt_f32_bf16(vld1_u16(k0 + 12)); float32x4_t _k11 = vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22 = vcvt_f32_bf16(vld1_u16(k0 + 32)); int i = 0; #if __aarch64__ unsigned short* outptr1 = out.row<unsigned short>(1); const unsigned short* r3 = img0.row<const unsigned short>(3); for (; i + 1 < outh; i += 2) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%3, #256] \n" "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3], #32 \n" // r10 r11 r12 r13 "mov v16.16b, %21.16b \n" // sum00 "mov v17.16b, %21.16b \n" // sum01 "prfm pldl1keep, [%3, #128] \n" "ld1 {v28.4h, v29.4h}, [%3] \n" // r14 r15 "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "mov v18.16b, %21.16b \n" // sum02 "mov v19.16b, %21.16b \n" // sum03 "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "mov v20.16b, %21.16b \n" // sum10 "fmla v16.4s, %15.4s, v10.4s \n" "fmla v17.4s, %15.4s, v11.4s \n" "mov v21.16b, %21.16b \n" // sum11 "fmla v18.4s, %15.4s, v12.4s \n" "fmla v19.4s, %15.4s, v13.4s \n" "mov v22.16b, %21.16b \n" // sum12 "fmla v20.4s, %12.4s, v10.4s \n" "fmla v21.4s, %12.4s, v11.4s \n" "mov v23.16b, %21.16b \n" // sum13 "fmla v22.4s, %12.4s, v12.4s \n" "fmla v23.4s, %12.4s, v13.4s \n" "shll v28.4s, v28.4h, #16 \n" "fmla v16.4s, %16.4s, 
v11.4s \n" "fmla v17.4s, %16.4s, v12.4s \n" "shll v29.4s, v29.4h, #16 \n" "fmla v18.4s, %16.4s, v13.4s \n" "fmla v19.4s, %16.4s, v28.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%4], #32 \n" // r20 r21 r22 r23 "fmla v20.4s, %13.4s, v11.4s \n" "fmla v21.4s, %13.4s, v12.4s \n" "fmla v22.4s, %13.4s, v13.4s \n" "fmla v23.4s, %13.4s, v28.4s \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v14.4h, v15.4h}, [%4] \n" // r24 r25 "fmla v16.4s, %17.4s, v12.4s \n" "fmla v17.4s, %17.4s, v13.4s \n" "shll v24.4s, v24.4h, #16 \n" "fmla v18.4s, %17.4s, v28.4s \n" "fmla v19.4s, %17.4s, v29.4s \n" "shll v25.4s, v25.4h, #16 \n" "fmla v20.4s, %14.4s, v12.4s \n" "fmla v21.4s, %14.4s, v13.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%2], #32 \n" // r00 r01 r02 r03 "fmla v22.4s, %14.4s, v28.4s \n" "fmla v23.4s, %14.4s, v29.4s \n" "shll v26.4s, v26.4h, #16 \n" "fmla v16.4s, %18.4s, v24.4s \n" "fmla v17.4s, %18.4s, v25.4s \n" "shll v27.4s, v27.4h, #16 \n" "fmla v18.4s, %18.4s, v26.4s \n" "fmla v19.4s, %18.4s, v27.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%5], #32 \n" // r30 r31 r32 r33 "fmla v20.4s, %15.4s, v24.4s \n" "fmla v21.4s, %15.4s, v25.4s \n" "shll v14.4s, v14.4h, #16 \n" "fmla v22.4s, %15.4s, v26.4s \n" "fmla v23.4s, %15.4s, v27.4s \n" "shll v15.4s, v15.4h, #16 \n" "fmla v16.4s, %19.4s, v25.4s \n" "fmla v17.4s, %19.4s, v26.4s \n" "fmla v18.4s, %19.4s, v27.4s \n" "fmla v19.4s, %19.4s, v14.4s \n" "fmla v20.4s, %16.4s, v25.4s \n" "fmla v21.4s, %16.4s, v26.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v24.4h, v25.4h}, [%2] \n" // r04 r05 "fmla v22.4s, %16.4s, v27.4s \n" "fmla v23.4s, %16.4s, v14.4s \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, %20.4s, v26.4s \n" "fmla v17.4s, %20.4s, v27.4s \n" "shll v12.4s, v12.4h, #16 \n" "fmla v18.4s, %20.4s, v14.4s \n" "fmla v19.4s, %20.4s, v15.4s \n" "shll v13.4s, v13.4h, #16 \n" "fmla v20.4s, %17.4s, v26.4s \n" "fmla 
v21.4s, %17.4s, v27.4s \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v26.4h, v27.4h}, [%5] \n" // r34 r35 "fmla v22.4s, %17.4s, v14.4s \n" "fmla v23.4s, %17.4s, v15.4s \n" "shll v28.4s, v28.4h, #16 \n" "fmla v16.4s, %12.4s, v10.4s \n" "fmla v17.4s, %12.4s, v11.4s \n" "shll v29.4s, v29.4h, #16 \n" "fmla v18.4s, %12.4s, v12.4s \n" "fmla v19.4s, %12.4s, v13.4s \n" "shll v30.4s, v30.4h, #16 \n" "fmla v20.4s, %18.4s, v28.4s \n" "fmla v21.4s, %18.4s, v29.4s \n" "shll v31.4s, v31.4h, #16 \n" "fmla v22.4s, %18.4s, v30.4s \n" "fmla v23.4s, %18.4s, v31.4s \n" "shll v24.4s, v24.4h, #16 \n" "fmla v16.4s, %13.4s, v11.4s \n" "fmla v17.4s, %13.4s, v12.4s \n" "fmla v18.4s, %13.4s, v13.4s \n" "fmla v19.4s, %13.4s, v24.4s \n" "shll v26.4s, v26.4h, #16 \n" "fmla v20.4s, %19.4s, v29.4s \n" "fmla v21.4s, %19.4s, v30.4s \n" "fmla v22.4s, %19.4s, v31.4s \n" "fmla v23.4s, %19.4s, v26.4s \n" "shll v25.4s, v25.4h, #16 \n" "fmla v16.4s, %14.4s, v12.4s \n" "fmla v17.4s, %14.4s, v13.4s \n" "fmla v18.4s, %14.4s, v24.4s \n" "fmla v19.4s, %14.4s, v25.4s \n" "shll v27.4s, v27.4h, #16 \n" "fmla v20.4s, %20.4s, v30.4s \n" "fmla v21.4s, %20.4s, v31.4s \n" "fmla v22.4s, %20.4s, v26.4s \n" "fmla v23.4s, %20.4s, v27.4s \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "shrn v20.4h, v20.4s, #16 \n" "shrn v21.4h, v21.4s, #16 \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%0], #32 \n" "st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k00), // %12 "w"(_k01), // %13 "w"(_k02), // %14 "w"(_k10), // %15 "w"(_k11), // %16 "w"(_k12), // %17 "w"(_k20), // %18 "w"(_k21), // %19 "w"(_k22), // %20 "w"(_bias0) // %21 : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", 
"v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%3, #256] \n" "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3] \n" // r10 r11 r12 r13 "mov v16.16b, %21.16b \n" // sum00 "mov v17.16b, %21.16b \n" // sum01 "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "mov v18.16b, %21.16b \n" // sum10 "mov v19.16b, %21.16b \n" // sum11 "fmla v16.4s, %15.4s, v10.4s \n" "fmla v17.4s, %15.4s, v11.4s \n" "shll v12.4s, v12.4h, #16 \n" "fmla v18.4s, %12.4s, v10.4s \n" "fmla v19.4s, %12.4s, v11.4s \n" "shll v13.4s, v13.4h, #16 \n" "fmla v16.4s, %16.4s, v11.4s \n" "fmla v17.4s, %16.4s, v12.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%4] \n" // r20 r21 r22 r23 "fmla v18.4s, %13.4s, v11.4s \n" "fmla v19.4s, %13.4s, v12.4s \n" "shll v20.4s, v20.4h, #16 \n" "fmla v16.4s, %17.4s, v12.4s \n" "fmla v17.4s, %17.4s, v13.4s \n" "shll v21.4s, v21.4h, #16 \n" "fmla v18.4s, %14.4s, v12.4s \n" "fmla v19.4s, %14.4s, v13.4s \n" "shll v22.4s, v22.4h, #16 \n" "fmla v16.4s, %18.4s, v20.4s \n" "fmla v17.4s, %18.4s, v21.4s \n" "shll v23.4s, v23.4h, #16 \n" "fmla v18.4s, %15.4s, v20.4s \n" "fmla v19.4s, %15.4s, v21.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%2] \n" // r00 r01 r02 r03 "fmla v16.4s, %19.4s, v21.4s \n" "fmla v17.4s, %19.4s, v22.4s \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%5] \n" // r30 r31 r32 r33 "fmla v18.4s, %16.4s, v21.4s \n" "fmla v19.4s, %16.4s, v22.4s \n" "shll v10.4s, v10.4h, #16 \n" "fmla v16.4s, %20.4s, v22.4s \n" "fmla v17.4s, %20.4s, v23.4s \n" "shll v24.4s, v24.4h, #16 \n" "fmla v18.4s, %17.4s, v22.4s \n" "fmla v19.4s, %17.4s, v23.4s \n" "shll v11.4s, v11.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "fmla v16.4s, %12.4s, v10.4s \n" "fmla v17.4s, %12.4s, v11.4s \n" "shll v12.4s, v12.4h, #16 \n" "fmla v18.4s, %18.4s, v24.4s \n" "fmla v19.4s, %18.4s, v25.4s \n" "shll v26.4s, v26.4h, #16 \n" 
"fmla v16.4s, %13.4s, v11.4s \n" "fmla v17.4s, %13.4s, v12.4s \n" "shll v13.4s, v13.4h, #16 \n" "fmla v18.4s, %19.4s, v25.4s \n" "fmla v19.4s, %19.4s, v26.4s \n" "shll v27.4s, v27.4h, #16 \n" "fmla v16.4s, %14.4s, v12.4s \n" "fmla v17.4s, %14.4s, v13.4s \n" "add %3, %3, #16 \n" "fmla v18.4s, %20.4s, v26.4s \n" "fmla v19.4s, %20.4s, v27.4s \n" "add %4, %4, #16 \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "add %2, %2, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "add %5, %5, #16 \n" "st1 {v16.4h, v17.4h}, [%0], #16 \n" "st1 {v18.4h, v19.4h}, [%1], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k00), // %12 "w"(_k01), // %13 "w"(_k02), // %14 "w"(_k10), // %15 "w"(_k11), // %16 "w"(_k12), // %17 "w"(_k20), // %18 "w"(_k21), // %19 "w"(_k22), // %20 "w"(_bias0) // %21 : "memory", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%3, #192] \n" "ld1 {v10.4h, v11.4h, v12.4h}, [%3] \n" // r10 r11 r12 "mov v18.16b, %21.16b \n" // sum0 "mov v19.16b, %21.16b \n" // sum1 "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmul v16.4s, %15.4s, v10.4s \n" "fmul v17.4s, %12.4s, v10.4s \n" "shll v12.4s, v12.4h, #16 \n" "fmla v18.4s, %16.4s, v11.4s \n" "fmla v19.4s, %13.4s, v11.4s \n" "prfm pldl1keep, [%4, #192] \n" "ld1 {v20.4h, v21.4h, v22.4h}, [%4] \n" // r20 r21 r22 "fmla v16.4s, %17.4s, v12.4s \n" "fmla v17.4s, %14.4s, v12.4s \n" "shll v20.4s, v20.4h, #16 \n" "shll v21.4s, v21.4h, #16 \n" "fmla v18.4s, %18.4s, v20.4s \n" "fmla v19.4s, %15.4s, v20.4s \n" "prfm pldl1keep, [%2, #192] \n" "ld1 {v10.4h, v11.4h, v12.4h}, [%2] \n" // r00 r01 r02 "shll v22.4s, v22.4h, #16 \n" "prfm pldl1keep, [%5, #192] \n" "ld1 {v24.4h, v25.4h, v26.4h}, [%5] \n" // r30 r31 r32 "fmla v16.4s, 
%19.4s, v21.4s \n" "fmla v17.4s, %16.4s, v21.4s \n" "shll v10.4s, v10.4h, #16 \n" "shll v24.4s, v24.4h, #16 \n" "fmla v18.4s, %20.4s, v22.4s \n" "fmla v19.4s, %17.4s, v22.4s \n" "shll v11.4s, v11.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "fmla v16.4s, %12.4s, v10.4s \n" "fmla v17.4s, %18.4s, v24.4s \n" "shll v12.4s, v12.4h, #16 \n" "shll v26.4s, v26.4h, #16 \n" "fmla v18.4s, %13.4s, v11.4s \n" "fmla v19.4s, %19.4s, v25.4s \n" "add %3, %3, #8 \n" "fmla v16.4s, %14.4s, v12.4s \n" "fmla v17.4s, %20.4s, v26.4s \n" "add %4, %4, #8 \n" "fadd v18.4s, v18.4s, v16.4s \n" "fadd v19.4s, v19.4s, v17.4s \n" "add %2, %2, #8 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "add %5, %5, #8 \n" "st1 {v18.4h}, [%0], #8 \n" "st1 {v19.4h}, [%1], #8 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k00), // %12 "w"(_k01), // %13 "w"(_k02), // %14 "w"(_k10), // %15 "w"(_k11), // %16 "w"(_k12), // %17 "w"(_k20), // %18 "w"(_k21), // %19 "w"(_k22), // %20 "w"(_bias0) // %21 : "memory", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v24", "v25", "v26"); } r0 += 2 * 4 + w * 4; r1 += 2 * 4 + w * 4; r2 += 2 * 4 + w * 4; r3 += 2 * 4 + w * 4; outptr0 += outw * 4; outptr1 += outw * 4; } #endif // __aarch64__ for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%1], #32 \n" // r00 r01 r02 r03 "mov v16.16b, %17.16b \n" // sum00 "mov v17.16b, %17.16b \n" // sum01 "mov v18.16b, %17.16b \n" // sum02 "mov v19.16b, %17.16b \n" // sum03 "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, %8.4s, v10.4s \n" "fmla v17.4s, %8.4s, v11.4s \n" "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "fmla v18.4s, %8.4s, v12.4s \n" "fmla v19.4s, %8.4s, v13.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 
{v14.4h, v15.4h}, [%1] \n" // r04 r05 "fmla v16.4s, %9.4s, v11.4s \n" "fmla v17.4s, %9.4s, v12.4s \n" "shll v14.4s, v14.4h, #16 \n" "fmla v18.4s, %9.4s, v13.4s \n" "fmla v19.4s, %9.4s, v14.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" // r10 r11 r12 r13 "fmla v16.4s, %10.4s, v12.4s \n" "fmla v17.4s, %10.4s, v13.4s \n" "shll v15.4s, v15.4h, #16 \n" "fmla v18.4s, %10.4s, v14.4s \n" "fmla v19.4s, %10.4s, v15.4s \n" "shll v20.4s, v20.4h, #16 \n" "shll v21.4s, v21.4h, #16 \n" "fmla v16.4s, %11.4s, v20.4s \n" "fmla v17.4s, %11.4s, v21.4s \n" "shll v22.4s, v22.4h, #16 \n" "shll v23.4s, v23.4h, #16 \n" "fmla v18.4s, %11.4s, v22.4s \n" "fmla v19.4s, %11.4s, v23.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v14.4h, v15.4h}, [%2] \n" // r14 r15 "fmla v16.4s, %12.4s, v21.4s \n" "fmla v17.4s, %12.4s, v22.4s \n" "shll v14.4s, v14.4h, #16 \n" "fmla v18.4s, %12.4s, v23.4s \n" "fmla v19.4s, %12.4s, v14.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3], #32 \n" // r20 r21 r22 r23 "fmla v16.4s, %13.4s, v22.4s \n" "fmla v17.4s, %13.4s, v23.4s \n" "shll v15.4s, v15.4h, #16 \n" "fmla v18.4s, %13.4s, v14.4s \n" "fmla v19.4s, %13.4s, v15.4s \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v16.4s, %14.4s, v10.4s \n" "fmla v17.4s, %14.4s, v11.4s \n" "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "fmla v18.4s, %14.4s, v12.4s \n" "fmla v19.4s, %14.4s, v13.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v14.4h, v15.4h}, [%3] \n" // r24 r25 "fmla v16.4s, %15.4s, v11.4s \n" "fmla v17.4s, %15.4s, v12.4s \n" "shll v14.4s, v14.4h, #16 \n" "fmla v18.4s, %15.4s, v13.4s \n" "fmla v19.4s, %15.4s, v14.4s \n" "fmla v16.4s, %16.4s, v12.4s \n" "fmla v17.4s, %16.4s, v13.4s \n" "shll v15.4s, v15.4h, #16 \n" "fmla v18.4s, %16.4s, v14.4s \n" "fmla v19.4s, %16.4s, v15.4s \n" "shrn v16.4h, v16.4s, #16 \n" "shrn v17.4h, v17.4s, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "st1 
{v16.4h, v17.4h, v18.4h, v19.4h}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "pld [%1, #128] \n" "vld1.u16 {d30-d31}, [%1 :64]! \n" // r00 r01 "vmov q10, %q17 \n" // sum00 "vmov q11, %q17 \n" // sum01 "vshll.u16 q14, d30, #16 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q10, %q8, q14 \n" "vmla.f32 q11, %q8, q15 \n" "vmla.f32 q10, %q9, q15 \n" "pld [%1, #128] \n" "vld1.u16 {d30-d31}, [%1 :64]! \n" // r02 r03 "vmov q12, %q17 \n" // sum02 "vmov q13, %q17 \n" // sum03 "vshll.u16 q14, d30, #16 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q12, %q8, q14 \n" "vmla.f32 q11, %q9, q14 \n" "vmla.f32 q13, %q8, q15 \n" "vmla.f32 q10, %q10, q14 \n" "vmla.f32 q12, %q9, q15 \n" "vmla.f32 q11, %q10, q15 \n" // "pld [%1, #128] \n" "vld1.u16 {d30-d31}, [%1 :64] \n" // r04 r05 "vshll.u16 q14, d30, #16 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q13, %q9, q14 \n" "vmla.f32 q12, %q10, q14 \n" "vmla.f32 q13, %q10, q15 \n" "pld [%2, #128] \n" "vld1.u16 {d30-d31}, [%2 :64]! \n" // r10 r11 "vshll.u16 q14, d30, #16 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q10, %q11, q14 \n" "vmla.f32 q11, %q11, q15 \n" "vmla.f32 q10, %q12, q15 \n" "pld [%2, #128] \n" "vld1.u16 {d30-d31}, [%2 :64]! 
\n" // r12 r13 "vshll.u16 q14, d30, #16 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q12, %q11, q14 \n" "vmla.f32 q11, %q12, q14 \n" "vmla.f32 q13, %q11, q15 \n" "vmla.f32 q10, %q13, q14 \n" "vmla.f32 q12, %q12, q15 \n" "vmla.f32 q11, %q13, q15 \n" // "pld [%2, #128] \n" "vld1.u16 {d30-d31}, [%2 :64] \n" // r14 r15 "vshll.u16 q14, d30, #16 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q13, %q12, q14 \n" "vmla.f32 q12, %q13, q14 \n" "vmla.f32 q13, %q13, q15 \n" "pld [%3, #128] \n" "vld1.u16 {d30-d31}, [%3 :64]! \n" // r20 r21 "vshll.u16 q14, d30, #16 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q10, %q14, q14 \n" "vmla.f32 q11, %q14, q15 \n" "vmla.f32 q10, %q15, q15 \n" "pld [%3, #128] \n" "vld1.u16 {d30-d31}, [%3 :64]! \n" // r22 r23 "vshll.u16 q14, d30, #16 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q12, %q14, q14 \n" "vmla.f32 q11, %q15, q14 \n" "vmla.f32 q13, %q14, q15 \n" "vmla.f32 q10, %q16, q14 \n" "vmla.f32 q12, %q15, q15 \n" "vmla.f32 q11, %q16, q15 \n" // "pld [%3, #128] \n" "vld1.u16 {d30-d31}, [%3 :64] \n" // r24 r25 "vshll.u16 q14, d30, #16 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q13, %q15, q14 \n" "vmla.f32 q12, %q16, q14 \n" "vmla.f32 q13, %q16, q15 \n" "vshrn.u32 d20, q10, #16 \n" "vshrn.u32 d21, q11, #16 \n" "vshrn.u32 d22, q12, #16 \n" "vshrn.u32 d23, q13, #16 \n" "vst1.u16 {d20-d23}, [%0 :64]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%1] \n" // r00 r01 r02 r03 "mov v18.16b, %17.16b \n" // sum00 "mov v19.16b, %17.16b \n" // sum01 "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "fmul v16.4s, %8.4s, v12.4s \n" "fmul v17.4s, %8.4s, v13.4s \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v18.4s, %9.4s, v13.4s \n" "fmla v19.4s, %9.4s, v14.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2] \n" // r10 r11 r12 r13 "fmla v16.4s, %10.4s, v14.4s \n" "fmla v17.4s, %10.4s, v15.4s \n" "shll v20.4s, v20.4h, #16 \n" "shll v21.4s, v21.4h, #16 \n" "fmla v18.4s, %11.4s, v20.4s \n" "fmla v19.4s, %11.4s, v21.4s \n" "shll v22.4s, v22.4h, #16 \n" "shll v23.4s, v23.4h, #16 \n" "fmla v16.4s, %12.4s, v21.4s \n" "fmla v17.4s, %12.4s, v22.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%3] \n" // r20 r21 r22 r23 "fmla v18.4s, %13.4s, v22.4s \n" "fmla v19.4s, %13.4s, v23.4s \n" "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "fmla v16.4s, %14.4s, v12.4s \n" "fmla v17.4s, %14.4s, v13.4s \n" "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v18.4s, %15.4s, v13.4s \n" "fmla v19.4s, %15.4s, v14.4s \n" "add %1, %1, #16 \n" "fmla v16.4s, %16.4s, v14.4s \n" "fmla v17.4s, %16.4s, v15.4s \n" "add %2, %2, #16 \n" "fadd v18.4s, v18.4s, v16.4s \n" "fadd v19.4s, v19.4s, v17.4s \n" "add %3, %3, #16 \n" "shrn v18.4h, v18.4s, #16 \n" "shrn v19.4h, v19.4s, #16 \n" "st1 {v18.4h, v19.4h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // 
%1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "pld [%1, #256] \n" "vld1.u16 {d28-d31}, [%1 :64] \n" // r00 r01 r02 r03 "vmov q10, %q17 \n" // sum00 "vmov q11, %q17 \n" // sum01 "vshll.u16 q12, d28, #16 \n" "vshll.u16 q13, d29, #16 \n" "vmla.f32 q10, %q8, q12 \n" "vmla.f32 q11, %q8, q13 \n" "vshll.u16 q14, d30, #16 \n" "vmla.f32 q10, %q9, q13 \n" "vmla.f32 q11, %q9, q14 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q10, %q10, q14 \n" "vmla.f32 q11, %q10, q15 \n" "pld [%2, #256] \n" "vld1.u16 {d28-d31}, [%2 :64] \n" // r10 r11 r12 r13 "vshll.u16 q12, d28, #16 \n" "vshll.u16 q13, d29, #16 \n" "vmla.f32 q10, %q11, q12 \n" "vmla.f32 q11, %q11, q13 \n" "vshll.u16 q14, d30, #16 \n" "vmla.f32 q10, %q12, q13 \n" "vmla.f32 q11, %q12, q14 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q10, %q13, q14 \n" "vmla.f32 q11, %q13, q15 \n" "pld [%3, #256] \n" "vld1.u16 {d28-d31}, [%3 :64] \n" // r20 r21 r22 r23 "vshll.u16 q12, d28, #16 \n" "vshll.u16 q13, d29, #16 \n" "vmla.f32 q10, %q14, q12 \n" "vmla.f32 q11, %q14, q13 \n" "vshll.u16 q14, d30, #16 \n" "vmla.f32 q10, %q15, q13 \n" "vmla.f32 q11, %q15, q14 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q10, %q16, q14 \n" "vmla.f32 q11, %q16, q15 \n" "add %1, %1, #16 \n" "add %2, %2, #16 \n" "vshrn.u32 d20, q10, #16 \n" "vshrn.u32 d21, q11, #16 \n" "add %3, %3, #16 \n" "vst1.u16 {d20-d21}, [%0 :64]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; j < outw; j++) { float32x4_t _sum0 = _bias0; float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4)); float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8)); float32x4_t _r10 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r11 = vcvt_f32_bf16(vld1_u16(r1 + 4)); float32x4_t _r12 = vcvt_f32_bf16(vld1_u16(r1 + 8)); float32x4_t _r20 = vcvt_f32_bf16(vld1_u16(r2)); float32x4_t _r21 = vcvt_f32_bf16(vld1_u16(r2 + 4)); float32x4_t _r22 = vcvt_f32_bf16(vld1_u16(r2 + 8)); _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); vst1_u16(outptr0, vcvt_bf16_f32(_sum0)); r0 += 4; r1 += 4; r2 += 4; outptr0 += 4; } r0 += 2 * 4; r1 += 2 * 4; r2 += 2 * 4; } } } static void convdw3x3s2_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = (w - 2 * outw + w) * 4; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); float32x4_t _bias0 = bias ? 
vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f); const unsigned short* k0 = kernel.row<const unsigned short>(g); unsigned short* outptr0 = out; const Mat img0 = bottom_blob.channel(g); const unsigned short* r0 = img0.row<const unsigned short>(0); const unsigned short* r1 = img0.row<const unsigned short>(1); const unsigned short* r2 = img0.row<const unsigned short>(2); float32x4_t _k00 = vcvt_f32_bf16(vld1_u16(k0)); float32x4_t _k01 = vcvt_f32_bf16(vld1_u16(k0 + 4)); float32x4_t _k02 = vcvt_f32_bf16(vld1_u16(k0 + 8)); float32x4_t _k10 = vcvt_f32_bf16(vld1_u16(k0 + 12)); float32x4_t _k11 = vcvt_f32_bf16(vld1_u16(k0 + 16)); float32x4_t _k12 = vcvt_f32_bf16(vld1_u16(k0 + 20)); float32x4_t _k20 = vcvt_f32_bf16(vld1_u16(k0 + 24)); float32x4_t _k21 = vcvt_f32_bf16(vld1_u16(k0 + 28)); float32x4_t _k22 = vcvt_f32_bf16(vld1_u16(k0 + 32)); int i = 0; for (; i < outh; i++) { int j = 0; #if __aarch64__ for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%1], #32 \n" // r00 r01 r02 r03 "mov v28.16b, %17.16b \n" // sum00 "mov v29.16b, %17.16b \n" // sum01 "mov v30.16b, %17.16b \n" // sum02 "mov v31.16b, %17.16b \n" // sum03 "prfm pldl1keep, [%1, #256] \n" "ld1 {v14.4h, v15.4h, v16.4h, v17.4h}, [%1], #32 \n" // r04 r05 r06 r07 "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "prfm pldl1keep, [%1, #64] \n" "ld1 {v18.4h}, [%1] \n" // r08 "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v28.4s, %8.4s, v10.4s \n" "fmla v29.4s, %8.4s, v12.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v30.4s, %8.4s, v14.4s \n" "fmla v31.4s, %8.4s, v16.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v28.4s, %9.4s, v11.4s \n" "fmla v29.4s, %9.4s, v13.4s \n" "fmla v30.4s, %9.4s, v15.4s \n" "fmla v31.4s, %9.4s, v17.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" // r10 r11 r12 r13 "fmla v28.4s, %10.4s, v12.4s \n" 
"fmla v29.4s, %10.4s, v14.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v30.4s, %10.4s, v16.4s \n" "fmla v31.4s, %10.4s, v18.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n" // r14 r15 r16 r17 "shll v20.4s, v20.4h, #16 \n" "shll v21.4s, v21.4h, #16 \n" "shll v22.4s, v22.4h, #16 \n" "shll v23.4s, v23.4h, #16 \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v19.4h}, [%2] \n" // r18 "shll v24.4s, v24.4h, #16 \n" "shll v25.4s, v25.4h, #16 \n" "fmla v28.4s, %11.4s, v20.4s \n" "fmla v29.4s, %11.4s, v22.4s \n" "shll v26.4s, v26.4h, #16 \n" "fmla v30.4s, %11.4s, v24.4s \n" "fmla v31.4s, %11.4s, v26.4s \n" "shll v27.4s, v27.4h, #16 \n" "fmla v28.4s, %12.4s, v21.4s \n" "fmla v29.4s, %12.4s, v23.4s \n" "fmla v30.4s, %12.4s, v25.4s \n" "fmla v31.4s, %12.4s, v27.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3], #32 \n" // r20 r21 r22 r23 "fmla v28.4s, %13.4s, v22.4s \n" "fmla v29.4s, %13.4s, v24.4s \n" "shll v19.4s, v19.4h, #16 \n" "fmla v30.4s, %13.4s, v26.4s \n" "fmla v31.4s, %13.4s, v19.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v14.4h, v15.4h, v16.4h, v17.4h}, [%3], #32 \n" // r24 r25 r26 r27 "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v18.4h}, [%3] \n" // r28 "shll v14.4s, v14.4h, #16 \n" "shll v15.4s, v15.4h, #16 \n" "fmla v28.4s, %14.4s, v10.4s \n" "fmla v29.4s, %14.4s, v12.4s \n" "shll v16.4s, v16.4h, #16 \n" "fmla v30.4s, %14.4s, v14.4s \n" "fmla v31.4s, %14.4s, v16.4s \n" "shll v17.4s, v17.4h, #16 \n" "fmla v28.4s, %15.4s, v11.4s \n" "fmla v29.4s, %15.4s, v13.4s \n" "fmla v30.4s, %15.4s, v15.4s \n" "fmla v31.4s, %15.4s, v17.4s \n" "fmla v28.4s, %16.4s, v12.4s \n" "fmla v29.4s, %16.4s, v14.4s \n" "shll v18.4s, v18.4h, #16 \n" "fmla v30.4s, %16.4s, v16.4s \n" "fmla v31.4s, %16.4s, v18.4s \n" "shrn v28.4h, v28.4s, #16 \n" "shrn v29.4h, v29.4s, #16 \n" "shrn v30.4h, v30.4s, #16 \n" "shrn 
v31.4h, v31.4s, #16 \n" "st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } #endif // __aarch64__ for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%1], #32 \n" // r00 r01 r02 r03 "mov v22.16b, %17.16b \n" // sum00 "mov v23.16b, %17.16b \n" // sum01 "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmul v20.4s, %8.4s, v10.4s \n" "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "fmul v21.4s, %8.4s, v12.4s \n" "prfm pldl1keep, [%1, #64] \n" "ld1 {v14.4h}, [%1] \n" // r04 "fmla v22.4s, %9.4s, v11.4s \n" "fmla v23.4s, %9.4s, v13.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" // r10 r11 r12 r13 "shll v14.4s, v14.4h, #16 \n" "fmla v20.4s, %10.4s, v12.4s \n" "fmla v21.4s, %10.4s, v14.4s \n" "shll v16.4s, v16.4h, #16 \n" "shll v17.4s, v17.4h, #16 \n" "fmla v22.4s, %11.4s, v16.4s \n" "shll v18.4s, v18.4h, #16 \n" "shll v19.4s, v19.4h, #16 \n" "fmla v23.4s, %11.4s, v18.4s \n" "prfm pldl1keep, [%2, #64] \n" "ld1 {v15.4h}, [%2] \n" // r14 "fmla v20.4s, %12.4s, v17.4s \n" "fmla v21.4s, %12.4s, v19.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3], #32 \n" // r20 r21 r22 r23 "shll v15.4s, v15.4h, #16 \n" "fmla v22.4s, %13.4s, v18.4s \n" "fmla v23.4s, %13.4s, v15.4s \n" "shll v10.4s, v10.4h, #16 \n" "shll v11.4s, v11.4h, #16 \n" "fmla v20.4s, %14.4s, v10.4s \n" "shll v12.4s, v12.4h, #16 \n" "shll v13.4s, v13.4h, #16 \n" "fmla v21.4s, %14.4s, 
v12.4s \n" "prfm pldl1keep, [%3, #64] \n" "ld1 {v14.4h}, [%3] \n" // r24 "fmla v22.4s, %15.4s, v11.4s \n" "fmla v23.4s, %15.4s, v13.4s \n" "shll v14.4s, v14.4h, #16 \n" "fmla v20.4s, %16.4s, v12.4s \n" "fmla v21.4s, %16.4s, v14.4s \n" "fadd v22.4s, v20.4s, v22.4s \n" "fadd v23.4s, v21.4s, v23.4s \n" "shrn v22.4h, v22.4s, #16 \n" "shrn v23.4h, v23.4s, #16 \n" "st1 {v22.4h, v23.4h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "pld [%1, #256] \n" "vld1.u16 {d28-d31}, [%1 :64]! \n" // r00 r01 r02 r03 "vmov q10, %q17 \n" // sum00 "vmov q11, %q17 \n" // sum01 "vshll.u16 q12, d28, #16 \n" "vshll.u16 q13, d29, #16 \n" "vmla.f32 q10, %q8, q12 \n" "vshll.u16 q14, d30, #16 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q11, %q8, q14 \n" "vld1.u16 {d25}, [%1] \n" // r04 "vmla.f32 q10, %q9, q13 \n" "vmla.f32 q11, %q9, q15 \n" "vshll.u16 q12, d25, #16 \n" "vmla.f32 q10, %q10, q14 \n" "pld [%2, #256] \n" "vld1.u16 {d28-d31}, [%2 :64]! \n" // r10 r11 r12 r13 "vmla.f32 q11, %q10, q12 \n" "vshll.u16 q12, d28, #16 \n" "vshll.u16 q13, d29, #16 \n" "vmla.f32 q10, %q11, q12 \n" "vshll.u16 q14, d30, #16 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q11, %q11, q14 \n" "vld1.u16 {d25}, [%2] \n" // r14 "vmla.f32 q10, %q12, q13 \n" "vmla.f32 q11, %q12, q15 \n" "vshll.u16 q12, d25, #16 \n" "vmla.f32 q10, %q13, q14 \n" "pld [%3, #256] \n" "vld1.u16 {d28-d31}, [%3 :64]! 
\n" // r20 r21 r22 r23 "vmla.f32 q11, %q13, q12 \n" "vshll.u16 q12, d28, #16 \n" "vshll.u16 q13, d29, #16 \n" "vmla.f32 q10, %q14, q12 \n" "vshll.u16 q14, d30, #16 \n" "vshll.u16 q15, d31, #16 \n" "vmla.f32 q11, %q14, q14 \n" "vld1.u16 {d25}, [%3] \n" // r24 "vmla.f32 q10, %q15, q13 \n" "vmla.f32 q11, %q15, q15 \n" "vshll.u16 q12, d25, #16 \n" "vmla.f32 q10, %q16, q14 \n" "vmla.f32 q11, %q16, q12 \n" "vshrn.u32 d20, q10, #16 \n" "vshrn.u32 d21, q11, #16 \n" "vst1.u16 {d20-d21}, [%0 :64]! \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; j < outw; j++) { float32x4_t _sum0 = _bias0; float32x4_t _r00 = vcvt_f32_bf16(vld1_u16(r0)); float32x4_t _r01 = vcvt_f32_bf16(vld1_u16(r0 + 4)); float32x4_t _r02 = vcvt_f32_bf16(vld1_u16(r0 + 8)); float32x4_t _r10 = vcvt_f32_bf16(vld1_u16(r1)); float32x4_t _r11 = vcvt_f32_bf16(vld1_u16(r1 + 4)); float32x4_t _r12 = vcvt_f32_bf16(vld1_u16(r1 + 8)); float32x4_t _r20 = vcvt_f32_bf16(vld1_u16(r2)); float32x4_t _r21 = vcvt_f32_bf16(vld1_u16(r2 + 4)); float32x4_t _r22 = vcvt_f32_bf16(vld1_u16(r2 + 8)); _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); vst1_u16(outptr0, vcvt_bf16_f32(_sum0)); r0 += 2 * 4; r1 += 2 * 4; r2 += 2 * 4; outptr0 += 4; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
tentusscher_epi_2004_S2_1.c
#include <assert.h>
#include <stdlib.h>
#include "tentusscher_epi_2004_S2_1.h"

// Fills the generic cell-model descriptor with this model's constants
// (resting potential and number of ODE state variables).
GET_CELL_MODEL_DATA(init_cell_model_data) {

    assert(cell_model);

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;

}

// Loads the NEQ state variables of one cell. The textbook initial conditions
// are kept below (commented out) for reference; the values actually used are
// a precomputed steady state for this parameter set.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    // Default initial conditions
/*
    sv[0] = INITIAL_V;  // V;   millivolt
    sv[1] = 0.f;        //M
    sv[2] = 0.75;       //H
    sv[3] = 0.75f;      //J
    sv[4] = 0.f;        //Xr1
    sv[5] = 1.f;        //Xr2
    sv[6] = 0.f;        //Xs
    sv[7] = 1.f;        //S
    sv[8] = 0.f;        //R
    sv[9] = 0.f;        //D
    sv[10] = 1.f;       //F
    sv[11] = 1.f;       //FCa
    sv[12] = 1.f;       //G
    sv[13] = 0.0002;    //Cai
    sv[14] = 0.2f;      //CaSR
    sv[15] = 11.6f;     //Nai
    sv[16] = 138.3f;    //Ki
*/
    // Elnaz's steady-state initial conditions
    // Order matches the state layout documented above: V, M, H, J, Xr1, Xr2,
    // Xs, S, R, D, F, FCa, G, Cai, CaSR, Nai, Ki.
    real sv_sst[]={-86.6902768323595,0.00125688376225555,0.782690257165761,0.782547892596001,0.000171750048746746,0.486360170563085,0.00291485827479809,0.999998387931464,1.89456679295569e-08,1.86054940017131e-05,0.999770742626069,1.00724037170339,0.999997113579370,4.17567836043613e-05,0.472458747863693,10.1478189383772,139.471917130272};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}

// Advances num_steps explicit steps of size dt for every requested cell.
// cells_to_solve (may be NULL) maps loop index -> cell id; when NULL, cell i
// is solved. Cells are independent, so the loop parallelizes over cells.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;
    int i;

    // sv_id is written per-iteration, hence private.
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            // Each cell's state is a contiguous block of NEQ reals.
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One time step for a single cell: copy the state, evaluate the model, and
// store the result. Note RHS_cpu returns the already-updated state in rDY
// (not derivatives), so it is copied straight back into sv.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {

    assert(sv);

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// One explicit step of the ten Tusscher et al. 2004 epicardial model.
// NOTE(review): despite the name, rDY_ does not receive time derivatives:
// gate variables are written as their Rush-Larsen-updated values
// (X_INF-(X_INF-x)*exp(-dt/TAU_X)), concentrations are updated in place, and
// rDY_[0] holds the advanced voltage svolt + dt*(-sItot). The dt parameter is
// therefore part of the integration, not just a scale factor.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];
    real sm    = sv[1];
    real sh    = sv[2];
    real sj    = sv[3];
    real sxr1  = sv[4];
    real sxr2  = sv[5];
    real sxs   = sv[6];
    real ss    = sv[7];
    real sr    = sv[8];
    real sd    = sv[9];
    real sf    = sv[10];
    real sfca  = sv[11];
    real sg    = sv[12];
    real Cai   = sv[13];
    real CaSR  = sv[14];
    real Nai   = sv[15];
    real Ki    = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    // [!] Epicardium cell
    real Gks=0.245;
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    // [!] Epicardium cell
    real Gto=0.294;
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Scenario-specific conductance/flux overrides; these deliberately
    // replace most of the defaults assigned above (presumably a fitted
    // "S2" parameter set -- TODO confirm provenance).
    real parameters []={14.2265776064284,0.000280045021984329,0.000123702304592752,0.000251556675811958,0.224623739779267,0.145045477736859,0.132102752427711,4.42712254301024,0.0156948843567210,1.61691730440283,1100,0.000520888772463349,0.258756467150201,0.0191544497099730,0.00137164828832637,4.52996729499983e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    // Working variables: membrane currents, SR fluxes, reversal potentials,
    // and gate steady states / time constants.
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
//    real BufferFactorc;
//    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
//    real BufcKbufc=Bufc*Kbufc;
//    real Kbufcsquare=Kbufc*Kbufc;
//    real Kbufc2=2*Kbufc;
//    real BufsrKbufsr=Bufsr*Kbufsr;
//    const real Kbufsrsquare=Kbufsr*Kbufsr;
//    const real Kbufsr2=2*Kbufsr;
    // Exponentials reused by the FCa and G gate updates below.
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    // Nernst reversal potentials and rectification factors.
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium with calsequestrin buffering, solved via the quadratic
    // buffer equilibrium.
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // Cytosolic calcium with its own buffer equilibrium.
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    // H and J gates use different rate formulas above/below -40 mV.
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // Epicardial R and S gate formulations.
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates
    // Rush-Larsen scheme: new value, not a derivative, is stored.
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);

    // FCa and G only relax toward steady state when the membrane is
    // depolarized past -37 mV in the decreasing direction.
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
EntityList.h
// // Created by javier on 5/4/2020. // #ifndef GIGAENGINE_ENTITYLIST_H #define GIGAENGINE_ENTITYLIST_H #include "../Component/ComponentManager.h" #include "../Tag/TagBase.h" #include "../Tag/TagManager.h" #include <omp.h> #include <utility> #include <vector> #include <algorithm> using Entity = uint64_t; class EntityList { public: template<typename Func> void ForEach(Func function); template<typename T, typename Func> void ForEach(Func function); template<typename T1, typename T2, typename Func> void ForEach(Func function); template<typename T> EntityList OfType(); template<typename T1, typename T2> EntityList OfTypes() const; template<typename T> EntityList ExcludeType() const; template<typename T1, typename T2> EntityList ExcludeTypes() const; template<typename T1, typename T2, typename Func> void ParallelForEach(Func function); template<typename Func> void ParallelForEach(Func function); template<typename T, typename Func> void ParallelForEach(Func function); EntityList() = default; bool empty() const; size_t size() const; void Add(Entity entity); explicit EntityList(std::vector<Entity> entity); private: std::vector<Entity> entities_; friend class EntityManager; friend class TagManager; bool main_; bool sorted_; Entity back(); void clear(); void sort(); }; /*! * EntityList Conversion Constructor, convert a vector of entities to a EntityList * @param entity the vector of entities to add to a list */ inline EntityList::EntityList(std::vector<Entity> entity) : entities_(std::move(entity)), main_(false), sorted_(false) {} /*! * Gets the last entity in the list, for internal use only * @return the last entity in the list */ inline Entity EntityList::back() { return entities_.back(); } /*! * Adds a new entity to the list, for internal use only * @param entity the entity to add to the list */ inline void EntityList::Add(Entity entity) { entities_.push_back(entity); sorted_ = false; } /*! * Goes through all the entities in this list and calls the function given. 
* @tparam Func Represents the type of function that gets called on all the entities in the list.<br> * Function must contain an Entity reference parameter and return nothing.<br> * example: myFunc(Entity& ent) or [ ](Entity& ent){ //... } * @param function the function that gets called on all entities on this list. */ template<typename Func> void EntityList::ForEach(Func function) { //go through all entities for (auto &entity : entities_) { //execute the function given function(entity); } } /*! * Go through all entities in this list, get a specific component from them and call a function based on that component.<br> * If the certain entity does not have that type of component, it will be skipped. * @tparam T Represents the type of component to get from the entities in this list * @tparam Func Represents the type of function to pass in. <br> * Function type must contain a reference parameter of type T and return nothing.<br> * Example: myFunc(T& component) or [ ](T& component){ //... } * @param function The function to call on each entity in this list */ template<typename T, typename Func> void EntityList::ForEach(Func function) { //get the list of components of type T ComponentList<T> *list = ComponentManager::GetList<T>(); //if the list exists if (list) { //get all entities that are inside the list std::vector<Entity> ents = list->GetOverlappingEntities(entities_); //for every entity in the list, for (Entity entity : ents) { //get the component T *comp = ComponentManager::GetComponent<T>(entity); //call the function on the component if (comp) { function(*comp); } } } } /*! * Get all entities that contain 2 different types of components and store them in a new list. * @tparam T1 One Component type. * @tparam T2 Another component type. * @return a list containing all entities with components of the types given. 
*/ template<typename T1, typename T2> EntityList EntityList::OfTypes() const { //store the new list of references to types std::vector<Entity> list; //get reference of all components of both types ComponentList<T1> *list1 = ComponentManager::GetList<T1>(); ComponentList<T2> *list2 = ComponentManager::GetList<T2>(); //if they both exist if (list1 && list2) { //If entity list is the main one, if (main_) { //get all overlapping entities from both components and store them into an EntityList return EntityList(list1->GetOverlappingEntities(list2->GetAllEnities())); } //Get the common entities from this list and T1's components std::vector<Entity> ent1 = list1->GetOverlappingEntities(entities_); //get the common entities from the last list and T2's components std::vector<Entity> ent2 = list2->GetOverlappingEntities(ent1); //send an EntityList with the overlapping Entities return EntityList(ent2); } //one (or both) entities do not have a list yet, return an empty list return EntityList(); } /*! * Get all components that contain the of the same type given and store them in a new list. * @tparam T The type of Component to find in all the entities in the list. * @return A list with all entities that contain a component of type T */ template<typename T> EntityList EntityList::OfType() { rttr::type t = rttr::type::get<T>(); if(t.is_derived_from<TagBase>()) { TagList<T>* list = TagManager::GetList<T>(); if(list) { if(!sorted_) sort(); std::vector<Entity> ents = list->GetOverlappingEntities(entities_); return EntityList(ents); } return EntityList(); } ComponentList<T> *list = ComponentManager::GetList<T>(); if (list) { //find overlapping entities between the component and this list std::vector<Entity> ents = list->GetOverlappingEntities(entities_); //send an EntityList that contains all entities from this list that has this type return EntityList(ents); } return EntityList(); } /*! * Get a list of entities that do not contain a component type given. 
* @tparam T The type of component to exclude * @return A new list that contains all the entities in this list that do not contain the component type given. */ template<typename T> EntityList EntityList::ExcludeType() const { EntityList result; ComponentList<T> *list = ComponentManager::GetList<T>(); if (list) { } return result; } /*! * Get a list of entities that do not contain 2 different types of components. * @tparam T1 One type of component that will get excluded. * @tparam T2 Another type of component that will get excluded from the new list. * @return A new list of entities that do not contain 2 different types of components. */ template<typename T1, typename T2> EntityList EntityList::ExcludeTypes() const { return EntityList(); } /*! * Goes through all the entities in the list, Gets the components of type T1 and T2, and calls a function with those * components as parameters. If the entity does not contain both, it will be skipped. * @tparam T1 The first component type * @tparam T2 The second component type * @tparam Func The type of function to call on all entities. * The function must be void and have a reference to T1 and T2 as parameters IN THAT ORDER.<br> * Example: myFunc(T1& comp1, T2& comp2) or [ ] (T1& comp1, T2 comp2) { //... } * @param function the function to call on all entities that contain T1 and T2 */ template<typename T1, typename T2, typename Func> void EntityList::ForEach(Func function) { ComponentList<T1> *list1 = ComponentManager::GetList<T1>(); ComponentList<T2> *list2 = ComponentManager::GetList<T2>(); if(entities_.empty() || !list2 || !list1) return; for (auto &entity : entities_) { T1 *comp1 = list1->GetComponent(entity); T2 *comp2 = list2->GetComponent(entity); if (comp1 && comp2) { function(*comp1, *comp2); } } } /*! * Goes through all the entities in the list in parallel, Gets the components of type T1 and T2, and calls a function with those * components as parameters. 
If the entity does not contain both, it will be skipped.<br> * Be careful, issues can arise when you parallelize code. If issues do arise, use ForEach instead. * @tparam T1 The first component type * @tparam T2 The second component type * @tparam Func The type of function to call on all entities. * The function must be void and have a reference to T1 and T2 as parameters IN THAT ORDER.<br> * Example: myFunc(T1& comp1, T2& comp2) or [ ] (T1& comp1, T2& comp2) { //... } * @param function the function to call on all entities that contain T1 and T2 */ template<typename T1, typename T2, typename Func> void EntityList::ParallelForEach(Func function) { ComponentList<T1> *list1 = ComponentManager::GetList<T1>(); ComponentList<T2> *list2 = ComponentManager::GetList<T2>(); if(!list1 || !list2 || entities_.empty()) return; #pragma omp parallel for schedule(static) shared(function, list1, list2) default(none) for (unsigned int i = 0; i < entities_.size(); ++i) { Entity entity = entities_[i]; T1 *comp1 = list1->GetComponent(entity); T2 *comp2 = list2->GetComponent(entity); if (comp1 && comp2) { function(*comp1, *comp2); } } } inline size_t EntityList::size() const { return entities_.size(); } inline bool EntityList::empty() const { return entities_.empty(); } inline void EntityList::clear() { return entities_.clear(); } inline void EntityList::sort() { std::sort(entities_.begin(), entities_.end()); sorted_ = true; } /*! * Go through all entities in this list in parallel, get a specific component from them and call a function based on that component.<br> * If the certain entity does not have that type of component, it will be skipped.<br> * Be careful, issues can arise when you parallelize code. If issues do arise, use ForEach instead. * @tparam T Represents the type of component to get from the entities in this list * @tparam Func Represents the type of function to pass in. 
<br> * Function type must contain a reference parameter of type T and return nothing.<br> * Example: myFunc(T& component) or [ ](T& component){ //... } * @param function The function to call on each entity in this list */ template<typename T, typename Func> void EntityList::ParallelForEach(Func function) { ComponentList<T> *list = ComponentManager::GetList<T>(); if (list) { #pragma omp parallel for schedule(dynamic) shared(function, list) default(none) for (unsigned int i = 0; i < entities_.size(); ++i) { Entity entity = entities_[i]; T *comp = list->GetComponent(entity); if (comp) { function(*comp); } } } } /*! * Goes through all the entities in this list in parallel and calls the function given.<br> * Be careful, issues can arise when you parallelize code. If issues do arise, use ForEach instead. * @tparam Func Represents the type of function that gets called on all the entities in the list.<br> * Function must contain an Entity reference parameter and return nothing.<br> * example: myFunc(Entity& ent) or [ ](Entity& ent){ //... } * @param function the function that gets called on all entities on this list. */ template<typename Func> void EntityList::ParallelForEach(Func function) { #pragma omp parallel for schedule(static) shared(function) default(none) for (unsigned int i = 0; i < entities_.size(); ++i) { function(entities_[i]); } } #endif //GIGAENGINE_ENTITYLIST_H
FFT.h
#pragma once #include <vector> #include <algorithm> #ifdef MULTICORE #include <omp.h> #endif #include <libff/algebra/field_utils/field_utils.hpp> #include <libfqfft/tools/exceptions.hpp> /** * Let F_p denote a field of prime order p. * The Discrete Fourier Transform (DFT) of a vector 'a[0...(n-1)]' where n = 2^k and a_i \in F_p is defined as: * * a_i = \sum_{j=0}^{n-1} { \omega_n^{i j} \cdot a_j } * * Here, \omega is a primitive nth root of unity in F_p. * Typically, the a_i's are also from the field F_p. * However, in some cases we might want the a_j's to be elements of a group, say an elliptic curve. * In that case, \omega_n^{i j} \cdot a_j is actually a scalar multiplication in the elliptic curve group. * * This is why we use GroupType here to indicate the type of group elements the a_i's are. * Note: We refer to the DFT as FFT in this code. */ namespace libutt { using libfqfft::DomainSizeException; /** * A modification of libfqfft's code, which is based on pseudocode from [CLRS 2n Ed, pp. 864]. * This is the non-parallelized version. * Also, note that it's the caller's responsibility to multiply by 1/N when using this for an inverse DFT. 
*/ template<typename GroupT, typename FieldT> void FFT_serial(std::vector<GroupT> &a, const FieldT &omega) { const size_t n = a.size(), logn = libff::log2(n); if (n != (1u << logn)) throw DomainSizeException("expected n == (1u << logn)"); /* swapping in place (from Storer's book) */ for (size_t k = 0; k < n; ++k) { const size_t rk = libff::bitreverse(k, logn); if (k < rk) std::swap(a[k], a[rk]); } size_t m = 1; // invariant: m = 2^{s-1} for (size_t s = 1; s <= logn; ++s) { // w_m is 2^s-th root of unity now const FieldT w_m = omega^(n/(2*m)); asm volatile ("/* pre-inner */"); for (size_t k = 0; k < n; k += 2*m) { FieldT w = FieldT::one(); for (size_t j = 0; j < m; ++j) { const GroupT t = w * a[k+j+m]; a[k+j+m] = a[k+j] - t; a[k+j] = a[k+j] + t; w *= w_m; } } asm volatile ("/* post-inner */"); m *= 2; } } template<typename GroupT, typename FieldT> void FFT(std::vector<GroupT> &a) { size_t n = libff::get_power_of_two(a.size()); FieldT omega = libff::get_root_of_unity<FieldT>(n); #ifdef MULTICORE # error "Did not test the parallel FFT path yet" #else FFT_serial<GroupT, FieldT>(a, omega); #endif } template<typename GroupT, typename FieldT> void invFFT(std::vector<GroupT> &a) { size_t n = libff::get_power_of_two(a.size()); FieldT omega = libff::get_root_of_unity<FieldT>(n); #ifdef MULTICORE # error "Did not test the parallel FFT path yet" #else FFT_serial<GroupT, FieldT>(a, omega.inverse()); #endif const FieldT sconst = FieldT(static_cast<long>(n)).inverse(); for(size_t i = 0; i < n; i++) { a[i] = sconst * a[i]; } } template<typename GroupT, typename FieldT> void FFT_parallel(std::vector<GroupT> &a, const FieldT &omega) { #ifdef MULTICORE const size_t num_cpus = omp_get_max_threads(); #else const size_t num_cpus = 1; #endif const size_t log_cpus = ((num_cpus & (num_cpus - 1)) == 0 ? 
libff::log2(num_cpus) : libff::log2(num_cpus) - 1); if (log_cpus == 0) FFT_serial(a, omega); else FFT_parallel_inner(a, omega, log_cpus); } template<typename GroupT, typename FieldT> void FFT_parallel_inner(std::vector<FieldT> &a, const FieldT &omega, const size_t log_cpus) { const size_t num_cpus = 1ul<<log_cpus; const size_t m = a.size(); const size_t log_m = libff::log2(m); if (m != 1ul<<log_m) throw DomainSizeException("expected m == 1ul<<log_m"); if (log_m < log_cpus) { FFT_serial(a, omega); return; } std::vector<std::vector<FieldT> > tmp(num_cpus); for (size_t j = 0; j < num_cpus; ++j) { tmp[j].resize(1ul<<(log_m-log_cpus), FieldT::zero()); } #ifdef MULTICORE #pragma omp parallel for #endif for (size_t j = 0; j < num_cpus; ++j) { const FieldT omega_j = omega^j; const FieldT omega_step = omega^(j<<(log_m - log_cpus)); FieldT elt = FieldT::one(); for (size_t i = 0; i < 1ul<<(log_m - log_cpus); ++i) { for (size_t s = 0; s < num_cpus; ++s) { // invariant: elt is omega^(j*idx) const size_t idx = (i + (s<<(log_m - log_cpus))) % (1u << log_m); tmp[j][i] += elt * a[idx]; elt *= omega_step; } elt *= omega_j; } } const FieldT omega_num_cpus = omega^num_cpus; #ifdef MULTICORE #pragma omp parallel for #endif for (size_t j = 0; j < num_cpus; ++j) { FFT_serial(tmp[j], omega_num_cpus); } #ifdef MULTICORE #pragma omp parallel for #endif for (size_t i = 0; i < num_cpus; ++i) { for (size_t j = 0; j < 1ul<<(log_m - log_cpus); ++j) { // now: i = idx >> (log_m - log_cpus) and j = idx % (1u << (log_m - log_cpus)), for idx = ((i<<(log_m-log_cpus))+j) % (1u << log_m) a[(j<<log_cpus) + i] = tmp[i][j]; } } } } // end of libutt
dz2z4.c
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "timer.h"

/* Final energy summaries of the two runs; main() compares them. */
char res_seq[100];
char res_par[100];

#define DIM 2 /* Two-dimensional system */
#define X 0   /* x-coordinate subscript */
#define Y 1   /* y-coordinate subscript */

const double G = 6.673e-11; /* Gravitational constant */

typedef double vect_t[DIM]; /* Vector type for position, etc. */

struct particle_s
{
   double m; /* Mass */
   vect_t s; /* Position */
   vect_t v; /* Velocity */
};

int rank, size;
MPI_Datatype particle_s_type;
MPI_Datatype vect_t_type;

enum Tags
{
   TAG_CURR = 1000,
   TAG_START_IND,
   TAG_WORK_FINISH,
   TAG_FORCES_SEND,
   TAG_FORCES,
};

void Usage(char *prog_name);
void Get_args(int argc, char *argv[], int *n_p, int *n_steps_p,
              double *delta_t_p, int *output_freq_p, char *g_i_p);
void Get_init_cond(struct particle_s curr[], int n);
void Gen_init_cond(struct particle_s curr[], int n);
void Output_state(double time, struct particle_s curr[], int n);
void Compute_force(int part, vect_t forces[], struct particle_s curr[], int n);
void Compute_force_parallel(int part, vect_t forces[], struct particle_s curr[], int n);
void Update_part(int part, vect_t forces[], struct particle_s curr[], int n,
                 double delta_t);
void Compute_energy(struct particle_s curr[], int n, double *kin_en_p,
                    double *pot_en_p);

/* Reference single-process n-body simulation (Euler integration).
 * Stores its final energy summary in res_seq for comparison in main(). */
void sequential_solution(int argc, char *argv[])
{
   int n;                   /* Number of particles   */
   int n_steps;             /* Number of timesteps   */
   int step;                /* Current step          */
   int part;                /* Current particle      */
   int output_freq;         /* Frequency of output   */
   double delta_t;          /* Size of timestep      */
   double t = 0.0;          /* Current Time (0.0 covers the n_steps==0 case) */
   struct particle_s *curr; /* Current state of system */
   vect_t *forces;          /* Forces on each particle */
   char g_i;                /* _G_en or _i_nput init conds */
   double kinetic_energy, potential_energy;
   double start, finish;    /* For timings */

   Get_args(argc, argv, &n, &n_steps, &delta_t, &output_freq, &g_i);
   curr = malloc(n * sizeof(struct particle_s));
   forces = malloc(n * sizeof(vect_t));
   if (g_i == 'i')
      Get_init_cond(curr, n);
   else
      Gen_init_cond(curr, n);

   GET_TIME(start);
   Compute_energy(curr, n, &kinetic_energy, &potential_energy);
   printf(" PE = %e, KE = %e, Total Energy = %e\n", potential_energy, kinetic_energy, kinetic_energy + potential_energy);
   Output_state(0, curr, n);
   for (step = 1; step <= n_steps; step++)
   {
      t = step * delta_t;
      memset(forces, 0, n * sizeof(vect_t));
      /* Compute_force(part, ...) accumulates every pair (part, k>part), so
       * iterating part up to n-2 covers each pair exactly once. */
      for (part = 0; part < n - 1; part++)
         Compute_force(part, forces, curr, n);
      for (part = 0; part < n; part++)
         Update_part(part, forces, curr, n, delta_t);
      Compute_energy(curr, n, &kinetic_energy, &potential_energy);
   }

   Output_state(t, curr, n);
   printf(" PE = %e, KE = %e, Total Energy = %e\n", potential_energy, kinetic_energy, kinetic_energy + potential_energy);
   sprintf(res_seq, " PE = %e, KE = %e, Total Energy = %e\n", potential_energy, kinetic_energy, kinetic_energy + potential_energy);
   GET_TIME(finish);
   printf("Elapsed time = %e seconds\n", finish - start);

   free(curr);
   free(forces);
} /* sequential_solution */

/* MPI master/worker n-body simulation.  Rank 0 hands out chunks of particle
 * indices on demand; workers accumulate partial force arrays which are
 * summed on rank 0 with MPI_Reduce before the position/velocity update.
 *
 * Fixes vs the previous version:
 *  - final time is n_steps*delta_t (the loop variable was read one past the
 *    last step, reporting a time one step later than the sequential run);
 *  - the literal 50 is replaced by `chunk` everywhere in the protocol;
 *  - termination is driven by counting active workers, which removes the
 *    deadlock when n <= chunk (no work was ever assigned, yet the master
 *    waited for n particles to be reported);
 *  - a worker's chunk is clamped to n, removing an out-of-bounds read of
 *    curr[] when chunk does not divide n;
 *  - with size == 1 (no workers) rank 0 computes the forces itself instead
 *    of hanging. */
void parallel_solution(int argc, char *argv[])
{
   int n;                   /* Number of particles   */
   int n_steps;             /* Number of timesteps   */
   int step;                /* Current step          */
   int part;                /* Current particle      */
   int output_freq;         /* Frequency of output   */
   double delta_t;          /* Size of timestep      */
   double t;                /* Current Time          */
   struct particle_s *curr; /* Current state of system */
   vect_t *forces;          /* Forces on each particle */
   char g_i;                /* _G_en or _i_nput init conds */
   double kinetic_energy, potential_energy;
   double start, finish;    /* For timings (rank 0 only) */

   if (rank == 0)
   {
      Get_args(argc, argv, &n, &n_steps, &delta_t, &output_freq, &g_i);
      curr = malloc(n * sizeof(struct particle_s));
      forces = malloc(n * sizeof(vect_t));
      if (g_i == 'i')
         Get_init_cond(curr, n);
      else
         Gen_init_cond(curr, n);

      GET_TIME(start);
      Compute_energy(curr, n, &kinetic_energy, &potential_energy);
      printf(" PE = %e, KE = %e, Total Energy = %e\n", potential_energy, kinetic_energy, kinetic_energy + potential_energy);
      Output_state(0, curr, n);
   }

   /* Every rank needs the problem dimensions and the timestep. */
   MPI_Bcast(&n_steps, 1, MPI_INT, 0, MPI_COMM_WORLD);
   MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
   MPI_Bcast(&delta_t, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);

   if (rank != 0)
   {
      curr = malloc(n * sizeof(struct particle_s));
      forces = malloc(n * sizeof(vect_t));
   }
   vect_t *forces_reduced = malloc(n * sizeof(vect_t));

   int chunk = 50;    /* particles per work unit */
   int end_flag = -1; /* start index meaning "no more work" */

   for (step = 1; step <= n_steps; step++)
   {
      memset(forces, 0, n * sizeof(vect_t));
      MPI_Bcast(curr, n, particle_s_type, 0, MPI_COMM_WORLD);

      if (rank == 0)
      {
         int next_start = 0;
         int active_workers = 0;

         /* Seed every worker with one chunk (or an end flag if there is
          * less work than workers). */
         for (int w = 1; w < size; w++)
         {
            if (next_start < n)
            {
               MPI_Send(&next_start, 1, MPI_INT, w, TAG_START_IND, MPI_COMM_WORLD);
               next_start += chunk;
               active_workers++;
            }
            else
            {
               MPI_Send(&end_flag, 1, MPI_INT, w, TAG_START_IND, MPI_COMM_WORLD);
            }
         }

         /* Hand out remaining chunks on demand until every active worker
          * has been sent its end flag. */
         while (active_workers > 0)
         {
            int done_start;
            MPI_Status status;
            MPI_Recv(&done_start, 1, MPI_INT, MPI_ANY_SOURCE, TAG_WORK_FINISH,
                     MPI_COMM_WORLD, &status);
            if (next_start < n)
            {
               MPI_Send(&next_start, 1, MPI_INT, status.MPI_SOURCE, TAG_START_IND, MPI_COMM_WORLD);
               next_start += chunk;
            }
            else
            {
               MPI_Send(&end_flag, 1, MPI_INT, status.MPI_SOURCE, TAG_START_IND, MPI_COMM_WORLD);
               active_workers--;
            }
         }

         /* No workers at all: compute the forces locally. */
         if (size == 1)
            for (part = 0; part < n - 1; part++)
               Compute_force(part, forces, curr, n);
      }
      else
      {
         int current_start;
         MPI_Recv(&current_start, 1, MPI_INT, 0, TAG_START_IND, MPI_COMM_WORLD,
                  MPI_STATUS_IGNORE);
         while (current_start != end_flag)
         {
            int current_end = current_start + chunk;
            if (current_end > n) /* clamp the last (partial) chunk */
               current_end = n;
            for (part = current_start; part < current_end; part++)
               Compute_force(part, forces, curr, n);
            MPI_Send(&current_start, 1, MPI_INT, 0, TAG_WORK_FINISH, MPI_COMM_WORLD);
            MPI_Recv(&current_start, 1, MPI_INT, 0, TAG_START_IND, MPI_COMM_WORLD,
                     MPI_STATUS_IGNORE);
         }
      }

      /* Sum the partial force arrays (n vect_t == 2n doubles) onto rank 0. */
      MPI_Reduce(forces, forces_reduced, n * 2, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

      if (rank == 0)
         for (part = 0; part < n; part++)
            Update_part(part, forces_reduced, curr, n, delta_t);
   }

   if (rank == 0)
   {
      t = n_steps * delta_t; /* time after the last completed step */
      Compute_energy(curr, n, &kinetic_energy, &potential_energy);
      Output_state(t, curr, n);
      printf(" PE = %e, KE = %e, Total Energy = %e\n", potential_energy, kinetic_energy, kinetic_energy + potential_energy);
      sprintf(res_par, " PE = %e, KE = %e, Total Energy = %e\n", potential_energy, kinetic_energy, kinetic_energy + potential_energy);
      GET_TIME(finish);
      printf("Elapsed time = %e seconds\n", finish - start);
   }

   free(curr);
   free(forces);
   free(forces_reduced);
} /* parallel_solution */

/* Returns nonzero iff the sequential and parallel energy summaries match. */
int compare_results(void)
{
   return !strcmp(res_seq, res_par);
}

int main(int argc, char *argv[])
{
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &size);
   MPI_Comm_rank(MPI_COMM_WORLD, &rank);

   /* Define derived types for communication: both structs are laid out as a
    * contiguous run of doubles. */
   MPI_Type_contiguous(sizeof(vect_t) / sizeof(double), MPI_DOUBLE, &vect_t_type);
   MPI_Type_commit(&vect_t_type);
   MPI_Type_contiguous(sizeof(struct particle_s) / sizeof(double), MPI_DOUBLE, &particle_s_type);
   MPI_Type_commit(&particle_s_type);

   double start_time_seq, end_time_seq, start_time_parallel, end_time_parallel;
   if (rank == 0)
   {
      printf("---------------------Sequential execution---------------------\n");
      start_time_seq = MPI_Wtime();
      sequential_solution(argc, argv);
      end_time_seq = MPI_Wtime();
      printf("----------------------Parallel execution----------------------\n");
      start_time_parallel = MPI_Wtime();
   }
   parallel_solution(argc, argv);
   if (rank == 0)
   {
      end_time_parallel = MPI_Wtime();
      printf("\nSequential elapsed time: %lfs\n", end_time_seq - start_time_seq);
      printf("Parallel elapsed time: %lfs\n", end_time_parallel - start_time_parallel);
      if (compare_results())
         printf("Test PASSED\n");
      else
         printf("Test FAILED\n");
   }

   MPI_Type_free(&vect_t_type);
   MPI_Type_free(&particle_s_type);
   MPI_Finalize();
   return 0;
} /* main */

/* Print a usage reminder and exit. */
void Usage(char *prog_name)
{
   fprintf(stderr, "usage: %s <number of particles> <number of timesteps>\n", prog_name);
   fprintf(stderr, " <size of timestep> <output frequency>\n");
   fprintf(stderr, " <g|i>\n");
   fprintf(stderr, " 'g': program should generate init conds\n");
   fprintf(stderr, " 'i': program should get init conds from stdin\n");
   exit(0);
} /* Usage */

/* Parse and validate the five command-line arguments. */
void Get_args(int argc, char *argv[], int *n_p, int *n_steps_p,
              double *delta_t_p, int *output_freq_p, char *g_i_p)
{
   if (argc != 6)
      Usage(argv[0]);
   *n_p = strtol(argv[1], NULL, 10);
   *n_steps_p = strtol(argv[2], NULL, 10);
   *delta_t_p = strtod(argv[3], NULL);
   *output_freq_p = strtol(argv[4], NULL, 10);
   *g_i_p = argv[5][0];

   if (*n_p <= 0 || *n_steps_p < 0 || *delta_t_p <= 0)
      Usage(argv[0]);
   if (*g_i_p != 'g' && *g_i_p != 'i')
      Usage(argv[0]);
} /* Get_args */

/* Read mass, position and velocity of every particle from stdin. */
void Get_init_cond(struct particle_s curr[], int n)
{
   int part;

   printf("For each particle, enter (in order):\n");
   printf(" its mass, its x-coord, its y-coord, ");
   printf("its x-velocity, its y-velocity\n");
   for (part = 0; part < n; part++)
   {
      scanf("%lf", &curr[part].m);
      scanf("%lf", &curr[part].s[X]);
      scanf("%lf", &curr[part].s[Y]);
      scanf("%lf", &curr[part].v[X]);
      scanf("%lf", &curr[part].v[Y]);
   }
} /* Get_init_cond */

/* Generate deterministic initial conditions: equal masses on the x-axis
 * with alternating vertical velocities. */
void Gen_init_cond(struct particle_s curr[], int n)
{
   int part;
   double mass = 5.0e24;
   double gap = 1.0e5;
   double speed = 3.0e4;

   srandom(1);
   for (part = 0; part < n; part++)
   {
      curr[part].m = mass;
      curr[part].s[X] = part * gap;
      curr[part].s[Y] = 0.0;
      curr[part].v[X] = 0.0;
      if (part % 2 == 0)
         curr[part].v[Y] = speed;
      else
         curr[part].v[Y] = -speed;
   }
} /* Gen_init_cond */

/* Print the current time followed by each particle's state. */
void Output_state(double time, struct particle_s curr[], int n)
{
   int part;
   printf("%.2f\n", time);
   for (part = 0; part < n; part++)
   {
      printf("%3d %10.3e ", part, curr[part].s[X]);
      printf(" %10.3e ", curr[part].s[Y]);
      printf(" %10.3e ", curr[part].v[X]);
      printf(" %10.3e\n", curr[part].v[Y]);
   }
   printf("\n");
} /* Output_state */

/* Accumulate the gravitational forces between `part` and every particle
 * k > part into forces[part] and (with opposite sign) forces[k]. */
void Compute_force(int part, vect_t forces[], struct particle_s curr[], int n)
{
   int k;
   double mg;
   vect_t f_part_k;
   double len, len_3, fact;

   for (k = part + 1; k < n; k++)
   {
      f_part_k[X] = curr[part].s[X] - curr[k].s[X];
      f_part_k[Y] = curr[part].s[Y] - curr[k].s[Y];
      len = sqrt(f_part_k[X] * f_part_k[X] + f_part_k[Y] * f_part_k[Y]);
      len_3 = len * len * len;
      mg = -G * curr[part].m * curr[k].m;
      fact = mg / len_3;
      f_part_k[X] *= fact;
      f_part_k[Y] *= fact;

      /* Newton's third law: equal and opposite contribution for k. */
      forces[part][X] += f_part_k[X];
      forces[part][Y] += f_part_k[Y];
      forces[k][X] -= f_part_k[X];
      forces[k][Y] -= f_part_k[Y];
   }
} /* Compute_force */

/* Euler update of position then velocity for one particle. */
void Update_part(int part, vect_t forces[], struct particle_s curr[], int n,
                 double delta_t)
{
   double fact = delta_t / curr[part].m;

   curr[part].s[X] += delta_t * curr[part].v[X];
   curr[part].s[Y] += delta_t * curr[part].v[Y];
   curr[part].v[X] += fact * forces[part][X];
   curr[part].v[Y] += fact * forces[part][Y];
} /* Update_part */

/* Compute total kinetic and potential energy of the system. */
void Compute_energy(struct particle_s curr[], int n, double *kin_en_p,
                    double *pot_en_p)
{
   int i, j;
   vect_t diff;
   double pe = 0.0, ke = 0.0;
   double dist, speed_sqr;

   for (i = 0; i < n; i++)
   {
      speed_sqr = curr[i].v[X] * curr[i].v[X] + curr[i].v[Y] * curr[i].v[Y];
      ke += curr[i].m * speed_sqr;
   }
   ke *= 0.5;

   for (i = 0; i < n - 1; i++)
   {
      for (j = i + 1; j < n; j++)
      {
         diff[X] = curr[i].s[X] - curr[j].s[X];
         diff[Y] = curr[i].s[Y] - curr[j].s[Y];
         dist = sqrt(diff[X] * diff[X] + diff[Y] * diff[Y]);
         pe += -G * curr[i].m * curr[j].m / dist;
      }
   }

   *kin_en_p = ke;
   *pot_en_p = pe;
} /* Compute_energy */
vmul.c
#include <stdio.h>
#include "assert.h"
#include <unistd.h>

/* Element-wise product c[idx] = a[idx] * b[idx] for idx in [0, N).
 * The input vectors are mapped to the target device, the result mapped
 * back, and the loop is distributed across teams of threads. */
void vmul(int*a, int*b, int*c, int N){
   #pragma omp target map(to: a[0:N],b[0:N]) map(from:c[0:N])
   #pragma omp teams distribute parallel for
   for(int idx = 0; idx < N; idx++) {
      c[idx] = a[idx] * b[idx];
   }
}

int main(){
   const int N = 100000;
   int a[N], b[N], c[N], validate[N];
   int first_bad = -1;                 /* -1 while every element matches */

   /* Fill the operands and precompute the expected products on the host. */
   for(int i = 0; i < N; i++) {
      a[i] = i + 1;
      b[i] = i + 2;
      validate[i] = a[i] * b[i];
   }

   vmul(a, b, c, N);

   /* Compare against the host reference; report the first mismatch as it
    * is found and remember the last one. */
   for(int i = 0; i < N; i++) {
      if(c[i] != validate[i]) {
         if( first_bad == -1 )
            printf("First fail: c[%d](%d) != validate[%d](%d)\n",i,c[i],i,validate[i]);
         first_bad = i;
      }
   }

   if( first_bad == -1 ){
      printf("Success\n");
      return 0;
   }

   printf("Last fail: c[%d](%d) != validate[%d](%d)\n",
          first_bad, c[first_bad], first_bad, validate[first_bad]);
   printf("Fail\n");
   return 1;
}
jacobi-ompacc-opt1.c
// Using target data to promote data allocation to higher level, enabling reusing in iterations
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif

// Add timing support
#include <sys/time.h>

// Wall-clock time in seconds with microsecond resolution.
double time_stamp()
{
  struct timeval t;
  double time;
  gettimeofday(&t,(struct timezone*)NULL);
  time = t.tv_sec + 1.0e-6*t.tv_usec;
  return time;
}

double time1, time2;

void driver(void);
void initialize(void);
void jacobi(void);
void error_check(void);

/************************************************************
 * program to solve a finite difference
 * discretization of Helmholtz equation :
 * (d2/dx2)u + (d2/dy2)u - alpha u = f
 * using Jacobi iterative method.
 *
 * Modified: Sanjiv Shah,       Kuck and Associates, Inc. (KAI), 1998
 * Author:   Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
 *
 * This c version program is translated by
 * Chunhua Liao, University of Houston, Jan, 2005
 *
 * Directives are used in this code to achieve parallelism.
 * All do loops are parallelized with default 'static' scheduling.
 *
 * Input :  n - grid dimension in x direction
 *          m - grid dimension in y direction
 *          alpha - Helmholtz constant (always greater than 0.0)
 *          tol   - error tolerance for iterative solver
 *          relax - Successive over relaxation parameter
 *          mits  - Maximum iterations for iterative solver
 *
 * On output
 *       : u(n,m) - Dependent variable (solutions)
 *       : f(n,m) - Right hand side function
 *************************************************************/

#define MSIZE 512
int n,m,mits;
#define REAL float // flexible between float and double
// Reference values for the convergence checks below.
// NOTE: they depend on MSIZE — changing MSIZE invalidates the asserts.
REAL error_ref= 9.212767E-04, resid_ref = 2.355429E-08; // depending on MSIZE!!
REAL tol,relax=1.0,alpha=0.0543;
REAL u[MSIZE][MSIZE],f[MSIZE][MSIZE],uold[MSIZE][MSIZE];
REAL dx,dy;

// value, reference value, and the number of significant digits to be ensured.
// Asserts that `val` agrees with `ref` to the requested number of
// significant digits and returns the relative difference.
double diff_ratio (double val, double ref, int significant_digits)
{
  assert (significant_digits>=1);

  double diff_ratio = fabs(val - ref )/fabs(ref);
  // e.g. significant_digits==3 -> upper_limit == 1e-3
  double upper_limit = pow (0.1, significant_digits); // 1.0/(double(10^significant_digits)) ;
  printf("value :%E ref_value: %E diff_ratio: %E upper_limit: %E \n",val, ref, diff_ratio, upper_limit);
  // ensure the number of the significant digits to be the same
  assert ( diff_ratio < upper_limit);
  return diff_ratio;
}

int main (void)
{
  // float toler;
  /*
      printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
      scanf ("%d",&n);
      scanf ("%d",&m);
      printf("Input tol - error tolerance for iterative solver\n");
      scanf("%f",&toler);
      tol=(double)toler;
      printf("Input mits - Maximum iterations for solver\n");
      scanf("%d",&mits);
   */
  // Hard-coded problem setup (interactive input kept above for reference).
  n=MSIZE;
  m=MSIZE;
  tol=0.0000000001;
  mits=5000;
#if 0 // Not yet support concurrent CPU and GPU threads
#ifdef _OPENMP
#pragma omp parallel
  {
#pragma omp single
    printf("Running using %d threads...\n",omp_get_num_threads());
  }
#endif
#endif
  driver ( ) ;
  return 0;
}

/*************************************************************
 * Subroutine driver ()
 * This is where the arrays are allocated and initialized.
 *
 * Working variables/arrays
 *     dx  - grid spacing in x direction
 *     dy  - grid spacing in y direction
 *************************************************************/
void driver( )
{
  initialize();

  time1 = time_stamp();
  /* Solve Helmholtz equation */
  jacobi ();
  time2 = time_stamp();

  printf("------------------------\n");
  printf("Execution time = %f\n",time2-time1);
  /* error_check (n,m,alpha,dx,dy,u,f)*/
  error_check ( );
}

/*      subroutine initialize (n,m,alpha,dx,dy,u,f)
 ******************************************************
 * Initializes data
 * Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
 *
 ******************************************************/
void initialize( )
{
  int i,j, xx,yy;
  //double PI=3.1415926;

  dx = 2.0 / (n-1);
  dy = 2.0 / (m-1);

  /* Initialize initial condition and RHS */
  // NOTE(review): xx/yy are truncated to int, so they only take values in
  // {-1, 0}; the reference error/residual constants above were produced
  // with exactly this initialization, so it must not be "fixed" casually.
  //#pragma omp parallel for private(xx,yy,j,i)
  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
    {
      xx =(int)( -1.0 + dx * (i-1));
      yy = (int)(-1.0 + dy * (j-1)) ;
      u[i][j] = 0.0;
      f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\
        - 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
    }
}

/*      subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
 ******************************************************************
 * Subroutine HelmholtzJ
 * Solves poisson equation on rectangular grid assuming :
 * (1) Uniform discretization in each direction, and
 * (2) Dirichlect boundary conditions
 *
 * Jacobi method is used in this routine
 *
 * Input : n,m   Number of grid points in the X/Y directions
 *         dx,dy Grid spacing in the X/Y directions
 *         alpha Helmholtz eqn. coefficient
 *         omega Relaxation factor
 *         f(n,m) Right hand side function
 *         u(n,m) Dependent variable/Solution
 *         tol    Tolerance for iterative solver
 *         maxit  Maximum number of iterations
 *
 * Output : u(n,m) - Solution
 *****************************************************************/
void jacobi( )
{
  REAL omega;
  int i,j,k;
  REAL error,resid,ax,ay,b;
  //      double  error_local;

  //      float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
  //      float te1,te2;
  //      float second;

  omega=relax;
  /*
   * Initialize coefficients */

  ax = 1.0/(dx*dx); /* X-direction coef */
  ay = 1.0/(dy*dy); /* Y-direction coef */
  b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */

  error = 10.0 * tol;
  k = 1;

  // An optimization on top of naive coding: promoting data handling outside the while loop
  // data properties may change since the scope is bigger:
  // NOTE(review): the inner `target` constructs repeat map clauses for data
  // already present in this `target data` region — presumably relying on the
  // present-table lookup so no extra transfers occur; verify with a profiler.
#pragma omp target data map(to:n, m, omega, ax, ay, b, f[0:n][0:m]) map(tofrom:u[0:n][0:m]) map(alloc:uold[0:n][0:m])
  while ((k<=mits)&&(error>tol))
  {
    error = 0.0;

    /* Copy new solution into old */
    //#pragma omp parallel
    //    {
#pragma omp target map(to:n, m, u[0:n][0:m]) map(from:uold[0:n][0:m])
#pragma omp parallel for private(j,i)
    for(i=0;i<n;i++)
      for(j=0;j<m;j++)
        uold[i][j] = u[i][j];

    // Jacobi sweep over interior points; per-point residuals are
    // accumulated into `error` via the reduction clause.
#pragma omp target map(to:n, m, omega, ax, ay, b, f[0:n][0:m], uold[0:n][0:m]) map(from:u[0:n][0:m])
#pragma omp parallel for private(resid,j,i) reduction(+:error) // nowait
    for (i=1;i<(n-1);i++)
      for (j=1;j<(m-1);j++)
      {
        resid = (ax*(uold[i-1][j] + uold[i+1][j])\
            + ay*(uold[i][j-1] + uold[i][j+1])+ b * uold[i][j] - f[i][j])/b;

        u[i][j] = uold[i][j] - omega * resid;
        error = error + resid*resid ;
      }

    //    }
    /*  omp end parallel */

    /* Error check */
    if (k%500==0)
      printf("Finished %d iteration with error =%f\n",k, error);
    error = sqrt(error)/(n*m);

    k = k + 1;
  } /*  End iteration loop */

  printf("Total Number of Iterations:%d\n",k);
  printf("Residual:%E\n", error);
  printf("Residual_ref :%E\n", resid_ref);
  printf ("Diff ref=%E\n", fabs(error-resid_ref));
  assert (fabs(error-resid_ref) < 1E-13);
}

/*      subroutine error_check (n,m,alpha,dx,dy,u,f)
        implicit none
 ************************************************************
 * Checks error between numerical and exact solution
 *
 ************************************************************/
void error_check ( )
{
  int i,j;
  REAL xx,yy,temp,error;

  dx = 2.0 / (n-1);
  dy = 2.0 / (m-1);
  error = 0.0 ;

  //#pragma omp parallel for private(xx,yy,temp,j,i) reduction(+:error)
  for (i=0;i<n;i++)
    for (j=0;j<m;j++)
    {
      xx = -1.0 + dx * (i-1);
      yy = -1.0 + dy * (j-1);
      temp = u[i][j] - (1.0-xx*xx)*(1.0-yy*yy);
      error = error + temp*temp;
    }

  error = sqrt(error)/(n*m);
  printf("Solution Error :%E \n",error);
  printf("Solution Error Ref :%E \n",error_ref);
  printf ("Diff ref=%E\n", fabs(error-error_ref));
  assert (fabs(error-error_ref) < 1E-13);
}
omp-matmat-three-parallel.c
/***************************************************************************** Example : omp-matmat-three-parallel.c Objective : Matrix - Matrix Multiplication using OpenMP three PARALLEL for directive and Private Clause Input : Size of Matrices(i.e Size of Matrix A and Matrix B) ie in terms of CLASS where CLASS A :1024; CLASS B: 2048 and CLASS C: 4096 Number of Threads . Output : Number of Threads Total Memory Utilized for the Matrix - Matrix Computation Total Time Taken for Matrix - Matrix Computaion. Created :Aug 2011 . Author : RarchK *********************************************************************************/ #include <stdio.h> #include <sys/time.h> #include <omp.h> #include <stdlib.h> /* Function declaration */ double Matrix_Multiplication_Three(double **Matrix_A,double **Matrix_B,double **Result,int N_size,int Total_threads); /* Main Program */ main(int argc , char * argv[]) { int CLASS_SIZE,N_size, i,j,k,Total_threads,THREADS; double Total_overhead = 0.0; double **Matrix_A, **Matrix_B, **Result; double memoryused=0.0; int iteration; FILE *fp; char * CLASS; printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Email : RarchK"); printf("\n\t\t---------------------------------------------------------------------------"); printf("\n\t\t Objective : Dense Matrix Computations (Floating Point Operations)\n "); printf("\n\t\t Matrix into Matrix Multiplication using "); printf("\n\t\t OpenMP three PARALLEL for directive and Private Clause;"); printf("\n\t\t..........................................................................\n"); /* Checking for the command line arguments */ if( argc != 3 ){ printf("\t\t Very Few Arguments\n "); printf("\t\t Syntax : exec <Class-Size> <Threads>\n"); printf("\t\t Where : Class-Size must be A or B or C \n"); exit(-1); } else { CLASS = argv[1]; THREADS = atoi(argv[2]); } if( strcmp(CLASS, "A" )==0){ CLASS_SIZE = 1024; } else if( strcmp(CLASS, "B" )==0){ CLASS_SIZE = 2048; 
} else if( strcmp(CLASS, "C" )==0){ CLASS_SIZE = 4096; } else { printf("\n\t\t Class-Size must be A or B or C \n"); exit(-1); } N_size = CLASS_SIZE; Total_threads = THREADS; printf("\n\t\t Matrix Size : %d",N_size); printf("\n\t\t Threads : %d",Total_threads); printf("\n"); /* Matrix_A Elements */ Matrix_A = (double **) malloc(sizeof(double *) * N_size); for (i = 0; i < N_size; i++) { Matrix_A[i] = (double *) malloc(sizeof(double) * N_size); for (j = 0; j < N_size; j++) { // srand48((unsigned int)N_size); // Matrix_A[i][j] = (double)(rand()%10); Matrix_A[i][j] = i+j; } } /* Matrix_B Elements */ Matrix_B = (double **) malloc(sizeof(double *) * N_size); for (i = 0; i < N_size; i++) { Matrix_B[i] = (double *) malloc(sizeof(double) * N_size); for (j = 0; j < N_size; j++) { // srand48((unsigned int)N_size); // Matrix_B[i][j] = (double)(rand()%10); Matrix_B[i][j] = i+j; } } /* Dynamic Memory Allocation */ Result = (double **) malloc(sizeof(double *) * N_size); for (i = 0; i < N_size; i++) Result[i] = (double *) malloc(sizeof(double) * N_size); memoryused = (3*(N_size*N_size))*sizeof(double); /* Function Calling */ Total_overhead = Matrix_Multiplication_Three(Matrix_A,Matrix_B,Result,N_size,Total_threads); printf("\n\t\t Memory Utilized : %lf MB \n",(memoryused/(1024*1024))); printf("\n\t\t Time in Seconds (T) : %lf Seconds \n",Total_overhead); printf("\n\t\t ( T represents the Time taken for the computation )"); printf("\n\t\t..........................................................................\n"); /* Free Memory */ free(Matrix_A); free(Matrix_B); free(Result); }/* Main function end */ /* Functions implementation */ double Matrix_Multiplication_Three(double **Matrix_A,double **Matrix_B,double **Result,int N_size,int Total_threads) { int i,j,k; struct timeval TimeValue_Start; struct timezone TimeZone_Start; struct timeval TimeValue_Final; struct timezone TimeZone_Final; long time_start, time_end; double time_overhead; gettimeofday(&TimeValue_Start, &TimeZone_Start); 
/* set the no. of threads */ omp_set_num_threads(Total_threads); /* OpenMP Three For Directive :Fork a team of threads giving them their own copies of variables * Spawn a parallel region explicitly scoping all variables */ #pragma omp parallel for private (j,k) shared (Matrix_A,Matrix_B,Result,N_size) num_threads(Total_threads) for (i = 0; i < N_size; i = i + 1){ #pragma omp parallel for private(k) shared (Matrix_A,Matrix_B,Result,N_size) num_threads(Total_threads) for (j = 0; j < N_size; j = j + 1){ Result[i][j]=0.0; #pragma omp parallel for private(k) shared (Matrix_A,Matrix_B,Result,N_size) num_threads(Total_threads) for (k = 0; k < N_size; k = k + 1) Result[i][j] = Result[i][j] + Matrix_A[i][k] * Matrix_B[k][j]; } }/* end of parallel section */ gettimeofday(&TimeValue_Final, &TimeZone_Final); /* calculate the timing for the computation */ time_start = TimeValue_Start.tv_sec * 1000000 + TimeValue_Start.tv_usec; time_end = TimeValue_Final.tv_sec * 1000000 + TimeValue_Final.tv_usec; time_overhead = (time_end - time_start)/1000000.0; printf("\n\t\t Matrix into Matrix Multiplication using three Parallel for pragma......Done \n"); return time_overhead; }
grid_astar.h
/* * Copyright (c) 2014-2017, the neonavigation authors * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef PLANNER_CSPACE_GRID_ASTAR_H #define PLANNER_CSPACE_GRID_ASTAR_H #include <memory> #define _USE_MATH_DEFINES #include <cmath> #include <cfloat> #include <list> #include <map> #include <unordered_map> #include <vector> #include <boost/chrono.hpp> #include <planner_cspace/reservable_priority_queue.h> #include <planner_cspace/cyclic_vec.h> #include <planner_cspace/blockmem_gridmap.h> #include <omp.h> template <int DIM = 3, int NONCYCLIC = 2> class GridAstar { public: using Vec = CyclicVecInt<DIM, NONCYCLIC>; using Vecf = CyclicVecFloat<DIM, NONCYCLIC>; template <class T, int block_width = 0x20> class Gridmap : public BlockMemGridmap<T, DIM, NONCYCLIC, block_width> { using BlockMemGridmap<T, DIM, NONCYCLIC, block_width>::BlockMemGridmap; }; class PriorityVec { public: float p_; float p_raw_; Vec v_; PriorityVec() { p_ = 0; } PriorityVec(const float& p, const float& p_raw, const Vec& v) { p_ = p; p_raw_ = p_raw; v_ = v; } bool operator<(const PriorityVec& b) const { // smaller first return p_ > b.p_; } }; protected: Gridmap<float> g_; std::unordered_map<Vec, Vec, Vec> parents_; reservable_priority_queue<PriorityVec> open_; size_t queue_size_limit_; size_t search_task_num_; public: constexpr int getDim() const { return DIM; } constexpr int getNoncyclic() const { return NONCYCLIC; } void setSearchTaskNum(const size_t& search_task_num) { search_task_num_ = search_task_num; } void reset(const Vec size) { g_.reset(size); g_.clear(FLT_MAX); parents_.reserve(g_.ser_size() / 16); open_.reserve(g_.ser_size() / 16); } GridAstar() : queue_size_limit_(0) , search_task_num_(1) { } explicit GridAstar(const Vec size) { reset(size); queue_size_limit_ = 0; } void setQueueSizeLimit(const size_t size) { queue_size_limit_ = size; } bool search( const Vec& s, const Vec& e, std::list<Vec>& path, std::function<float(const Vec&, Vec&, const Vec&, const Vec&)> cb_cost, std::function<float(const Vec&, const Vec&)> cb_cost_estim, std::function<std::vector<Vec>&(const Vec&, const Vec&, 
const Vec&)> cb_search, std::function<bool(const std::list<Vec>&)> cb_progress, const float cost_leave, const float progress_interval, const bool return_best = false) { return searchImpl(g_, s, e, path, cb_cost, cb_cost_estim, cb_search, cb_progress, cost_leave, progress_interval, return_best); } bool searchImpl( Gridmap<float>& g, const Vec& st, const Vec& en, std::list<Vec>& path, std::function<float(const Vec&, Vec&, const Vec&, const Vec&)> cb_cost, std::function<float(const Vec&, const Vec&)> cb_cost_estim, std::function<std::vector<Vec>&(const Vec&, const Vec&, const Vec&)> cb_search, std::function<bool(const std::list<Vec>&)> cb_progress, const float cost_leave, const float progress_interval, const bool return_best = false) { if (st == en) { return false; } Vec s = st; Vec e = en; for (int i = NONCYCLIC; i < DIM; i++) { s.cycleUnsigned(s[i], g.size()[i]); e.cycleUnsigned(e[i], g.size()[i]); } g.clear(FLT_MAX); open_.clear(); parents_.clear(); g[s] = 0; open_.push(PriorityVec(cb_cost_estim(s, e), 0, s)); auto ts = boost::chrono::high_resolution_clock::now(); Vec better = s; int cost_estim_min = cb_cost_estim(s, e); while (true) { // Fetch tasks to be paralellized if (open_.size() < 1) { // No fesible path if (return_best) { findPath(s, better, path); } return false; } bool found(false); std::vector<PriorityVec> centers; for (size_t i = 0; i < search_task_num_; ++i) { if (open_.size() == 0) break; PriorityVec center = open_.top(); open_.pop(); if (center.v_ == e || center.p_ - center.p_raw_ <= cost_leave) { e = center.v_; found = true; break; } centers.push_back(center); } if (found) break; auto tnow = boost::chrono::high_resolution_clock::now(); if (boost::chrono::duration<float>(tnow - ts).count() >= progress_interval) { std::list<Vec> path_tmp; ts = tnow; findPath(s, better, path_tmp); cb_progress(path_tmp); } #pragma omp parallel for schedule(static) for (auto it = centers.begin(); it < centers.end(); ++it) { const Vec p = it->v_; const float c = 
it->p_raw_; const float c_estim = it->p_; float& gp = g[p]; if (c > gp) continue; if (c_estim - c < cost_estim_min) { cost_estim_min = c_estim - c; better = p; } const std::vector<Vec> search_list = cb_search(p, s, e); int updates = 0; for (auto it = search_list.begin(); it < search_list.end(); ++it) { while (1) { Vec next = p + *it; for (int i = NONCYCLIC; i < DIM; i++) { next.cycleUnsigned(next[i], g.size()[i]); } if ((unsigned int)next[0] >= (unsigned int)g.size()[0] || (unsigned int)next[1] >= (unsigned int)g.size()[1]) break; if (g[next] < 0) break; const float cost_estim = cb_cost_estim(next, e); if (cost_estim < 0 || cost_estim == FLT_MAX) break; const float cost = cb_cost(p, next, s, e); if (cost < 0 || cost == FLT_MAX) break; float& gnext = g[next]; if (gnext > c + cost) { gnext = c + cost; #pragma omp critical { parents_[next] = p; open_.push(PriorityVec(c + cost + cost_estim, c + cost, next)); if (queue_size_limit_ > 0 && open_.size() > queue_size_limit_) open_.pop_back(); } updates++; } break; } } if (updates == 0) { gp = -1; } } // printf("(parents %d)\n", (int)parents_.size()); } // printf("AStar search finished (parents %d)\n", (int)parents_.size()); return findPath(s, e, path); } bool findPath(const Vec& s, const Vec& e, std::list<Vec>& path) { Vec n = e; while (true) { path.push_front(n); // printf("p- %d %d %d %0.4f\n", n[0], n[1], n[2], g_[n]); if (n == s) break; if (parents_.find(n) == parents_.end()) { n = parents_[n]; // printf("px %d %d %d\n", n[0], n[1], n[2]); return false; } n = parents_[n]; } return true; } }; #endif // PLANNER_CSPACE_GRID_ASTAR_H
sections_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized

// Diagnostic (-verify) test for '#pragma omp sections': checks the parser and
// semantic-analysis messages via the expected-* annotations below.  An
// expected-*@+N annotation refers to the line N lines below the annotation,
// so the relative layout of annotation comments and directives is significant.

void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp sections
  {
    argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
  }
}

void foo();

// A 'sections' directive is not allowed at file scope, with or without
// extra tokens after the directive name.
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp sections'}}
#pragma omp sections

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp sections'}}
#pragma omp sections foo

// The associated statement must be a compound statement, every statement in
// it must be inside a section region, and no branch may leave the region.
void test_no_clause() {
  int i;
#pragma omp sections
  {
    foo();
  }

// expected-error@+2 {{the statement for '#pragma omp sections' must be a compound statement}}
#pragma omp sections
  ++i;
#pragma omp sections
  {
    foo();
    foo(); // expected-error {{statement in 'omp sections' directive must be enclosed into a section region}}
  }
#pragma omp parallel
#pragma omp sections
  {
    {
      if (i == 6)
        return; // expected-error {{cannot return from OpenMP region}}
    }
#pragma omp section
    {
      if (i == 6)
        return; // expected-error {{cannot return from OpenMP region}}
    }
  }
}

// Labels outside the sections region are not visible from inside it (and
// vice versa): a goto may not cross the region boundary.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp parallel
#pragma omp sections
  {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
#pragma omp section
    if (i == 5)
      goto L1;
    else if (i == 7)
      goto L3;
    else if (i == 8) {
    L3:
      x[i]++;
    }
  }

#pragma omp parallel
#pragma omp sections
  {
#pragma omp section
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 7)
      goto L3;
    else if (i == 8) {
    L3:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
  goto L3; // expected-error {{use of undeclared label 'L3'}}
}

// Unknown or misplaced clauses on 'sections' and 'section'.
void test_invalid_clause() {
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp sections' are ignored}}
#pragma omp sections foo bar
  {
    foo();
// expected-error@+1 {{unexpected OpenMP clause 'nowait' in directive '#pragma omp section'}}
#pragma omp section nowait
    ;
  }
}

// Junk after the directive name that is not a valid clause list.
void test_non_identifiers() {
  int i, x;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp sections' are ignored}}
#pragma omp sections;
  {
    foo();
  }
#pragma omp parallel
// expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp sections'}}
// expected-warning@+1 {{extra tokens at the end of '#pragma omp sections' are ignored}}
#pragma omp sections linear(x);
  {
    foo();
  }

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp sections' are ignored}}
#pragma omp sections private(x);
  {
    foo();
  }

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp sections' are ignored}}
#pragma omp sections, private(x);
  {
    foo();
  }
}

// Malformed and well-formed 'private' clauses.
void test_private() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp sections private(
  {
    foo();
  }
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp sections private(,
  {
    foo();
  }
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp sections private(, )
  {
    foo();
  }
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp sections private()
  {
    foo();
  }
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp sections private(int)
  {
    foo();
  }
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp sections private(0)
  {
    foo();
  }

  int x, y, z;
#pragma omp parallel
#pragma omp sections private(x)
  {
    foo();
  }
#pragma omp parallel
#pragma omp sections private(x, y)
  {
    foo();
  }
#pragma omp parallel
#pragma omp sections private(x, y, z)
  {
    foo();
  }
}

// Malformed and well-formed 'lastprivate' clauses.
void test_lastprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp sections lastprivate(
  {
    foo();
  }

#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp sections lastprivate(,
  {
    foo();
  }
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp sections lastprivate(, )
  {
    foo();
  }
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp sections lastprivate()
  {
    foo();
  }
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp sections lastprivate(int)
  {
    foo();
  }
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp sections lastprivate(0)
  {
    foo();
  }

  int x, y, z;
#pragma omp parallel
#pragma omp sections lastprivate(x)
  {
    foo();
  }
#pragma omp parallel
#pragma omp sections lastprivate(x, y)
  {
    foo();
  }
#pragma omp parallel
#pragma omp sections lastprivate(x, y, z)
  {
    foo();
  }
}

// Malformed 'firstprivate' clauses, plus combinations with 'lastprivate'.
void test_firstprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp sections firstprivate(
  {
    foo();
  }

#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp sections firstprivate(,
  {
    foo();
  }
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp sections firstprivate(, )
  {
    foo();
  }
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp sections firstprivate()
  {
    foo();
  }
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp sections firstprivate(int)
  {
    foo();
  }
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp sections firstprivate(0)
  {
    foo();
  }

  int x, y, z;
#pragma omp parallel
#pragma omp sections lastprivate(x) firstprivate(x)
  {
    foo();
  }
#pragma omp parallel
#pragma omp sections lastprivate(x, y) firstprivate(x, y)
  {
    foo();
  }
#pragma omp parallel
#pragma omp sections lastprivate(x, y, z) firstprivate(x, y, z)
  {
    foo();
  }
}

// 'nowait' may appear at most once on a 'sections' directive.
void test_nowait() {
#pragma omp parallel
#pragma omp sections nowait nowait // expected-error {{directive '#pragma omp sections' cannot contain more than one 'nowait' clause}}
  {
    ;
  }
}
GB_unop__identity_int32_uint32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): edits here should instead be made to the generator template,
// or they will be lost on regeneration — TODO confirm against the build setup.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__identity_int32_uint32
// op(A') function: GB_unop_tran__identity_int32_uint32

// C type:   int32_t
// A type:   uint32_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: the output equals the input)
#define GB_OP(z, x) \
    z = x ;

// casting (uint32_t input to int32_t output)
#define GB_CAST(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint32_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    int32_t z = (int32_t) aij ;     \
    Cx [pC] = z ;                   \
}

// true if operator is the identity op with no typecasting
// (0 here: uint32_t -> int32_t is a typecast, so memcpy cannot be used)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = (int32_t) aij elementwise over anz entries, in parallel with
// nthreads OpenMP threads.  Returns GrB_NO_VALUE when compiled out via
// GB_DISABLE, GrB_SUCCESS otherwise.

GrB_Info GB_unop_apply__identity_int32_uint32
(
    int32_t *Cx,                    // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // declared outside the loops so it can serve as the OpenMP loop index
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every one of the anz entries is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only entries flagged in Ab are converted.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The fused transpose-apply kernel; the loop body is textually included from
// GB_unop_transpose.c, which uses the GB_* macros defined above.

GrB_Info GB_unop_tran__identity_int32_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
hello.c
#include <stdio.h>

/*
 * Minimal OpenMP demonstration: every thread in the parallel region runs
 * the block, so "Hello" and "world" are each printed once per thread.
 * The interleaving of lines across threads is unspecified.
 */
int main(void)
{
#pragma omp parallel
    {
        printf("Hello\n");
        printf("world\n");
    }

    return 0;
}